Dataset schema (one record per repository):
    repo_name : string (length 6-130)
    hexsha    : list (commit SHAs, one entry per sampled file)
    file_path : list (paths of the sampled files)
    code      : list (full source text of each file)
    apis      : list (API calls detected in each file)
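As a rough illustration of how records with this schema could be consumed, here is a minimal sketch. It assumes the rows below have been exported to a JSON Lines file; the filename `code_apis.jsonl` and the loading approach are assumptions for illustration only, not part of the original dump. Only the field names come from the schema above.

```python
import json

# Minimal sketch: iterate records shaped like the schema above.
# repo_name is a string; hexsha, file_path, code and apis are
# parallel lists with one entry per sampled file.
# The filename "code_apis.jsonl" is hypothetical.
with open("code_apis.jsonl") as fh:
    for line in fh:
        record = json.loads(line)
        repo = record["repo_name"]
        for sha, path, source, apis in zip(
            record["hexsha"], record["file_path"], record["code"], record["apis"]
        ):
            # e.g. report how many API calls were detected in each file
            print(f"{repo} {sha[:8]} {path}: {len(apis)} APIs, {len(source)} chars")
```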
rijobro/CCPi-Framework
[ "ff08216d4e6fef84659b43155c5c52484b1dc543", "ff08216d4e6fef84659b43155c5c52484b1dc543" ]
[ "Wrappers/Python/test/test_TranslateFunction.py", "Wrappers/Python/ccpi/optimisation/functions/Rosenbrock.py" ]
[ "# -*- coding: utf-8 -*-\n# CCP in Tomographic Imaging (CCPi) Core Imaging Library (CIL).\n\n# Copyright 2017 UKRI-STFC\n# Copyright 2017 University of Manchester\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom ccpi.optimisation.functions import Function, L1Norm, ScaledFunction, \\\n LeastSquares, L2NormSquared, \\\n KullbackLeibler, ZeroFunction, ConstantFunction, TranslateFunction\nfrom ccpi.optimisation.operators import Identity \nfrom ccpi.framework import ImageGeometry, BlockGeometry\n\nimport unittest\nimport numpy\nfrom numbers import Number\n\n\n''' Here we test SumFunction class for different function\n\nL2Norm, L1Norm, KullbackLeibler, ZeroFunction, ConstantFunction, Scalar\n\nfor call method\nfor gradient method\n\n\n\n'''\n\n\n\nclass TestFunction(unittest.TestCase):\n \n def assertBlockDataContainerEqual(self, container1, container2):\n print (\"assert Block Data Container Equal\")\n self.assertTrue(issubclass(container1.__class__, container2.__class__))\n for col in range(container1.shape[0]):\n if issubclass(container1.get_item(col).__class__, DataContainer):\n print (\"Checking col \", col)\n self.assertNumpyArrayEqual(\n container1.get_item(col).as_array(), \n container2.get_item(col).as_array()\n )\n else:\n self.assertBlockDataContainerEqual(container1.get_item(col),container2.get_item(col))\n \n def assertNumpyArrayEqual(self, first, second):\n res = True\n try:\n numpy.testing.assert_array_equal(first, second)\n except AssertionError as err:\n res = False\n print(err)\n self.assertTrue(res)\n\n def assertNumpyArrayAlmostEqual(self, first, second, decimal=6):\n res = True\n try:\n numpy.testing.assert_array_almost_equal(first, second, decimal)\n except AssertionError as err:\n res = False\n print(err)\n print(\"expected \" , second)\n print(\"actual \" , first)\n\n self.assertTrue(res)\n \n def test_TranslateFunction(self): \n \n # Test TranslationFunction\n \n ig = ImageGeometry(4,4)\n tmp = ig.allocate('random_int')\n b = ig.allocate('random_int')\n scalar = 0.4\n tau = 0.05\n \n list1 = [ L2NormSquared(), scalar * L2NormSquared(), scalar * L2NormSquared(b=b), \n L1Norm(), scalar * L1Norm(), scalar * L1Norm(b=b)]\n \n list1_shift = [ L2NormSquared().centered_at(ig.allocate()), scalar * L2NormSquared().centered_at(ig.allocate()), scalar * L2NormSquared().centered_at(b),\n L1Norm().centered_at(ig.allocate()), scalar * L1Norm().centered_at(ig.allocate()), scalar * L1Norm().centered_at(b)] \n \n out_gradient1 = ig.allocate()\n out_gradient2 = ig.allocate()\n \n out_proximal1 = ig.allocate()\n out_proximal2 = ig.allocate() \n \n out_proximal_conj1 = ig.allocate()\n out_proximal_conj2 = ig.allocate() \n \n for func, func_shift in zip(list1, list1_shift):\n\n # check call\n res1 = func(tmp)\n res2 = func_shift(tmp)\n self.assertNumpyArrayAlmostEqual(res1, res2)\n \n try:\n # check gradient\n res1_gradient = func.gradient(tmp)\n res2_gradient = func_shift.gradient(tmp)\n self.assertNumpyArrayAlmostEqual(res1_gradient.as_array(), res2_gradient.as_array()) \n \n # check 
gradient out\n func.gradient(tmp, out = out_gradient1)\n func_shift.gradient(tmp, out = out_gradient2)\n self.assertNumpyArrayAlmostEqual(out_gradient1.as_array(), out_gradient2.as_array())\n \n except NotImplementedError:\n print('Function is not differentiable')\n \n # check proximal \n func.proximal(tmp, tau, out = out_proximal1)\n func_shift.proximal(tmp, tau, out = out_proximal2)\n self.assertNumpyArrayAlmostEqual(out_proximal1.as_array(), out_proximal2.as_array()) \n\n # check proximal conjugate\n func.proximal_conjugate(tmp, tau, out = out_proximal_conj1)\n func_shift.proximal_conjugate(tmp, tau, out = out_proximal_conj2)\n self.assertNumpyArrayAlmostEqual(out_proximal_conj1.as_array(), out_proximal_conj1.as_array()) \n \n \nif __name__ == '__main__':\n# \n t = TestFunction()\n t.test_TranslateFunction() \n\n\n# ig = ImageGeometry(4,4)\n# tmp = ig.allocate('random_int')\n# b = ig.allocate('random_int')\n# scalar = 0.4\n# \n## f = scalar * L2NormSquared().centered_at(b)\n## print(f.function.function)\n# list1 = [ L2NormSquared(), scalar * L2NormSquared(), scalar * L2NormSquared(b=b)] \n# \n## for func in list_functions:\n## \n### if isinstance(func, ScaledFunction):\n### func_tmp = func.function\n### else:\n### func_tmp = func \n### \n### if func_tmp.b is None:\n### tmp_data = ig.allocate()\n### else:\n### tmp_data = b \n## \n## func_tmp = func\n## tmp_data = ig.allocate()\n## \n## res1 = func_tmp(tmp) \n## res2 = func_tmp.centered_at(tmp_data)(tmp)\n## \n## self.assertNumpyArrayAlmostEqual(res1, res2)\n \n \n\n \n \n# \n# for i in list_functions:\n# \n# print('Test Translation for Function {} '.format(type(i).__name__))\n# \n# if isinstance(i, L2NormSquared):\n# \n# f = L2NormSquared(b = b) \n# g = TranslateFunction(L2NormSquared(), b)\n# \n# elif isinstance(i, L1Norm):\n# \n# f = L1Norm(b = b) \n# g = TranslateFunction(L1Norm(), b)\n# \n# elif isinstance(i, ScaledFunction):\n# \n# if isinstance(i.function, L2NormSquared):\n# f = scalar * L2NormSquared(b = b) \n# g = scalar * TranslateFunction(L2NormSquared(), b)\n# \n# if isinstance(i.function, L1Norm):\n# f = scalar * L1Norm(b = b) \n# g = scalar * TranslateFunction(L1Norm(), b) \n# \n# # check call\n# res1 = f(tmp)\n# res2 = g(tmp) \n# numpy.testing.assert_equal(res1, res2)\n# \n# # check gradient\n# \n# if not isinstance(i, L1Norm):\n# \n# res1 = f.gradient(tmp)\n# res2 = g.gradient(tmp) \n# numpy.testing.assert_equal(res1.as_array(), res2.as_array()) \n# \n# # check gradient out\n# res3 = ig.allocate()\n# res4 = ig.allocate()\n# f.gradient(tmp, out = res3)\n# g.gradient(tmp, out = res4)\n# numpy.testing.assert_equal(res3.as_array(), res4.as_array())\n# \n# # check convex conjugate\n# res1 = f.convex_conjugate(tmp)\n# res2 = g.convex_conjugate(tmp)\n# numpy.testing.assert_equal(res1, res2) \n# \n# # check proximal \n# tau = 0.5\n# res1 = f.proximal(tmp, tau)\n# res2 = g.proximal(tmp, tau)\n# numpy.testing.assert_equal(res1.as_array(), res2.as_array()) \n# \n# # check proximal out \n# res3 = ig.allocate()\n# res4 = ig.allocate()\n# f.proximal(tmp, tau, out = res3)\n# g.proximal(tmp, tau, out = res4)\n# numpy.testing.assert_array_almost_equal(res3.as_array(), res4.as_array(),decimal = decimal) \n# \n# # check proximal conjugate \n# tau = 0.4\n# res1 = f.proximal_conjugate(tmp, tau)\n# res2 = g.proximal_conjugate(tmp, tau)\n# numpy.testing.assert_array_almost_equal(res1.as_array(), res2.as_array(),decimal = decimal) \n# \n# # check proximal out \n# res3 = ig.allocate()\n# res4 = ig.allocate()\n# f.proximal_conjugate(tmp, tau, out = 
res3)\n# g.proximal_conjugate(tmp, tau, out = res4)\n# numpy.testing.assert_array_almost_equal(res3.as_array(), res4.as_array(),decimal = decimal) \n# \n# \n# f = L2NormSquared() + 1\n# print(f(tmp))\n# \n# \n \n# \n# \n# # tau = 0.5 \n# # f = L2NormSquared(b=b) \n# # g = TranslateFunction(f, b)\n# # res1 = f.proximal_conjugate(tmp, tau) \n# # res2 = tmp - tau * f.proximal(tmp/tau, 1/tau)\n# # res3 = g.proximal_conjugate(tmp, tau)\n# \n# # print(res1.as_array())\n# # print(res3.as_array())\n# # numpy.testing.assert_equal(res1.as_array(), res2.as_array()) \n# # numpy.testing.assert_equal(res1.as_array(), res3.as_array()) \n# \n# \n# \n", "# -*- coding: utf-8 -*-\n#========================================================================\n# Copyright 2019 Science Technology Facilities Council\n# Copyright 2019 University of Manchester\n#\n# This work is part of the Core Imaging Library developed by Science Technology\n# Facilities Council and University of Manchester\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0.txt\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#=========================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nimport numpy\nfrom ccpi.optimisation.functions import Function\nfrom ccpi.framework import VectorData, VectorGeometry\n\nclass Rosenbrock(Function):\n r'''Rosenbrock function\n\n .. math:: \n\n F(x,y) = (\\alpha - x)^2 + \\beta(y-x^2)^2\n\n The function has a global minimum at .. math:: (x,y)=(\\alpha, \\alpha^2)\n\n '''\n def __init__(self, alpha, beta):\n super(Rosenbrock, self).__init__()\n\n self.alpha = alpha\n self.beta = beta\n\n def __call__(self, x):\n if not isinstance(x, VectorData):\n raise TypeError('Rosenbrock function works on VectorData only')\n vec = x.as_array()\n a = (self.alpha - vec[0])\n b = (vec[1] - (vec[0]*vec[0]))\n return a * a + self.beta * b * b\n\n def gradient(self, x, out=None):\n r'''Gradient of the Rosenbrock function\n \n .. math::\n\n \\nabla f(x,y) = \\left[ 2*((x-\\alpha) - 2\\beta x(y-x^2)) ; 2\\beta (y - x^2) \\right]\n\n '''\n if not isinstance(x, VectorData):\n raise TypeError('Rosenbrock function works on VectorData only')\n\n vec = x.as_array()\n a = (vec[0] - self.alpha)\n b = (vec[1] - (vec[0]*vec[0]))\n\n res = numpy.empty_like(vec)\n res[0] = 2 * ( a - 2 * self.beta * vec[0] * b)\n res[1] = 2 * self.beta * b\n\n if out is not None:\n out.fill (res)\n else:\n return VectorData(res) \n\n" ]
[ [ "numpy.testing.assert_array_equal", "numpy.testing.assert_array_almost_equal" ], [ "numpy.empty_like" ] ]
pingrunhuang/mars
[ "cde691285d921add5460944764c7278e7ddec8ff" ]
[ "mars/scheduler/tests/test_graph.py" ]
[ "# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport contextlib\nimport uuid\nimport unittest\n\nimport mars.tensor as mt\nfrom mars.cluster_info import ClusterInfoActor\nfrom mars.scheduler import GraphActor, GraphMetaActor, ResourceActor, ChunkMetaActor, \\\n AssignerActor, GraphState\nfrom mars.utils import serialize_graph, get_next_port\nfrom mars.actors import create_actor_pool\nfrom mars.tests.core import patch_method\n\n\nclass Test(unittest.TestCase):\n @contextlib.contextmanager\n def prepare_graph_in_pool(self, expr, clean_io_meta=True, compose=False):\n session_id = str(uuid.uuid4())\n graph_key = str(uuid.uuid4())\n\n graph = expr.build_graph(compose=compose)\n serialized_graph = serialize_graph(graph)\n chunked_graph = expr.build_graph(compose=compose, tiled=True)\n\n addr = '127.0.0.1:%d' % get_next_port()\n with create_actor_pool(n_process=1, backend='gevent', address=addr) as pool:\n pool.create_actor(ClusterInfoActor, [pool.cluster_info.address],\n uid=ClusterInfoActor.default_name())\n resource_ref = pool.create_actor(ResourceActor, uid=ResourceActor.default_name())\n pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_name())\n pool.create_actor(AssignerActor, uid=AssignerActor.default_name())\n graph_ref = pool.create_actor(GraphActor, session_id, graph_key, serialized_graph,\n uid=GraphActor.gen_name(session_id, graph_key))\n\n graph_ref.prepare_graph(compose=compose)\n fetched_graph = graph_ref.get_chunk_graph()\n self.assertIsNotNone(fetched_graph)\n self.assertEqual(len(chunked_graph), len(fetched_graph))\n\n graph_ref.scan_node()\n op_infos = graph_ref.get_operand_info()\n for n in fetched_graph:\n depth = op_infos[n.op.key]['optimize']['depth']\n self.assertIsNotNone(depth)\n successor_size = op_infos[n.op.key]['optimize']['successor_size']\n self.assertIsNotNone(successor_size)\n descendant_size = op_infos[n.op.key]['optimize']['descendant_size']\n self.assertIsNotNone(descendant_size)\n\n resource_ref.set_worker_meta('localhost:12345', dict(hardware=dict(cpu_total=4)))\n resource_ref.set_worker_meta('localhost:23456', dict(hardware=dict(cpu_total=4)))\n\n graph_ref.place_initial_chunks()\n op_infos = graph_ref.get_operand_info()\n\n for n in fetched_graph:\n if fetched_graph.count_predecessors(n) != 0:\n continue\n target_worker = op_infos[n.op.key]['target_worker']\n self.assertIsNotNone(target_worker)\n\n graph_ref.create_operand_actors(_clean_io_meta=clean_io_meta)\n op_infos = graph_ref.get_operand_info()\n\n if not clean_io_meta:\n orig_metas = dict()\n for n in fetched_graph:\n try:\n meta = orig_metas[n.op.key]\n except KeyError:\n meta = orig_metas[n.op.key] = dict(\n predecessors=set(), successors=set(), input_chunks=set(), chunks=set()\n )\n meta['predecessors'].update([pn.op.key for pn in fetched_graph.iter_predecessors(n)])\n meta['successors'].update([sn.op.key for sn in fetched_graph.iter_successors(n)])\n meta['input_chunks'].update([pn.key for pn in 
fetched_graph.iter_predecessors(n)])\n meta['chunks'].update([c.key for c in n.op.outputs])\n\n for n in fetched_graph:\n self.assertEqual(op_infos[n.op.key]['op_name'], type(n.op).__name__)\n\n io_meta = op_infos[n.op.key]['io_meta']\n orig_io_meta = orig_metas[n.op.key]\n\n self.assertSetEqual(set(io_meta['predecessors']), set(orig_io_meta['predecessors']))\n self.assertSetEqual(set(io_meta['successors']), set(orig_io_meta['successors']))\n self.assertSetEqual(set(io_meta['input_chunks']), set(orig_io_meta['input_chunks']))\n self.assertSetEqual(set(io_meta['chunks']), set(orig_io_meta['chunks']))\n\n self.assertEqual(op_infos[n.op.key]['output_size'], sum(ch.nbytes for ch in n.op.outputs))\n\n yield pool, graph_ref\n\n def testSimpleGraphPreparation(self, *_):\n arr = mt.random.randint(10, size=(10, 8), chunk_size=4)\n arr_add = mt.random.randint(10, size=(10, 8), chunk_size=4)\n arr2 = arr + arr_add\n with self.prepare_graph_in_pool(arr2, clean_io_meta=False):\n pass\n\n def testSplitPreparation(self, *_):\n arr = mt.ones(12, chunk_size=4)\n arr_split = mt.split(arr, 2)\n arr_sum = arr_split[0] + arr_split[1]\n with self.prepare_graph_in_pool(arr_sum, clean_io_meta=False):\n pass\n\n def testSameKeyPreparation(self, *_):\n arr = mt.ones((5, 5), chunk_size=3)\n arr2 = mt.concatenate((arr, arr))\n with self.prepare_graph_in_pool(arr2, clean_io_meta=False):\n pass\n\n def testFusePreparation(self, *_):\n from mars.tensor.expressions.fuse.core import TensorFuseChunk\n arr = mt.ones((5, 5), chunk_size=3)\n arr2 = (arr + 5) * 2\n with self.prepare_graph_in_pool(arr2, compose=True) as (pool, graph_ref):\n out_graph = graph_ref.get_chunk_graph()\n self.assertTrue(all(isinstance(v.op, TensorFuseChunk) for v in out_graph))\n\n def testMultipleAddPreparation(self, *_):\n import numpy as np\n import operator\n from mars.compat import reduce\n\n base_arr = np.random.random((100, 100))\n a = mt.array(base_arr)\n sumv = reduce(operator.add, [a[:10, :10] for _ in range(10)])\n with self.prepare_graph_in_pool(sumv):\n pass\n\n def testGraphTermination(self, *_):\n from mars.tensor.expressions.arithmetic.add import TensorAddConstant\n arr = mt.random.random((8, 2), chunk_size=2)\n arr2 = arr + 1\n with self.prepare_graph_in_pool(arr2) as (pool, graph_ref):\n out_graph = graph_ref.get_chunk_graph()\n for c in out_graph:\n if not isinstance(c.op, TensorAddConstant):\n continue\n self.assertNotEqual(graph_ref.get_state(), GraphState.SUCCEEDED)\n graph_ref.mark_terminal_finished(c.op.key)\n\n self.assertEqual(graph_ref.get_state(), GraphState.SUCCEEDED)\n\n arr = mt.random.random((8, 2), chunk_size=2)\n arr2 = arr + 1\n with self.prepare_graph_in_pool(arr2) as (pool, graph_ref):\n out_graph = graph_ref.get_chunk_graph()\n for c in out_graph:\n if not isinstance(c.op, TensorAddConstant):\n continue\n self.assertNotEqual(graph_ref.get_state(), GraphState.FAILED)\n graph_ref.mark_terminal_finished(c.op.key, GraphState.FAILED)\n\n self.assertEqual(graph_ref.get_state(), GraphState.FAILED)\n\n def testErrorOnPrepare(self, *_):\n session_id = str(uuid.uuid4())\n\n addr = '127.0.0.1:%d' % get_next_port()\n with create_actor_pool(n_process=1, backend='gevent', address=addr) as pool:\n pool.create_actor(ClusterInfoActor, [pool.cluster_info.address],\n uid=ClusterInfoActor.default_name())\n resource_ref = pool.create_actor(ResourceActor, uid=ResourceActor.default_name())\n pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_name())\n pool.create_actor(AssignerActor, uid=AssignerActor.default_name())\n\n 
resource_ref.set_worker_meta('localhost:12345', dict(hardware=dict(cpu_total=4)))\n resource_ref.set_worker_meta('localhost:23456', dict(hardware=dict(cpu_total=4)))\n\n # error occurred in create_operand_actors\n graph_key = str(uuid.uuid4())\n expr = mt.random.random((8, 2), chunk_size=2) + 1\n graph = expr.build_graph(compose=False)\n serialized_graph = serialize_graph(graph)\n\n graph_ref = pool.create_actor(GraphActor, session_id, graph_key, serialized_graph,\n uid=GraphActor.gen_name(session_id, graph_key))\n\n def _mock_raises(*_):\n raise RuntimeError\n\n with patch_method(GraphActor.create_operand_actors, new=_mock_raises):\n with self.assertRaises(RuntimeError):\n graph_ref.execute_graph()\n self.assertEqual(graph_ref.get_state(), GraphState.FAILED)\n graph_ref.destroy()\n\n # interrupted during create_operand_actors\n graph_key = str(uuid.uuid4())\n graph_ref = pool.create_actor(GraphActor, session_id, graph_key, serialized_graph,\n uid=GraphActor.gen_name(session_id, graph_key))\n\n def _mock_cancels(*_):\n graph_meta_ref = pool.actor_ref(GraphMetaActor.gen_name(session_id, graph_key))\n graph_meta_ref.set_state(GraphState.CANCELLING)\n\n with patch_method(GraphActor.create_operand_actors, new=_mock_cancels):\n graph_ref.execute_graph()\n self.assertEqual(graph_ref.get_state(), GraphState.CANCELLED)\n\n # interrupted during previous steps\n graph_key = str(uuid.uuid4())\n graph_ref = pool.create_actor(GraphActor, session_id, graph_key, serialized_graph,\n uid=GraphActor.gen_name(session_id, graph_key))\n\n def _mock_cancels(*_):\n graph_meta_ref = pool.actor_ref(GraphMetaActor.gen_name(session_id, graph_key))\n graph_meta_ref.set_state(GraphState.CANCELLING)\n\n with patch_method(GraphActor.place_initial_chunks, new=_mock_cancels):\n graph_ref.execute_graph()\n self.assertEqual(graph_ref.get_state(), GraphState.CANCELLED)\n" ]
[ [ "numpy.random.random" ] ]
EEdwardsA/Twitoff
[ "e1c2613c233e81c5aa50fecb89e90c75b9bbdd01" ]
[ "twitoff/predict.py" ]
[ "\"\"\"Prediction of Users based on tweet embeddings\"\"\"\n\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\nfrom .models import User\nfrom .twitter import vectorize_tweet\n\ndef predict_user(user0_name, user1_name, hypo_tweet_text):\n \"\"\"\n Determine and return which user is more likely to say a hypothetical tweet\n \n Example run: predict_user('elonmusk', 'nasa', 'Tesla cars are rad')\n returns 0 (user0_name) or 1 (user1_name)\n \"\"\"\n user0 = User.query.filter(User.name == user0_name).one()\n # TODO: create try/except block\n user1 = User.query.filter(User.name == user1_name).one()\n user0_vects = np.array([tweet.vect for tweet in user0.tweets])\n user1_vects = np.array([tweet.vect for tweet in user1.tweets])\n vects = np.vstack([user0_vects, user1_vects])\n labels = np.concatenate(\n [np.zeros(len(user0.tweets)), np.ones(len(user1.tweets))])\n hypo_tweet_vect = vectorize_tweet(hypo_tweet_text)\n\n log_reg = LogisticRegression().fit(vects, labels)\n\n return log_reg.predict(hypo_tweet_vect.reshape(1, -1))\n \n\n" ]
[ [ "numpy.array", "sklearn.linear_model.LogisticRegression", "numpy.vstack" ] ]
darrelrobinson/rdd
[ "54b9c328087ae22ac38073aab2ee930459b2364a" ]
[ "build/lib/rdd/test.py" ]
[ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport functions as rdd\n\n'''\nTo Do:\n - Put testing functions in another folder\n - test different input types, combos of bad items, etc\n'''\n\n# Set seed\nnp.random.seed(42)\n\n# Simulate data\nN = 10000\n# x = np.random.uniform(-.5, .5, N)\nx = np.random.normal(0, 1, N)\nepsilon = np.random.normal(0, 1, N)\nforcing = np.round(x+.5)\ny = .5 * forcing + 2 * x + 1 + epsilon\nw1 = np.random.normal(0, 1, N)\nw2 = np.random.normal(0, 4, N)\n\ndata = pd.DataFrame({'y':y, 'x': x, 'w1':w1, 'w2':w2})\nprint(data.head())\n\nh = rdd.optimal_bandwidth(data['y'], data['x'])\nprint(h)\n\n# data_rdd = rdd.truncated_data(data, 'x', h)\n\n# results = rdd.rdd(data_rdd, 'x', 'y')\n\n# print(results.summary())\n\n# data_binned = rdd.bin_data(data, 'y', 'x', 100)\n\n# plt.figure()\n# plt.scatter(data_binned['x'], data_binned['y'],\n# s = data_binned['n_obs'], facecolors='none', edgecolors='r')\n# plt.show()\n# plt.close()\n\n# print(data_binned['n_obs'].describe())\n\n# Show a spline\n# show placebo with different cuts" ]
[ [ "numpy.round", "numpy.random.normal", "numpy.random.seed", "pandas.DataFrame" ] ]
georgegunter/flow
[ "15848ec9bafd250364a51fa162786037645b19bf" ]
[ "examples/Old Code/run_follower_stopper_ring.py" ]
[ "from flow.controllers import FollowerStopper, IDMController, ContinuousRouter, OVMController\nfrom flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams\nfrom flow.core.params import VehicleParams\nfrom flow.envs.ring.accel import AccelEnv, ADDITIONAL_ENV_PARAMS\nfrom flow.networks.ring import RingNetwork, ADDITIONAL_NET_PARAMS\n\n# For running a simulation:\nfrom flow.core.experiment import Experiment\n\n# For data processing:\nimport pandas as pd\nimport numpy as np\nimport os\nimport sys\nimport time\n\ndef get_flow_dict(v_des,model_params,emission_path):\n\n\talpha = model_params[0]\n\tbeta = model_params[1]\n\tv_max = model_params[2]\n\ts_star = model_params[3]\n\ts0 = model_params[4]\n\n\thuman_accel = (OVMController,{'alpha':alpha,'beta':beta,'v_max':v_max,'s_star':s_star,'s0':s0,'noise':.1})\n\n\tvehicles = VehicleParams()\n\tvehicles.add(\n\t veh_id=\"human\",\n\t acceleration_controller=human_accel,\n\t routing_controller=(ContinuousRouter, {}),\n\t num_vehicles=20)\n\n\tvehicles.add(\n\t color='red',\n\t veh_id=\"AV\",\n\t acceleration_controller=(FollowerStopper, {'v_des':v_des}),\n\t routing_controller=(ContinuousRouter, {}),\n\t num_vehicles=1)\n\n\n\tflow_params = dict(\n\t # name of the experiment\n\t exp_tag='ring',\n\n\t # name of the flow environment the experiment is running on\n\t env_name=AccelEnv,\n\n\t # name of the network class the experiment is running on\n\t network=RingNetwork,\n\n\t # simulator that is used by the experiment\n\t simulator='traci',\n\n\t # sumo-related parameters (see flow.core.params.SumoParams)\n\t sim=SumoParams(\n\t render=False,\n\t sim_step=0.1,\n\t emission_path=emission_path,\n\t ),\n\n\t # environment related parameters (see flow.core.params.EnvParams)\n\t env=EnvParams(\n\t horizon=3000,\n\t warmup_steps=750,\n\t additional_params=ADDITIONAL_ENV_PARAMS,\n\t ),\n\n\t # network-related parameters (see flow.core.params.NetParams and the\n\t # network's documentation or ADDITIONAL_NET_PARAMS component)\n\t net=NetParams(\n\t additional_params={\n\t \"length\": 260,\n\t \"lanes\": 1,\n\t \"speed_limit\": 30,\n\t \"resolution\": 40,\n\t }, ),\n\n\t # vehicles to be placed in the network at the start of a rollout (see\n\t # flow.core.params.VehicleParams)\n\t veh=vehicles,\n\n\t # parameters specifying the positioning of vehicles upon initialization/\n\t # reset (see flow.core.params.InitialConfig)\n\t # initial=InitialConfig(\n\t # bunching=20,\n\t # ),\n\t)\n\n\treturn flow_params\n\ndef run_sim(v_des,model_params,emission_path):\n\n\tflow_params = get_flow_dict(v_des,model_params,emission_path)\n\n\texp = Experiment(flow_params)\n\n\t[emission_files,info_dict] = exp.run(num_runs=1,convert_to_csv=True)\n\n\tcsv_path = emission_files[0]\n\n\treturn csv_path\n\ndef get_sim_results(csv_path):\n\tdata = pd.read_csv(csv_path,delimiter=',')\n\tids = data.id.unique() #numpy array\n\tids = list(ids)\n\n\tsim_time = np.array(data[data['id']==ids[0]]['time'])\n\tsim_length = sim_time[-1]\n\n\ttime_threshold = sim_length/2\n\n\tspeed_measurements = data[data['time'] > time_threshold]['speed']\n\n\tspeed_measurements = np.array(speed_measurements)\n\n\tave_speed = np.mean(speed_measurements)\n\n\tstd_speed = np.std(speed_measurements)\n\n\treturn [ave_speed,std_speed]\n\n\nif __name__ == \"__main__\":\n\temission_path = '/Users/vanderbilt/Desktop/Research_2020/CIRCLES/Official_Flow/flow/examples/follower_stopper_sims/'\n\n\tmodel_params = [0.6660,21.5975,8.9368,2.2146,2.8150]\n\n\tsim_results = []\n\n\tv_des_vals = 
np.linspace(1.0,9.0,25)\n\tv_des_vals = list(v_des_vals)\n\n\tstart_time = time.time()\n\n\tfor v_des in v_des_vals:\n\t\tsys.stdout.write('\\r'+'Simulating v_des: '+str(v_des))\n\n\t\tcsv_path = run_sim(v_des,model_params,emission_path)\n\n\t\tsim_data = get_sim_results(csv_path)\n\n\t\tsim_results.append([v_des,sim_data[0],sim_data[1]])\n\n\t\tos.remove(csv_path)\n\n\tsim_time = time.time() - start_time\n\n\tsim_results = np.array(sim_results)\n\n\tnp.savetxt('follower_stopper_sweep.csv',sim_results)\n\n\tprint('Simulation sweep finished, time to complete: '+str(sim_time))\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "pandas.read_csv", "numpy.linspace", "numpy.std", "numpy.mean", "numpy.savetxt", "numpy.array" ] ]
pdatlab/rpy2_utils
[ "8d563592550272604cf6453c6d4dd121f3da49b6" ]
[ "rpy2_utils/robjects.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n\n\"\"\"\nimport rpy2\nimport pandas as pd\n\n\nclass DataFrame():\n \n def __init__(self,r_df):\n \"\"\"\n import rpy2_utils as ru\n dfh = ru.robjects.DataFrame(r_df)\n \"\"\"\n \n #TODO: Verify data type\n self.r = r_df\n \n def __contains__(self, name):\n return name in self.r.names\n \n def renameColumn(self,old_name,new_name):\n names = list(self.r.names)\n I = names.index(old_name)\n self.r.names[I] = new_name\n \n @property\n def names(self):\n return self.r.names\n \n def __getitem__(self,name):\n names = list(self.r.names)\n I = names.index(name)\n r_column = self.r[I]\n \n #Had isinstance, but factor is subclass of int\n #to generally avoid subclass comparisons, switched to type()\n if type(r_column) == rpy2.robjects.vectors.IntVector: \n return IntVector(r_column)\n elif type(r_column) == rpy2.robjects.vectors.StrVector: \n return StrVector(r_column)\n elif type(r_column) == rpy2.robjects.vectors.FloatVector:\n return FloatVector(r_column)\n elif type(r_column) == rpy2.robjects.vectors.FactorVector:\n return FactorVector(r_column)\n else:\n raise Exception('Unhandled case')\n \n def __setitem__(self, name, new_value):\n names = list(self.r.names)\n I = names.index(name)\n self.r[I] = new_value.r\n \n\n\nclass IntVector():\n def __init__(self,r):\n self.r = r\n \n def as_factor(self,levels=None,ordered=False,na=None):\n \n if na is not None:\n raise Exception('NA option not yet handled for int vector')\n \n if levels is None:\n r = rpy2.robjects.vectors.FactorVector(self.r,ordered=ordered)\n else:\n levels_r = rpy2.robjects.vectors.IntVector(levels)\n r = rpy2.robjects.vectors.FactorVector(self.r,levels=levels_r,ordered=ordered)\n \n return FactorVector(r)\n\nclass StrVector():\n \n def __init__(self,r):\n self.r = r\n \n def as_factor(self,levels=None,ordered=False,na=None):\n \n if levels is None:\n if na is not None:\n raise Exception('NA for no levels specified not yet handled')\n r = rpy2.robjects.vectors.FactorVector(self.r,ordered=ordered)\n else:\n if na is not None:\n levels.remove(na)\n \n levels_r = rpy2.robjects.vectors.StrVector(levels)\n r = rpy2.robjects.vectors.FactorVector(self.r,levels=levels_r,ordered=ordered)\n \n # if na is not None:\n # #TODO: Not sure if there is a better way of doing this ...\n # final_levels = list(r.levels)\n # I = final_levels.index(na)\n # #Note, level values are 1 based, not 0 based\n # I = I + 1\n # r_column = self.r[I]\n \n # #r_train_data.rx[r_train_data.ro == -1] = robjects.NA_Integer\n \n # import pdb\n # pdb.set_trace()\n # pass\n \n return FactorVector(r)\n\nclass FloatVector():\n def __init__(self,r):\n self.r = r\n \nclass FactorVector():\n def __init__(self,r):\n self.r = r\n \n @property\n def levels(self):\n return self.r.levels\n \n def as_factor(self,levels=None,ordered=False,na=None):\n #TODO: it is possible this changes the levels\n #Right now this occurs when we rerun code that has \n #already been converted\n return self\n \n \nclass FloatMatrix():\n def __init__(self,r):\n self.r = r \n \n def as_dataframe(self):\n #TODO: Clean this up, can we just extract column values\n # rather than build by row? 
Yes, slice by column\n # n_rows = 5\n # col1 = self.r[0:4]\n # col2 = self.r[5:9]\n # etc\n #\n #- make it so rownames is either a column (default) or index\n \n \n \n \n data = self.r\n col_names = ['rownames'] + list(data.colnames)\n row_names = data.rownames\n num_cols = data.ncol\n num_rows = data.nrow\n col_range = range(num_cols)\n row_range = range(num_rows)\n \n \n rows = []\n for x in row_range:\n index = [x+p*num_rows for p in col_range]\n row_values = [data[p] for p in index]\n row_values = [row_names[x]] + row_values\n row = dict(zip(col_names,row_values))\n row = pd.DataFrame(row, index=[x])\n rows.append(row)\n \n output = pd.concat(rows)\n \n return output\n " ]
[ [ "pandas.concat", "pandas.DataFrame" ] ]
TKone7/python-docs-samples
[ "ef3dd032d6fde6a47b944604788bb674e8e51b66" ]
[ "data-science-onramp/ai-platform/modules/trainer/tfkeras_model/task.py" ]
[ "# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\")\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https: // www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# [START ai_platform_tfkeras_task]\n\"\"\"Trains a Keras model to predict number of trips\nstarted and ended at Citibike stations. \"\"\"\n\n# [START ai_platform_tfkeras_task_imports]\nimport argparse\nimport os\n\nimport tensorflow as tf\n\nfrom trainer import utils\nfrom trainer.tfkeras_model import model\n# [END ai_platform_tfkeras_task_imports]\n\n\n# [START ai_platform_tfkeras_task_args]\ndef get_args() -> argparse.Namespace:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--input-path\",\n type=str,\n required=True,\n help=\"path to input data\"\n )\n parser.add_argument(\n \"--num-epochs\",\n type=int,\n help=\"number of times to go through the data, default=20\",\n )\n parser.add_argument(\n \"--batch-size\",\n type=int,\n help=\"number of records to read during each training step, default=128\",\n )\n parser.add_argument(\n \"--learning-rate\",\n type=float,\n help=\"learning rate for gradient descent, default=.01\",\n )\n parser.add_argument(\n \"--verbosity\",\n choices=[\"DEBUG\", \"ERROR\", \"FATAL\", \"INFO\", \"WARN\"],\n default=\"INFO\",\n )\n parser.add_argument(\n \"--model_dir\",\n type=str,\n help=\"Output directory for the model.\",\n default=os.environ[\"AIP_MODEL_DIR\"],\n )\n return parser.parse_args()\n# [END ai_platform_tfkeras_task_args]\n\n\n# [START ai_platform_tfkeras_task_train_and_evaluate]\n# [START ai_platform_tfkeras_task_train_and_evaluate_load]\ndef train_and_evaluate(\n input_path: str,\n model_dir: str,\n num_epochs: int = 5,\n batch_size: int = 128,\n learning_rate: float = 0.01\n) -> None:\n \"\"\"Trains and evaluates the Keras model.\n\n Uses the Keras model defined in model.py. 
Saves the trained model in TensorFlow SavedModel\n format to the path defined in part by the --job-dir argument.\"\"\"\n\n # Split datasets into training and testing\n train_feature, eval_feature, train_target, eval_target = utils.load_data(input_path)\n# [END ai_platform_tfkeras_task_train_and_evaluate_load]\n\n # [START ai_platform_tfkeras_task_train_and_evaluate_dimensions]\n # Extract dimensions of the data\n num_train_examples, input_dim = train_feature.shape\n num_eval_examples = eval_feature.shape[1]\n output_dim = train_target.shape[1]\n # [END ai_platform_tfkeras_task_train_and_evaluate_dimensions]\n\n # [START ai_platform_tfkeras_task_train_and_evaluate_model]\n # Create the Keras Model\n keras_model = model.create_keras_model(\n input_dim=input_dim,\n output_dim=output_dim,\n learning_rate=learning_rate,\n )\n # [END ai_platform_tfkeras_task_train_and_evaluate_model]\n\n # [START ai_platform_tfkeras_task_train_and_evaluate_training_data]\n # Pass a numpy array by passing DataFrame.values\n training_dataset = model.input_fn(\n features=train_feature.values,\n labels=train_target.values,\n shuffle=True,\n num_epochs=num_epochs,\n batch_size=batch_size,\n )\n # [END ai_platform_tfkeras_task_train_and_evaluate_training_data]\n\n # [START ai_platform_tfkeras_task_train_and_evaluate_validation_data]\n # Pass a numpy array by passing DataFrame.values\n validation_dataset = model.input_fn(\n features=eval_feature.values,\n labels=eval_target.values,\n shuffle=False,\n num_epochs=num_epochs,\n batch_size=num_eval_examples,\n )\n # [END ai_platform_tfkeras_task_train_and_evaluate_validation_data]\n\n # [START ai_platform_tfkeras_task_train_and_evaluate_tensorboard]\n # Setup Learning Rate decay.\n lr_decay_cb = tf.keras.callbacks.LearningRateScheduler(\n lambda epoch: learning_rate + 0.02 * (0.5 ** (1 + epoch)), verbose=True\n )\n\n # Setup TensorBoard callback.\n tensorboard_cb = tf.keras.callbacks.TensorBoard(\n os.path.join(model_dir, \"keras_tensorboard\"), histogram_freq=1\n )\n # [END ai_platform_tfkeras_task_train_and_evaluate_tensorboard]\n\n # [START ai_platform_tfkeras_task_train_and_evaluate_fit_export]\n # Train model\n keras_model.fit(\n training_dataset,\n steps_per_epoch=int(num_train_examples / batch_size),\n epochs=num_epochs,\n validation_data=validation_dataset,\n validation_steps=1,\n verbose=1,\n callbacks=[lr_decay_cb, tensorboard_cb],\n )\n\n # Export model\n keras_model.save(model_dir)\n print(f\"Model exported to: {model_dir}\")\n # [END ai_platform_tfkeras_task_train_and_evaluate_fit_export]\n# [END ai_platform_tfkeras_task_train_and_evaluate]\n\n\nif __name__ == \"__main__\":\n args = get_args()\n\n kwargs = {}\n if args.num_epochs:\n kwargs[\"num-epochs\"] = args.num_epochs\n if args.batch_size:\n kwargs[\"batch-size\"] = args.batch_size\n if args.learning_rate:\n kwargs[\"learning-rate\"] = args.learning_rate\n\n tf.compat.v1.logging.set_verbosity(args.verbosity)\n\n train_and_evaluate(args.input_path, args.model_dir, **kwargs)\n# [END ai_platform_tfkeras_task]\n" ]
[ [ "tensorflow.keras.callbacks.LearningRateScheduler", "tensorflow.compat.v1.logging.set_verbosity" ] ]
austereantelope/pymc
[ "657eb2a7e46fa30e61d3c1b12a8ce15020794a2c" ]
[ "pymc/bart/pgbart.py" ]
[ "# Copyright 2020 The PyMC Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\n\nfrom copy import copy\n\nimport aesara\nimport numpy as np\n\nfrom aesara import function as aesara_function\n\nfrom pymc.aesaraf import inputvars, join_nonshared_inputs, make_shared_replacements\nfrom pymc.bart.bart import BARTRV\nfrom pymc.bart.tree import LeafNode, SplitNode, Tree\nfrom pymc.model import modelcontext\nfrom pymc.step_methods.arraystep import ArrayStepShared, Competence\n\n_log = logging.getLogger(\"pymc\")\n\n\nclass PGBART(ArrayStepShared):\n \"\"\"\n Particle Gibss BART sampling step\n\n Parameters\n ----------\n vars: list\n List of value variables for sampler\n num_particles : int\n Number of particles for the conditional SMC sampler. Defaults to 40\n max_stages : int\n Maximum number of iterations of the conditional SMC sampler. Defaults to 100.\n batch : int or tuple\n Number of trees fitted per step. Defaults to \"auto\", which is the 10% of the `m` trees\n during tuning and after tuning. If a tuple is passed the first element is the batch size\n during tuning and the second the batch size after tuning.\n model: PyMC Model\n Optional model for sampling step. Defaults to None (taken from context).\n \"\"\"\n\n name = \"bartsampler\"\n default_blocked = False\n generates_stats = True\n stats_dtypes = [{\"variable_inclusion\": np.ndarray, \"bart_trees\": np.ndarray}]\n\n def __init__(self, vars=None, num_particles=40, max_stages=100, batch=\"auto\", model=None):\n _log.warning(\"BART is experimental. 
Use with caution.\")\n model = modelcontext(model)\n initial_values = model.compute_initial_point()\n value_bart = inputvars(vars)[0]\n self.bart = model.values_to_rvs[value_bart].owner.op\n\n self.X = self.bart.X\n self.Y = self.bart.Y\n self.missing_data = np.any(np.isnan(self.X))\n self.m = self.bart.m\n self.alpha = self.bart.alpha\n self.k = self.bart.k\n self.alpha_vec = self.bart.split_prior\n if self.alpha_vec is None:\n self.alpha_vec = np.ones(self.X.shape[1])\n\n self.init_mean = self.Y.mean()\n # if data is binary\n Y_unique = np.unique(self.Y)\n if Y_unique.size == 2 and np.all(Y_unique == [0, 1]):\n self.mu_std = 6 / (self.k * self.m ** 0.5)\n # maybe we need to check for count data\n else:\n self.mu_std = (2 * self.Y.std()) / (self.k * self.m ** 0.5)\n\n self.num_observations = self.X.shape[0]\n self.num_variates = self.X.shape[1]\n self.available_predictors = list(range(self.num_variates))\n\n self.sum_trees = np.full_like(self.Y, self.init_mean).astype(aesara.config.floatX)\n self.a_tree = Tree.init_tree(\n leaf_node_value=self.init_mean / self.m,\n idx_data_points=np.arange(self.num_observations, dtype=\"int32\"),\n )\n self.mean = fast_mean()\n\n self.normal = NormalSampler()\n self.prior_prob_leaf_node = compute_prior_probability(self.alpha)\n self.ssv = SampleSplittingVariable(self.alpha_vec)\n\n self.tune = True\n\n if batch == \"auto\":\n batch = max(1, int(self.m * 0.1))\n self.batch = (batch, batch)\n else:\n if isinstance(batch, (tuple, list)):\n self.batch = batch\n else:\n self.batch = (batch, batch)\n\n self.log_num_particles = np.log(num_particles)\n self.indices = list(range(2, num_particles))\n self.len_indices = len(self.indices)\n self.max_stages = max_stages\n\n shared = make_shared_replacements(initial_values, vars, model)\n self.likelihood_logp = logp(initial_values, [model.datalogpt], vars, shared)\n self.all_particles = []\n for i in range(self.m):\n self.a_tree.leaf_node_value = self.init_mean / self.m\n p = ParticleTree(self.a_tree)\n self.all_particles.append(p)\n self.all_trees = np.array([p.tree for p in self.all_particles])\n super().__init__(vars, shared)\n\n def astep(self, _):\n variable_inclusion = np.zeros(self.num_variates, dtype=\"int\")\n\n tree_ids = np.random.choice(range(self.m), replace=False, size=self.batch[~self.tune])\n for tree_id in tree_ids:\n # Generate an initial set of SMC particles\n # at the end of the algorithm we return one of these particles as the new tree\n particles = self.init_particles(tree_id)\n # Compute the sum of trees without the old tree, that we are attempting to replace\n self.sum_trees_noi = self.sum_trees - particles[0].tree.predict_output()\n # Resample leaf values for particle 1 which is a copy of the old tree\n particles[1].sample_leafs(\n self.sum_trees,\n self.X,\n self.mean,\n self.m,\n self.normal,\n self.mu_std,\n )\n\n # The old tree and the one with new leafs do not grow so we update the weights only once\n self.update_weight(particles[0], old=True)\n self.update_weight(particles[1], old=True)\n for _ in range(self.max_stages):\n # Sample each particle (try to grow each tree), except for the first two\n stop_growing = True\n for p in particles[2:]:\n tree_grew = p.sample_tree(\n self.ssv,\n self.available_predictors,\n self.prior_prob_leaf_node,\n self.X,\n self.missing_data,\n self.sum_trees,\n self.mean,\n self.m,\n self.normal,\n self.mu_std,\n )\n if tree_grew:\n self.update_weight(p)\n if p.expansion_nodes:\n stop_growing = False\n if stop_growing:\n break\n # Normalize weights\n W_t, 
normalized_weights = self.normalize(particles[2:])\n\n # Resample all but first two particles\n new_indices = np.random.choice(\n self.indices, size=self.len_indices, p=normalized_weights\n )\n particles[2:] = particles[new_indices]\n\n # Set the new weights\n for p in particles[2:]:\n p.log_weight = W_t\n\n for p in particles[2:]:\n p.log_weight = p.old_likelihood_logp\n\n _, normalized_weights = self.normalize(particles)\n # Get the new tree and update\n new_particle = np.random.choice(particles, p=normalized_weights)\n new_tree = new_particle.tree\n self.all_trees[tree_id] = new_tree\n new_particle.log_weight = new_particle.old_likelihood_logp - self.log_num_particles\n self.all_particles[tree_id] = new_particle\n self.sum_trees = self.sum_trees_noi + new_tree.predict_output()\n\n if self.tune:\n self.ssv = SampleSplittingVariable(self.alpha_vec)\n for index in new_particle.used_variates:\n self.alpha_vec[index] += 1\n else:\n for index in new_particle.used_variates:\n variable_inclusion[index] += 1\n\n stats = {\"variable_inclusion\": variable_inclusion, \"bart_trees\": self.all_trees}\n return self.sum_trees, [stats]\n\n def normalize(self, particles):\n \"\"\"\n Use logsumexp trick to get W_t and softmax to get normalized_weights\n \"\"\"\n log_w = np.array([p.log_weight for p in particles])\n log_w_max = log_w.max()\n log_w_ = log_w - log_w_max\n w_ = np.exp(log_w_)\n w_sum = w_.sum()\n W_t = log_w_max + np.log(w_sum) - self.log_num_particles\n normalized_weights = w_ / w_sum\n # stabilize weights to avoid assigning exactly zero probability to a particle\n normalized_weights += 1e-12\n\n return W_t, normalized_weights\n\n def init_particles(self, tree_id: int) -> np.ndarray:\n \"\"\"\n Initialize particles\n \"\"\"\n p = self.all_particles[tree_id]\n particles = [p]\n particles.append(copy(p))\n\n for _ in self.indices:\n particles.append(ParticleTree(self.a_tree))\n\n return np.array(particles)\n\n def update_weight(self, particle, old=False):\n \"\"\"\n Update the weight of a particle\n\n Since the prior is used as the proposal,the weights are updated additively as the ratio of\n the new and old log-likelihoods.\n \"\"\"\n new_likelihood = self.likelihood_logp(self.sum_trees_noi + particle.tree.predict_output())\n if old:\n particle.log_weight = new_likelihood\n particle.old_likelihood_logp = new_likelihood\n else:\n particle.log_weight += new_likelihood - particle.old_likelihood_logp\n particle.old_likelihood_logp = new_likelihood\n\n @staticmethod\n def competence(var, has_grad):\n \"\"\"\n PGBART is only suitable for BART distributions\n \"\"\"\n dist = getattr(var.owner, \"op\", None)\n if isinstance(dist, BARTRV):\n return Competence.IDEAL\n return Competence.INCOMPATIBLE\n\n\nclass ParticleTree:\n \"\"\"\n Particle tree\n \"\"\"\n\n def __init__(self, tree):\n self.tree = tree.copy() # keeps the tree that we care at the moment\n self.expansion_nodes = [0]\n self.log_weight = 0\n self.old_likelihood_logp = 0\n self.used_variates = []\n\n def sample_tree(\n self,\n ssv,\n available_predictors,\n prior_prob_leaf_node,\n X,\n missing_data,\n sum_trees,\n mean,\n m,\n normal,\n mu_std,\n ):\n tree_grew = False\n if self.expansion_nodes:\n index_leaf_node = self.expansion_nodes.pop(0)\n # Probability that this node will remain a leaf node\n prob_leaf = prior_prob_leaf_node[self.tree[index_leaf_node].depth]\n\n if prob_leaf < np.random.random():\n index_selected_predictor = grow_tree(\n self.tree,\n index_leaf_node,\n ssv,\n available_predictors,\n X,\n missing_data,\n sum_trees,\n 
mean,\n m,\n normal,\n mu_std,\n )\n if index_selected_predictor is not None:\n new_indexes = self.tree.idx_leaf_nodes[-2:]\n self.expansion_nodes.extend(new_indexes)\n self.used_variates.append(index_selected_predictor)\n tree_grew = True\n\n return tree_grew\n\n def sample_leafs(self, sum_trees, X, mean, m, normal, mu_std):\n\n sample_leaf_values(self.tree, sum_trees, X, mean, m, normal, mu_std)\n\n\nclass SampleSplittingVariable:\n def __init__(self, alpha_vec):\n \"\"\"\n Sample splitting variables proportional to `alpha_vec`.\n\n This is equivalent to compute the posterior mean of a Dirichlet-Multinomial model.\n This enforce sparsity.\n \"\"\"\n self.enu = list(enumerate(np.cumsum(alpha_vec / alpha_vec.sum())))\n\n def rvs(self):\n r = np.random.random()\n for i, v in self.enu:\n if r <= v:\n return i\n\n\ndef compute_prior_probability(alpha):\n \"\"\"\n Calculate the probability of the node being a LeafNode (1 - p(being SplitNode)).\n Taken from equation 19 in [Rockova2018].\n\n Parameters\n ----------\n alpha : float\n\n Returns\n -------\n list with probabilities for leaf nodes\n\n References\n ----------\n .. [Rockova2018] Veronika Rockova, Enakshi Saha (2018). On the theory of BART.\n arXiv, `link <https://arxiv.org/abs/1810.00787>`__\n \"\"\"\n prior_leaf_prob = [0]\n depth = 1\n while prior_leaf_prob[-1] < 1:\n prior_leaf_prob.append(1 - alpha ** depth)\n depth += 1\n return prior_leaf_prob\n\n\ndef grow_tree(\n tree,\n index_leaf_node,\n ssv,\n available_predictors,\n X,\n missing_data,\n sum_trees,\n mean,\n m,\n normal,\n mu_std,\n):\n current_node = tree.get_node(index_leaf_node)\n idx_data_points = current_node.idx_data_points\n\n index_selected_predictor = ssv.rvs()\n selected_predictor = available_predictors[index_selected_predictor]\n available_splitting_values = X[idx_data_points, selected_predictor]\n if missing_data:\n idx_data_points = idx_data_points[~np.isnan(available_splitting_values)]\n available_splitting_values = available_splitting_values[\n ~np.isnan(available_splitting_values)\n ]\n\n if available_splitting_values.size > 0:\n idx_selected_splitting_values = discrete_uniform_sampler(len(available_splitting_values))\n split_value = available_splitting_values[idx_selected_splitting_values]\n\n new_idx_data_points = get_new_idx_data_points(\n split_value, idx_data_points, selected_predictor, X\n )\n current_node_children = (\n current_node.get_idx_left_child(),\n current_node.get_idx_right_child(),\n )\n\n new_nodes = []\n for idx in range(2):\n idx_data_point = new_idx_data_points[idx]\n node_value = draw_leaf_value(\n sum_trees[idx_data_point],\n X[idx_data_point, selected_predictor],\n mean,\n m,\n normal,\n mu_std,\n )\n\n new_node = LeafNode(\n index=current_node_children[idx],\n value=node_value,\n idx_data_points=idx_data_point,\n )\n new_nodes.append(new_node)\n\n new_split_node = SplitNode(\n index=index_leaf_node,\n idx_split_variable=selected_predictor,\n split_value=split_value,\n )\n\n # update tree nodes and indexes\n tree.delete_node(index_leaf_node)\n tree.set_node(index_leaf_node, new_split_node)\n tree.set_node(new_nodes[0].index, new_nodes[0])\n tree.set_node(new_nodes[1].index, new_nodes[1])\n\n return index_selected_predictor\n\n\ndef sample_leaf_values(tree, sum_trees, X, mean, m, normal, mu_std):\n\n for idx in tree.idx_leaf_nodes:\n if idx > 0:\n leaf = tree[idx]\n idx_data_points = leaf.idx_data_points\n parent_node = tree[leaf.get_idx_parent_node()]\n selected_predictor = parent_node.idx_split_variable\n node_value = 
draw_leaf_value(\n sum_trees[idx_data_points],\n X[idx_data_points, selected_predictor],\n mean,\n m,\n normal,\n mu_std,\n )\n leaf.value = node_value\n\n\ndef get_new_idx_data_points(split_value, idx_data_points, selected_predictor, X):\n\n left_idx = X[idx_data_points, selected_predictor] <= split_value\n left_node_idx_data_points = idx_data_points[left_idx]\n right_node_idx_data_points = idx_data_points[~left_idx]\n\n return left_node_idx_data_points, right_node_idx_data_points\n\n\ndef draw_leaf_value(Y_mu_pred, X_mu, mean, m, normal, mu_std):\n \"\"\"Draw Gaussian distributed leaf values\"\"\"\n if Y_mu_pred.size == 0:\n return 0\n else:\n norm = normal.random() * mu_std\n if Y_mu_pred.size == 1:\n mu_mean = Y_mu_pred.item() / m\n else:\n mu_mean = mean(Y_mu_pred) / m\n\n draw = norm + mu_mean\n return draw\n\n\ndef fast_mean():\n \"\"\"If available use Numba to speed up the computation of the mean.\"\"\"\n try:\n from numba import jit\n except ImportError:\n return np.mean\n\n @jit\n def mean(a):\n count = a.shape[0]\n suma = 0\n for i in range(count):\n suma += a[i]\n return suma / count\n\n return mean\n\n\ndef discrete_uniform_sampler(upper_value):\n \"\"\"Draw from the uniform distribution with bounds [0, upper_value).\n\n This is the same and np.random.randit(upper_value) but faster.\n \"\"\"\n return int(np.random.random() * upper_value)\n\n\nclass NormalSampler:\n \"\"\"\n Cache samples from a standard normal distribution\n \"\"\"\n\n def __init__(self):\n self.size = 1000\n self.cache = []\n\n def random(self):\n if not self.cache:\n self.update()\n return self.cache.pop()\n\n def update(self):\n self.cache = np.random.normal(loc=0.0, scale=1, size=self.size).tolist()\n\n\ndef logp(point, out_vars, vars, shared):\n \"\"\"Compile Aesara function of the model and the input and output variables.\n\n Parameters\n ----------\n out_vars: List\n containing :class:`pymc.Distribution` for the output variables\n vars: List\n containing :class:`pymc.Distribution` for the input variables\n shared: List\n containing :class:`aesara.tensor.Tensor` for depended shared data\n \"\"\"\n out_list, inarray0 = join_nonshared_inputs(point, out_vars, vars, shared)\n f = aesara_function([inarray0], out_list[0])\n f.trust_input = True\n return f\n" ]
[ [ "numpy.log", "numpy.random.random", "numpy.unique", "numpy.isnan", "numpy.random.choice", "numpy.arange", "numpy.ones", "numpy.all", "numpy.full_like", "numpy.random.normal", "numpy.array", "numpy.exp", "numpy.zeros" ] ]
uwmisl/poretitioner
[ "0ff9f67a3b25fdcb460b11c970b2ed366da07da7" ]
[ "src/poretitioner/hdf5/hdf5.py" ]
[ "\"\"\"\n===================\nhdf5.py\n===================\n\nThe Hierarchical Data Format version 5 (HDF5) defines a\na file format for storing and organzing massive amounts of\nhiearchical data.\n\nThis module attempts to encapsulate the rich features of HDF5\nalongside your favorite python3.7+ constructs\n(e.g dataclasses)\n\n[1] - https://en.wikipedia.org/wiki/Hierarchical_Data_Format\n\n\"\"\"\nfrom __future__ import annotations\n\nfrom abc import ABC, abstractmethod\nfrom logging import Logger, getLogger\nfrom typing import (\n Any,\n Dict,\n Generic,\n Iterable,\n Mapping,\n Optional,\n Protocol,\n Type,\n TypedDict,\n TypeVar,\n Union,\n)\n\nimport h5py\nimport numpy as np\nfrom h5py import File as Fast5File\nfrom h5py._hl.base import Empty\n\nfrom .exceptions import HDF5_GroupSerializationException, HDF5_SerializationException\n\n# Generics\nT = TypeVar(\"T\")\nS = TypeVar(\"S\")\n\n\nclass NumpyArrayLike(np.ndarray):\n \"\"\"This class represents a numpy array with extra attributes and functionality.\n\n Subclasses of NumpyArrayLike can be treated exactly like numpy arrays computationally\n\n By default, we serialize class attributes alone.\n\n For more fine-grained control over what information is stored during serialization/pickling,\n implementers should override the `serialize_info` `deserialize_from_info`\n\n \"\"\"\n\n def __new__(cls, data: Union[np.ndarray, NumpyArrayLike]):\n obj = np.copy(data).view(\n cls\n ) # Optimization: Consider not making a copy, this is more error prone though: np.asarray(data).view(cls)\n return obj\n\n def serialize_info(self, **kwargs) -> Dict:\n \"\"\"Creates a dictionary describing the signal and its attributes.\n\n Returns\n -------\n Dict\n A serialized set of attributes.\n \"\"\"\n # When serializing, copy over any existing attributes already in self, and\n # any that don't exist in self get taken from kwargs.\n existing_info = self.__dict__\n info = {key: getattr(self, key, kwargs.get(key)) for key in kwargs.keys()}\n return {**info, **existing_info}\n\n def deserialize_from_info(self, info: Dict):\n \"\"\"Sets attributes on an object from a serialized dict.\n\n Parameters\n ----------\n info : Dict\n Dictionary of attributes to set after deserialization.\n \"\"\"\n for name, value in info.items():\n setattr(self, name, value)\n\n # Multiprocessing and Dask require pickling (i.e. 
serializing) their inputs.\n # By default, this will drop all our custom class data.\n # https://stackoverflow.com/questions/26598109/preserve-custom-attributes-when-pickling-subclass-of-numpy-array\n def __reduce__(self):\n reconstruct, arguments, object_state = super().__reduce__()\n # Create a custom state to pass to __setstate__ when this object is deserialized.\n info = self.serialize_info()\n new_state = object_state + (info,)\n # Return a tuple that replaces the parent's __setstate__ tuple with our own\n return (reconstruct, arguments, new_state)\n\n def __setstate__(self, state):\n info = state[-1]\n self.deserialize_from_info(info)\n # Call the parent's __setstate__ with the other tuple elements.\n super().__setstate__(state[0:-1])\n\n\n####################################\n### Fast5 Helpers ###\n####################################\n\n\n# NOTE: Sadly, many of these can't be used until \"RuntimeError: Unable to create attribute (object header message is too large)\" https://github.com/h5py/h5py/issues/1855\n# The goal is to provide high-level data-class representations of HDF5 objects, so users can just describe their structures as python dataclasses instead of finagling with h5py. \n# Unfortunately, there's currently a bug \"RuntimeError: Unable to create attribute (object header message is too large)\" that can only be fixed by delving into the low-level API ()\n\ndef hdf5_dtype(object: Any) -> Optional[np.dtype]:\n \"\"\"Returns the proper h5py dtype for an object, if one is necessary.\n Otherwise returns None.\n\n For us, this is mostly needed in the case of storing numpy data or string data,\n\n since numpy data has a specific dtype, and strings have a variable length and an assumed encoding (e.g. \"utf-8\")\n\n For more info on how h5py handles strings, see [1, 2].\n\n [1] - https://docs.h5py.org/en/stable/strings.html#strings\n [2] - https://docs.h5py.org/en/stable/special.html?highlight=string_dtype#variable-length-strings\n\n Parameters\n ----------\n object : Any\n Some object you want the dtype for if it's necessary, but are fine not having one\n if it's not.\n\n Returns\n -------\n Optional[np.dtype]\n The numpy datatype for an object if it has one, or if it's a string, and None otherwise.\n \"\"\"\n if isinstance(object, str):\n return h5py.string_dtype(length=len(object))\n elif hasattr(object, \"dtype\"):\n # Is this already a numpy-like object with a dtype? If so, just use that.\n return object.dtype\n return None # For most cases, h5py can determine the dtype from the data itself.\n\n\ndef get_class_for_name(name: str, module_name: str = __name__) -> Type:\n \"\"\"Gets a class from a module based on its name.\n Tread carefully with this. 
Personally I feel like it's only safe to use\n with dataclasses with known interfaces.\n\n Parameters\n ----------\n name : str\n Name of the class we're trying to get the class object for.\n\n module_name: str, optional\n Which module to get a class from, by defualt __name__.\n\n Returns\n -------\n Type\n [description]\n \"\"\"\n import importlib\n\n this_module = importlib.import_module(module_name)\n this_class = getattr(this_module, name)\n return this_class\n\n\nclass HasFast5(Protocol):\n f5: Union[Fast5File, HDF5_Group]\n\n\n###########################################################\n#\n# HDF5 Base Types\n#\n###########################################################\n\n# Note: We never create or instantiate AttributeManagers directly, instead we borrow its interface.\n# 3 Laws to keep in mind with Attributes:\n#\n#\n# 1) They may be created from any scalar or NumPy array\n#\n# 2) Each attribute should be small (generally < 64k)\n#\n# 3) There is no partial I/O (i.e. slicing); the entire attribute must be read.\n#\n# https://docs.h5py.org/en/stable/high/attr.html\n\n# Attrs are really just mappings from names to data/objects.\nHDF5_Attribute_Objects = Mapping[str, Optional[Any]]\n\n\nclass IsAttr(Protocol):\n \"\"\"A special protocol for objects that are just meant to be set data attributes, and don't\n need any special HDF5 consdiration (e.g. a class that just needs to store a few numbers).\n \"\"\"\n\n def as_attr(self) -> np.dtype:\n ...\n\n def from_attr(self, attr) -> IsAttr:\n ...\n\n\nclass HDF5IsAttr(IsAttr):\n def as_attr(self) -> np.dtype:\n ...\n\n def from_attr(self, attr) -> IsAttr:\n ...\n\n\nclass HasAttrs(Protocol):\n def get_attrs(self) -> HDF5_Attributes:\n ...\n\n def create_attr(self, name: str, value: Optional[Any], log: Optional[Logger] = None):\n \"\"\"Adds an attribute to the current object.\n\n Any existing attribute with this name will be overwritten.\n\n Parameters\n ----------\n name : str\n Name of the attribute.\n value : Optional[Any]\n Value of the attribute.\n \"\"\"\n ...\n\n def create_attrs(self, attrs: HDF5_Attributes, log: Optional[Logger] = None):\n \"\"\"Adds multiple attributes to the current object.\n\n Any existing attribute with the names in attrs will be overwritten.\n\n Parameters\n ----------\n attrs :\n Name of the attribute.\n value : Optional[Any]\n Value of the attribute.\n \"\"\"\n\n def object_from_attr(self, name: str, log: Optional[Logger] = None) -> Optional[Any]:\n \"\"\"Creates an object from an attribute (if one could be made).\n # TODO: Plugin Register via Plugins\n\n Parameters\n ----------\n name : str\n Name of the attribute.\n\n Returns\n ----------\n An instantiated object represented by this attr, or None if one couldn't be found.\n \"\"\"\n ...\n\n def objects_from_attrs(\n self, attrs: HDF5_Attributes, log: Optional[Logger] = None\n ) -> HDF5_Attribute_Objects:\n \"\"\"Creates mapping of attribute names to their serialzed objects (if one could be made).\n\n Parameters\n ----------\n name : str\n Name of the attribute.\n\n Returns\n ----------\n An instantiated object represented by this attr, or None if one couldn't be found.\n \"\"\"\n ...\n\n\nclass HDF5_AttributeHaving(HasAttrs):\n def __init__(self, has_attrs: Optional[HasAttrs]):\n super().__init__()\n self.attrs = self.get_attrs() if has_attrs is None else has_attrs.get_attrs()\n\n def get_attrs(self) -> HDF5_Attributes:\n return self.attrs\n\n def create_attr(self, name: str, value: Optional[Any], log: Optional[Logger] = None):\n \"\"\"Adds an attribute to 
the current object.\n\n WARNING: Any existing attribute will be overwritten!\n\n This method will coerce value to a special 'Empty' type used by HDF5 if the value\n provided is zero-length or None. For more on Attributes and Empty types, see [1, 2]\n\n [1] - https://docs.h5py.org/en/stable/high/attr.html#attributes\n [2] - https://docs.h5py.org/en/stable/high/dataset.html?highlight=Empty#creating-and-reading-empty-or-null-datasets-and-attributes\n\n Parameters\n ----------\n name : str\n Name of the attribute.\n value : Optional[Any]\n Value of the attribute. This method will coerce this value\n to a special Empty object if it's zero-length or None [2].\n \"\"\"\n\n if value is None or value == \"\" or (hasattr(value, \"__len__\") and len(value) < 1):\n empty = h5py.Empty(dtype=np.uint8)\n self.get_attrs().create(name, empty)\n elif isinstance(value, HDF5IsAttr):\n attr_value = value.as_attr()\n self.get_attrs().create(name, value, dtype=hdf5_dtype(attr_value))\n else:\n self.get_attrs().create(name, value, dtype=hdf5_dtype(value))\n\n def create_attrs(self, attrs: HDF5_Attributes, log: Optional[Logger] = None):\n for attr_name, attr_value in attrs.items():\n self.create_attr(attr_name, attr_value, log=log)\n\n def object_from_attr(self, name: str, log: Optional[Logger] = None) -> Optional[Any]:\n log = log if log is not None else getLogger()\n try:\n attr_value = self.get_attrs()[name]\n except AttributeError:\n log.warning(\n f\"Could not find an attribute with the name '{name}' on object {self!r}. Returning None\"\n )\n return None\n\n if attr_value.shape is None:\n \"\"\"\n From the Docs:\n\n An empty dataset has shape defined as None,\n which is the best way of determining whether a dataset is empty or not.\n An empty dataset can be “read” in a similar way to scalar datasets.\n\n [1] - https://docs.h5py.org/en/stable/high/dataset.html?highlight=Empty#creating-and-reading-empty-or-null-datasets-and-attributes\n \"\"\"\n return \"\"\n return bytes.decode(bytes(attr_value), encoding=\"utf-8\")\n\n def objects_from_attrs(self, log: Optional[Logger] = None) -> HDF5_Attribute_Objects:\n objects: HDF5_Attribute_Objects = {\n attr_name: self.object_from_attr(attr_name, log=log)\n for attr_name in self.get_attrs().keys()\n }\n return objects\n\n def copy_attr(self, name: str, source: HDF5_AttributeHaving):\n \"\"\"Copy a single attribute from a source.\n This will overwrite any attribute of this name, if one exists.\n\n Parameters\n ----------\n name : str\n Which attribute to copy.\n from : HDF5_AttributeHaving\n Which attribute-haver to copy from.\n \"\"\"\n self.create_attr(name, source.get_attrs()[name])\n\n def copy_all_attrs(self, source: HDF5_AttributeHaving):\n \"\"\"Copy a all attributes from a source.\n This will overwrite any attributes sharing the same names, if any exists.\n\n Parameters\n ----------\n from : HDF5_AttributeHaving\n Which attribute-haver to copy all attributes from.\n \"\"\"\n for name in source.get_attrs().keys():\n self.copy_attr(name, source)\n\n\nclass HDF5_ParentHaving:\n @property\n def parent(self) -> HDF5_Group:\n return HDF5_Group(self.parent)\n\n\nclass HDF5_Dataset(h5py.Dataset, NumpyArrayLike, HDF5_AttributeHaving, HDF5_ParentHaving):\n\n def __new__(cls, dataset: NumpyArrayLike) -> HDF5_Dataset:\n if isinstance(dataset, HDF5_Dataset):\n return dataset\n \n self = dataset\n \n\n def __init__(self, dataset: h5py.Dataset):\n self._dataset = dataset\n\n def __getattr__(self, attrib: str):\n return getattr(self._dataset, attrib)\n\n\nclass 
HDF5_Group(h5py.Group, HDF5_AttributeHaving, HDF5_ParentHaving):\n def __new__(cls, group: Optional[h5py.Group]) -> HDF5_Group:\n if isinstance(group, HDF5_Group):\n return group\n hdf5_group = super().__new__(cls, group)\n hdf5_group._group = group\n return hdf5_group\n\n def __init__(self, group: Optional[h5py.Group]):\n if isinstance(group, HDF5_Group):\n return\n super().__init__(group.id)\n self._group = group\n\n @property\n def parent(self) -> HDF5_Group:\n return HDF5_Group(self._group.parent)\n\n def require_group(self, name: str):\n return HDF5_Group(self._group.require_group(name))\n\n def require_dataset(self, name, data, dtype, shape, **kwds):\n return HDF5_Dataset(self._group.require_dataset(name, shape, data=data, dtype=dtype,**kwds))\n\n def __getattr__(self, attrib: str):\n return getattr(self._group, attrib)\n\n\nclass HDF5_Attributes(h5py.AttributeManager, HDF5_ParentHaving):\n def __init__(self, attrs: h5py.AttributeManager):\n self.attrs = attrs\n\n def __getattr__(self, attrib: str):\n return getattr(self.attrs, attrib)\n\n\nHDF5_Type = Union[HDF5_Dataset, HDF5_Group, HDF5_Attributes]\n\n\nclass HDF5_Serializing(ABC):\n \"\"\"Any object that can be HDFSserialized.\n\n Don't instantiate this directly, rather subclass.\n \"\"\"\n\n @classmethod\n @abstractmethod\n def from_a(cls, a: HDF5_Type, log: Optional[Logger] = None) -> HDF5_Serializing:\n \"\"\"Creates an instance of this class (from) (a) HDF5_Type.\n\n Parameters\n ----------\n a : HDF5_Types\n Instance of an HDF5Type (e.g. a h5py.Group).\n\n log : Logger, optional\n Logger to use for information/warnings/debug\n\n Returns\n -------\n HDF5_Serializing\n An instance of this class with data derived from (a) HDF5_Type.\n\n Raises\n ------\n NotImplementedError\n This method wasn't implemented, but needs to be.\n \"\"\"\n raise NotImplementedError(\n f\"{cls!s} is missing an implementation for {HDF5_Serializing.from_a.__name__}\"\n )\n\n @abstractmethod\n def as_a(\n self, a: HDF5_Type, parent: Optional[HDF5_Group] = None, log: Optional[Logger] = None\n ) -> HDF5_Type:\n \"\"\"Returns this object, formatted (as) (a) given HDF5 type (thus the name).\n\n Parameters\n ----------\n a : HDF5_Types\n One of the HDF5 types we understand.\n\n parent : Optional[HDF5_Group]\n The parent group to which this object should be added/updated.\n If parent is None, we return the HDFS_Type proxy in isolation (e.g)\n not attached to the parent if it doesn't already have one. 
None by default.\n\n log : Logger, optional\n Logger to use for information/warnings/debug\n\n Returns\n -------\n HDF5_Type\n This object serialized to a given HDF5 type.\n\n Raises\n ------\n NotImplementedError\n This method wasn't implemented, but needs to be.\n \"\"\"\n raise NotImplementedError(\n f\"{self!s} is missing an implementation for {HDF5_Serializing.as_a.__name__}!\"\n )\n\n @abstractmethod\n def update(self, log: Optional[Logger] = None):\n \"\"\"Makes sure any changes have been reflected in the underlying object.\n\n Parameters\n ----------\n log : Optional[Logger], optional\n Logger to use, by default None\n\n Raises\n ------\n NotImplementedError\n This method wasn't implemented.\n \"\"\"\n raise NotImplementedError(\n f\"{self!s} is missing an implementation for {HDF5_Serializing.update.__name__}!\"\n )\n\n\n###########################################################\n#\n# HDF5 Groups\n#\n###########################################################\n\n\nclass HDF5_GroupSerializing(HDF5_Serializing, HDF5_AttributeHaving):\n \"\"\"Objects adhering to the `HDF5_GroupSerializable` can be written to and\n read directly from hd5 Groups.\n \"\"\"\n\n def name(self) -> str:\n \"\"\"Group name that this object will be stored under.\n i.e. If this method returns \"patrice_lmb\", then a subsequent call to\n\n `self.as_group(Group(\"/Foo/bar/\"))`\n\n Will return a group at /Foo/bar/patrice_lmb\n\n Be double-sure to override this if you want it to be anything other than the class name.\n\n Returns\n -------\n str\n Name to use in the Fast5 file.\n \"\"\"\n return self.__class__.__name__\n\n def as_group(self, parent_group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_Group:\n \"\"\"Stores and Returns this object as an HDF5 Group, rooted at the group passed in.\n This should be used to directly set the contents of an Hdf5 group.\n This method should also create the group named 'name' in the parent_group, if it doesn't already exist.\n\n class Baz(HDF5_GroupSerializable):\n def name(self):\n return \"boop\"\n # ...Implementation\n\n my_hdf5_file = h5py.File(\"/path/to/file\")\n foo_group = filts.require_group(\"/foo\")\n\n my_serial = Baz()\n baz_group = foo_group.require_group(my_serial.name()) # Make space in the file for Baz at f'/foo/{my_serial.name()}'\n my_serialized_group = my_serial.as_group(foo_group) # Sets \"/foo/boop\" group to the serialized group\n\n my_serialized_group # /boop group, rooted at /foo/\n\n Parameters\n ----------\n parent_group : h5py.Group\n Which group to store this group under. This doesn't necessarily have to be the root group of the file.\n\n Returns\n -------\n h5py.Group\n Group that stores a serialization of this instance.\n \"\"\"\n ...\n\n @classmethod\n def from_group(cls, group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_GroupSerializable:\n \"\"\"Serializes this object FROM an HDF5 Group.\n\n class Baz(HDF5_GroupSerializable):\n # ...Implementation\n\n my_hdf5_file = h5py.File(\"/path/to/file\")\n baz_serialized_group = filts.require_group(\"/baz\")\n\n baz = Baz.from_group(baz_serialized_group) # I now have an instance of Baz.\n\n Parameters\n ----------\n group : h5py.Group\n HDF5 Group that can be serialized into this instance.\n\n Returns\n -------\n HDF5_GroupSerializable\n Instance of an adherent to this protocol.\n \"\"\"\n ...\n\n\nclass HDF5_GroupSerializable(HDF5_GroupSerializing):\n \"\"\"Base class for objects that can be written to and\n read directly from hd5 Groups.\n\n Not meant to be instantiated directly. 
Instead, subclass and make sure your\n `as_group` implementation uses the group created by `super().as_group(...)`.\n\n NOTE: Make sure to call super().as_group(...)\n \"\"\"\n\n def name(self) -> str:\n \"\"\"Group name that this object will be stored under.\n i.e. If this method returns \"patrice_lmb\", then a subsequent call to\n\n `self.as_group(Group(\"/Foo/bar/\"))`\n\n Will return a group at /Foo/bar/patrice_lmb\n\n Override this if you want it to be anything other than the class name.\n\n Returns\n -------\n str\n Name to use in the Fast5 file.\n \"\"\"\n return self.__class__.__name__\n\n def as_group(self, parent_group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_Group:\n new_group = parent_group.require_group(self.name())\n parent_group[self.name()] = self\n # Note: This does nothing but register a group with the name 'name' in the parent group.\n # Implementers must now write their serialized instance to this group.\n return self\n\n @classmethod\n @abstractmethod\n def from_group(cls, group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_GroupSerializable:\n raise NotImplementedError(\n f\"from_group not implemented for {cls.__name__}. Make sure you write a method that returns a serialzied version of this object.\"\n )\n\n def require_group_from_group(\n self, parent_group: HDF5_Group, log: Optional[Logger] = None\n ) -> HDF5_GroupSerializable:\n # child_group = parent_group.require_group(self.name())\n child_group = self.as_group(parent_group, log=log)\n\n @classmethod\n def from_a(cls, a: HDF5_Group, log: Logger) -> HDF5_Serializing:\n return cls.from_group(parent_group=a, log=log)\n\n def as_a(self, a: HDF5_Type, log: Logger) -> HDF5_Type:\n return self.as_group(parent_group=a, log=log)\n\n def update(self, log: Optional[Logger] = None):\n self.as_a(self._group.parent, log=log)\n\n\nclass HDF5_GroupSerialiableDict(Dict[T, S], HDF5_GroupSerializable):\n def as_group(self, parent_group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_Group:\n log = log if log is not None else getLogger()\n\n my_group = parent_group.require_group(self.name())\n \"\"\"Returns this object as an HDF5 Group.\"\"\"\n for field_name, field_value in self.items():\n if isinstance(field_value, HDF5_GroupSerializable):\n # This value is actually its own group.\n # So we create a new group rooted at our dataclass's group\n # And assign it the value of whatever the group of the value is.\n field_value.as_a(my_group, log=log)\n # my_group.require_group(field_name)\n # my_group[field_name] = field_value\n elif isinstance(field_value, HDF5_DatasetSerializable):\n field_value.as_a(parent_group, log)\n else:\n my_group.create_attr(field_name, field_value)\n return my_group\n\n @classmethod\n def from_group(\n cls, group: HDF5_Group, log: Optional[Logger] = None\n ) -> HDF5_GroupSerialableDataclass:\n log = log if log is not None else getLogger()\n if not log:\n log = getLogger()\n my_instance = cls.__new__(cls)\n\n # First, copy over attrs:\n for name, value in group.attrs.items():\n object.__setattr__(my_instance, name, value)\n\n # Then, copy over any datasets or groups.\n for name, value in group.items():\n if isinstance(value, h5py.Dataset):\n # Assuming we're storing a numpy array as this dataset\n buffer = np.empty(value.shape, dtype=value.dtype)\n # Copies the values into our buffer\n value.read_direct(buffer)\n object.__setattr__(my_instance, name, NumpyArrayLike(buffer))\n elif isinstance(value, h5py.Group):\n # If it's a group, we have to do a little more work\n # 1) Find the class 
described by the group\n # 1.1) Verify that we actually know a class by that name. Raise an exception if we don't.\n # 1.2) Verify that that class has a method to create an instance group a group.\n # 2) Create a new class instance from that group\n # 3) Set this object's 'name' field to the object we just created.\n try:\n ThisClass = get_class_for_name(name)\n except AttributeError as e:\n serial_exception = HDF5_GroupSerializationException(\n f\"We couldn't serialize group named {name} (group is attached in the exception.\",\n e,\n group=value,\n )\n log.exception(serial_exception.msg, serial_exception)\n raise serial_exception\n\n # assert get_class_for_name(name) and isinstance(), f\"No class found that corresponds to group {name}! Make sure there's a corresponding dataclass named {name} in this module scope!\"\n\n try:\n this_instance = ThisClass.from_group(value, log=log)\n except AttributeError as e:\n serial_exception = HDF5_GroupSerializationException(\n f\"We couldn't serialize group named {name!s} from class {ThisClass!s}. It appears {ThisClass!s} doesn't implement the {HDF5_GroupSerializing.__name__} protocol. Group is attached in the exception.\",\n e,\n group=value,\n )\n log.exception(serial_exception.msg, serial_exception)\n raise serial_exception\n\n object.__setattr__(my_instance, name, this_instance)\n\n return my_instance\n\n\nclass HDF5_GroupSerialableDataclass(HDF5_GroupSerializable):\n def as_group(self, parent_group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_Group:\n log = log if log is not None else getLogger()\n\n \"\"\"Returns this object as an HDF5 Group.\"\"\"\n my_group: HDF5_Group = parent_group.require_group(self.name())\n\n for field_name, field_value in vars(self).items():\n if isinstance(field_value, HDF5_GroupSerializable):\n # This value is actually its own group.\n # So we create a new group rooted at our dataclass's group\n # And assign it the value of whatever the group of the value is.\n # new_group = my_group.require_group(field_name)\n field_value.as_group(my_group)\n elif isinstance(field_value, HDF5_DatasetSerializable):\n field_value.as_a(my_group, log)\n else:\n my_group.create_attr(field_name, field_value)\n return my_group\n\n @classmethod\n def from_group(\n cls, group: HDF5_Group, log: Optional[Logger] = None\n ) -> HDF5_GroupSerialableDataclass:\n log = log if log is not None else getLogger()\n if not log:\n log = getLogger()\n my_instance = cls.__new__(cls)\n\n # First, copy over attrs:\n my_instance.create_attrs(group.get_attrs())\n\n # Then, copy over any datasets or groups.\n for name, value in group.items():\n if isinstance(value, h5py.Dataset):\n # Assuming we're storing a numpy array as this dataset\n buffer = np.empty(value.shape, dtype=value.dtype)\n # Copies the values into our buffer\n value.read_direct(buffer)\n object.__setattr__(my_instance, name, buffer)\n elif isinstance(value, h5py.Group):\n # If it's a group, we have to do a little more work\n # 1) Find the class described by the group\n # 1.1) Verify that we actually know a class by that name. 
Raise an exception if we don't.\n # 1.2) Verify that that class has a method to create an instance group a group.\n # 2) Create a new class instance from that group\n # 3) Set this object's 'name' field to the object we just created.\n try:\n ThisClass = get_class_for_name(name)\n except AttributeError as e:\n serial_exception = HDF5_GroupSerializationException(\n f\"We couldn't serialize group named {name} (group is attached in the exception.\",\n e,\n group=value,\n )\n log.exception(serial_exception.msg, serial_exception)\n raise serial_exception\n\n # assert get_class_for_name(name) and isinstance(), f\"No class found that corresponds to group {name}! Make sure there's a corresponding dataclass named {name} in this module scope!\"\n\n try:\n this_instance = ThisClass.from_group(value, log=log)\n except AttributeError as e:\n serial_exception = HDF5_GroupSerializationException(\n f\"We couldn't serialize group named {name!s} from class {ThisClass!s}. It appears {ThisClass!s} doesn't implement the {HDF5_GroupSerializing.__name__} protocol. Group is attached in the exception.\",\n e,\n group=value,\n )\n log.exception(serial_exception.msg, serial_exception)\n raise serial_exception\n\n object.__setattr__(my_instance, name, this_instance)\n\n return my_instance\n\n\n###########################################################\n#\n# HDF5 Datasets\n#\n###########################################################\n\n\nclass HDF5_DatasetSerializing(HDF5_Dataset, HDF5_Serializing):\n \"\"\"Objects adhering to the `HDF5_GroupSerializable` can be written to and\n read directly from hd5 Groups.\n \"\"\"\n\n def name(self) -> str:\n \"\"\"Group name that this object will be stored under.\n i.e. If this method returns \"patrice_lmb\", then a subsequent call to\n\n `self.as_group(Group(\"/Foo/bar/\"))`\n\n Will return a group at /Foo/bar/patrice_lmb\n\n Be double-sure to override this if you want it to be anything other than the class name.\n\n Returns\n -------\n str\n Name to use in the Fast5 file.\n \"\"\"\n return self.__class__.__name__\n\n\nclass HDF5_DatasetSerializable(HDF5_DatasetSerializing):\n @classmethod\n def from_a(\n cls, a: Union[HDF5_Dataset, HDF5_Group], log: Optional[Logger] = None\n ) -> HDF5_DatasetSerializable:\n # Assume A is the parent group\n # Assuming we're storing a numpy array as this dataset\n\n # Copies the values into our buffer\n try:\n buffer = np.empty(a.shape, dtype=a.dtype)\n a.read_direct(buffer)\n data = NumpyArrayLike(buffer)\n\n return HDF5_DatasetSerializable(cls.__new__(cls, buffer))\n\n except AttributeError as e:\n log.error(\"Could not convert to HDF5_DatasetSerializable from: {a!r}\")\n raise e\n # serialized = cls.__new__(cls)\n return\n\n def as_a(self, a: HDF5_Group, log: Optional[Logger] = None) -> HDF5_Dataset:\n dataset = HDF5_Dataset(a.require_dataset(self.name(), shape=self.shape, dtype=self.dtype))\n return dataset\n\n def update(self, log: Optional[Logger] = None):\n self.as_a(self._group.parent, log=log)\n\n\nclass HDF5_DatasetSerialableDataclass(HDF5_DatasetSerializable):\n def as_dataset(self, parent_group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_Dataset:\n log = log if log is not None else getLogger()\n\n \"\"\"Returns this object as an HDF5 Group.\"\"\"\n dataset: HDF5_Dataset = super().as_a(parent_group)\n\n dataset.create_attrs(vars(self))\n # for field_name, field_value in vars(self).items():\n # dataset.create_attr(field_name, field_value)\n return dataset\n\n @classmethod\n def from_dataset(\n cls, dataset: HDF5_Dataset, log: 
Optional[Logger] = None\n    ) -> HDF5_DatasetSerialableDataclass:\n        log = log if log is not None else getLogger()\n        my_instance = cls.__new__(cls)\n\n        # First, copy over attrs (objects_from_attrs returns a name -> object mapping):\n        for name, value in dataset.objects_from_attrs(log=log).items():\n            object.__setattr__(my_instance, name, value)\n\n        return my_instance\n" ]
[ [ "numpy.copy", "numpy.empty" ] ]
elerac/codepattern
[ "8ee7d04870b1d9b64045a15c488792b0f0f9aef3" ]
[ "examples/capture_x.py" ]
[ "\"\"\"\nCapture projection pattern and decode x-coorde.\n\"\"\"\nimport cv2\nimport numpy as np\nimport structuredlight as sl\n\ndef imshowAndCapture(cap, img_pattern, delay=250):\n cv2.imshow(\"\", img_pattern)\n cv2.waitKey(delay)\n ret, img_frame = cap.read()\n img_gray = cv2.cvtColor(img_frame, cv2.COLOR_BGR2GRAY)\n return img_gray\n\ndef main():\n width = 640\n height = 480\n\n cap = cv2.VideoCapture(1) # External web camera\n gray = sl.Gray()\n \n # Generate and Decode x-coord\n # Generate\n imlist_posi_pat = gray.generate((width, height))\n imlist_nega_pat = sl.invert(imlist_posi_pat)\n\n # Capture\n imlist_posi_cap = [ imshowAndCapture(cap, img) for img in imlist_posi_pat]\n imlist_nega_cap = [ imshowAndCapture(cap, img) for img in imlist_nega_pat]\n \n # Decode\n img_index = gray.decode(imlist_posi_cap, imlist_nega_cap)\n\n # Visualize decode result\n img_correspondence = np.clip(img_index/width*255.0, 0, 255).astype(np.uint8)\n cv2.imshow(\"corresponnence map\", img_correspondence)\n cv2.waitKey(0)\n cv2.imwrite(\"correspondence.png\", img_correspondence)\n cv2.destroyAllWindows()\n cap.release()\n\nif __name__==\"__main__\":\n main()\n" ]
[ [ "numpy.clip" ] ]
hdkai/Plasma
[ "1942d7fe5f6b41c9a16c8e2d1b6c7cf263307c39" ]
[ "torchplasma/filters/gaussian.py" ]
[ "# \n# Plasma\n# Copyright (c) 2021 Yusuf Olokoba.\n#\n\nfrom torch import arange, exp, tensor, Tensor\nfrom torch.nn.functional import conv2d, conv3d, pad\nfrom typing import Tuple\n\ndef gaussian_kernel (kernel_size: int, sigma: float = -1.) -> Tensor:\n \"\"\"\n Normalized 1D Gaussian kernel.\n This operation is NOT differentiable w.r.t its arguments.\n\n Parameters:\n kernel_size (int): Kernel size, should be odd.\n sigma (float): Gaussian standard deviation. If less than 1, it is automatically computed from the kernel size.\n\n Returns:\n Tensor: Normalized Gaussian kernel with shape (K,).\n \"\"\"\n sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8 if sigma < 0 else sigma # From OpenCV ::getGaussianKernel\n x = arange(kernel_size).float() - kernel_size // 2\n x = x + 0.5 if kernel_size % 2 == 0 else x\n kernel = exp((-x.pow(2.) / (2. * sigma ** 2)))\n return kernel / kernel.sum()\n\ndef gaussian_filter (input: Tensor, kernel_size: Tuple[int, int]) -> Tensor:\n \"\"\"\n Apply a Gaussian filter to an image.\n\n Parameters:\n input (Tensor): Input image with shape (N,C,H,W).\n kernel_size (tuple): Kernel size in each dimension (Ky,Kx).\n\n Returns:\n Tensor: Filtered image with shape (N,C,H,W).\n \"\"\"\n _,channels,_,_ = input.shape\n kernel_size_y, kernel_size_x = kernel_size\n # Compute kernels\n kernel_x = gaussian_kernel(kernel_size_x).to(input.device)\n kernel_y = gaussian_kernel(kernel_size_y).to(input.device)\n # Reshape\n kernel_x = kernel_x.expand(channels, 1, 1, -1)\n kernel_y = kernel_y.expand(channels, 1, 1, -1).permute(0, 1, 3, 2).contiguous()\n # Seperable convolution\n result = conv2d(input, kernel_x, padding=(0, kernel_size_x // 2), groups=channels)\n result = conv2d(result, kernel_y, padding=(kernel_size_y // 2, 0), groups=channels)\n return result\n\ndef gaussian_filter_3d (input: Tensor, kernel_size: Tuple[int, int, int]) -> Tensor:\n \"\"\"\n Apply a Gaussian filter to a volume.\n\n Parameters:\n input (Tensor): Input volume with shape (N,C,D,H,W).\n kernel_size (tuple): Kernel size in each dimension (Kz,Ky,Kx).\n\n Returns:\n Tensor: Filtered volume with shape (N,C,D,H,W).\n \"\"\"\n _,channels,_,_,_ = input.shape\n kernel_size_z, kernel_size_y, kernel_size_x = kernel_size\n # Compute kernels\n kernel_x = gaussian_kernel(kernel_size_x).to(input.device)\n kernel_y = gaussian_kernel(kernel_size_y).to(input.device)\n kernel_z = gaussian_kernel(kernel_size_z).to(input.device)\n # Reshape\n kernel_x = kernel_x.expand(channels, 1, 1, 1, -1)\n kernel_y = kernel_y.expand(channels, 1, 1, 1, -1).permute(0, 1, 2, 4, 3).contiguous()\n kernel_z = kernel_z.expand(channels, 1, 1, 1, -1).permute(0, 1, 4, 2, 3).contiguous()\n # Seperable convolution\n result = conv3d(input, kernel_x, padding=(0, 0, kernel_size_x // 2), groups=channels)\n result = conv3d(result, kernel_y, padding=(0, kernel_size_y // 2, 0), groups=channels)\n result = conv3d(result, kernel_z, padding=(kernel_size_z // 2, 0, 0), groups=channels)\n return result" ]
[ [ "torch.nn.functional.conv2d", "torch.nn.functional.conv3d", "torch.arange" ] ]
iheb-brini/fitness-lab
[ "2d82d7a2ecba27f535cda880865e6d9ed446eac5", "2d82d7a2ecba27f535cda880865e6d9ed446eac5" ]
[ "Modules/nn/architectures/DenseNet/classes.py", "Modules/nn/applications/GAN/WGAN/classes.py" ]
[ "from torch import nn, cat\n\nfrom .constants import NUM_CONVS_IN_DENSE_BLOCKS\n\n\ndef conv_block(in_channels, out_channels):\n blk = nn.Sequential(\n nn.BatchNorm2d(in_channels), nn.ReLU(),\n nn.Conv2d(in_channels, out_channels,\n kernel_size=3, padding=1)\n )\n\n return blk\n\n\nclass DenseBlock(nn.Module):\n def __init__(self, num_convs, in_channels, out_channels, **kwargs):\n super().__init__(**kwargs)\n block_list = []\n for i in range(num_convs):\n block_list.append(conv_block(\n out_channels*i + in_channels, out_channels))\n\n self.net = nn.Sequential(*block_list)\n\n def forward(self, X):\n for layer in self.net:\n Y = layer(X)\n # Concatenate the input and output of each block on the channel dimension\n X = cat((X, Y), axis=1)\n\n return X\n\n\ndef transitive_block(in_channels, out_channels):\n blk = nn.Sequential(\n nn.BatchNorm2d(in_channels), nn.ReLU(),\n nn.Conv2d(in_channels, out_channels,\n kernel_size=1),\n nn.AvgPool2d(kernel_size=2, stride=2)\n )\n return blk\n\n\nclass DenseNet(nn.Module):\n def __init__(self, in_channels, **kwargs):\n super().__init__(**kwargs)\n\n num_channels, growth_rate = 64, 32\n num_convs_in_dense_blocks = NUM_CONVS_IN_DENSE_BLOCKS\n\n list_blocks = [\n nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2d(64), nn.ReLU(),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1),\n ]\n\n for i, num_convs in enumerate(num_convs_in_dense_blocks):\n list_blocks.append(DenseBlock(\n num_convs, num_channels, growth_rate))\n\n num_channels += num_convs * growth_rate\n\n if i != len(num_convs_in_dense_blocks) - 1:\n list_blocks.append(transitive_block(\n num_channels, num_channels // 2))\n num_channels = num_channels // 2\n\n list_blocks.extend([nn.BatchNorm2d(num_channels), nn.ReLU(), nn.AdaptiveAvgPool2d((1, 1)),\n nn.Flatten(), nn.Linear(num_channels, 10)])\n\n self.blocks = nn.Sequential(*list_blocks)\n\n def forward(self, X):\n return self.blocks(X)\n", "\"\"\"\nBased on the implementation of GANs specialization course by DeepLearning.ai\nhttps://www.coursera.org/learn/build-basic-generative-adversarial-networks-gans\n\"\"\"\n\nimport torch\nfrom torch import nn\n\nclass Generator(nn.Module):\n '''\n Generator Class\n Values:\n z_dim: the dimension of the noise vector, a scalar\n im_chan: the number of channels in the images, fitted for the dataset used, a scalar\n (MNIST is black-and-white, so 1 channel is your default)\n hidden_dim: the inner dimension, a scalar\n '''\n def __init__(self, z_dim=10, im_chan=1, hidden_dim=64):\n super(Generator, self).__init__()\n self.z_dim = z_dim\n # Build the neural network\n self.gen = nn.Sequential(\n self.make_gen_block(z_dim, hidden_dim * 4),\n self.make_gen_block(hidden_dim * 4, hidden_dim * 2, kernel_size=4, stride=1),\n self.make_gen_block(hidden_dim * 2, hidden_dim),\n self.make_gen_block(hidden_dim, im_chan, kernel_size=4, final_layer=True),\n )\n\n def make_gen_block(self, input_channels, output_channels, kernel_size=3, stride=2, final_layer=False):\n '''\n Function to return a sequence of operations corresponding to a generator block of DCGAN;\n a transposed convolution, a batchnorm (except in the final layer), and an activation.\n Parameters:\n input_channels: how many channels the input feature representation has\n output_channels: how many channels the output feature representation should have\n kernel_size: the size of each convolutional filter, equivalent to (kernel_size, kernel_size)\n stride: the stride of the convolution\n final_layer: a boolean, true if it is the final 
layer and false otherwise \n (affects activation and batchnorm)\n '''\n if not final_layer:\n return nn.Sequential(\n nn.ConvTranspose2d(input_channels, output_channels, kernel_size, stride),\n nn.BatchNorm2d(output_channels),\n nn.ReLU(inplace=True),\n )\n else:\n return nn.Sequential(\n nn.ConvTranspose2d(input_channels, output_channels, kernel_size, stride),\n nn.Tanh(),\n )\n\n def forward(self, noise):\n '''\n Function for completing a forward pass of the generator: Given a noise tensor,\n returns generated images.\n Parameters:\n noise: a noise tensor with dimensions (n_samples, z_dim)\n '''\n x = noise.view(len(noise), self.z_dim, 1, 1)\n return self.gen(x)\n\ndef get_noise(n_samples, z_dim, device='cpu'):\n '''\n Function for creating noise vectors: Given the dimensions (n_samples, z_dim)\n creates a tensor of that shape filled with random numbers from the normal distribution.\n Parameters:\n n_samples: the number of samples to generate, a scalar\n z_dim: the dimension of the noise vector, a scalar\n device: the device type\n '''\n return torch.randn(n_samples, z_dim, device=device)\n\nclass Critic(nn.Module):\n '''\n Critic Class\n Values:\n im_chan: the number of channels in the images, fitted for the dataset used, a scalar\n (MNIST is black-and-white, so 1 channel is your default)\n hidden_dim: the inner dimension, a scalar\n '''\n def __init__(self, im_chan=1, hidden_dim=64):\n super(Critic, self).__init__()\n self.crit = nn.Sequential(\n self.make_crit_block(im_chan, hidden_dim),\n self.make_crit_block(hidden_dim, hidden_dim * 2),\n self.make_crit_block(hidden_dim * 2, 1, final_layer=True),\n )\n\n def make_crit_block(self, input_channels, output_channels, kernel_size=4, stride=2, final_layer=False):\n '''\n Function to return a sequence of operations corresponding to a critic block of DCGAN;\n a convolution, a batchnorm (except in the final layer), and an activation (except in the final layer).\n Parameters:\n input_channels: how many channels the input feature representation has\n output_channels: how many channels the output feature representation should have\n kernel_size: the size of each convolutional filter, equivalent to (kernel_size, kernel_size)\n stride: the stride of the convolution\n final_layer: a boolean, true if it is the final layer and false otherwise \n (affects activation and batchnorm)\n '''\n if not final_layer:\n return nn.Sequential(\n nn.Conv2d(input_channels, output_channels, kernel_size, stride),\n nn.BatchNorm2d(output_channels),\n nn.LeakyReLU(0.2, inplace=True),\n )\n else:\n return nn.Sequential(\n nn.Conv2d(input_channels, output_channels, kernel_size, stride),\n )\n\n def forward(self, image):\n '''\n Function for completing a forward pass of the critic: Given an image tensor, \n returns a 1-dimension tensor representing fake/real.\n Parameters:\n image: a flattened image tensor with dimension (im_chan)\n '''\n crit_pred = self.crit(image)\n return crit_pred.view(len(crit_pred), -1)" ]
[ [ "torch.nn.Sequential", "torch.cat", "torch.nn.Conv2d", "torch.nn.Flatten", "torch.nn.MaxPool2d", "torch.nn.AvgPool2d", "torch.nn.Linear", "torch.nn.AdaptiveAvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ], [ "torch.nn.ConvTranspose2d", "torch.randn", "torch.nn.Conv2d", "torch.nn.Tanh", "torch.nn.LeakyReLU", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
gabinsane/myGym
[ "a41c6b11a47eaf19d0c69e67aeb48cf7a999d45a", "a41c6b11a47eaf19d0c69e67aeb48cf7a999d45a" ]
[ "myGym/envs/base_env.py", "myGym/stable_baselines_mygym/common/runners.py" ]
[ "import pybullet_data\nimport glob\nimport pybullet\nimport pybullet_utils.bullet_client as bc\nimport time\nimport numpy as np\nfrom gym.utils import seeding\nimport gym\nimport os\nimport inspect\nfrom myGym.envs.camera import Camera\nimport pkg_resources\ncurrentdir = pkg_resources.resource_filename(\"myGym\", \"envs\")\nrepodir = pkg_resources.resource_filename(\"myGym\", \"./\")\n\n\nclass BaseEnv(gym.Env):\n \"\"\"\n The base class for environments without rendering\n\n Parameters:\n :param gui_on: (bool) Whether or not to use PyBullet built-in GUI\n :param objects_dir_path: (str) Path to directory with URDF files for objects\n :param max_steps: (int) The maximum number of actions per episode\n :param show_bounding_boxes_gui: (bool) Whether or not to show bounding boxes in GUI\n :param changing_light_gui: (bool) Whether or not to change light in GUI\n :param shadows_on_gui: (bool) Whether or not to show shadows in GUI\n \"\"\"\n metadata = {'render.modes': [\n 'human', 'rgb_array'], 'video.frames_per_second': 50}\n\n def __init__(self,\n gui_on=True,\n objects_dir_path=pkg_resources.resource_filename(\"myGym\", \"envs/\"),\n max_steps=1024,\n show_bounding_boxes_gui=False,\n changing_light_gui=False,\n shadows_on_gui=True,\n timestep=1./240.\n ):\n self.gui_on = gui_on\n self.max_steps = max_steps\n self.show_bounding_boxes_gui = show_bounding_boxes_gui\n self.changing_light_gui = changing_light_gui\n self.shadows_on_gui = shadows_on_gui\n\n # Set episode information\n self.episode_start_time = None\n self.episode_over = False\n self.episode_failed = False\n self.episode_reward = 0.0\n self.episode_final_reward = []\n self.episode_final_distance = []\n self.episode_number = 0\n self.episode_steps = 0\n self.episode_max_time = 300\n self.episode_info = \"\"\n\n # Set general params\n self.time_step = 1. 
/ 240.\n #self.time_step = timestep\n self.urdf_root = pybullet_data.getDataPath()\n self.observation = {}\n\n # Set objects information\n self.objects_dir_path = objects_dir_path\n self.env_objects = []\n self.scene_objects_uids = {}\n self.all_objects_filenames = self._get_all_urdf_filenames(self.objects_dir_path)\n\n # Set GUI\n self._connect_to_physics_server()\n\n # Set env params and load models\n self._set_physics()\n self._setup_scene()\n self._set_observation_space()\n self._set_action_space()\n\n def _connect_to_physics_server(self):\n \"\"\"\n Connect to the PyBullet physics server in SHARED_MEMORY, GUI or DIRECT mode\n \"\"\"\n if self.gui_on:\n self.p = bc.BulletClient(connection_mode=pybullet.GUI)\n # if (self.p < 0):\n # self.p = bc.BulletClient(connection_mode=p.GUI)\n self._set_gui_mode()\n else:\n self.p = bc.BulletClient(connection_mode=pybullet.DIRECT)\n self.p.setPhysicsEngineParameter(enableFileCaching=0)\n\n def _set_gui_mode(self):\n \"\"\"\n Set GUI parameters: camera, shadows, extra elements\n \"\"\"\n self.p.resetDebugVisualizerCamera(3.3, -40, -41, [0.0, 0.0, 0.33])\n self.p.configureDebugVisualizer(self.p.COV_ENABLE_SHADOWS, self.shadows_on_gui)\n self.p.configureDebugVisualizer(self.p.COV_ENABLE_GUI, 0)\n\n def _set_physics(self):\n \"\"\"\n Set physics engine parameters\n \"\"\"\n self.p.setGravity(0, 0, -9.81)\n self.p.setPhysicsEngineParameter(solverResidualThreshold=0.001, numSolverIterations=150, numSubSteps=10, useSplitImpulse=1, collisionFilterMode=1, constraintSolverType=self.p.CONSTRAINT_SOLVER_LCP_DANTZIG, globalCFM=0.000001)\n self.p.setTimeStep(self.time_step)\n self.p.setRealTimeSimulation(0)\n self.p.setPhysicsEngineParameter(enableConeFriction=1)\n print(self.p.getPhysicsEngineParameters())\n\n def _setup_scene(self):\n \"\"\"\n Set up scene elements (furniture, objects, robots)\n \"\"\"\n raise NotImplementedError\n\n def _set_observation_space(self):\n \"\"\"\n Set limits of observations\n \"\"\"\n raise NotImplementedError\n\n def _set_action_space(self):\n \"\"\"\n Set limits of actions\n \"\"\"\n raise NotImplementedError\n\n def _get_observation(self):\n \"\"\"\n Get info about the state of the environment\n\n Returns:\n :return observation: (object) Observation of the environment\n \"\"\"\n raise NotImplementedError\n\n def step(self, action):\n \"\"\"\n Apply action on the environment\n\n Parameters:\n :param action: (object) An action provided by the agent\n Returns:\n :return observation: (object)\n :return reward: (float)\n :return done: (bool):\n :return info: (dict):\n \"\"\"\n raise NotImplementedError\n\n def _add_scene_object_uid(self, scene_object_uid, name):\n \"\"\"\n Call this method in order to enable texturization of object\n\n Parameters:\n :param scene_object: (int)\n \"\"\"\n self.scene_objects_uids[scene_object_uid] = name\n\n def get_scene_object_uid_by_name(self, name):\n for uid, object_name in self.scene_objects_uids.items():\n if name == object_name:\n return uid\n return None\n\n def seed(self, seed=None):\n \"\"\"\n Set the seed for this env's random number generator(s)\n \"\"\"\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def hard_reset(self):\n \"\"\"\n Full reset of the simulation. 
Delete and load again all objects and reset physics.\n \"\"\"\n self.p.resetSimulation()\n self.p.disconnect()\n self._connect_to_physics_server()\n self.scene_objects_uids = {}\n #self.episode_number = 0\n self._set_physics()\n self._setup_scene()\n\n def _restart_episode(self):\n \"\"\"\n Reset episode information and delete all objects\n \"\"\"\n self.p.removeAllUserDebugItems()\n self.episode_start_time = time.time()\n self.episode_over = False\n self.episode_failed = False\n self.episode_reward = 0.0\n self.episode_steps = 0\n\n def reset(self, hard=False):\n \"\"\"\n Reset the state of the environment\n \"\"\"\n if hard:\n self.hard_reset()\n else:\n self._remove_all_objects()\n\n self._restart_episode()\n\n def _draw_bounding_boxes(self):\n \"\"\"\n Show bounding boxes in tne PyBullet GUI\n \"\"\"\n for object in self.env_objects:\n object.draw_bounding_box()\n\n def _compute_reward(self):\n \"\"\"\n Compute reward for the agent\n \"\"\"\n return NotImplementedError\n\n def _print_episode_summary(self, info_dict={}):\n \"\"\"\n Show an extra information about the episode\n\n Parameters:\n :param info_dict: (dict) Extra info\n \"\"\"\n if self.episode_failed:\n episode_status = \"FAILURE\"\n else:\n episode_status = \"SUCCESS\"\n\n print(\"#---------Episode-Summary---------#\")\n print(\"Episode number: \" + str(self.episode_number))\n print(\"Episode's number of steps: \" + str(self.episode_steps))\n #print(\"Episode status: \" + episode_status)\n print(\"Episode info: \" + self.episode_info)\n print(\"Episode reward: \" + str(self.episode_reward))\n #print(\"Last step reward: \" + str(self.reward.rewards_history[-1]))\n print(\"#---------------------------------#\")\n\n for key, value in info_dict.items():\n print(key + \": \" + str(value))\n\n def _get_random_urdf_filenames(self, n, used_objects=None):\n \"\"\"\n Sample random URDF files from directory with objects URDFs\n\n Parameters:\n :param n: (int) Number of URDF's\n :param used_objects: (list) Specified subset of objects\n Returns:\n :return selected_objects_filenames: (list)\n \"\"\"\n if used_objects or (self.all_objects_filenames is None):\n all_objects_filenames = []\n for object_name in used_objects:\n if \"virtual\" in object_name:\n all_objects_filenames.append(object_name)\n for file in self.all_objects_filenames:\n if '/'+object_name+'.' 
in file:\n all_objects_filenames.append(file)\n else:\n # uses self.all_objects_filenames\n pass\n assert all_objects_filenames is not None\n\n selected_objects_filenames = []\n total_num_objects = len(all_objects_filenames)\n if (n <= total_num_objects):\n selected_objects = np.random.choice(\n np.arange(total_num_objects), n, replace=True)\n else:\n selected_objects = list(np.arange(total_num_objects))\n remain = n - total_num_objects\n selected_objects += list(np.random.choice(\n np.arange(total_num_objects), remain))\n for object_id in selected_objects:\n selected_objects_filenames.append(all_objects_filenames[object_id])\n return selected_objects_filenames\n\n def _get_all_urdf_filenames(self, dir):\n \"\"\"\n Get all URDF filenames from directory\n\n Parameters:\n :param dir: (int) Number of URDFs\n Returns:\n :return filenames: (list)\n \"\"\"\n list_all = []\n for (dirpath, dirnames, filenames) in os.walk(self.objects_dir_path):\n if '_old' not in dirpath and 'urdf' in dirpath:\n list_all += [os.path.join(dirpath, file) for file in filenames]\n return list_all\n\n def _remove_object(self, object):\n \"\"\"\n Totally remove object from the simulation\n\n Parameters:\n :param object: (EnvObject) Object to remove\n \"\"\"\n self.env_objects.remove(object)\n self.p.removeBody(object.uid)\n\n def _remove_all_objects(self):\n \"\"\"\n Remove all objects from simulation (not scene objects or robots)\n \"\"\"\n env_objects_copy = self.env_objects[:]\n for env_object in env_objects_copy:\n self._remove_object(env_object)\n\n def get_texturizable_objects_uids(self):\n \"\"\"\n Get all objects in the environment, on which textures can be applied\n \n Returns:\n :return texturizable_objects_uids: (list)\n \"\"\"\n return [object.get_uid() for object in self.env_objects] + list(self.scene_objects_uids.keys())\n\n def get_colorizable_objects_uids(self):\n \"\"\"\n Get all objects in the environment, which color can be changed\n\n Returns:\n :return colorizable_objects_uids: (list)\n \"\"\"\n return [object.get_uid() for object in self.env_objects] + list(self.scene_objects_uids.keys())\n\n def __del__(self):\n \"\"\"\n Disconnect from the physics server\n \"\"\"\n self.p.disconnect()\n\n\nclass CameraEnv(BaseEnv):\n \"\"\"\n The class for environments with rendering\n\n Parameters:\n :param camera_resolution: (list) The number of pixels in image (WxH)\n :param shadows_on: (bool) Whether or not to use shadows while rendering, only applies to ER_TINY_RENDERER\n :param render_on: (bool) Turn on rendering\n :param renderer: (int) self.p.ER_TINY_RENDERER (CPU) or self.p.ER_BULLET_HARDWARE_OPENGL (GPU)\n :param active_cameras: (list) Set 1 at a position(=camera number) to save images from this camera\n \"\"\"\n def __init__(self, camera_resolution=[640, 480], shadows_on=True,\n render_on=True, renderer=pybullet.ER_BULLET_HARDWARE_OPENGL,\n active_cameras=None, **kwargs):\n\n super(CameraEnv, self).__init__(**kwargs)\n\n self.camera_resolution = camera_resolution\n self.shadows_on = shadows_on\n self.render_on = render_on\n self.renderer = renderer\n self.active_cameras = active_cameras\n self.cameras = []\n\n self.set_light()\n self._set_cameras()\n\n def set_light(self, light_direction=[1, 1, 1], light_color=[0.1, 0.1, 0.1],\n light_distance=1., light_ambient=1., light_diffuse=1.,\n light_specular=1.):\n \"\"\"\n Set light parameters for rendering, doesn't affect PyBullet GUI. 
Appart from light_direction, all parameters only apply to ER_TINY_RENDERER.\n\n Parameters:\n :param light_direction: (list) Specifies the world position of the light source\n :param light_color: (list) Directional light color in RGB in range 0..1\n :param light_distance: (float) Distance of the light along the normalized light_direction\n :param light_ambient: (float) Light ambient coefficient in range 0..1\n :param light_diffuse: (float) Light diffuse coefficient in range 0..1\n :param light_specular: (float) Light specular coefficient in range 0..1\n \"\"\"\n self.light_direction = light_direction\n self.light_color = light_color\n self.light_distance = light_distance\n self.light_ambient = light_ambient\n self.light_diffuse = light_diffuse\n self.light_specular = light_specular\n\n def get_render_parameters(self):\n \"\"\"\n Return environment parameters for rendering, initially is intended to\n use by cameras\n\n Returns:\n :return render_parameters: (dict) Render parameters\n \"\"\"\n return {\n \"width\": self.camera_resolution[0],\n \"height\": self.camera_resolution[1],\n \"lightDirection\": self.light_direction,\n \"lightColor\": self.light_color,\n \"lightDistance\": self.light_distance,\n \"shadow\": 1 if self.shadows_on else 0,\n \"lightAmbientCoeff\": self.light_ambient,\n \"lightDiffuseCoeff\": self.light_diffuse,\n \"lightSpecularCoeff\": self.light_specular,\n \"renderer\": self.renderer\n }\n\n def _set_cameras(self):\n \"\"\"\n Set cameras available to use for rendering\n \"\"\"\n raise NotImplementedError\n\n def get_cameras(self):\n return self.cameras\n\n def add_camera(self, **kwargs):\n \"\"\"\n Add new camera to the environment\n\n Parameters:\n :param position: (list) Eye position in Cartesian world coordinates\n :prarm target_position: (list) Position of the target point\n :param up_vector: (list) Up vector of the camera\n :param up_axis_index: (int) Either 1 for Y or 2 for Z axis up\n :param yaw: (float) Yaw angle in degrees left/right around up-axis\n :param pitch: (float) Pitch in degrees up/down\n :param roll: (float) Roll in degrees around forward vector\n :param distance: (float) Distance from eye to focus point\n :param field_of_view: (float) Field of view\n :param near_plane_distance: (float) Near plane distance\n :param far_plane_distance: (float) Far plane distance\n \"\"\"\n self.cameras.append(Camera(env=self, **kwargs))\n\n def set_active_cameras(self, active_cameras):\n\n if (len(active_cameras) == len(self.cameras)):\n self.active_cameras = active_cameras\n\n def change_current_camera(self, camera_num):\n print(\"Change camera to \" + str(self.current_camera))\n self.current_camera = camera_num\n\n def render(self, mode=\"rgb_array\", camera_id=None):\n \"\"\"\n Get image (image, depth, segmentation_mask) from camera or active cameras\n\n Parameters:\n :param mode: (str) rgb_array to return RGB image\n :param camera_id: (int) Get image from specified camera\n Returns:\n :return camera_data: (dict) Key: camera_id, Value: info from camera\n \"\"\"\n if mode != \"rgb_array\":\n return np.array([])\n camera_data = {}\n if self.render_on:\n if camera_id is not None:\n camera_data[camera_id] = self.cameras[camera_id].render()\n else:\n for camera_num in range(len(self.active_cameras)):\n if self.active_cameras[camera_num]:\n camera_data[camera_num] = self.cameras[camera_num].render()\n return camera_data\n\n def project_point_to_camera_image(self, point, camera_id):\n \"\"\"\n Project 3D point in Cartesian world coordinates to 2D point in pixel space\n\n 
Parameters:\n :param point: (list) 3D point in Cartesian world coordinates\n :param camera_id: (int) Index of camera to project on\n\n Returns:\n :return 2d_point: (list) 2D coordinates of point on imageg\n \"\"\"\n return self.cameras[camera_id].project_point_to_image(point)\n\n def get_camera_opencv_matrix_values(self, camera_id):\n \"\"\"\n Compute values of OpenCV matrix\n\n Parameters:\n :param camera_id: (int) Index of camera to get matrix from\n Returns:\n :return values: (dict) fx, fy, cx, cy values\n \"\"\"\n return self.cameras[camera_id].get_opencv_camera_matrix_values()\n", "from abc import ABC, abstractmethod\nimport typing\nfrom typing import Union, Optional, Any\nfrom gym import spaces\nimport gym\nimport numpy as np\n\nfrom myGym.stable_baselines_mygym.common.callbacks import BaseCallback\nfrom myGym.stable_baselines_mygym.common.vec_env import VecEnv\n\nif typing.TYPE_CHECKING:\n from myGym.stable_baselines_mygym.common.base_class import BaseRLModel # pytype: disable=pyi-error\n\n\nclass AbstractEnvRunner(ABC):\n def __init__(self, *, env: Union[gym.Env, VecEnv], model: 'BaseRLModel', n_steps: int):\n \"\"\"\n Collect experience by running `n_steps` in the environment.\n Note: if this is a `VecEnv`, the total number of steps will\n be `n_steps * n_envs`.\n\n :param env: (Union[gym.Env, VecEnv]) The environment to learn from\n :param model: (BaseRLModel) The model to learn\n :param n_steps: (int) The number of steps to run for each environment\n \"\"\"\n self.env = env\n self.model = model\n n_envs = env.num_envs\n self.batch_ob_shape = (n_envs * n_steps,) + env.observation_space.shape\n self.obs = np.zeros((n_envs,) +env.envs[0].real_obsspace.shape, dtype=env.observation_space.dtype.name)\n self.obs[:] = env.envs[0].reset().reshape(1, -1)\n self.obs = self.obs.reshape(self.env.envs[0].num_robots, -1)\n self.n_steps = n_steps\n self.states = model.initial_state\n self.dones = [False for _ in range(n_envs)]\n self.callback = None # type: Optional[BaseCallback]\n self.continue_training = True\n self.n_envs = n_envs\n\n def run(self, callback: Optional[BaseCallback] = None) -> Any:\n \"\"\"\n Collect experience.\n\n :param callback: (Optional[BaseCallback]) The callback that will be called\n at each environment step.\n \"\"\"\n self.callback = callback\n self.continue_training = True\n return self._run()\n\n @abstractmethod\n def _run(self) -> Any:\n \"\"\"\n This method must be overwritten by child class.\n \"\"\"\n raise NotImplementedError\n\n\ndef traj_segment_generator(policy, env, horizon, reward_giver=None, gail=False, callback=None):\n \"\"\"\n Compute target value using TD(lambda) estimator, and advantage with GAE(lambda)\n :param policy: (MLPPolicy) the policy\n :param env: (Gym Environment) the environment\n :param horizon: (int) the number of timesteps to run per batch\n :param reward_giver: (TransitionClassifier) the reward predicter from obsevation and action\n :param gail: (bool) Whether we are using this generator for standard trpo or with gail\n :param callback: (BaseCallback)\n :return: (dict) generator that returns a dict with the following keys:\n - observations: (np.ndarray) observations\n - rewards: (numpy float) rewards (if gail is used it is the predicted reward)\n - true_rewards: (numpy float) if gail is used it is the original reward\n - vpred: (numpy float) action logits\n - dones: (numpy bool) dones (is end of episode, used for logging)\n - episode_starts: (numpy bool)\n True if first timestep of an episode, used for GAE\n - actions: (np.ndarray) 
actions\n - nextvpred: (numpy float) next action logits\n - ep_rets: (float) cumulated current episode reward\n - ep_lens: (int) the length of the current episode\n - ep_true_rets: (float) the real environment reward\n - continue_training: (bool) Whether to continue training\n or stop early (triggered by the callback)\n \"\"\"\n # Check when using GAIL\n assert not (gail and reward_giver is None), \"You must pass a reward giver when using GAIL\"\n\n # Initialize state variables\n step = 0\n action = env.action_space.sample() # not used, just so we have the datatype\n observation = env.reset()\n\n cur_ep_ret = 0 # return in current episode\n current_it_len = 0 # len of current iteration\n current_ep_len = 0 # len of current episode\n cur_ep_true_ret = 0\n ep_true_rets = []\n ep_rets = [] # returns of completed episodes in this segment\n ep_lens = [] # Episode lengths\n\n # Initialize history arrays\n observations = np.array([observation for _ in range(horizon)])\n true_rewards = np.zeros(horizon, 'float32')\n rewards = np.zeros(horizon, 'float32')\n vpreds = np.zeros(horizon, 'float32')\n episode_starts = np.zeros(horizon, 'bool')\n dones = np.zeros(horizon, 'bool')\n actions = np.array([action for _ in range(horizon)])\n states = policy.initial_state\n episode_start = True # marks if we're on first timestep of an episode\n done = False\n\n callback.on_rollout_start()\n\n while True:\n action, vpred, states, _ = policy.step(observation.reshape(-1, *observation.shape), states, done)\n # Slight weirdness here because we need value function at time T\n # before returning segment [0, T-1] so we get the correct\n # terminal value\n if step > 0 and step % horizon == 0:\n callback.update_locals(locals())\n callback.on_rollout_end()\n yield {\n \"observations\": observations,\n \"rewards\": rewards,\n \"dones\": dones,\n \"episode_starts\": episode_starts,\n \"true_rewards\": true_rewards,\n \"vpred\": vpreds,\n \"actions\": actions,\n \"nextvpred\": vpred[0] * (1 - episode_start),\n \"ep_rets\": ep_rets,\n \"ep_lens\": ep_lens,\n \"ep_true_rets\": ep_true_rets,\n \"total_timestep\": current_it_len,\n 'continue_training': True\n }\n _, vpred, _, _ = policy.step(observation.reshape(-1, *observation.shape))\n # Be careful!!! 
if you change the downstream algorithm to aggregate\n # several of these batches, then be sure to do a deepcopy\n ep_rets = []\n ep_true_rets = []\n ep_lens = []\n # Reset current iteration length\n current_it_len = 0\n callback.on_rollout_start()\n i = step % horizon\n observations[i] = observation\n vpreds[i] = vpred[0]\n actions[i] = action[0]\n episode_starts[i] = episode_start\n\n clipped_action = action\n # Clip the actions to avoid out of bound error\n if isinstance(env.action_space, gym.spaces.Box):\n clipped_action = np.clip(action, env.action_space.low, env.action_space.high)\n\n if gail:\n reward = reward_giver.get_reward(observation, clipped_action[0])\n observation, true_reward, done, info = env.step(clipped_action[0])\n else:\n observation, reward, done, info = env.step(clipped_action[0])\n true_reward = reward\n\n if callback is not None:\n callback.update_locals(locals())\n if callback.on_step() is False:\n # We have to return everything so pytype does not complain\n yield {\n \"observations\": observations,\n \"rewards\": rewards,\n \"dones\": dones,\n \"episode_starts\": episode_starts,\n \"true_rewards\": true_rewards,\n \"vpred\": vpreds,\n \"actions\": actions,\n \"nextvpred\": vpred[0] * (1 - episode_start),\n \"ep_rets\": ep_rets,\n \"ep_lens\": ep_lens,\n \"ep_true_rets\": ep_true_rets,\n \"total_timestep\": current_it_len,\n 'continue_training': False\n }\n return\n\n rewards[i] = reward\n true_rewards[i] = true_reward\n dones[i] = done\n episode_start = done\n\n cur_ep_ret += reward\n cur_ep_true_ret += true_reward\n current_it_len += 1\n current_ep_len += 1\n if done:\n # Retrieve unnormalized reward if using Monitor wrapper\n maybe_ep_info = info.get('episode')\n if maybe_ep_info is not None:\n if not gail:\n cur_ep_ret = maybe_ep_info['r']\n cur_ep_true_ret = maybe_ep_info['r']\n\n ep_rets.append(cur_ep_ret)\n ep_true_rets.append(cur_ep_true_ret)\n ep_lens.append(current_ep_len)\n cur_ep_ret = 0\n cur_ep_true_ret = 0\n current_ep_len = 0\n if not isinstance(env, VecEnv):\n observation = env.reset()\n step += 1\n" ]
[ [ "numpy.arange", "numpy.array" ], [ "numpy.zeros", "numpy.clip" ] ]
dsilvalo28/AIVA-DAIA
[ "55b1f547aaf850df1ea3ddd9a2f6b5a2af410889" ]
[ "src/TreeDetector.py" ]
[ "import cv2\nimport numpy as np\nfrom src.Detector import Detector\n\n\n# Tree detector class #\nclass TreeDetector(Detector):\n def __init__(self, image_path=None):\n self.__image_path = image_path\n self.image = None\n if image_path is not None:\n self.read(self.__image_path)\n\n # *** CONSTANTS *** #\n self.__threshold_down = 127\n self.__threshold_up = 255\n self.__totalm2 = 12000\n self.__treesperm2 = 0.6\n\n # *** PRIVATE *** #\n def __preprocess_image(self):\n \"\"\"\n :return: Preprocessed set image\n \"\"\"\n preprocessed_image = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)\n hsv_image = cv2.cvtColor(self.image, cv2.COLOR_BGR2HSV)\n\n return preprocessed_image, hsv_image\n\n # *** PUBLIC *** #\n def read(self, image):\n \"\"\"\n :param image: Set the image to work with\n \"\"\"\n self.image = image\n\n def read_from_path(self, image_path):\n \"\"\"\n :param image_path: Set the path to read the image and the image\n \"\"\"\n self.__image_path = image_path\n self.image = cv2.imread(self.__image_path)\n return self.image\n\n def process_image(self, lc=[0, 100, 100], uc=[120, 255, 255]):\n \"\"\"\n :param lc: [int, int, int] Lower HSV color values\n :param uc: [int, int, int] Lower HSV color values\n :return: [np.array] 3 channel segmentation mask of the set image\n \"\"\"\n preprocessed_image, hsv_image = self.__preprocess_image()\n ret, segmented_image = cv2.threshold(preprocessed_image, self.__threshold_down, self.__threshold_up,\n cv2.THRESH_BINARY)\n\n # Creaccion de mascara\n lower_color = np.array(lc, dtype='uint8')\n upper_color = np.array(uc, dtype='uint8')\n mask = cv2.inRange(hsv_image, lower_color, upper_color)\n mask_3_channels = np.dstack((mask, mask, mask))\n\n # ret2, thresh = cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n # segmented_image_boolean = segmented_image.astype(np.bool)\n return mask_3_channels\n\n def calculate_percentage(self):\n \"\"\"\n :return: Percentage of tree mass of the set image\n \"\"\"\n segmented_image = self.process_image()\n percentage = np.mean(segmented_image/2.55)\n return percentage\n\n def calculate_m2(self):\n \"\"\"\n :return: m² of tree mass of the set image\n \"\"\"\n percentage = self.calculate_percentage()\n m2 = percentage * self.__totalm2\n return m2\n\n def calculate_number_trees(self):\n \"\"\"\n :return: Number of trees of the set image\n \"\"\"\n m2 = self.calculate_m2()\n n_trees = int(m2 * self.__treesperm2)\n return n_trees\n" ]
[ [ "numpy.array", "numpy.mean", "numpy.dstack" ] ]
aholinch/Keplers-Goat-Herd
[ "18cc49465353eb6ce6ce9e9e84d81fca9f5d3c59" ]
[ "util/kgh.py" ]
[ "import numpy as np, time\n\ndef mToE(m, e):\n if e <= 0.5:\n return mToE(m,e,10)\n\n if e <= 0.9:\n return mToE(m,e,25)\n\n if e <= 0.95:\n return mToE(m,e,50)\n\n if e <= 0.99:\n return mToE(m,e,128)\n\n return mToE(m,e,256)\n\n\ndef mToE(m, eccentricity, N_it):\n \"\"\"Solve Kepler's equation, E - e sin E = ell, via the contour integration method of Philcox et al. (2021)\n This uses techniques described in Ullisch (2020) to solve the `geometric goat problem'.\n\n Args:\n m: mean anomaly, in the range (0,2 pi).\n eccentricity (float): Eccentricity. Must be in the range 0<e<1.\n N_it (float): Number of grid-points.\n\n Returns:\n (float): eccentric anomaly, E.\n \"\"\"\n\n # Check inputs\n if eccentricity<=0.:\n raise Exception(\"Eccentricity must be greater than zero!\")\n elif eccentricity>=1:\n raise Exception(\"Eccentricity must be less than unity!\")\n if m>2.*np.pi:\n raise Exception(\"Mean anomaly should be in the range (0, 2 pi)\")\n if m<0:\n raise Exception(\"Mean anomaly should be in the range (0, 2 pi)\")\n if N_it<2:\n raise Exception(\"Need at least two sampling points!\")\n\n # Define sampling points\n N_points = N_it - 2\n N_fft = (N_it-1)*2\n\n # Define contour radius\n radius = eccentricity/2\n\n # Generate e^{ikx} sampling points and precompute real and imaginary parts\n j_arr = np.arange(N_points)\n freq = (2*np.pi*(j_arr+1.)/N_fft)[:,np.newaxis]\n exp2R = np.cos(freq)\n exp2I = np.sin(freq)\n ecosR= eccentricity*np.cos(radius*exp2R)\n esinR = eccentricity*np.sin(radius*exp2R)\n exp4R = exp2R*exp2R-exp2I*exp2I\n exp4I = 2.*exp2R*exp2I\n coshI = np.cosh(radius*exp2I)\n sinhI = np.sinh(radius*exp2I)\n\n # Precompute e sin(e/2) and e cos(e/2)\n esinRadius = eccentricity*np.sin(radius);\n ecosRadius = eccentricity*np.cos(radius);\n\n # Define contour center for each ell and precompute sin(center), cos(center)\n center = m-eccentricity/2.\n if m < np.pi:\n center += eccentricity\n sinC = np.sin(center)\n cosC = np.cos(center)\n output = center\n\n ## Accumulate Fourier coefficients\n # NB: we halve the integration range by symmetry, absorbing factor of 2 into ratio\n\n ## Separate out j = 0 piece, which is simpler\n\n # Compute z in real and imaginary parts (zI = 0 here)\n zR = center + radius\n\n # Compute e*sin(zR) from precomputed quantities\n tmpsin = sinC*ecosRadius+cosC*esinRadius\n\n # Compute f(z(x)) in real and imaginary parts (fxI = 0)\n fxR = zR - tmpsin - m \n\n # Add to arrays, with factor of 1/2 since an edge\n ft_gx2 = 0.5/fxR\n ft_gx1 = 0.5/fxR\n\n ## Compute j = 1 to N_points pieces\n\n # Compute z in real and imaginary parts\n zR = center + radius*exp2R\n zI = radius*exp2I\n\n # Compute f(z(x)) in real and imaginary parts\n # can use precomputed cosh / sinh / cos / sin for this!\n tmpsin = sinC*ecosR+cosC*esinR # e sin(zR)\n tmpcos = cosC*ecosR-sinC*esinR # e cos(zR)\n\n fxR = zR - tmpsin*coshI-m\n fxI = zI - tmpcos*sinhI\n\n # Compute 1/f(z) and append to array\n ftmp = fxR*fxR+fxI*fxI;\n fxR /= ftmp;\n fxI /= ftmp;\n\n ft_gx2 += np.sum(exp4R*fxR+exp4I*fxI,axis=0)\n ft_gx1 += np.sum(exp2R*fxR+exp2I*fxI,axis=0)\n\n ## Separate out j = N_it piece, which is simpler\n\n # Compute z in real and imaginary parts (zI = 0 here)\n zR = center - radius\n\n # Compute sin(zR) from precomputed quantities\n tmpsin = sinC*ecosRadius-cosC*esinRadius\n\n # Compute f(z(x)) in real and imaginary parts (fxI = 0 here)\n fxR = zR - tmpsin-m\n\n # Add to sum, with 1/2 factor for edges\n ft_gx2 += 0.5/fxR;\n ft_gx1 += -0.5/fxR;\n\n ### Compute and return the solution E(ell,e)\n 
output += radius*ft_gx2/ft_gx1;\n\n    return output[0]\n\nif __name__==\"__main__\":\n    \"\"\"Test the Python function above with a simple example\"\"\"\n\n    # Parameters\n    N_ell = 10000\n    eccentricity = 0.5\n    N_it = 10\n\n    print(\"\\n##### PARAMETERS #####\")\n    print(\"# N_ell = %d\"%N_ell)\n    print(\"# Eccentricity = %.2f\"%eccentricity)\n    print(\"# Iterations: %d\"%N_it)\n    print(\"######################\")\n\n    # Create ell array from E\n    E_true = (2.0*np.pi*(np.arange(N_ell)+0.5))/N_ell\n    ell_input = E_true - eccentricity*np.sin(E_true)\n\n    E_out = [0 for i in range(len(ell_input))]\n    # Time the function\n    init = time.time()\n    for i in range(len(ell_input)):\n        E_out[i] = mToE(ell_input[i],eccentricity,N_it)\n    runtime = time.time()-init\n\n    print(\"\\nEstimation complete after %.1f milliseconds, achieving mean error %.2e.\\n\"%(runtime*1000.,np.mean(np.abs(E_out-E_true))))\n" ]
[ [ "numpy.cosh", "numpy.abs", "numpy.arange", "numpy.cos", "numpy.sinh", "numpy.sin", "numpy.sum" ] ]
liuky74/detr
[ "e2b59573dcb86720562dfbdb02977ef996857025" ]
[ "models/backbone.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\"\"\"\nBackbone modules.\n\"\"\"\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn.functional as F\nimport torchvision\nfrom torch import nn\nfrom torchvision.models._utils import IntermediateLayerGetter\nfrom typing import Dict, List\n\nfrom util.misc import NestedTensor, is_main_process\n\nfrom .position_encoding import build_position_encoding\n\n\nclass FrozenBatchNorm2d(torch.nn.Module):\n \"\"\"\n BatchNorm2d where the batch statistics and the affine parameters are fixed.\n\n Copy-paste from torchvision.misc.ops with added eps before rqsrt,\n without which any other models than torchvision.models.resnet[18,34,50,101]\n produce nans.\n \"\"\"\n\n def __init__(self, n):\n super(FrozenBatchNorm2d, self).__init__()\n self.register_buffer(\"weight\", torch.ones(n))\n self.register_buffer(\"bias\", torch.zeros(n))\n self.register_buffer(\"running_mean\", torch.zeros(n))\n self.register_buffer(\"running_var\", torch.ones(n))\n # 固定参数的batch norm,读取到本层参数时删除它\n def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n num_batches_tracked_key = prefix + 'num_batches_tracked'\n if num_batches_tracked_key in state_dict:\n del state_dict[num_batches_tracked_key]\n\n super(FrozenBatchNorm2d, self)._load_from_state_dict(\n state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs)\n\n def forward(self, x):\n # move reshapes to the beginning\n # to make it fuser-friendly\n w = self.weight.reshape(1, -1, 1, 1)\n b = self.bias.reshape(1, -1, 1, 1)\n rv = self.running_var.reshape(1, -1, 1, 1)\n rm = self.running_mean.reshape(1, -1, 1, 1)\n eps = 1e-5\n scale = w * (rv + eps).rsqrt()\n bias = b - rm * scale\n return x * scale + bias\n\n\nclass BackboneBase(nn.Module):\n\n def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool):\n super().__init__()\n for name, parameter in backbone.named_parameters():\n if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name: # 初始层和第一层不参与训练\n parameter.requires_grad_(False)\n if return_interm_layers: # 说明取数据的层\n return_layers = {\"layer1\": \"0\", \"layer2\": \"1\", \"layer3\": \"2\", \"layer4\": \"3\"}\n else:\n return_layers = {'layer4': \"0\"}\n self.body = IntermediateLayerGetter(backbone, return_layers=return_layers) # 这个函数可以返回一个新模型,新模型的输出为指定层名的输出\n self.num_channels = num_channels\n\n def forward(self, tensor_list: NestedTensor):\n xs = self.body(tensor_list.tensors) # 输出\n out: Dict[str, NestedTensor] = {}\n for name, x in xs.items():\n m = tensor_list.mask\n assert m is not None\n mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]\n out[name] = NestedTensor(x, mask)\n return out\n\n\nclass Backbone(BackboneBase):\n \"\"\"ResNet backbone with frozen BatchNorm.\"\"\"\n def __init__(self, name: str,\n train_backbone: bool,\n return_interm_layers: bool,\n dilation: bool):\n backbone = getattr(torchvision.models, name)(\n replace_stride_with_dilation=[False, False, dilation],\n pretrained=is_main_process(), norm_layer=FrozenBatchNorm2d)\n num_channels = 512 if name in ('resnet18', 'resnet34') else 2048\n super().__init__(backbone, train_backbone, num_channels, return_interm_layers)\n\n\nclass Joiner(nn.Sequential):\n def __init__(self, backbone, position_embedding):\n super().__init__(backbone, position_embedding)\n\n def forward(self, tensor_list: NestedTensor):\n 
xs = self[0](tensor_list) # boneNet输出\n out: List[NestedTensor] = []\n pos = []\n for name, x in xs.items():\n out.append(x)\n # position encoding\n pos.append(self[1](x).to(x.tensors.dtype)) # position embedding\n\n return out, pos\n\n\ndef build_backbone(args):\n position_embedding = build_position_encoding(args) #构建特征图像素坐标\n train_backbone = args.lr_backbone > 0 # 是否训练主干网络\n return_interm_layers = args.masks\n backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation) # 生成主干网络\n model = Joiner(backbone, position_embedding) # 将embedding与主函数融合\n model.num_channels = backbone.num_channels\n return model\n" ]
[ [ "torch.ones", "torch.zeros" ] ]
garciadias/k-means_on_apogee
[ "7c3315a0d305f255c121a015607e22e5a46bba82" ]
[ "src/create_dataset.py" ]
[ "\"\"\"Create csv with spectral data\"\"\"\nfrom os import getcwd\nfrom pathlib import Path\n\nfrom astropy.io import fits\nimport pandas as pd\n\nPROJECT_PATH = getcwd()\nSPECTRA = {}\nfor spectrum_path in Path('%s/data/fits/' % PROJECT_PATH).glob('*fits'):\n spectrum_fits = fits.open(spectrum_path)\n spectrum = spectrum_fits[1].data[0]\n SPECTRA[spectrum_fits[0].header['OBJID']] = spectrum\n Path(spectrum_path).unlink()\nwavelenght = spectrum_fits[4].data[0]\n\nall_spectra = pd.DataFrame(SPECTRA, index=wavelenght).T\n\nall_spectra.to_csv('%s/data/all_spectra.csv' % PROJECT_PATH)\n\nPath(PROJECT_PATH + '/models').mkdir(exist_ok=True)\n" ]
[ [ "pandas.DataFrame" ] ]
Vishal-V/federated
[ "2575ac3c571004ba554bd0c0d11c2e307ff22d57" ]
[ "tensorflow_federated/python/core/impl/executors/execution_context_test.py" ]
[ "# Lint as: python3\n# Copyright 2019, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport contextlib\n\nfrom absl.testing import absltest\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_federated.python.core.api import computation_types\nfrom tensorflow_federated.python.core.api import computations\nfrom tensorflow_federated.python.core.api import intrinsics\nfrom tensorflow_federated.python.core.impl.compiler import type_factory\nfrom tensorflow_federated.python.core.impl.executors import execution_context\nfrom tensorflow_federated.python.core.impl.executors import executor_stacks\n\ntf.compat.v1.enable_v2_behavior()\n\n\[email protected]\ndef _execution_context(num_clients=None):\n executor_factory = executor_stacks.local_executor_factory(num_clients)\n yield execution_context.ExecutionContext(executor_factory)\n\n\nclass RetryableErrorTest(absltest.TestCase):\n\n def test_is_retryable_error(self):\n retryable_error = execution_context.RetryableError()\n self.assertTrue(execution_context._is_retryable_error(retryable_error))\n self.assertFalse(execution_context._is_retryable_error(TypeError()))\n self.assertFalse(execution_context._is_retryable_error(1))\n self.assertFalse(execution_context._is_retryable_error('a'))\n self.assertFalse(execution_context._is_retryable_error(None))\n\n\nclass ExecutionContextIntegrationTest(absltest.TestCase):\n\n def test_simple_no_arg_tf_computation_with_int_result(self):\n\n @computations.tf_computation\n def comp():\n return tf.constant(10)\n\n with _execution_context():\n result = comp()\n\n self.assertEqual(result, 10)\n\n def test_one_arg_tf_computation_with_int_param_and_result(self):\n\n @computations.tf_computation(tf.int32)\n def comp(x):\n return tf.add(x, 10)\n\n with _execution_context():\n result = comp(3)\n\n self.assertEqual(result, 13)\n\n def test_three_arg_tf_computation_with_int_params_and_result(self):\n\n @computations.tf_computation(tf.int32, tf.int32, tf.int32)\n def comp(x, y, z):\n return tf.multiply(tf.add(x, y), z)\n\n with _execution_context():\n result = comp(3, 4, 5)\n\n self.assertEqual(result, 35)\n\n def test_tf_computation_with_dataset_params_and_int_result(self):\n\n @computations.tf_computation(computation_types.SequenceType(tf.int32))\n def comp(ds):\n return ds.reduce(np.int32(0), lambda x, y: x + y)\n\n with _execution_context():\n ds = tf.data.Dataset.range(10).map(lambda x: tf.cast(x, tf.int32))\n result = comp(ds)\n\n self.assertEqual(result, 45)\n\n def test_tf_computation_with_structured_result(self):\n\n @computations.tf_computation\n def comp():\n return collections.OrderedDict([\n ('a', tf.constant(10)),\n ('b', tf.constant(20)),\n ])\n\n with _execution_context():\n result = comp()\n\n self.assertIsInstance(result, collections.OrderedDict)\n self.assertDictEqual(result, {'a': 10, 'b': 20})\n\n def test_with_temperature_sensor_example(self):\n\n @computations.tf_computation(\n computation_types.SequenceType(tf.float32), 
tf.float32)\n def count_over(ds, t):\n return ds.reduce(\n np.float32(0), lambda n, x: n + tf.cast(tf.greater(x, t), tf.float32))\n\n @computations.tf_computation(computation_types.SequenceType(tf.float32))\n def count_total(ds):\n return ds.reduce(np.float32(0.0), lambda n, _: n + 1.0)\n\n @computations.federated_computation(\n type_factory.at_clients(computation_types.SequenceType(tf.float32)),\n type_factory.at_server(tf.float32))\n def comp(temperatures, threshold):\n return intrinsics.federated_mean(\n intrinsics.federated_map(\n count_over,\n intrinsics.federated_zip(\n [temperatures,\n intrinsics.federated_broadcast(threshold)])),\n intrinsics.federated_map(count_total, temperatures))\n\n with _execution_context():\n to_float = lambda x: tf.cast(x, tf.float32)\n temperatures = [\n tf.data.Dataset.range(10).map(to_float),\n tf.data.Dataset.range(20).map(to_float),\n tf.data.Dataset.range(30).map(to_float),\n ]\n threshold = 15.0\n result = comp(temperatures, threshold)\n self.assertAlmostEqual(result, 8.333, places=3)\n\n num_clients = 3\n with _execution_context(num_clients):\n to_float = lambda x: tf.cast(x, tf.float32)\n temperatures = [\n tf.data.Dataset.range(10).map(to_float),\n tf.data.Dataset.range(20).map(to_float),\n tf.data.Dataset.range(30).map(to_float),\n ]\n threshold = 15.0\n result = comp(temperatures, threshold)\n self.assertAlmostEqual(result, 8.333, places=3)\n\n def test_changing_cardinalities_across_calls(self):\n\n @computations.federated_computation(type_factory.at_clients(tf.int32))\n def comp(x):\n return x\n\n five_ints = list(range(5))\n ten_ints = list(range(10))\n\n with _execution_context():\n five = comp(five_ints)\n ten = comp(ten_ints)\n\n self.assertEqual(five, five_ints)\n self.assertEqual(ten, ten_ints)\n\n def test_conflicting_cardinalities_within_call(self):\n\n @computations.federated_computation(\n [type_factory.at_clients(tf.int32),\n type_factory.at_clients(tf.int32)])\n def comp(x):\n return x\n\n five_ints = list(range(5))\n ten_ints = list(range(10))\n\n with _execution_context():\n with self.assertRaisesRegex(ValueError, 'Conflicting cardinalities'):\n comp([five_ints, ten_ints])\n\n\nif __name__ == '__main__':\n absltest.main()\n" ]
[ [ "tensorflow.compat.v1.enable_v2_behavior", "tensorflow.constant", "tensorflow.greater", "numpy.int32", "tensorflow.cast", "tensorflow.add", "numpy.float32", "tensorflow.data.Dataset.range" ] ]
ronghanghu/cmn
[ "85644ad56f8f62d04a5e8636ad3efe9ef7b34705", "85644ad56f8f62d04a5e8636ad3efe9ef7b34705" ]
[ "util/im_processing.py", "models/visual7w_attention_model.py" ]
[ "from __future__ import absolute_import, division, print_function\n\nimport skimage.transform\nimport numpy as np\n\ndef rectify_bboxes(bboxes, height, width):\n bboxes = np.maximum(bboxes, 0)\n bboxes[:, 2:4] = np.maximum(bboxes[:, 0:2], bboxes[:, 2:4])\n bboxes[:, 0] = np.minimum(bboxes[:, 0], width-1)\n bboxes[:, 1] = np.minimum(bboxes[:, 1], height-1)\n bboxes[:, 2] = np.minimum(bboxes[:, 2], width-1)\n bboxes[:, 3] = np.minimum(bboxes[:, 3], height-1)\n return bboxes\n\ndef resize_and_pad(im, input_h, input_w):\n # Resize and pad im to input_h x input_w size\n im_h, im_w = im.shape[:2]\n scale = min(input_h / im_h, input_w / im_w)\n resized_h = int(np.round(im_h * scale))\n resized_w = int(np.round(im_w * scale))\n pad_h = int(np.floor(input_h - resized_h) / 2)\n pad_w = int(np.floor(input_w - resized_w) / 2)\n\n resized_im = skimage.transform.resize(im, [resized_h, resized_w])\n if im.ndim > 2:\n new_im = np.zeros((input_h, input_w, im.shape[2]), dtype=resized_im.dtype)\n else:\n new_im = np.zeros((input_h, input_w), dtype=resized_im.dtype)\n new_im[pad_h:pad_h+resized_h, pad_w:pad_w+resized_w, ...] = resized_im\n\n return new_im\n\ndef resize_and_crop(im, input_h, input_w):\n # Resize and crop im to input_h x input_w size\n im_h, im_w = im.shape[:2]\n scale = max(input_h / im_h, input_w / im_w)\n resized_h = int(np.round(im_h * scale))\n resized_w = int(np.round(im_w * scale))\n crop_h = int(np.floor(resized_h - input_h) / 2)\n crop_w = int(np.floor(resized_w - input_w) / 2)\n\n resized_im = skimage.transform.resize(im, [resized_h, resized_w])\n if im.ndim > 2:\n new_im = np.zeros((input_h, input_w, im.shape[2]), dtype=resized_im.dtype)\n else:\n new_im = np.zeros((input_h, input_w), dtype=resized_im.dtype)\n new_im[...] = resized_im[crop_h:crop_h+input_h, crop_w:crop_w+input_w, ...]\n\n return new_im\n\ndef crop_bboxes_subtract_mean(im, bboxes, crop_size, image_mean):\n if isinstance(bboxes, list):\n bboxes = np.array(bboxes)\n bboxes = bboxes.reshape((-1, 4))\n\n im = skimage.img_as_ubyte(im)\n num_bbox = bboxes.shape[0]\n imcrop_batch = np.zeros((num_bbox, crop_size, crop_size, 3), dtype=np.float32)\n for n_bbox in range(bboxes.shape[0]):\n xmin, ymin, xmax, ymax = bboxes[n_bbox]\n # crop and resize\n imcrop = im[ymin:ymax+1, xmin:xmax+1, :]\n imcrop_batch[n_bbox, ...] = skimage.img_as_ubyte(\n skimage.transform.resize(imcrop, [crop_size, crop_size]))\n imcrop_batch -= image_mean\n return imcrop_batch\n\ndef bboxes_from_masks(masks):\n if masks.ndim == 2:\n masks = masks[np.newaxis, ...]\n num_mask = masks.shape[0]\n bboxes = np.zeros((num_mask, 4), dtype=np.int32)\n for n_mask in range(num_mask):\n idx = np.nonzero(masks[n_mask])\n xmin, xmax = np.min(idx[1]), np.max(idx[1])\n ymin, ymax = np.min(idx[0]), np.max(idx[0])\n bboxes[n_mask, :] = [xmin, ymin, xmax, ymax]\n return bboxes\n\ndef crop_masks_subtract_mean(im, masks, crop_size, image_mean):\n if masks.ndim == 2:\n masks = masks[np.newaxis, ...]\n num_mask = masks.shape[0]\n\n im = skimage.img_as_ubyte(im)\n bboxes = bboxes_from_masks(masks)\n imcrop_batch = np.zeros((num_mask, crop_size, crop_size, 3), dtype=np.float32)\n for n_mask in range(num_mask):\n xmin, ymin, xmax, ymax = bboxes[n_mask]\n\n # crop and resize\n im_masked = im.copy()\n mask = masks[n_mask, ..., np.newaxis]\n im_masked *= mask\n im_masked += image_mean.astype(np.uint8) * (1 - mask)\n imcrop = im_masked[ymin:ymax+1, xmin:xmax+1, :]\n imcrop_batch[n_mask, ...] 
= skimage.img_as_ubyte(skimage.transform.resize(imcrop, [224, 224]))\n\n imcrop_batch -= image_mean\n return imcrop_batch\n", "from models import modules, fastrcnn_vgg_net, lstm_net\n\nimport tensorflow as tf\nfrom tensorflow import convert_to_tensor as to_T\n\ndef visual7w_attbilstm_net(input_batch, bbox_batch1, spatial_batch1,\n bbox_batch2, spatial_batch2, expr_obj, num_vocab, embed_dim, lstm_dim,\n vgg_dropout, lstm_dropout):\n # a sentence is parsed into [expr_obj1, expr_relation, expr_obj2]\n # bbox_batch1 has shape [N_batch*N1, 5] and\n # spatial_batch1 has shape [N_batch, N1, D_spatial] and\n # bbox_batch2 has shape [N2, 5] and\n # spatial_batch2 has shape [1, N2, D_spatial] and\n # expr_obj has shape [T, N_batch]\n # where N1 is the number of choices (= 4 in Visual 7W) and\n # N2 is the number of proposals (~ 300 for RPN in Faster RCNN)\n\n N_batch = tf.shape(spatial_batch1)[0]\n N1 = tf.shape(spatial_batch1)[1]\n N2 = tf.shape(spatial_batch2)[1]\n\n # Extract visual features\n vis_feat1 = fastrcnn_vgg_net.vgg_roi_fc7(input_batch,\n tf.reshape(bbox_batch1, [-1, 5]), \"vgg_local\",\n apply_dropout=vgg_dropout)\n D_vis = vis_feat1.get_shape().as_list()[-1]\n vis_feat1 = tf.reshape(vis_feat1, to_T([N_batch, N1, D_vis]))\n vis_feat1.set_shape([None, None, D_vis])\n\n # Reshape and tile vis_feat2 and spatial_batch2\n vis_feat2 = fastrcnn_vgg_net.vgg_roi_fc7(input_batch,\n tf.reshape(bbox_batch2, [-1, 5]), \"vgg_local\",\n apply_dropout=vgg_dropout, reuse=True)\n vis_feat2 = tf.reshape(vis_feat2, to_T([1, N2, D_vis]))\n vis_feat2 = tf.tile(vis_feat2, to_T([N_batch, 1, 1]))\n vis_feat2.set_shape([None, None, D_vis])\n spatial_batch2 = tf.tile(spatial_batch2, to_T([N_batch, 1, 1]))\n\n # Extract representation using attention\n lang_obj1, lang_obj2, lang_relation = lstm_net.attbilstm(\n expr_obj, \"lstm\", num_vocab=num_vocab, embed_dim=embed_dim,\n lstm_dim=lstm_dim, apply_dropout=lstm_dropout)\n\n # Score for each bounding box matching the first object\n # scores_obj1 has shape [N_batch, N1, 1]\n scores_obj1 = modules.localization_module_batch_score(vis_feat1,\n spatial_batch1, lang_obj1)\n # Score for each bounding box matching the second object\n # scores_obj2 has shape [N_batch, N2, 1]\n scores_obj2 = modules.localization_module_batch_score(vis_feat2,\n spatial_batch2, lang_obj2, reuse=True)\n\n # Scores for each pair of bounding box matching the relationship\n # Tile the scores by broadcasting add\n # scores_rel has shape [N_batch, N1, N2, 1]\n scores_rel = modules.relationship_module_spatial_only_batch_score(\n spatial_batch1, scores_obj1, spatial_batch2, scores_obj2, lang_relation,\n rescale_scores=True)\n # marginal_scores has shape [N_batch, N1, 1]\n tf.add_to_collection(\"s_pair\", scores_rel)\n\n marginal_scores = tf.reduce_max(scores_rel, reduction_indices=2)\n final_scores = tf.reshape(marginal_scores, to_T([N_batch, -1]))\n\n return final_scores\n" ]
[ [ "numpy.maximum", "numpy.minimum", "numpy.nonzero", "numpy.min", "numpy.round", "numpy.max", "numpy.floor", "numpy.array", "numpy.zeros" ], [ "tensorflow.convert_to_tensor", "tensorflow.reduce_max", "tensorflow.shape", "tensorflow.reshape", "tensorflow.add_to_collection" ] ]
PrathikShirolkar/AutomaticImageColization
[ "981a011cbd32f741668738cafc1dd9ed44965402" ]
[ "viewFile.py" ]
[ "from tensorflow.python import pywrap_tensorflow\ncheckpoint_path = 'tmodel.ckpt-100'\n#checkpoint_path = \"deeplab_resnet_init.ckpt\"\nreader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)\nvar_to_shape_map = reader.get_variable_to_shape_map()\nfor key in var_to_shape_map:\n print(\"tensor_name: \", key)\n print(reader.get_tensor(key))\n" ]
[ [ "tensorflow.python.pywrap_tensorflow.NewCheckpointReader" ] ]
skn123/FAST
[ "d66522260bf65c5ab74d75050131d5a353cbf602" ]
[ "source/FAST/Examples/Python/convert_video_to_image_frames.py" ]
[ "## @example convert_video_to_image_frames.py\n# This example loads a video and converts to a stream of image frames and display the\n# individual frames with matplotlib.\n#\n# Note that additional dependencies are required to stream videos in FAST:\n# Linux: sudo apt install ubuntu-restricted-extras libgstreamer1.0-dev libgstreamer-plugins-bad1.0-dev libgstreamer-plugins-base1.0-dev libgstreamer-plugins-good1.0-dev\n# Windows: K-lite codec pack https://codecguide.com/download_kl.htm\nimport fast\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n#fast.Reporter.setGlobalReportMethod(fast.Reporter.COUT) # Uncomment to show debug info\n\nfast.downloadTestDataIfNotExists() # This will download the test data needed to run the example\n\nstreamer = fast.MovieStreamer.New()\nstreamer.setFilename(fast.Config.getTestDataPath() + 'US/sagittal_spine.avi')\n\ndataChannel = streamer.getOutputPort()\nstreamer.update() # Start pipeline\n\nframe_list = []\ncounter = 0\nwhile True:\n frame = dataChannel.getNextImage()\n counter += 1\n if frame.isLastFrame():\n break\n\n # Only show every X frame\n if counter % 20 == 0: frame_list.append((np.asarray(frame), counter))\n\n if len(frame_list) == 9:\n # Display the 9 last frames\n f, axes = plt.subplots(3,3, figsize=(10,10))\n for i in range(3):\n for j in range(3):\n axes[j, i].set_title('Frame: ' + str(frame_list[i + j*3][1]))\n axes[j, i].imshow(frame_list[i + j*3][0][..., 0], cmap='gray')\n plt.show()\n frame_list.clear()\n" ]
[ [ "numpy.asarray", "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ] ]
shankharaj29/tensor2tensor
[ "5a867d031bd493eeb7d2776e1118d1594ff0a623", "5a867d031bd493eeb7d2776e1118d1594ff0a623", "5a867d031bd493eeb7d2776e1118d1594ff0a623" ]
[ "tensor2tensor/models/video/savp.py", "tensor2tensor/bin/t2t_attack.py", "tensor2tensor/data_generators/algorithmic.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Stochastic Adversarial Video Prediction model.\n\nReference: https://arxiv.org/abs/1804.01523\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport numbers\nimport numpy as np\n\nfrom tensor2tensor.layers import common_layers\nfrom tensor2tensor.layers import common_video\nfrom tensor2tensor.models.video import savp_params # pylint: disable=unused-import\nfrom tensor2tensor.models.video import sv2p\nfrom tensor2tensor.utils import registry\nfrom tensor2tensor.utils import update_ops_hook\n\nimport tensorflow as tf\n\ngan_losses = tf.contrib.gan.losses.wargs\n\n\nclass NextFrameSavpBase(object):\n \"\"\"Main function for Stochastic Adversarial Video Prediction.\"\"\"\n\n def encoder(self, inputs, n_layers=3):\n \"\"\"Convnet that encodes inputs into mean and std of a gaussian.\n\n Args:\n inputs: 5-D Tensor, shape (batch_size, num_frames, width, height, channels)\n n_layers: Number of layers.\n\n Returns:\n z_mu: Mean of the latent gaussians.\n z_log_var: log(var) of the latent gaussians.\n\n Raises:\n ValueError: If inputs is not a 5-D tensor or not float32.\n \"\"\"\n latent_dims = self.hparams.z_dim\n\n shape_as_list = inputs.shape.as_list()\n if len(shape_as_list) != 5:\n raise ValueError(\"Expected inputs to be a 5-D, got %d\" %\n len(shape_as_list))\n if inputs.dtype != tf.float32:\n raise ValueError(\"Expected dtype tf.float32, got %s\" % inputs.dtype)\n\n # Flatten (N,T,W,H,C) into (NT,W,H,C)\n batch_size, _ = shape_as_list[:2]\n inputs = tf.reshape(inputs, [-1] + list(inputs.shape)[2:])\n n_filters = 64\n rectified = None\n\n # Applies 3 layer conv-net with padding, instance normalization\n # and leaky relu as per the encoder in\n # https://github.com/alexlee-gk/video_prediction\n padding = [[0, 0], [1, 1], [1, 1], [0, 0]]\n for i in range(n_layers):\n with tf.variable_scope(\"layer_%d\" % (i + 1)):\n n_filters *= 2**i\n if i:\n padded = tf.pad(rectified, padding)\n else:\n padded = tf.pad(inputs, padding)\n convolved = tf.layers.conv2d(padded, filters=n_filters, kernel_size=4,\n strides=2, padding=\"VALID\")\n normalized = tf.contrib.layers.instance_norm(convolved)\n rectified = tf.nn.leaky_relu(normalized, alpha=0.2)\n\n # Mean pooling across all spatial dimensions.\n pooled = tf.nn.avg_pool(\n rectified, [1] + rectified.shape[1:3].as_list() + [1],\n strides=[1, 1, 1, 1], padding=\"VALID\")\n squeezed = tf.squeeze(pooled, [1, 2])\n\n # Down-project and output the mean and log of the standard deviation of\n # the latents.\n with tf.variable_scope(\"z_mu\"):\n z_mu = tf.layers.dense(squeezed, latent_dims)\n with tf.variable_scope(\"z_log_sigma_sq\"):\n z_log_var = tf.layers.dense(squeezed, latent_dims)\n z_log_var = tf.clip_by_value(z_log_var, -10, 10)\n\n # Reshape to (batch_size X num_frames X latent_dims)\n z_mu = tf.reshape(z_mu, (batch_size, -1, latent_dims))\n z_log_var = 
tf.reshape(\n z_log_var, (batch_size, -1, latent_dims))\n return z_mu, z_log_var\n\n def expected_output_shape(self, input_shape, stride, padding, kernel_size):\n return (input_shape + 2*padding - kernel_size) // stride + 1\n\n def get_fc_dimensions(self, strides, kernel_sizes):\n \"\"\"Get expected fully connected shape after a series of convolutions.\"\"\"\n output_height, output_width, _ = self.hparams.problem.frame_shape\n output_steps = self.hparams.video_num_target_frames\n output_shape = np.array([output_steps, output_height, output_width])\n for curr_stride, kernel_size in zip(strides, kernel_sizes):\n output_shape = self.expected_output_shape(\n output_shape, np.array(curr_stride), 1, kernel_size)\n return np.prod(output_shape) * self.hparams.num_discriminator_filters * 8\n\n def discriminator(self, frames):\n \"\"\"3-D SNGAN discriminator.\n\n Args:\n frames: a list of batch-major tensors indexed by time.\n\n Returns:\n logits: 1-D Tensor with shape=batch_size.\n Positive logits imply that the discriminator thinks that it\n belongs to the true class.\n \"\"\"\n ndf = self.hparams.num_discriminator_filters\n frames = tf.stack(frames)\n\n # Switch from time-major axis to batch-major axis.\n frames = common_video.swap_time_and_batch_axes(frames)\n\n # 3-D Conv-net mapping inputs to activations.\n num_outputs = [ndf, ndf*2, ndf*2, ndf*4, ndf*4, ndf*8, ndf*8]\n kernel_sizes = [3, 4, 3, 4, 3, 4, 3]\n strides = [[1, 1, 1], [1, 2, 2], [1, 1, 1], [1, 2, 2], [1, 1, 1],\n [2, 2, 2], [1, 1, 1]]\n\n names = [\"video_sn_conv0_0\", \"video_sn_conv0_1\", \"video_sn_conv1_0\",\n \"video_sn_conv1_1\", \"video_sn_conv2_0\", \"video_sn_conv2_1\",\n \"video_sn_conv3_0\"]\n iterable = zip(num_outputs, kernel_sizes, strides, names)\n activations = frames\n for num_filters, kernel_size, stride, name in iterable:\n activations = self.pad_conv3d_lrelu(activations, num_filters, kernel_size,\n stride, name)\n num_fc_dimensions = self.get_fc_dimensions(strides, kernel_sizes)\n activations = tf.reshape(activations, (-1, num_fc_dimensions))\n return tf.squeeze(tf.layers.dense(activations, 1))\n\n def d_step(self, true_frames, gen_frames):\n \"\"\"Performs the discriminator step in computing the GAN loss.\n\n Applies stop-gradient to the generated frames while computing the\n discriminator loss to make sure that the gradients are not back-propagated\n to the generator. 
This makes sure that only the discriminator is updated.\n\n Args:\n true_frames: True outputs\n gen_frames: Generated frames.\n Returns:\n d_loss: Loss component due to the discriminator.\n \"\"\"\n hparam_to_disc_loss = {\n \"least_squares\": gan_losses.least_squares_discriminator_loss,\n \"cross_entropy\": gan_losses.modified_discriminator_loss,\n \"wasserstein\": gan_losses.wasserstein_discriminator_loss}\n\n # Concat across batch-axis.\n _, batch_size, _, _, _ = common_layers.shape_list(true_frames)\n all_frames = tf.concat(\n [true_frames, tf.stop_gradient(gen_frames)], axis=1)\n\n all_logits = self.discriminator(all_frames)\n true_logits, fake_logits_stop = \\\n all_logits[:batch_size], all_logits[batch_size:]\n mean_true_logits = tf.reduce_mean(true_logits)\n tf.summary.scalar(\"mean_true_logits\", mean_true_logits)\n\n mean_fake_logits_stop = tf.reduce_mean(fake_logits_stop)\n tf.summary.scalar(\"mean_fake_logits_stop\", mean_fake_logits_stop)\n\n discriminator_loss_func = hparam_to_disc_loss[self.hparams.gan_loss]\n gan_d_loss = discriminator_loss_func(\n discriminator_real_outputs=true_logits,\n discriminator_gen_outputs=fake_logits_stop,\n add_summaries=True)\n return gan_d_loss, true_logits, fake_logits_stop\n\n def g_step(self, gen_frames, fake_logits_stop):\n \"\"\"Performs the generator step in computing the GAN loss.\n\n Args:\n gen_frames: Generated frames\n fake_logits_stop: Logits corresponding to the generated frames as per\n the discriminator. Assumed to have a stop-gradient term.\n Returns:\n gan_g_loss_pos_d: Loss.\n gan_g_loss_neg_d: -gan_g_loss_pos_d but with a stop gradient on generator.\n \"\"\"\n hparam_to_gen_loss = {\n \"least_squares\": gan_losses.least_squares_generator_loss,\n \"cross_entropy\": gan_losses.modified_generator_loss,\n \"wasserstein\": gan_losses.wasserstein_generator_loss\n }\n\n fake_logits = self.discriminator(gen_frames)\n mean_fake_logits = tf.reduce_mean(fake_logits)\n tf.summary.scalar(\"mean_fake_logits\", mean_fake_logits)\n\n # Generator loss.\n # Using gan_g_loss_pos_d updates the discriminator as well.\n # To avoid this add gan_g_loss_neg_d = -gan_g_loss_pos_d\n # but with stop gradient on the generator.\n # This makes sure that the net gradient on the discriminator is zero and\n # net-gradient on the generator is just due to the gan_g_loss_pos_d.\n generator_loss_func = hparam_to_gen_loss[self.hparams.gan_loss]\n gan_g_loss_pos_d = generator_loss_func(\n discriminator_gen_outputs=fake_logits, add_summaries=True)\n gan_g_loss_neg_d = -generator_loss_func(\n discriminator_gen_outputs=fake_logits_stop, add_summaries=True)\n return gan_g_loss_pos_d, gan_g_loss_neg_d\n\n def get_gan_loss(self, true_frames, gen_frames, name):\n \"\"\"Get the discriminator + generator loss at every step.\n\n This performs an 1:1 update of the discriminator and generator at every\n step.\n\n Args:\n true_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)\n Assumed to be ground truth.\n gen_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)\n Assumed to be fake.\n name: discriminator scope.\n Returns:\n loss: 0-D Tensor, with d_loss + g_loss\n \"\"\"\n # D - STEP\n with tf.variable_scope(\"%s_discriminator\" % name, reuse=tf.AUTO_REUSE):\n gan_d_loss, _, fake_logits_stop = self.d_step(\n true_frames, gen_frames)\n\n # G - STEP\n with tf.variable_scope(\"%s_discriminator\" % name, reuse=True):\n gan_g_loss_pos_d, gan_g_loss_neg_d = self.g_step(\n gen_frames, fake_logits_stop)\n gan_g_loss = gan_g_loss_pos_d + gan_g_loss_neg_d\n 
tf.summary.scalar(\"gan_loss_%s\" % name, gan_g_loss_pos_d + gan_d_loss)\n\n if self.hparams.gan_optimization == \"joint\":\n gan_loss = gan_g_loss + gan_d_loss\n else:\n curr_step = self.get_iteration_num()\n gan_loss = tf.cond(\n tf.logical_not(curr_step % 2 == 0), lambda: gan_g_loss,\n lambda: gan_d_loss)\n return gan_loss\n\n def get_extra_loss(self, latent_means=None, latent_stds=None,\n true_frames=None, gen_frames=None):\n \"\"\"Gets extra loss from VAE and GAN.\"\"\"\n if not self.is_training:\n return 0.0\n\n vae_loss, d_vae_loss, d_gan_loss = 0.0, 0.0, 0.0\n # Use sv2p's KL divergence computation.\n if self.hparams.use_vae:\n vae_loss = super(NextFrameSavpBase, self).get_extra_loss(\n latent_means=latent_means, latent_stds=latent_stds)\n\n if self.hparams.use_gan:\n # Strip out the first context_frames for the true_frames\n # Strip out the first context_frames - 1 for the gen_frames\n context_frames = self.hparams.video_num_input_frames\n true_frames = tf.stack(\n tf.unstack(true_frames, axis=0)[context_frames:])\n\n # discriminator for VAE.\n if self.hparams.use_vae:\n gen_enc_frames = tf.stack(\n tf.unstack(gen_frames, axis=0)[context_frames-1:])\n d_vae_loss = self.get_gan_loss(true_frames, gen_enc_frames, name=\"vae\")\n\n # discriminator for GAN.\n gen_prior_frames = tf.stack(\n tf.unstack(self.gen_prior_video, axis=0)[context_frames-1:])\n d_gan_loss = self.get_gan_loss(true_frames, gen_prior_frames, name=\"gan\")\n\n return (\n vae_loss + self.hparams.gan_loss_multiplier * d_gan_loss +\n self.hparams.gan_vae_loss_multiplier * d_vae_loss)\n\n def pad_conv3d_lrelu(self, activations, n_filters, kernel_size, strides,\n scope):\n \"\"\"Pad, apply 3-D convolution and leaky relu.\"\"\"\n padding = [[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]\n\n # tf.nn.conv3d accepts a list of 5 values for strides\n # with first and last value equal to 1\n if isinstance(strides, numbers.Integral):\n strides = [strides] * 3\n strides = [1] + strides + [1]\n\n # Filter_shape = [K, K, K, num_input, num_output]\n filter_shape = (\n [kernel_size]*3 + activations.shape[-1:].as_list() + [n_filters])\n\n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n conv_filter = tf.get_variable(\n \"conv_filter\", shape=filter_shape,\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n if self.hparams.use_spectral_norm:\n conv_filter, assign_op = common_layers.apply_spectral_norm(conv_filter)\n if self.is_training:\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, assign_op)\n\n padded = tf.pad(activations, padding)\n convolved = tf.nn.conv3d(\n padded, conv_filter, strides=strides, padding=\"VALID\")\n rectified = tf.nn.leaky_relu(convolved, alpha=0.2)\n return rectified\n\n @staticmethod\n def train_hooks(hook_context):\n del hook_context\n return [update_ops_hook.UpdateOpsHook()]\n\n\[email protected]_model\nclass NextFrameSAVP(NextFrameSavpBase, sv2p.NextFrameSv2pLegacy):\n \"\"\"Stochastic Adversarial Video Prediction.\"\"\"\n\n def construct_model(self, images, actions, rewards):\n \"\"\"Model that takes in images and returns predictions.\n\n Args:\n images: list of 4-D Tensors indexed by time.\n (batch_size, width, height, channels)\n actions: list of action tensors\n each action should be in the shape ?x1xZ\n rewards: list of reward tensors\n each reward should be in the shape ?x1xZ\n\n Returns:\n video: list of 4-D predicted frames.\n all_rewards: predicted rewards.\n latent_means: list of gaussian means conditioned on the input at\n every frame.\n latent_stds: list of gaussian stds conditioned on 
the input at\n every frame.\n\n Raises:\n ValueError: If not exactly one of self.hparams.vae or self.hparams.gan\n is set to True.\n \"\"\"\n if not self.hparams.use_vae and not self.hparams.use_gan:\n raise ValueError(\"Set at least one of use_vae or use_gan to be True\")\n if self.hparams.gan_optimization not in [\"joint\", \"sequential\"]:\n raise ValueError(\"self.hparams.gan_optimization should be either joint \"\n \"or sequential got %s\" % self.hparams.gan_optimization)\n\n images = tf.unstack(images, axis=0)\n actions = tf.unstack(actions, axis=0)\n rewards = tf.unstack(rewards, axis=0)\n\n latent_dims = self.hparams.z_dim\n context_frames = self.hparams.video_num_input_frames\n seq_len = len(images)\n input_shape = common_layers.shape_list(images[0])\n batch_size = input_shape[0]\n\n # Model does not support reward-conditioned frame generation.\n fake_rewards = rewards[:-1]\n\n # Concatenate x_{t-1} and x_{t} along depth and encode it to\n # produce the mean and standard deviation of z_{t-1}\n image_pairs = tf.concat([images[:seq_len - 1],\n images[1:seq_len]], axis=-1)\n\n z_mu, z_log_sigma_sq = self.encoder(image_pairs)\n # Unstack z_mu and z_log_sigma_sq along the time dimension.\n z_mu = tf.unstack(z_mu, axis=0)\n z_log_sigma_sq = tf.unstack(z_log_sigma_sq, axis=0)\n iterable = zip(images[:-1], actions[:-1], fake_rewards,\n z_mu, z_log_sigma_sq)\n\n # Initialize LSTM State\n lstm_state = [None] * 7\n gen_cond_video, gen_prior_video, all_rewards, latent_means, latent_stds = \\\n [], [], [], [], []\n pred_image = tf.zeros_like(images[0])\n prior_latent_state, cond_latent_state = None, None\n train_mode = self.hparams.mode == tf.estimator.ModeKeys.TRAIN\n\n # Create scheduled sampling function\n ss_func = self.get_scheduled_sample_func(batch_size)\n\n with tf.variable_scope(\"prediction\", reuse=tf.AUTO_REUSE):\n\n for step, (image, action, reward, mu, log_sigma_sq) in enumerate(iterable): # pylint:disable=line-too-long\n # Sample latents using a gaussian centered at conditional mu and std.\n latent = common_video.get_gaussian_tensor(mu, log_sigma_sq)\n\n # Sample prior latents from isotropic normal distribution.\n prior_latent = tf.random_normal(tf.shape(latent), dtype=tf.float32)\n\n # LSTM that encodes correlations between conditional latents.\n # Pg 22 in https://arxiv.org/pdf/1804.01523.pdf\n enc_cond_latent, cond_latent_state = common_video.basic_lstm(\n latent, cond_latent_state, latent_dims, name=\"cond_latent\")\n\n # LSTM that encodes correlations between prior latents.\n enc_prior_latent, prior_latent_state = common_video.basic_lstm(\n prior_latent, prior_latent_state, latent_dims, name=\"prior_latent\")\n\n # Scheduled Sampling\n done_warm_start = step > context_frames - 1\n groundtruth_items = [image]\n generated_items = [pred_image]\n input_image, = self.get_scheduled_sample_inputs(\n done_warm_start, groundtruth_items, generated_items, ss_func)\n\n all_latents = tf.concat([enc_cond_latent, enc_prior_latent], axis=0)\n all_image = tf.concat([input_image, input_image], axis=0)\n all_action = tf.concat([action, action], axis=0)\n all_rewards = tf.concat([reward, reward], axis=0)\n\n all_pred_images, lstm_state, _ = self.construct_predictive_tower(\n all_image, all_rewards, all_action, lstm_state, all_latents,\n concat_latent=True)\n\n cond_pred_images, prior_pred_images = \\\n all_pred_images[:batch_size], all_pred_images[batch_size:]\n\n if train_mode and self.hparams.use_vae:\n pred_image = cond_pred_images\n else:\n pred_image = prior_pred_images\n\n 
gen_cond_video.append(cond_pred_images)\n gen_prior_video.append(prior_pred_images)\n latent_means.append(mu)\n latent_stds.append(log_sigma_sq)\n\n gen_cond_video = tf.stack(gen_cond_video, axis=0)\n self.gen_prior_video = tf.stack(gen_prior_video, axis=0)\n fake_rewards = tf.stack(fake_rewards, axis=0)\n\n if train_mode and self.hparams.use_vae:\n return gen_cond_video, fake_rewards, latent_means, latent_stds\n else:\n return self.gen_prior_video, fake_rewards, latent_means, latent_stds\n\n\[email protected]_model\nclass NextFrameSavpRl(NextFrameSavpBase, sv2p.NextFrameSv2p):\n \"\"\"Stochastic Adversarial Video Prediction for RL pipeline.\"\"\"\n\n def video_features(\n self, all_frames, all_actions, all_rewards, all_raw_frames):\n \"\"\"No video wide feature.\"\"\"\n del all_actions, all_rewards, all_raw_frames\n # Concatenate x_{t-1} and x_{t} along depth and encode it to\n # produce the mean and standard deviation of z_{t-1}\n seq_len = len(all_frames)\n image_pairs = tf.concat([all_frames[:seq_len-1],\n all_frames[1:seq_len]], axis=-1)\n z_mu, z_log_sigma_sq = self.encoder(image_pairs)\n # Unstack z_mu and z_log_sigma_sq along the time dimension.\n z_mu = tf.unstack(z_mu, axis=0)\n z_log_sigma_sq = tf.unstack(z_log_sigma_sq, axis=0)\n return [z_mu, z_log_sigma_sq]\n\n def video_extra_loss(self, frames_predicted, frames_target,\n internal_states, video_features):\n\n if not self.is_training:\n return 0.0\n\n latent_means, latent_stds = video_features\n true_frames, gen_frames = frames_target, frames_predicted\n\n loss = super(NextFrameSavpRl, self).get_extra_loss(\n latent_means=latent_means, latent_stds=latent_stds,\n true_frames=true_frames, gen_frames=gen_frames)\n return loss\n\n def next_frame(self, frames, actions, rewards, target_frame,\n internal_states, video_features):\n del target_frame\n\n if not self.hparams.use_vae or self.hparams.use_gan:\n raise NotImplementedError(\"Only supporting VAE for now.\")\n\n if self.has_pred_actions or self.has_values:\n raise NotImplementedError(\"Parameter sharing with policy not supported.\")\n\n image, action, reward = frames[0], actions[0], rewards[0]\n latent_dims = self.hparams.z_dim\n batch_size = common_layers.shape_list(image)[0]\n\n if internal_states is None:\n # Initialize LSTM State\n frame_index = 0\n lstm_state = [None] * 7\n cond_latent_state, prior_latent_state = None, None\n gen_prior_video = []\n else:\n (frame_index, lstm_state, cond_latent_state,\n prior_latent_state, gen_prior_video) = internal_states\n\n z_mu, log_sigma_sq = video_features\n z_mu, log_sigma_sq = z_mu[frame_index], log_sigma_sq[frame_index]\n\n # Sample latents using a gaussian centered at conditional mu and std.\n latent = common_video.get_gaussian_tensor(z_mu, log_sigma_sq)\n\n # Sample prior latents from isotropic normal distribution.\n prior_latent = tf.random_normal(tf.shape(latent), dtype=tf.float32)\n\n # # LSTM that encodes correlations between conditional latents.\n # # Pg 22 in https://arxiv.org/pdf/1804.01523.pdf\n enc_cond_latent, cond_latent_state = common_video.basic_lstm(\n latent, cond_latent_state, latent_dims, name=\"cond_latent\")\n\n # LSTM that encodes correlations between prior latents.\n enc_prior_latent, prior_latent_state = common_video.basic_lstm(\n prior_latent, prior_latent_state, latent_dims, name=\"prior_latent\")\n\n all_latents = tf.concat([enc_cond_latent, enc_prior_latent], axis=0)\n all_image = tf.concat([image, image], 0)\n all_action = tf.concat([action, action], 0) if self.has_actions else None\n\n 
all_pred_images, lstm_state = self.construct_predictive_tower(\n all_image, None, all_action, lstm_state, all_latents,\n concat_latent=True)\n\n cond_pred_images, prior_pred_images = \\\n all_pred_images[:batch_size], all_pred_images[batch_size:]\n\n if self.is_training and self.hparams.use_vae:\n pred_image = cond_pred_images\n else:\n pred_image = prior_pred_images\n\n gen_prior_video.append(prior_pred_images)\n internal_states = (frame_index + 1, lstm_state, cond_latent_state,\n prior_latent_state, gen_prior_video)\n\n if not self.has_rewards:\n return pred_image, None, 0.0, internal_states\n\n pred_reward = self.reward_prediction(\n pred_image, action, reward, latent)\n return pred_image, pred_reward, None, None, 0.0, internal_states\n", "# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Adversarially attack a model.\n\nThis script adversarially attacks a model and evaluates accuracy at various\n epsilons.\n\nParams such as which epsilons to evaluate at and the attack algorithm are\n specified by attack_params, see models/resnet.py for examples.\n\n--ignore_incorrect will only attack those examples that are already correctly\n classified by the model.\n\n--surrogate_attack will attack a model (A) and evaluate adversarial examples for\n A on a different model (B).\n\nExample run:\n- train a resnet on cifar10:\n bin/t2t_trainer.py --problem=image_cifar10 --hparams_set=resnet_cifar_32 \\\n --model=resnet\n\n- evaluate robustness using the FGSM attack:\n bin/t2t_attack.py --attack_params_set=resnet_fgsm --problem=image_cifar10\\\n --hparams_set=resnet_cifar_32 --model=resnet\n\"\"\"\n\nimport os\n\nfrom tensor2tensor.bin import t2t_trainer\nfrom tensor2tensor.data_generators import problem as problem_lib # pylint: disable=unused-import\nfrom tensor2tensor.utils import adv_attack_utils\nfrom tensor2tensor.utils import cloud_mlengine\nfrom tensor2tensor.utils import registry\nfrom tensor2tensor.utils import t2t_model\nfrom tensor2tensor.utils import trainer_lib\nfrom tensor2tensor.utils import usr_dir\n\nimport tensorflow as tf\n\nflags = tf.flags\nFLAGS = flags.FLAGS\n\n# See flags.py for additional command-line flags.\nflags.DEFINE_string(\"attack_params_set\", None,\n \"Which attack parameters to use.\")\nflags.DEFINE_boolean(\"surrogate_attack\", False,\n \"Perform an attack on a surrogate model.\")\nflags.DEFINE_string(\"surrogate_model\", None, \"Surrogate model to attack.\")\nflags.DEFINE_string(\"surrogate_hparams_set\", None,\n \"Surrogate model's hyperparameter set.\")\nflags.DEFINE_string(\"surrogate_output_dir\", None,\n \"Directory storing surrogate model's weights.\")\nflags.DEFINE_boolean(\n \"ignore_incorrect\", False, \"Ignore examples that are \"\n \"incorrectly classified to begin with.\")\n\n\ndef create_attack_params():\n return registry.attack_params(FLAGS.attack_params_set)\n\n\ndef create_attack(attack):\n return registry.attack(attack)\n\n\ndef create_surrogate_hparams():\n return 
trainer_lib.create_hparams(FLAGS.surrogate_hparams_set, None)\n\n\ndef create_surrogate_run_config(hp):\n \"\"\"Create a run config.\n\n Args:\n hp: model hyperparameters\n Returns:\n a run config\n \"\"\"\n save_ckpt_steps = max(FLAGS.iterations_per_loop, FLAGS.local_eval_frequency)\n save_ckpt_secs = FLAGS.save_checkpoints_secs or None\n if save_ckpt_secs:\n save_ckpt_steps = None\n assert FLAGS.surrogate_output_dir\n # the various custom getters we have written do not play well together yet.\n # TODO(noam): ask rsepassi for help here.\n daisy_chain_variables = (\n hp.daisy_chain_variables and hp.activation_dtype == \"float32\" and\n hp.weight_dtype == \"float32\")\n return trainer_lib.create_run_config(\n model_name=FLAGS.model,\n model_dir=os.path.expanduser(FLAGS.surrogate_output_dir),\n master=FLAGS.master,\n iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.tpu_num_shards,\n log_device_placement=FLAGS.log_device_placement,\n save_checkpoints_steps=save_ckpt_steps,\n save_checkpoints_secs=save_ckpt_secs,\n keep_checkpoint_max=FLAGS.keep_checkpoint_max,\n keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours,\n num_gpus=FLAGS.worker_gpu,\n gpu_order=FLAGS.gpu_order,\n num_async_replicas=FLAGS.worker_replicas,\n gpu_mem_fraction=FLAGS.worker_gpu_memory_fraction,\n enable_graph_rewriter=FLAGS.enable_graph_rewriter,\n use_tpu=FLAGS.use_tpu,\n schedule=FLAGS.schedule,\n no_data_parallelism=hp.no_data_parallelism,\n daisy_chain_variables=daisy_chain_variables,\n ps_replicas=FLAGS.ps_replicas,\n ps_job=FLAGS.ps_job,\n ps_gpu=FLAGS.ps_gpu,\n sync=FLAGS.sync,\n worker_id=FLAGS.worker_id,\n worker_job=FLAGS.worker_job,\n random_seed=FLAGS.random_seed,\n tpu_infeed_sleep_secs=FLAGS.tpu_infeed_sleep_secs,\n inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,\n log_step_count_steps=FLAGS.log_step_count_steps,\n intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads)\n\n\ndef prepare_data(problem, hparams, params, config):\n \"\"\"Construct input pipeline.\"\"\"\n input_fn = problem.make_estimator_input_fn(\n tf.estimator.ModeKeys.EVAL, hparams, force_repeat=True)\n dataset = input_fn(params, config)\n features, _ = dataset.make_one_shot_iterator().get_next()\n inputs, labels = features[\"targets\"], features[\"inputs\"]\n inputs = tf.to_float(inputs)\n input_shape = inputs.shape.as_list()\n inputs = tf.reshape(inputs, [hparams.batch_size] + input_shape[1:])\n labels = tf.reshape(labels, [hparams.batch_size])\n return inputs, labels, features\n\n\ndef main(argv):\n tf.logging.set_verbosity(tf.logging.INFO)\n trainer_lib.set_random_seed(FLAGS.random_seed)\n usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)\n t2t_trainer.maybe_log_registry_and_exit()\n\n\n if FLAGS.cloud_mlengine:\n cloud_mlengine.launch()\n return\n\n if FLAGS.generate_data:\n t2t_trainer.generate_data()\n\n if cloud_mlengine.job_dir():\n FLAGS.output_dir = cloud_mlengine.job_dir()\n\n if argv:\n t2t_trainer.set_hparams_from_args(argv[1:])\n\n if FLAGS.surrogate_attack:\n tf.logging.warn(\"Performing surrogate model attack.\")\n sur_hparams = create_surrogate_hparams()\n trainer_lib.add_problem_hparams(sur_hparams, FLAGS.problem)\n\n hparams = t2t_trainer.create_hparams()\n trainer_lib.add_problem_hparams(hparams, FLAGS.problem)\n\n attack_params = create_attack_params()\n attack_params.add_hparam(attack_params.epsilon_name, 0.0)\n\n if FLAGS.surrogate_attack:\n sur_config = create_surrogate_run_config(sur_hparams)\n config = t2t_trainer.create_run_config(hparams)\n params = {\n 
\"batch_size\": hparams.batch_size,\n \"use_tpu\": FLAGS.use_tpu,\n }\n\n # add \"_rev\" as a hack to avoid image standardization\n problem = registry.problem(FLAGS.problem + \"_rev\")\n\n inputs, labels, features = prepare_data(problem, hparams, params, config)\n\n sess = tf.Session()\n\n if FLAGS.surrogate_attack:\n sur_model_fn = t2t_model.T2TModel.make_estimator_model_fn(\n FLAGS.surrogate_model, sur_hparams, use_tpu=FLAGS.use_tpu)\n sur_ch_model = adv_attack_utils.T2TAttackModel(\n sur_model_fn, features, params, sur_config, scope=\"surrogate\")\n # Dummy call to construct graph\n sur_ch_model.get_probs(inputs)\n\n checkpoint_path = os.path.expanduser(FLAGS.surrogate_output_dir)\n tf.train.init_from_checkpoint(\n tf.train.latest_checkpoint(checkpoint_path), {\"/\": \"surrogate/\"})\n sess.run(tf.global_variables_initializer())\n\n other_vars = set(tf.global_variables())\n\n model_fn = t2t_model.T2TModel.make_estimator_model_fn(\n FLAGS.model, hparams)\n ch_model = adv_attack_utils.T2TAttackModel(model_fn, features, params, config)\n\n acc_mask = None\n probs = ch_model.get_probs(inputs)\n if FLAGS.ignore_incorrect:\n preds = tf.argmax(probs, -1, output_type=labels.dtype)\n preds = tf.reshape(preds, labels.shape)\n acc_mask = tf.to_float(tf.equal(labels, preds))\n one_hot_labels = tf.one_hot(labels, probs.shape[-1])\n\n if FLAGS.surrogate_attack:\n attack = create_attack(attack_params.attack)(sur_ch_model, sess=sess)\n else:\n attack = create_attack(attack_params.attack)(ch_model, sess=sess)\n\n new_vars = set(tf.global_variables()) - other_vars\n\n # Restore weights\n saver = tf.train.Saver(new_vars)\n checkpoint_path = os.path.expanduser(FLAGS.output_dir)\n saver.restore(sess, tf.train.latest_checkpoint(checkpoint_path))\n\n # reuse variables\n tf.get_variable_scope().reuse_variables()\n\n def compute_accuracy(x, l, mask):\n \"\"\"Compute model accuracy.\"\"\"\n preds = ch_model.get_probs(x)\n preds = tf.squeeze(preds)\n preds = tf.argmax(preds, -1, output_type=l.dtype)\n\n _, acc_update_op = tf.metrics.accuracy(l, preds, weights=mask)\n\n if FLAGS.surrogate_attack:\n preds = sur_ch_model.get_probs(x)\n preds = tf.squeeze(preds)\n preds = tf.argmax(preds, -1, output_type=l.dtype)\n acc_update_op = tf.tuple((acc_update_op,\n tf.metrics.accuracy(l, preds, weights=mask)[1]))\n\n sess.run(tf.initialize_local_variables())\n for i in range(FLAGS.eval_steps):\n tf.logging.info(\n \"\\tEvaluating batch [%d / %d]\" % (i + 1, FLAGS.eval_steps))\n acc = sess.run(acc_update_op)\n if FLAGS.surrogate_attack:\n tf.logging.info(\"\\tFinal acc: (%.4f, %.4f)\" % (acc[0], acc[1]))\n else:\n tf.logging.info(\"\\tFinal acc: %.4f\" % acc)\n return acc\n\n epsilon_acc_pairs = []\n for epsilon in attack_params.attack_epsilons:\n tf.logging.info(\"Attacking @ eps=%.4f\" % epsilon)\n attack_params.set_hparam(attack_params.epsilon_name, epsilon)\n adv_x = attack.generate(inputs, y=one_hot_labels, **attack_params.values())\n acc = compute_accuracy(adv_x, labels, acc_mask)\n epsilon_acc_pairs.append((epsilon, acc))\n\n for epsilon, acc in epsilon_acc_pairs:\n if FLAGS.surrogate_attack:\n tf.logging.info(\n \"Accuracy @ eps=%.4f: (%.4f, %.4f)\" % (epsilon, acc[0], acc[1]))\n else:\n tf.logging.info(\"Accuracy @ eps=%.4f: %.4f\" % (epsilon, acc))\n\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run()\n", "# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in 
compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Algorithmic data generators.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport shutil\nimport numpy as np\nfrom six.moves import range # pylint: disable=redefined-builtin\nfrom tensor2tensor.data_generators import generator_utils as utils\nfrom tensor2tensor.data_generators import problem\nfrom tensor2tensor.data_generators import text_encoder\nfrom tensor2tensor.layers import modalities\nfrom tensor2tensor.utils import metrics\nfrom tensor2tensor.utils import registry\nimport tensorflow as tf\n\n\nclass AlgorithmicProblem(problem.Problem):\n \"\"\"Base class for algorithmic problems.\"\"\"\n\n @property\n def num_symbols(self):\n raise NotImplementedError()\n\n def generator(self, nbr_symbols, max_length, nbr_cases):\n \"\"\"Generates the data.\"\"\"\n raise NotImplementedError()\n\n @property\n def train_length(self):\n return 40\n\n @property\n def dev_length(self):\n return 400\n\n @property\n def train_size(self):\n return 100000\n\n @property\n def dev_size(self):\n return 10000\n\n @property\n def num_shards(self):\n return 10\n\n def generate_data(self, data_dir, _, task_id=-1):\n\n def generator_eos(nbr_symbols, max_length, nbr_cases):\n \"\"\"Shift by NUM_RESERVED_IDS and append EOS token.\"\"\"\n for case in self.generator(nbr_symbols, max_length, nbr_cases):\n new_case = {}\n for feature in case:\n new_case[feature] = [\n i + text_encoder.NUM_RESERVED_TOKENS for i in case[feature]\n ] + [text_encoder.EOS_ID]\n yield new_case\n\n utils.generate_dataset_and_shuffle(\n generator_eos(self.num_symbols, self.train_length, self.train_size),\n self.training_filepaths(data_dir, self.num_shards, shuffled=True),\n generator_eos(self.num_symbols, self.dev_length, self.dev_size),\n self.dev_filepaths(data_dir, 1, shuffled=True),\n shuffle=False)\n\n def hparams(self, defaults, unused_model_hparams):\n p = defaults\n vocab_size = self.num_symbols + text_encoder.NUM_RESERVED_TOKENS\n p.modality = {\"inputs\": modalities.ModalityType.SYMBOL,\n \"targets\": modalities.ModalityType.SYMBOL}\n p.vocab_size = {\"inputs\": vocab_size,\n \"targets\": vocab_size}\n p.input_space_id = problem.SpaceID.DIGIT_0\n p.target_space_id = problem.SpaceID.DIGIT_1\n\n\[email protected]_problem\nclass AlgorithmicIdentityBinary40(AlgorithmicProblem):\n \"\"\"Problem spec for algorithmic binary identity task.\"\"\"\n\n @property\n def num_symbols(self):\n return 2\n\n def generator(self, nbr_symbols, max_length, nbr_cases):\n \"\"\"Generator for the identity (copy) task on sequences of symbols.\n\n The length of the sequence is drawn uniformly at random from [1, max_length]\n and then symbols are drawn uniformly at random from [0, nbr_symbols) until\n nbr_cases sequences have been produced.\n\n Args:\n nbr_symbols: number of symbols to use in each sequence.\n max_length: integer, maximum length of sequences to generate.\n nbr_cases: the number of cases to generate.\n\n Yields:\n A dictionary {\"inputs\": input-list, \"targets\": target-list} where\n input-list and target-list are 
the same.\n \"\"\"\n for _ in range(nbr_cases):\n l = np.random.randint(max_length) + 1\n inputs = [np.random.randint(nbr_symbols) for _ in range(l)]\n yield {\"inputs\": inputs, \"targets\": inputs}\n\n\[email protected]_problem\nclass AlgorithmicIdentityDecimal40(AlgorithmicIdentityBinary40):\n \"\"\"Problem spec for algorithmic decimal identity task.\"\"\"\n\n @property\n def num_symbols(self):\n return 10\n\n\[email protected]_problem\nclass AlgorithmicShiftDecimal40(AlgorithmicProblem):\n \"\"\"Problem spec for algorithmic decimal shift task.\"\"\"\n\n @property\n def num_symbols(self):\n return 20\n\n def generator(self, nbr_symbols, max_length, nbr_cases):\n \"\"\"Generator for the shift task on sequences of symbols.\n\n The length of the sequence is drawn uniformly at random from [1, max_length]\n and then symbols are drawn uniformly at random from [0, nbr_symbols - shift]\n until nbr_cases sequences have been produced (output[i] = input[i] + shift).\n\n Args:\n nbr_symbols: number of symbols to use in each sequence (input + output).\n max_length: integer, maximum length of sequences to generate.\n nbr_cases: the number of cases to generate.\n\n Yields:\n A dictionary {\"inputs\": input-list, \"targets\": target-list} where\n target-list[i] = input-list[i] + shift.\n \"\"\"\n shift = 10\n for _ in range(nbr_cases):\n l = np.random.randint(max_length) + 1\n inputs = [np.random.randint(nbr_symbols - shift) for _ in range(l)]\n yield {\"inputs\": inputs, \"targets\": [i + shift for i in inputs]}\n\n @property\n def dev_length(self):\n return 80\n\n\[email protected]_problem\nclass AlgorithmicReverseBinary40(AlgorithmicProblem):\n \"\"\"Problem spec for algorithmic binary reversing task.\"\"\"\n\n @property\n def num_symbols(self):\n return 2\n\n def generator(self, nbr_symbols, max_length, nbr_cases):\n \"\"\"Generator for the reversing task on sequences of symbols.\n\n The length of the sequence is drawn uniformly at random from [1, max_length]\n and then symbols are drawn uniformly at random from [0, nbr_symbols) until\n nbr_cases sequences have been produced.\n\n Args:\n nbr_symbols: number of symbols to use in each sequence.\n max_length: integer, maximum length of sequences to generate.\n nbr_cases: the number of cases to generate.\n\n Yields:\n A dictionary {\"inputs\": input-list, \"targets\": target-list} where\n target-list is input-list reversed.\n \"\"\"\n for _ in range(nbr_cases):\n l = np.random.randint(max_length) + 1\n inputs = [np.random.randint(nbr_symbols) for _ in range(l)]\n yield {\"inputs\": inputs, \"targets\": list(reversed(inputs))}\n\n\[email protected]_problem\nclass AlgorithmicReverseDecimal40(AlgorithmicReverseBinary40):\n \"\"\"Problem spec for algorithmic decimal reversing task.\"\"\"\n\n @property\n def num_symbols(self):\n return 10\n\n\ndef zipf_distribution(nbr_symbols, alpha):\n \"\"\"Helper function: Create a Zipf distribution.\n\n Args:\n nbr_symbols: number of symbols to use in the distribution.\n alpha: float, Zipf's Law Distribution parameter. 
Default = 1.5.\n Usually for modelling natural text distribution is in\n the range [1.1-1.6].\n\n Returns:\n distr_map: list of float, Zipf's distribution over nbr_symbols.\n\n \"\"\"\n tmp = np.power(np.arange(1, nbr_symbols + 1), -alpha)\n zeta = np.r_[0.0, np.cumsum(tmp)]\n return [x / zeta[-1] for x in zeta]\n\n\ndef zipf_random_sample(distr_map, sample_len):\n \"\"\"Helper function: Generate a random Zipf sample of given length.\n\n Args:\n distr_map: list of float, Zipf's distribution over nbr_symbols.\n sample_len: integer, length of sequence to generate.\n\n Returns:\n sample: list of integer, Zipf's random sample over nbr_symbols.\n\n \"\"\"\n u = np.random.random(sample_len)\n # Random produces values in range [0.0,1.0); even if it is almost\n # improbable(but possible) that it can generate a clear 0.000..0.\n return list(np.searchsorted(distr_map, u))\n\n\ndef reverse_generator_nlplike(nbr_symbols,\n max_length,\n nbr_cases,\n scale_std_dev=100,\n alpha=1.5):\n \"\"\"Generator for the reversing nlp-like task on sequences of symbols.\n\n The length of the sequence is drawn from a Gaussian(Normal) distribution\n at random from [1, max_length] and with std deviation of 1%,\n then symbols are drawn from Zipf's law at random from [0, nbr_symbols) until\n nbr_cases sequences have been produced.\n\n Args:\n nbr_symbols: integer, number of symbols.\n max_length: integer, maximum length of sequences to generate.\n nbr_cases: the number of cases to generate.\n scale_std_dev: float, Normal distribution's standard deviation scale factor\n used to draw the length of sequence. Default = 1% of the max_length.\n alpha: float, Zipf's Law Distribution parameter. Default = 1.5.\n Usually for modelling natural text distribution is in\n the range [1.1-1.6].\n\n Yields:\n A dictionary {\"inputs\": input-list, \"targets\": target-list} where\n target-list is input-list reversed.\n \"\"\"\n std_dev = max_length / scale_std_dev\n distr_map = zipf_distribution(nbr_symbols, alpha)\n for _ in range(nbr_cases):\n l = int(abs(np.random.normal(loc=max_length / 2, scale=std_dev)) + 1)\n inputs = zipf_random_sample(distr_map, l)\n yield {\"inputs\": inputs, \"targets\": list(reversed(inputs))}\n\n\[email protected]_problem\nclass AlgorithmicReverseNlplike8k(AlgorithmicProblem):\n \"\"\"Problem spec for algorithmic nlp-like reversing task.\"\"\"\n\n @property\n def num_symbols(self):\n return 8000\n\n def generator(self, nbr_symbols, max_length, nbr_cases):\n return reverse_generator_nlplike(nbr_symbols, max_length, nbr_cases, 10,\n 1.300)\n\n @property\n def train_length(self):\n return 70\n\n @property\n def dev_length(self):\n return 70\n\n\[email protected]_problem\nclass AlgorithmicReverseNlplike32k(AlgorithmicReverseNlplike8k):\n \"\"\"Problem spec for algorithmic nlp-like reversing task, 32k vocab.\"\"\"\n\n @property\n def num_symbols(self):\n return 32000\n\n def generator(self, nbr_symbols, max_length, nbr_cases):\n return reverse_generator_nlplike(nbr_symbols, max_length, nbr_cases, 10,\n 1.050)\n\n\ndef lower_endian_to_number(l, base):\n \"\"\"Helper function: convert a list of digits in the given base to a number.\"\"\"\n return sum([d * (base**i) for i, d in enumerate(l)])\n\n\ndef number_to_lower_endian(n, base):\n \"\"\"Helper function: convert a number to a list of digits in the given base.\"\"\"\n if n < base:\n return [n]\n return [n % base] + number_to_lower_endian(n // base, base)\n\n\ndef random_number_lower_endian(length, base):\n \"\"\"Helper function: generate a random number as a 
lower-endian digits list.\"\"\"\n if length == 1: # Last digit can be 0 only if length is 1.\n return [np.random.randint(base)]\n prefix = [np.random.randint(base) for _ in range(length - 1)]\n return prefix + [np.random.randint(base - 1) + 1] # Last digit is not 0.\n\n\[email protected]_problem\nclass AlgorithmicAdditionBinary40(AlgorithmicProblem):\n \"\"\"Problem spec for algorithmic binary addition task.\"\"\"\n\n @property\n def num_symbols(self):\n return 2\n\n def generator(self, base, max_length, nbr_cases): # pylint: disable=arguments-differ\n \"\"\"Generator for the addition task.\n\n The length of each number is drawn uniformly at random in [1, max_length/2]\n and then digits are drawn uniformly at random. The numbers are added and\n separated by [base] in the input. Stops at nbr_cases.\n\n Args:\n base: in which base are the numbers.\n max_length: integer, maximum length of sequences to generate.\n nbr_cases: the number of cases to generate.\n\n Yields:\n A dictionary {\"inputs\": input-list, \"targets\": target-list} where\n input-list are the 2 numbers and target-list is the result of adding them.\n\n Raises:\n ValueError: if max_length is lower than 3.\n \"\"\"\n if max_length < 3:\n raise ValueError(\"Maximum length must be at least 3.\")\n for _ in range(nbr_cases):\n l1 = np.random.randint(max_length // 2) + 1\n l2 = np.random.randint(max_length - l1 - 1) + 1\n n1 = random_number_lower_endian(l1, base)\n n2 = random_number_lower_endian(l2, base)\n result = lower_endian_to_number(n1, base) + lower_endian_to_number(\n n2, base)\n inputs = n1 + [base] + n2\n targets = number_to_lower_endian(result, base)\n yield {\"inputs\": inputs, \"targets\": targets}\n\n\[email protected]_problem\nclass AlgorithmicAdditionDecimal40(AlgorithmicAdditionBinary40):\n \"\"\"Problem spec for algorithmic decimal addition task.\"\"\"\n\n @property\n def num_symbols(self):\n return 10\n\n\[email protected]_problem\nclass AlgorithmicMultiplicationBinary40(AlgorithmicProblem):\n \"\"\"Problem spec for algorithmic binary multiplication task.\"\"\"\n\n @property\n def num_symbols(self):\n return 2\n\n def generator(self, base, max_length, nbr_cases): # pylint: disable=arguments-differ\n \"\"\"Generator for the multiplication task.\n\n The length of each number is drawn uniformly at random in [1, max_length/2]\n and then digits are drawn uniformly at random. The numbers are multiplied\n and separated by [base] in the input. 
Stops at nbr_cases.\n\n Args:\n base: in which base are the numbers.\n max_length: integer, maximum length of sequences to generate.\n nbr_cases: the number of cases to generate.\n\n Yields:\n A dictionary {\"inputs\": input-list, \"targets\": target-list} where\n input-list are the 2 numbers and target-list is the result of multiplying\n them.\n\n Raises:\n ValueError: if max_length is lower than 3.\n \"\"\"\n if max_length < 3:\n raise ValueError(\"Maximum length must be at least 3.\")\n for _ in range(nbr_cases):\n l1 = np.random.randint(max_length // 2) + 1\n l2 = np.random.randint(max_length - l1 - 1) + 1\n n1 = random_number_lower_endian(l1, base)\n n2 = random_number_lower_endian(l2, base)\n result = lower_endian_to_number(n1, base) * lower_endian_to_number(\n n2, base)\n inputs = n1 + [base] + n2\n targets = number_to_lower_endian(result, base)\n yield {\"inputs\": inputs, \"targets\": targets}\n\n\[email protected]_problem\nclass AlgorithmicMultiplicationDecimal40(AlgorithmicMultiplicationBinary40):\n \"\"\"Problem spec for algorithmic decimal multiplication task.\"\"\"\n\n @property\n def num_symbols(self):\n return 10\n\n\[email protected]_problem\nclass AlgorithmicReverseBinary40Test(AlgorithmicReverseBinary40):\n \"\"\"Test Problem with tiny dataset.\"\"\"\n\n @property\n def train_length(self):\n return 10\n\n @property\n def dev_length(self):\n return 10\n\n @property\n def train_size(self):\n return 1000\n\n @property\n def dev_size(self):\n return 100\n\n @property\n def num_shards(self):\n return 1\n\n\[email protected]_problem\nclass AlgorithmicSortProblem(AlgorithmicProblem):\n \"\"\"Problem spec for sorting numbers.\"\"\"\n\n @property\n def num_symbols(self):\n return max(self.train_length, self.dev_length)\n\n @property\n def train_length(self):\n return 10\n\n @property\n def dev_length(self):\n return self.train_length * 2\n\n @property\n def unique(self):\n \"\"\"Unique numbers wo/ replacement or w/ replacement in sorting task.\"\"\"\n return False\n\n def generator(self, nbr_symbols, max_length, nbr_cases):\n \"\"\"Generating for sorting task on sequence of symbols.\n\n The length of the sequence is drawn uniformly at random from [1, max_length]\n and then symbols are drawn (uniquely w/ or w/o replacement) uniformly at\n random from [0, nbr_symbols) until nbr_cases sequences have been produced.\n\n Args:\n nbr_symbols: number of symbols to use in each sequence.\n max_length: integer, maximum length of sequences to generate.\n nbr_cases: the number of cases to generate.\n\n Yields:\n A dictionary {\"inputs\": input-list, \"targets\": target-list} where\n target-list is input-list sorted.\n \"\"\"\n for _ in range(nbr_cases):\n # Sample the sequence length.\n length = np.random.randint(max_length) + 1\n\n if self.unique:\n # Sample our inputs w/o replacement.\n inputs = np.arange(nbr_symbols)\n np.random.shuffle(inputs)\n\n # Truncate to the desired length.\n inputs = inputs[:length]\n inputs = list(inputs)\n else:\n inputs = list(np.random.randint(nbr_symbols, size=length))\n\n # Targets are simply the sorted inputs.\n targets = list(sorted(inputs))\n\n yield {\"inputs\": inputs, \"targets\": targets}\n\n def eval_metrics(self):\n defaults = super(AlgorithmicSortProblem, self).eval_metrics()\n return defaults + [metrics.Metrics.EDIT_DISTANCE]\n\n\[email protected]_problem\nclass TinyAlgo(AlgorithmicIdentityBinary40):\n \"\"\"A small algorthmic problem for testing.\"\"\"\n\n def generate_data(self, data_dir, tmp_dir, task_id=-1):\n \"\"\"Ganerate data for this 
problem.\"\"\"\n\n del tmp_dir, task_id\n identity_problem = AlgorithmicIdentityBinary40()\n utils.generate_files(\n identity_problem.generator(self.num_symbols, 40, 100000),\n self.training_filepaths(data_dir, 1, shuffled=True), 100)\n utils.generate_files(\n identity_problem.generator(self.num_symbols, 400, 10000),\n self.dev_filepaths(data_dir, 1, shuffled=True), 100)\n\n @classmethod\n def setup_for_test(cls):\n \"\"\"Setup directories and files required to run the problem.\"\"\"\n\n tmp_dir = tf.test.get_temp_dir()\n shutil.rmtree(tmp_dir)\n os.mkdir(tmp_dir)\n cls.data_dir = tmp_dir\n\n # Generate a small test dataset\n cls().generate_data(TinyAlgo.data_dir, None)\n" ]
[ [ "tensorflow.concat", "tensorflow.stack", "tensorflow.pad", "tensorflow.summary.scalar", "tensorflow.squeeze", "tensorflow.layers.dense", "tensorflow.stop_gradient", "tensorflow.truncated_normal_initializer", "tensorflow.logical_not", "tensorflow.layers.conv2d", "tensorflow.unstack", "tensorflow.shape", "tensorflow.zeros_like", "tensorflow.nn.conv3d", "tensorflow.contrib.layers.instance_norm", "numpy.array", "tensorflow.add_to_collection", "tensorflow.nn.leaky_relu", "tensorflow.clip_by_value", "tensorflow.reduce_mean", "tensorflow.reshape", "numpy.prod", "tensorflow.variable_scope" ], [ "tensorflow.train.latest_checkpoint", "tensorflow.metrics.accuracy", "tensorflow.initialize_local_variables", "tensorflow.reshape", "tensorflow.global_variables", "tensorflow.logging.warn", "tensorflow.argmax", "tensorflow.squeeze", "tensorflow.equal", "tensorflow.global_variables_initializer", "tensorflow.logging.info", "tensorflow.one_hot", "tensorflow.to_float", "tensorflow.Session", "tensorflow.logging.set_verbosity", "tensorflow.train.Saver", "tensorflow.get_variable_scope", "tensorflow.app.run" ], [ "numpy.random.random", "numpy.arange", "numpy.cumsum", "numpy.random.shuffle", "numpy.random.normal", "numpy.searchsorted", "tensorflow.test.get_temp_dir", "numpy.random.randint" ] ]
ealopez/pycroscopy
[ "9f7c0543b67eaa0668296295fc5f492360c130a0", "9f7c0543b67eaa0668296295fc5f492360c130a0" ]
[ "pycroscopy/analysis/fitter.py", "pycroscopy/core/io/write_utils.py" ]
[ "\"\"\"\nCreated on 7/17/16 10:08 AM\n@author: Numan Laanait, Suhas Somnath, Chris Smith\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import, unicode_literals\n\nimport numpy as np\nimport psutil\nimport scipy\nimport h5py\nimport time as tm\nfrom .guess_methods import GuessMethods\nfrom .fit_methods import Fit_Methods\nfrom ..core.io.pycro_data import PycroDataset\nfrom ..core.io.io_utils import get_available_memory, recommend_cpu_cores, format_time\nfrom ..core.io.hdf_utils import check_for_old, find_results_groups, check_for_matching_attrs, get_attr\nfrom .optimize import Optimize\n\n\nclass Fitter(object):\n \"\"\"\n Encapsulates the typical routines performed during model-dependent analysis of data.\n This abstract class should be extended to cover different types of imaging modalities.\n \"\"\"\n\n def __init__(self, h5_main, variables=['Frequency'], parallel=True, verbose=False):\n \"\"\"\n For now, we assume that the guess dataset has not been generated for this dataset but we will relax this\n requirement after testing the basic components.\n\n Parameters\n ----------\n h5_main : h5py.Dataset instance\n The dataset over which the analysis will be performed. This dataset should be linked to the spectroscopic\n indices and values, and position indices and values datasets.\n variables : list(string), Default ['Frequency']\n Lists of attributes that h5_main should possess so that it may be analyzed by Model.\n parallel : bool, optional\n Should the parallel implementation of the fitting be used. Default True\n verbose : bool, optional. default = False\n Whether or not to print statements that aid in debugging\n\n \"\"\"\n\n if not isinstance(h5_main, PycroDataset):\n h5_main = PycroDataset(h5_main)\n\n # Checking if dataset has the proper dimensions for the model to run.\n if self._is_legal(h5_main, variables):\n self.h5_main = h5_main\n\n else:\n raise ValueError('Provided dataset is not a \"Main\" dataset with necessary ancillary datasets')\n\n # Checking if parallel processing will be used\n self._parallel = parallel\n self._verbose = verbose\n\n # Determining the max size of the data that can be put into memory\n self._set_memory_and_cores()\n\n self._start_pos = 0\n self._end_pos = self.h5_main.shape[0]\n self.h5_guess = None\n self.h5_fit = None\n self.h5_results_grp = None\n\n # TODO: do NOT expose a lot of innards. 
Turn it into private with _var_name\n self.data = None\n self.guess = None\n self.fit = None\n\n self._fitter_name = None # Reset this in the extended classes\n self._parms_dict = dict()\n\n def _set_memory_and_cores(self):\n \"\"\"\n Checks hardware limitations such as memory, # cpus and sets the recommended datachunk sizes and the\n number of cores to be used by analysis methods.\n \"\"\"\n\n if self._parallel:\n self._maxCpus = max(1, psutil.cpu_count() - 2)\n else:\n self._maxCpus = 1\n\n if self._maxCpus == 1:\n self._parallel = False\n\n self._maxMemoryMB = get_available_memory() / 1024 ** 2 # in Mb\n\n self._maxDataChunk = int(self._maxMemoryMB / self._maxCpus)\n\n # Now calculate the number of positions that can be stored in memory in one go.\n mb_per_position = self.h5_main.dtype.itemsize * self.h5_main.shape[1] / 1024.0 ** 2\n\n # TODO: The size of the chunk should be determined by BOTH the computation time and memory restrictions\n self._max_pos_per_read = int(np.floor(self._maxDataChunk / mb_per_position))\n if self._verbose:\n print('Allowed to read {} pixels per chunk'.format(self._max_pos_per_read))\n\n def _is_legal(self, h5_main, variables):\n \"\"\"\n Checks whether or not the provided object can be analyzed by this Model class.\n Classes that extend this class will do additional checks to ensure that the supplied dataset is legal.\n\n Parameters\n ----\n h5_main : PycroDataset instance\n The dataset over which the analysis will be performed. This dataset should be linked to the spectroscopic\n indices and values, and position indices and values datasets.\n\n variables : list(string)\n The dimensions needed to be present in the attributes of h5_main to analyze the data with Model.\n\n Returns\n -------\n legal : Boolean\n Whether or not this dataset satisfies the necessary conditions for analysis\n\n \"\"\"\n return np.all(np.isin(variables, h5_main.spec_dim_labels))\n\n def _get_data_chunk(self):\n \"\"\"\n Reads the next chunk of data for the guess or the fit into memory\n \"\"\"\n if self._start_pos < self.h5_main.shape[0]:\n self._end_pos = int(min(self.h5_main.shape[0], self._start_pos + self._max_pos_per_read))\n self.data = self.h5_main[self._start_pos:self._end_pos, :]\n if self._verbose:\n print('\\nReading pixels {} to {} of {}'.format(self._start_pos, self._end_pos, self.h5_main.shape[0]))\n\n else:\n if self._verbose:\n print('Finished reading all data!')\n self.data = None\n\n def _get_guess_chunk(self):\n \"\"\"\n Returns a chunk of guess dataset corresponding to the main dataset.\n Should be called BEFORE _get_data_chunk since it relies upon current values of\n `self._start_pos`, `self._end_pos`\n\n Parameters\n -----\n None\n\n Returns\n --------\n\n \"\"\"\n if self.data is None:\n self._end_pos = int(min(self.h5_main.shape[0], self._start_pos + self._max_pos_per_read))\n self.guess = self.h5_guess[self._start_pos:self._end_pos, :]\n else:\n self.guess = self.h5_guess[self._start_pos:self._end_pos, :]\n\n if self._verbose:\n print('Guess of shape: {}'.format(self.guess.shape))\n\n def _set_results(self, is_guess=False):\n \"\"\"\n Writes the provided guess or fit results into appropriate datasets.\n Given that the guess and fit datasets are relatively small, we should be able to hold them in memory just fine\n\n Parameters\n ---------\n is_guess : bool, optional\n Default - False\n Flag that differentiates the guess from the fit\n \"\"\"\n statement = 'guess'\n\n if is_guess:\n targ_dset = self.h5_guess\n source_dset = self.guess\n else:\n statement = 
'fit'\n targ_dset = self.h5_fit\n source_dset = self.fit\n\n if self._verbose:\n print('Writing data to positions: {} to {}'.format(self._start_pos, self._end_pos))\n targ_dset[self._start_pos: self._end_pos, :] = source_dset\n\n # This flag will let us resume the computation if it is aborted\n targ_dset.attrs['last_pixel'] = self._end_pos\n\n # Now update the start position\n self._start_pos = self._end_pos\n\n # flush the file\n self.h5_main.file.flush()\n if self._verbose:\n print('Finished writing ' + statement + ' results (chunk) to file!')\n\n def _create_guess_datasets(self):\n \"\"\"\n Model specific call that will write the h5 group, guess dataset, corresponding spectroscopic datasets and also\n link the guess dataset to the spectroscopic datasets. It is recommended that the ancillary datasets be populated\n within this function.\n\n The guess dataset will NOT be populated here but will be populated by the __setData function\n The fit dataset should NOT be populated here unless the user calls the optimize function.\n\n Parameters\n --------\n None\n\n Returns\n -------\n None\n\n \"\"\"\n self.guess = None # replace with actual h5 dataset\n raise NotImplementedError('Please override the _create_guess_datasets specific to your model')\n\n def _create_fit_datasets(self):\n \"\"\"\n Model specific call that will write the h5 group, fit dataset, corresponding spectroscopic datasets and also\n link the fit dataset to the spectroscopic datasets. It is recommended that the ancillary datasets be populated\n within this function.\n\n The fit dataset will NOT be populated here but will be populated by the __setData function\n The guess dataset should NOT be populated here unless the user calls the optimize function.\n\n Parameters\n --------\n None\n\n Returns\n -------\n None\n\n \"\"\"\n self.fit = None # replace with actual h5 dataset\n raise NotImplementedError('Please override the _create_fit_datasets specific to your model')\n\n def _check_for_old_guess(self):\n \"\"\"\n Returns a list of datasets where the same parameters have already been used to compute Guesses for this dataset\n\n Returns\n -------\n list\n List of datasets with results from do_guess on this dataset\n \"\"\"\n groups = check_for_old(self.h5_main, self._fitter_name, new_parms=self._parms_dict, target_dset='Guess',\n verbose=self._verbose)\n datasets = [grp['Guess'] for grp in groups]\n\n # Now sort these datasets into partial and complete:\n completed_dsets = []\n partial_dsets = []\n\n for dset in datasets:\n try:\n last_pix = get_attr(dset, 'last_pixel')\n except KeyError:\n last_pix = None\n \n # Skip datasets without last_pixel attribute\n if last_pix is None:\n continue\n elif last_pix < self.h5_main.shape[0]:\n partial_dsets.append(dset)\n else:\n completed_dsets.append(dset)\n\n return partial_dsets, completed_dsets\n\n def do_guess(self, processors=None, strategy=None, options=dict(), h5_partial_guess=None, override=False):\n \"\"\"\n Parameters\n ----------\n strategy: string (optional)\n Default is 'Wavelet_Peaks'.\n Can be one of ['wavelet_peaks', 'relative_maximum', 'gaussian_processes'].\n For updated list, run GuessMethods.methods\n processors : int (optional)\n Number of cores to use for computing. Default = all available - 2 cores\n options: dict\n Default, options for wavelet_peaks {\"peaks_widths\": np.array([10,200]), \"peak_step\":20}.\n Dictionary of options passed to strategy. For more info see GuessMethods documentation.\n h5_partial_guess : h5py.group. 
optional, default = None\n Datagroup containing (partially computed) guess results. do_guess will resume computation if provided.\n override : bool, optional. default = False\n By default, will simply return duplicate results to avoid recomputing or resume computation on a\n group with partial results. Set to True to force fresh computation.\n\n Returns\n -------\n h5_guess : h5py.Dataset\n Dataset containing guesses that can be passed on to do_fit()\n \"\"\"\n gm = GuessMethods()\n if strategy not in gm.methods:\n raise KeyError('Error: %s is not implemented in pycroscopy.analysis.GuessMethods to find guesses' %\n strategy)\n\n # ################## CHECK FOR DUPLICATES AND RESUME PARTIAL #######################################\n\n # Prepare the parms dict that will be used for comparison:\n self._parms_dict = options.copy()\n self._parms_dict.update({'strategy': strategy})\n\n # check for old:\n partial_dsets, completed_dsets = self._check_for_old_guess()\n\n if len(completed_dsets) == 0 and len(partial_dsets) == 0:\n print('No existing datasets found')\n override = True\n\n if not override:\n # First try to simply return any completed computation\n if len(completed_dsets) > 0:\n print('Returned previously computed results at ' + completed_dsets[-1].name)\n self.h5_guess = PycroDataset(completed_dsets[-1])\n return\n\n # Next attempt to resume automatically if nothing is provided\n if len(partial_dsets) > 0:\n # attempt to use whatever the user provided (if legal)\n target_partial_dset = partial_dsets[-1]\n if h5_partial_guess is not None:\n if not isinstance(h5_partial_guess, h5py.Dataset):\n raise ValueError('Provided parameter is not an h5py.Dataset object')\n if h5_partial_guess not in partial_dsets:\n raise ValueError('Provided dataset for partial Guesses is not compatible')\n if self._verbose:\n print('Provided partial Guess dataset was acceptable')\n target_partial_dset = h5_partial_guess\n\n # Finally resume from this dataset\n print('Resuming computation in group: ' + target_partial_dset.name)\n self.h5_guess = target_partial_dset\n self._start_pos = target_partial_dset.attrs['last_pixel']\n\n # No completed / partials available or forced via override:\n if self.h5_guess is None:\n if self._verbose:\n print('Starting a fresh computation!')\n self._start_pos = 0\n self._create_guess_datasets()\n\n # ################## BEGIN THE ACTUAL COMPUTING #######################################\n\n if processors is None:\n processors = self._maxCpus\n else:\n processors = min(int(processors), self._maxCpus)\n processors = recommend_cpu_cores(self._max_pos_per_read, processors, verbose=self._verbose)\n\n print(\"Using %s to find guesses...\\n\" % strategy)\n\n time_per_pix = 0\n num_pos = self.h5_main.shape[0] - self._start_pos\n orig_start_pos = self._start_pos\n\n print('You can abort this computation at any time and resume at a later time!\\n'\n '\\tIf you are operating in a python console, press Ctrl+C or Cmd+C to abort\\n'\n '\\tIf you are in a Jupyter notebook, click on \"Kernel\">>\"Interrupt\"\\n')\n\n self._get_data_chunk()\n while self.data is not None:\n\n t_start = tm.time()\n\n opt = Optimize(data=self.data, parallel=self._parallel)\n temp = opt.computeGuess(processors=processors, strategy=strategy, options=options)\n\n # reorder to get one numpy array out\n temp = self._reformat_results(temp, strategy)\n self.guess = np.hstack(tuple(temp))\n\n # Write to file\n self._set_results(is_guess=True)\n\n # basic timing logs\n tot_time = np.round(tm.time() - t_start, decimals=2) # in 
seconds\n if self._verbose:\n print('Done parallel computing in {} or {} per pixel'.format(format_time(tot_time),\n format_time(tot_time / self.data.shape[0])))\n if self._start_pos == orig_start_pos:\n time_per_pix = tot_time / self._end_pos # in seconds\n else:\n time_remaining = (num_pos - self._end_pos) * time_per_pix # in seconds\n print('Time remaining: ' + format_time(time_remaining))\n\n # get next batch of data\n self._get_data_chunk()\n\n print('Completed computing guess')\n print()\n return PycroDataset(self.h5_guess)\n\n def _reformat_results(self, results, strategy='wavelet_peaks'):\n \"\"\"\n Model specific restructuring / reformatting of the parallel compute results\n\n Parameters\n ----------\n results : array-like\n Results to be formatted for writing\n strategy : str\n The strategy used in the fit. Determines how the results will be reformatted.\n Default 'wavelet_peaks'\n\n Returns\n -------\n results : numpy.ndarray\n Formatted array that is ready to be writen to the HDF5 file \n\n \"\"\"\n return np.array(results)\n\n def _check_for_old_fit(self):\n \"\"\"\n Returns three lists of h5py.Dataset objects where the group contained:\n 1. Completed guess only\n 2. Partial Fit\n 3. Completed Fit\n\n Returns\n -------\n\n \"\"\"\n # First find all groups that match the basic condition of matching tool name\n all_groups = find_results_groups(self.h5_main, self._fitter_name)\n if self._verbose:\n print('Groups that matched the nomenclature: {}'.format(all_groups))\n\n # Next sort these groups into three categories:\n completed_guess = []\n partial_fits = []\n completed_fits = []\n\n for h5_group in all_groups:\n\n if 'Fit' in h5_group.keys():\n # check group for fit attribute\n\n h5_fit = h5_group['Fit']\n\n # check Fit dataset against parms_dict\n if not check_for_matching_attrs(h5_fit, new_parms=self._parms_dict, verbose=self._verbose):\n if self._verbose:\n print('{} did not match the given parameters'.format(h5_fit.name))\n continue\n\n # sort this dataset:\n try:\n last_pix = get_attr(h5_fit, 'last_pixel')\n except KeyError:\n last_pix = None\n\n # For now skip any fits that are missing 'last_pixel'\n if last_pix is None:\n continue\n elif last_pix < self.h5_main.shape[0]:\n partial_fits.append(h5_fit.parent)\n else:\n completed_fits.append(h5_fit)\n else:\n if 'Guess' in h5_group.keys():\n h5_guess = h5_group['Guess']\n\n # sort this dataset:\n try:\n last_pix = get_attr(h5_guess, 'last_pixel')\n except KeyError:\n last_pix = None\n\n # For now skip any fits that are missing 'last_pixel'\n if last_pix is None:\n continue\n elif last_pix == self.h5_main.shape[0]:\n if self._verbose:\n print('{} was a completed Guess'.format(h5_guess.name))\n completed_guess.append(h5_guess)\n else:\n if self._verbose:\n print('{} did not not have completed Guesses'.format(h5_guess.name))\n else:\n if self._verbose:\n print('{} did not even have Guess. Categorizing as defective Group'.format(h5_group.name))\n\n return completed_guess, partial_fits, completed_fits\n\n def do_fit(self, processors=None, solver_type='least_squares', solver_options=None, obj_func=None,\n h5_partial_fit=None, h5_guess=None, override=False):\n \"\"\"\n Generates the fit for the given dataset and writes back to file\n\n Parameters\n ----------\n processors : int\n Number of cpu cores the user wishes to run on. 
The minimum of this and self._maxCpus is used.\n solver_type : str\n The name of the solver in scipy.optimize to use for the fit\n solver_options : dict\n Dictionary of parameters to pass to the solver specified by `solver_type`\n obj_func : dict\n Dictionary defining the class and method containing the function to be fit as well as any \n additional function parameters.\n h5_partial_fit : h5py.group. optional, default = None\n Datagroup containing (partially computed) fit results. do_fit will resume computation if provided.\n h5_guess : h5py.group. optional, default = None\n Datagroup containing guess results. do_fit will use this if provided.\n override : bool, optional. default = False\n By default, will simply return duplicate results to avoid recomputing or resume computation on a\n group with partial results. Set to True to force fresh computation.\n\n Returns\n -------\n h5_results : h5py.Dataset object\n Dataset with the fit parameters\n \"\"\"\n\n # ################## PREPARE THE SOLVER #######################################\n\n legit_solver = solver_type in scipy.optimize.__dict__.keys()\n\n if not legit_solver:\n raise KeyError('Error: Objective Functions \"%s\" is not implemented in pycroscopy.analysis.Fit_Methods' %\n obj_func['obj_func'])\n\n obj_func_name = obj_func['obj_func']\n legit_obj_func = obj_func_name in Fit_Methods().methods\n\n if not legit_obj_func:\n raise KeyError('Error: Solver \"%s\" does not exist!. For additional info see scipy.optimize\\n' % solver_type)\n\n # ################## CHECK FOR DUPLICATES AND RESUME PARTIAL #######################################\n\n def _get_group_to_resume(legal_groups, provided_partial_fit):\n for h5_group in legal_groups:\n if h5_group['Fit'] == provided_partial_fit:\n return h5_group\n return None\n\n def _resume_fit(fitter, h5_group):\n fitter.h5_guess = h5_group['Guess']\n fitter.h5_fit = h5_group['Fit']\n fitter._start_pos = fitter.h5_fit.attrs['last_pixel']\n\n def _start_fresh_fit(fitter, h5_guess_legal):\n fitter.h5_guess = h5_guess_legal\n fitter._create_fit_datasets()\n fitter._start_pos = 0\n\n # Prepare the parms dict that will be used for comparison:\n self._parms_dict = solver_options.copy()\n self._parms_dict.update({'solver_type': solver_type})\n self._parms_dict.update(obj_func)\n\n completed_guess, partial_fit_groups, completed_fits = self._check_for_old_fit()\n\n override = override or (h5_partial_fit is not None or h5_guess is not None)\n\n if not override:\n # First try to simply return completed results\n if len(completed_fits) > 0:\n print('Returned previously computed results at ' + completed_fits[-1].name)\n self.h5_fit = PycroDataset(completed_fits[-1])\n return\n\n # Next, attempt to resume automatically:\n elif len(partial_fit_groups) > 0:\n print('Will resume fitting in {}. '\n 'You can supply a dataset using the h5_partial_fit argument'.format(partial_fit_groups[-1].name))\n _resume_fit(self, partial_fit_groups[-1])\n\n # Finally, attempt to do fresh fitting using completed Guess:\n elif len(completed_guess) > 0:\n print('Will use {} for generating new Fit. '\n 'You can supply a dataset using the h5_guess argument'.format(completed_guess[-1].name))\n _start_fresh_fit(self, completed_guess[-1])\n\n else:\n raise ValueError('Could not find a compatible Guess to use for Fit. 
Call do_guess() before do_fit()')\n\n else:\n if h5_partial_fit is not None:\n h5_group = _get_group_to_resume(partial_fit_groups, h5_partial_fit)\n if h5_group is None:\n raise ValueError('Provided dataset with partial Fit was not found to be compatible')\n _resume_fit(self, h5_group)\n\n elif h5_guess is not None:\n if h5_guess not in completed_guess:\n raise ValueError('Provided dataset with completed Guess was not found to be compatible')\n _start_fresh_fit(self, h5_guess)\n\n else:\n raise ValueError('Please provide a completed guess or partially completed Fit to resume')\n\n # ################## BEGIN THE ACTUAL FITTING #######################################\n\n print(\"Using solver %s and objective function %s to fit your data\\n\" % (solver_type, obj_func['obj_func']))\n\n if processors is None:\n processors = self._maxCpus\n else:\n processors = min(processors, self._maxCpus)\n processors = recommend_cpu_cores(self._max_pos_per_read, processors, verbose=self._verbose)\n\n time_per_pix = 0\n num_pos = self.h5_main.shape[0] - self._start_pos\n orig_start_pos = self._start_pos\n\n print('You can abort this computation at any time and resume at a later time!\\n'\n '\\tIf you are operating in a python console, press Ctrl+C or Cmd+C to abort\\n'\n '\\tIf you are in a Jupyter notebook, click on \"Kernel\">>\"Interrupt\"\\n')\n\n self._get_guess_chunk()\n self._get_data_chunk()\n\n while self.data is not None:\n\n t_start = tm.time()\n\n opt = Optimize(data=self.data, guess=self.guess, parallel=self._parallel)\n temp = opt.computeFit(processors=processors, solver_type=solver_type, solver_options=solver_options,\n obj_func=obj_func.copy())\n\n # TODO: need a different .reformatResults to process fitting results\n # reorder to get one numpy array out\n temp = self._reformat_results(temp, obj_func_name)\n self.fit = np.hstack(tuple(temp))\n\n # Write to file\n self._set_results(is_guess=False)\n\n # basic timing logs\n tot_time = np.round(tm.time() - t_start, decimals=2) # in seconds\n if self._verbose:\n print('Done parallel computing in {} or {} per pixel'.format(format_time(tot_time),\n format_time(\n tot_time / self.data.shape[0])))\n if self._start_pos == orig_start_pos:\n time_per_pix = tot_time / self._end_pos # in seconds\n else:\n time_remaining = (num_pos - self._end_pos) * time_per_pix # in seconds\n print('Time remaining: ' + format_time(time_remaining))\n\n # get next batch of data\n self._get_guess_chunk()\n self._get_data_chunk()\n\n print('Completed computing fit. Writing to file.')\n\n return PycroDataset(self.h5_fit)\n", "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 7 21:14:25 2017\n\n@author: Suhas Somnath, Chris Smith\n\"\"\"\n\nfrom __future__ import division, print_function, unicode_literals, absolute_import\nimport sys\nimport os\nimport numpy as np\nfrom collections import Iterable\nfrom .dtype_utils import contains_integers\n\n__all__ = ['clean_string_att', 'get_aux_dset_slicing', 'make_indices_matrix', 'INDICES_DTYPE', 'VALUES_DTYPE',\n 'Dimension', 'build_ind_val_matrices', 'calc_chunks', 'create_spec_inds_from_vals']\n\nif sys.version_info.major == 3:\n unicode = str\n\nINDICES_DTYPE = np.uint32\nVALUES_DTYPE = np.float32\n\n\nclass Dimension(object):\n def __init__(self, name, units, values):\n \"\"\"\n Simple object that describes a dimension in a dataset by its name, units, and values\n Parameters\n ----------\n name : str / unicode\n Name of the dimension. For example 'Bias'\n units : str / unicode\n Units for this dimension. 
For example: 'V'\n values : array-like, or int\n Values over which this dimension was varied. A linearly increasing set of values will be generated if an\n integer is provided instead of an array.\n \"\"\"\n if not isinstance(name, (str, unicode)):\n raise TypeError('name should be a string')\n name = name.strip()\n if len(name) < 1:\n raise ValueError('name should not be an empty string')\n if not isinstance(units, (str, unicode)):\n raise TypeError('units should be a string')\n if isinstance(values, int):\n if values < 1:\n raise ValueError('values should at least be specified as a positive integer')\n values = np.arange(values)\n if not isinstance(values, (np.ndarray, list, tuple)):\n raise TypeError('values should be array-like')\n self.name = name\n self.units = units\n self.values = values\n\n def __repr__(self):\n return '{} ({}) : {}'.format(self.name, self.units, self.values)\n\n\ndef get_aux_dset_slicing(dim_names, last_ind=None, is_spectroscopic=False):\n \"\"\"\n Returns a dictionary of slice objects to help in creating region references in the position or spectroscopic\n indices and values datasets\n\n Parameters\n ------------\n dim_names : iterable\n List of strings denoting the names of the position axes or spectroscopic dimensions arranged in the same order\n that matches the dimensions in the indices / values dataset\n last_ind : (Optional) unsigned int, default = None\n Last pixel in the positon or spectroscopic matrix. Useful in experiments where the\n parameters have changed (eg. BEPS new data format) during the experiment.\n is_spectroscopic : bool, optional. default = True\n set to True for position datasets and False for spectroscopic datasets\n Returns\n ------------\n slice_dict : dictionary\n Dictionary of tuples containing slice objects corresponding to\n each position axis.\n \"\"\"\n if not isinstance(dim_names, Iterable):\n raise TypeError('dim_names should be and Iterable')\n if not len(dim_names) > 0:\n raise ValueError('dim_names should not be empty')\n if not np.all([isinstance(x, (str, unicode)) for x in dim_names]):\n raise TypeError('dim_names should contain strings')\n\n slice_dict = dict()\n for spat_ind, curr_dim_name in enumerate(dim_names):\n val = (slice(last_ind), slice(spat_ind, spat_ind + 1))\n if is_spectroscopic:\n val = val[::-1]\n slice_dict[str(curr_dim_name)] = val\n return slice_dict\n\n\ndef make_indices_matrix(num_steps, is_position=True):\n \"\"\"\n Makes an ancillary indices matrix given the number of steps in each dimension. 
In other words, this function builds\n a matrix whose rows correspond to unique combinations of the multiple dimensions provided.\n\n Parameters\n ------------\n num_steps : List / numpy array\n Number of steps in each spatial or spectral dimension\n Note that the axes must be ordered from fastest varying to slowest varying\n is_position : bool, optional, default = True\n Whether the returned matrix is meant for position (True) indices (tall and skinny) or spectroscopic (False)\n indices (short and wide)\n\n Returns\n --------------\n indices_matrix : 2D unsigned int numpy array\n arranged as [steps, spatial dimension]\n \"\"\"\n if not isinstance(num_steps, (tuple, list, np.ndarray)):\n raise TypeError('num_steps should be a list / tuple / numpy array')\n if not contains_integers(num_steps, min_val=1 + int(len(num_steps) > 0)):\n raise ValueError('num_steps should contain integers greater than equal to 1 (empty dimension) or 2')\n\n num_steps = np.array(num_steps)\n spat_dims = max(1, len(np.where(num_steps > 1)[0]))\n\n indices_matrix = np.zeros(shape=(np.prod(num_steps), spat_dims), dtype=INDICES_DTYPE)\n dim_ind = 0\n\n for indx, curr_steps in enumerate(num_steps):\n if curr_steps > 1:\n\n part1 = np.prod(num_steps[:indx + 1])\n\n if indx > 0:\n part2 = np.prod(num_steps[:indx])\n else:\n part2 = 1\n\n if indx + 1 == len(num_steps):\n part3 = 1\n else:\n part3 = np.prod(num_steps[indx + 1:])\n\n indices_matrix[:, dim_ind] = np.tile(np.floor(np.arange(part1) / part2), part3)\n dim_ind += 1\n\n if not is_position:\n indices_matrix = indices_matrix.T\n\n return indices_matrix\n\n\ndef clean_string_att(att_val):\n \"\"\"\n Replaces any unicode objects within lists with their string counterparts to ensure compatibility with python 3.\n If the attribute is indeed a list of unicodes, the changes will be made in-place\n\n Parameters\n ----------\n att_val : object\n Attribute object\n\n Returns\n -------\n att_val : object\n Attribute object\n \"\"\"\n try:\n if isinstance(att_val, Iterable):\n if type(att_val) in [unicode, str]:\n return att_val\n elif np.any([type(x) in [str, unicode, bytes, np.str_] for x in att_val]):\n return np.array(att_val, dtype='S')\n if type(att_val) == np.str_:\n return str(att_val)\n return att_val\n except TypeError:\n raise TypeError('Failed to clean: {}'.format(att_val))\n\n\ndef build_ind_val_matrices(unit_values, is_spectral=True):\n \"\"\"\n Builds indices and values matrices using given unit values for each dimension.\n\n Parameters\n ----------\n unit_values : list / tuple\n Sequence of values vectors for each dimension\n is_spectral : bool (optional), default = True\n If true, returns matrices for spectroscopic datasets, else returns matrices for Position datasets\n\n Returns\n -------\n ind_mat : 2D numpy array\n Indices matrix\n val_mat : 2D numpy array\n Values matrix\n \"\"\"\n if not isinstance(unit_values, (list, tuple)):\n raise TypeError('unit_values should be a list or tuple')\n if not np.all([np.array(x).ndim == 1 for x in unit_values]):\n raise ValueError('unit_values should only contain 1D array')\n lengths = [len(x) for x in unit_values]\n tile_size = [np.prod(lengths[x:]) for x in range(1, len(lengths))] + [1]\n rep_size = [1] + [np.prod(lengths[:x]) for x in range(1, len(lengths))]\n val_mat = np.zeros(shape=(len(lengths), np.prod(lengths)))\n ind_mat = np.zeros(shape=val_mat.shape, dtype=np.uint32)\n for ind, ts, rs, vec in zip(range(len(lengths)), tile_size, rep_size, unit_values):\n val_mat[ind] = np.tile(np.repeat(vec, rs), ts)\n 
ind_mat[ind] = np.tile(np.repeat(np.arange(len(vec)), rs), ts)\n if not is_spectral:\n val_mat = val_mat.T\n ind_mat = ind_mat.T\n return INDICES_DTYPE(ind_mat), VALUES_DTYPE(val_mat)\n\n\ndef create_spec_inds_from_vals(ds_spec_val_mat):\n \"\"\"\n Create new Spectroscopic Indices table from the changes in the\n Spectroscopic Values\n\n Parameters\n ----------\n ds_spec_val_mat : array-like,\n Holds the spectroscopic values to be indexed\n\n Returns\n -------\n ds_spec_inds_mat : numpy array of uints the same shape as ds_spec_val_mat\n Indices corresponding to the values in ds_spec_val_mat\n\n \"\"\"\n if not isinstance(ds_spec_val_mat, np.ndarray):\n raise TypeError('ds_spec_val_mat must be a numpy array')\n if ds_spec_val_mat.ndim != 2:\n raise ValueError('ds_spec_val_mat must be a 2D array arranged as [dimension, values]')\n\n ds_spec_inds_mat = np.zeros_like(ds_spec_val_mat, dtype=np.int32)\n\n \"\"\"\n Find how quickly the spectroscopic values are changing in each row \n and the order of row from fastest changing to slowest.\n \"\"\"\n change_count = [len(np.where([row[i] != row[i - 1] for i in range(len(row))])[0]) for row in ds_spec_val_mat]\n change_sort = np.argsort(change_count)[::-1]\n\n \"\"\"\n Determine everywhere the spectroscopic values change and build \n index table based on those changed\n \"\"\"\n indices = np.zeros(ds_spec_val_mat.shape[0])\n for jcol in range(1, ds_spec_val_mat.shape[1]):\n this_col = ds_spec_val_mat[change_sort, jcol]\n last_col = ds_spec_val_mat[change_sort, jcol - 1]\n\n \"\"\"\n Check if current column values are different than those \n in last column.\n \"\"\"\n changed = np.where(this_col != last_col)[0]\n\n \"\"\"\n If only one row changed, increment the index for that \n column\n If more than one row has changed, increment the index for \n the last row that changed and set all others to zero\n \"\"\"\n if len(changed) == 1:\n indices[changed] += 1\n elif len(changed > 1):\n for change in changed[:-1]:\n indices[change] = 0\n indices[changed[-1]] += 1\n\n \"\"\"\n Store the indices for the current column in the dataset\n \"\"\"\n ds_spec_inds_mat[change_sort, jcol] = indices\n\n return ds_spec_inds_mat\n\n\ndef calc_chunks(dimensions, dtype_byte_size, unit_chunks=None, max_chunk_mem=10240):\n \"\"\"\n Calculate the chunk size for the HDF5 dataset based on the dimensions and the\n maximum chunk size in memory\n\n Parameters\n ----------\n dimensions : array_like of int\n Shape of the data to be chunked\n dtype_byte_size : unsigned int\n Size of an entry in the data in bytes\n unit_chunks : array_like of int, optional\n Unit size of the chunking in each dimension. Must be the same size as\n the shape of `ds_main`. Default None, `unit_chunks` is set to 1 in all\n dimensions\n max_chunk_mem : int, optional\n Maximum size of the chunk in memory in bytes. 
Default 10240b or 10kb per h5py recommendations\n\n Returns\n -------\n chunking : tuple of int\n Calculated maximum size of a chunk in each dimension that is as close to the\n requested `max_chunk_mem` as posible while having steps based on the input\n `unit_chunks`.\n \"\"\"\n if not isinstance(dimensions, (list, tuple)):\n raise TypeError('dimensions should either be a tuple or list')\n if not isinstance(dtype_byte_size, int):\n raise TypeError('dtype_byte_size should be an integer')\n if unit_chunks is not None:\n if not isinstance(unit_chunks, (tuple, list)):\n raise TypeError('unit_chunks should either be a tuple or list')\n\n '''\n Ensure that dimensions is an array\n '''\n dimensions = np.asarray(dimensions, dtype=np.uint)\n '''\n Set the unit_chunks to all ones if not given. Ensure it is an array if it is.\n '''\n if unit_chunks is None:\n unit_chunks = np.ones_like(dimensions)\n else:\n unit_chunks = np.asarray(unit_chunks, dtype=np.uint)\n\n if unit_chunks.shape != dimensions.shape:\n raise ValueError('Unit chunk size must have the same shape as the input dataset.')\n\n '''\n Save the original size of unit_chunks to use for incrementing the chunk size during\n loop\n '''\n base_chunks = unit_chunks.copy()\n\n '''\n Loop until chunk_size is greater than the maximum chunk_mem or the chunk_size is equal to\n that of dimensions\n '''\n while np.prod(unit_chunks) * dtype_byte_size <= max_chunk_mem:\n '''\n Check if all chunk dimensions are greater or equal to the\n actual dimensions. Exit the loop if true.\n '''\n if np.all(unit_chunks >= dimensions):\n break\n\n '''\n Find the index of the next chunk to be increased and increment it by the base_chunk\n size\n '''\n ichunk = np.argmax(dimensions / unit_chunks)\n unit_chunks[ichunk] += base_chunks[ichunk]\n\n '''\n Ensure that the size of the chunks is between one and the dimension size.\n '''\n unit_chunks = np.clip(unit_chunks, np.ones_like(unit_chunks), dimensions)\n\n chunking = tuple(unit_chunks)\n\n return chunking\n\ndef write_dset_to_txt(pdRaw, output_file='output.csv'):\n \"\"\"\n Output a PycroDataset in csv format\n\n Parameters\n ----------\n pdRaw : PycroDataset\n the PycroDataset that will be exported as a csv\n output_file : str, optional\n path that the output file should be written to\n \n Returns\n -------\n output_file: str\n \n \"\"\"\n from .pycro_data import PycroDataset\n if not isinstance(pdRaw, PycroDataset):\n raise TypeError('pdRaw should be a PycroDataset')\n\n specVals = pdRaw.h5_spec_vals\n posVals = pdRaw.h5_pos_vals\n dimUnits = pdRaw.spec_dim_descriptors\n pdPosDims = pdRaw.pos_dim_labels\n pdSpecDims = pdRaw.spec_dim_labels\n \n header = ''\n for idx, spec in enumerate(specVals):\n\n \"\"\"\n Obtain the units from the spectral dimension descriptors then\n create each line of the header with a spacer between the dimensions and the data\n \"\"\"\n unitStart = dimUnits[idx].find('(') + 1\n unitEnd = dimUnits[idx].find(')')\n unit = dimUnits[idx][unitStart:unitEnd]\n header = header + ','.join(str(freq) + ' ' + unit for freq in spec) + '\\n'\n header = header + ','.join('--------------------------------------------------------------' for idx in specVals[0])\n \n \"\"\"\n Create the spectral and position labels for the dataset in string form then\n create the position value array in string form, right-strip the last comma from the \n string to deliver the correct number of values, append all of the labels and values together,\n save the data and header to a temporary csv output\n \"\"\"\n specLabel = ''\n 
for dim in pdSpecDims:\n specLabel = specLabel + ','.join('' for idx in pdPosDims) + str(dim) + ',\\n'\n \n posLabel = ','.join(posL for posL in pdPosDims) + ',\\n' \n \n posValOut = ''\n for val, posDim in enumerate(posVals):\n posValOut = posValOut + ','.join(str(posVal) for posVal in posVals[val]) + ',\\n'\n posValOut = posValOut.rstrip('\\n')\n output = specLabel + posLabel + posValOut\n np.savetxt('temp.csv', pdRaw, delimiter=',', header=header, comments='')\n\n left_dset = output.splitlines()\n with open('temp.csv', 'r+') as f, open(output_file, 'w') as b:\n for left_line, right_line in zip(left_dset, f):\n right_line = left_line + right_line\n b.write(right_line)\n os.remove('temp.csv')\n return output_file\n" ]
[ [ "numpy.isin", "numpy.array", "scipy.optimize.__dict__.keys", "numpy.floor" ], [ "numpy.ones_like", "numpy.asarray", "numpy.arange", "numpy.all", "numpy.argmax", "numpy.zeros_like", "numpy.prod", "numpy.savetxt", "numpy.argsort", "numpy.repeat", "numpy.array", "numpy.zeros", "numpy.where" ] ]
Maxew42/Trashedy
[ "e7e43f172ef4a039e134cac26980f59fede24423" ]
[ "web-site/server/helpers/coco_eval.py" ]
[ "import json\nimport tempfile\n\nimport numpy as np\nimport copy\nimport time\nimport torch\nimport torch._six\n\nfrom pycocotools.cocoeval import COCOeval\nfrom pycocotools.coco import COCO\nimport pycocotools.mask as mask_util\n\nfrom collections import defaultdict\n\nimport helpers.utils as utils\n\n\nclass CocoEvaluator(object):\n def __init__(self, coco_gt, iou_types):\n assert isinstance(iou_types, (list, tuple))\n coco_gt = copy.deepcopy(coco_gt)\n self.coco_gt = coco_gt\n\n self.iou_types = iou_types\n self.coco_eval = {}\n for iou_type in iou_types:\n self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type)\n\n self.img_ids = []\n self.eval_imgs = {k: [] for k in iou_types}\n\n def update(self, predictions):\n img_ids = list(np.unique(list(predictions.keys())))\n self.img_ids.extend(img_ids)\n\n for iou_type in self.iou_types:\n results = self.prepare(predictions, iou_type)\n coco_dt = loadRes(self.coco_gt, results) if results else COCO()\n coco_eval = self.coco_eval[iou_type]\n\n coco_eval.cocoDt = coco_dt\n coco_eval.params.imgIds = list(img_ids)\n img_ids, eval_imgs = evaluate(coco_eval)\n\n self.eval_imgs[iou_type].append(eval_imgs)\n\n def synchronize_between_processes(self):\n for iou_type in self.iou_types:\n self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)\n create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type])\n\n def accumulate(self):\n for coco_eval in self.coco_eval.values():\n coco_eval.accumulate()\n\n def summarize(self):\n for iou_type, coco_eval in self.coco_eval.items():\n print(\"IoU metric: {}\".format(iou_type))\n coco_eval.summarize()\n\n def prepare(self, predictions, iou_type):\n if iou_type == \"bbox\":\n return self.prepare_for_coco_detection(predictions)\n elif iou_type == \"segm\":\n return self.prepare_for_coco_segmentation(predictions)\n elif iou_type == \"keypoints\":\n return self.prepare_for_coco_keypoint(predictions)\n else:\n raise ValueError(\"Unknown iou type {}\".format(iou_type))\n\n def prepare_for_coco_detection(self, predictions):\n coco_results = []\n for original_id, prediction in predictions.items():\n if len(prediction) == 0:\n continue\n\n boxes = prediction[\"boxes\"]\n boxes = convert_to_xywh(boxes).tolist()\n scores = prediction[\"scores\"].tolist()\n labels = prediction[\"labels\"].tolist()\n\n coco_results.extend(\n [\n {\n \"image_id\": original_id,\n \"category_id\": labels[k],\n \"bbox\": box,\n \"score\": scores[k],\n }\n for k, box in enumerate(boxes)\n ]\n )\n return coco_results\n\n def prepare_for_coco_segmentation(self, predictions):\n coco_results = []\n for original_id, prediction in predictions.items():\n if len(prediction) == 0:\n continue\n\n scores = prediction[\"scores\"]\n labels = prediction[\"labels\"]\n masks = prediction[\"masks\"]\n\n masks = masks > 0.5\n\n scores = prediction[\"scores\"].tolist()\n labels = prediction[\"labels\"].tolist()\n\n rles = [\n mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order=\"F\"))[0]\n for mask in masks\n ]\n for rle in rles:\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\")\n\n coco_results.extend(\n [\n {\n \"image_id\": original_id,\n \"category_id\": labels[k],\n \"segmentation\": rle,\n \"score\": scores[k],\n }\n for k, rle in enumerate(rles)\n ]\n )\n return coco_results\n\n def prepare_for_coco_keypoint(self, predictions):\n coco_results = []\n for original_id, prediction in predictions.items():\n if len(prediction) == 0:\n continue\n\n boxes = prediction[\"boxes\"]\n boxes = 
convert_to_xywh(boxes).tolist()\n scores = prediction[\"scores\"].tolist()\n labels = prediction[\"labels\"].tolist()\n keypoints = prediction[\"keypoints\"]\n keypoints = keypoints.flatten(start_dim=1).tolist()\n\n coco_results.extend(\n [\n {\n \"image_id\": original_id,\n \"category_id\": labels[k],\n 'keypoints': keypoint,\n \"score\": scores[k],\n }\n for k, keypoint in enumerate(keypoints)\n ]\n )\n return coco_results\n\n\ndef convert_to_xywh(boxes):\n xmin, ymin, xmax, ymax = boxes.unbind(1)\n return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1)\n\n\ndef merge(img_ids, eval_imgs):\n all_img_ids = utils.all_gather(img_ids)\n all_eval_imgs = utils.all_gather(eval_imgs)\n\n merged_img_ids = []\n for p in all_img_ids:\n merged_img_ids.extend(p)\n\n merged_eval_imgs = []\n for p in all_eval_imgs:\n merged_eval_imgs.append(p)\n\n merged_img_ids = np.array(merged_img_ids)\n merged_eval_imgs = np.concatenate(merged_eval_imgs, 2)\n\n # keep only unique (and in sorted order) images\n merged_img_ids, idx = np.unique(merged_img_ids, return_index=True)\n merged_eval_imgs = merged_eval_imgs[..., idx]\n\n return merged_img_ids, merged_eval_imgs\n\n\ndef create_common_coco_eval(coco_eval, img_ids, eval_imgs):\n img_ids, eval_imgs = merge(img_ids, eval_imgs)\n img_ids = list(img_ids)\n eval_imgs = list(eval_imgs.flatten())\n\n coco_eval.evalImgs = eval_imgs\n coco_eval.params.imgIds = img_ids\n coco_eval._paramsEval = copy.deepcopy(coco_eval.params)\n\n\n#################################################################\n# From pycocotools, just removed the prints and fixed\n# a Python3 bug about unicode not defined\n#################################################################\n\n# Ideally, pycocotools wouldn't have hard-coded prints\n# so that we could avoid copy-pasting those two functions\n\ndef createIndex(self):\n # create index\n # print('creating index...')\n anns, cats, imgs = {}, {}, {}\n imgToAnns, catToImgs = defaultdict(list), defaultdict(list)\n if 'annotations' in self.dataset:\n for ann in self.dataset['annotations']:\n imgToAnns[ann['image_id']].append(ann)\n anns[ann['id']] = ann\n\n if 'images' in self.dataset:\n for img in self.dataset['images']:\n imgs[img['id']] = img\n\n if 'categories' in self.dataset:\n for cat in self.dataset['categories']:\n cats[cat['id']] = cat\n\n if 'annotations' in self.dataset and 'categories' in self.dataset:\n for ann in self.dataset['annotations']:\n catToImgs[ann['category_id']].append(ann['image_id'])\n\n # print('index created!')\n\n # create class members\n self.anns = anns\n self.imgToAnns = imgToAnns\n self.catToImgs = catToImgs\n self.imgs = imgs\n self.cats = cats\n\n\nmaskUtils = mask_util\n\n\ndef loadRes(self, resFile):\n \"\"\"\n Load result file and return a result api object.\n Args:\n self (obj): coco object with ground truth annotations\n resFile (str): file name of result file\n Returns:\n res (obj): result api object\n \"\"\"\n res = COCO()\n res.dataset['images'] = [img for img in self.dataset['images']]\n\n # print('Loading and preparing results...')\n # tic = time.time()\n if isinstance(resFile, torch._six.string_classes):\n anns = json.load(open(resFile))\n elif type(resFile) == np.ndarray:\n anns = self.loadNumpyAnnotations(resFile)\n else:\n anns = resFile\n assert type(anns) == list, 'results in not an array of objects'\n annsImgIds = [ann['image_id'] for ann in anns]\n assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \\\n 'Results do not correspond to current coco set'\n if 'caption' 
in anns[0]:\n imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])\n res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]\n for id, ann in enumerate(anns):\n ann['id'] = id + 1\n elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n bb = ann['bbox']\n x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]]\n if 'segmentation' not in ann:\n ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]\n ann['area'] = bb[2] * bb[3]\n ann['id'] = id + 1\n ann['iscrowd'] = 0\n elif 'segmentation' in anns[0]:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n # now only support compressed RLE format as segmentation results\n ann['area'] = maskUtils.area(ann['segmentation'])\n if 'bbox' not in ann:\n ann['bbox'] = maskUtils.toBbox(ann['segmentation'])\n ann['id'] = id + 1\n ann['iscrowd'] = 0\n elif 'keypoints' in anns[0]:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n s = ann['keypoints']\n x = s[0::3]\n y = s[1::3]\n x1, x2, y1, y2 = np.min(x), np.max(x), np.min(y), np.max(y)\n ann['area'] = (x2 - x1) * (y2 - y1)\n ann['id'] = id + 1\n ann['bbox'] = [x1, y1, x2 - x1, y2 - y1]\n # print('DONE (t={:0.2f}s)'.format(time.time()- tic))\n\n res.dataset['annotations'] = anns\n createIndex(res)\n return res\n\n\ndef evaluate(self):\n '''\n Run per image evaluation on given images and store results (a list of dict) in self.evalImgs\n :return: None\n '''\n # tic = time.time()\n # print('Running per image evaluation...')\n p = self.params\n # add backward compatibility if useSegm is specified in params\n if p.useSegm is not None:\n p.iouType = 'segm' if p.useSegm == 1 else 'bbox'\n print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))\n # print('Evaluate annotation type *{}*'.format(p.iouType))\n p.imgIds = list(np.unique(p.imgIds))\n if p.useCats:\n p.catIds = list(np.unique(p.catIds))\n p.maxDets = sorted(p.maxDets)\n self.params = p\n\n self._prepare()\n # loop through images, area range, max detection number\n catIds = p.catIds if p.useCats else [-1]\n\n if p.iouType == 'segm' or p.iouType == 'bbox':\n computeIoU = self.computeIoU\n elif p.iouType == 'keypoints':\n computeIoU = self.computeOks\n self.ious = {\n (imgId, catId): computeIoU(imgId, catId)\n for imgId in p.imgIds\n for catId in catIds}\n\n evaluateImg = self.evaluateImg\n maxDet = p.maxDets[-1]\n evalImgs = [\n evaluateImg(imgId, catId, areaRng, maxDet)\n for catId in catIds\n for areaRng in p.areaRng\n for imgId in p.imgIds\n ]\n # this is NOT in the pycocotools code, but could be done outside\n evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds))\n self._paramsEval = copy.deepcopy(self.params)\n # toc = time.time()\n # print('DONE (t={:0.2f}s).'.format(toc-tic))\n return p.imgIds, evalImgs\n" ]
[ [ "numpy.unique", "numpy.asarray", "numpy.min", "numpy.concatenate", "numpy.max", "torch.stack", "numpy.array" ] ]
Sinovel/QUANTAXIS
[ "97f1ea2140f58c92ff5c84b851886d9eda1f9ac3", "97f1ea2140f58c92ff5c84b851886d9eda1f9ac3", "97f1ea2140f58c92ff5c84b851886d9eda1f9ac3" ]
[ "QUANTAXIS/QAUtil/QATransform.py", "QUANTAXIS/QAARP/QAUser.py", "QUANTAXIS/QAFetch/QATdx_adv.py" ]
[ "# coding:utf-8\n#\n# The MIT License (MIT)\n#\n# Copyright (c) 2016-2019 yutiansut/QUANTAXIS\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport csv\nimport json\n\nimport numpy as np\nimport pandas as pd\n\n\ndef QA_util_to_json_from_pandas(data):\n \"\"\"\n explanation:\n 将pandas数据转换成json格式\t\t\n\n params:\n * data ->:\n meaning: pandas数据\n type: null\n optional: [null]\n\n return:\n dict\n\n demonstrate:\n Not described\n\n output:\n Not described\n \"\"\"\n\n \"\"\"需要对于datetime 和date 进行转换, 以免直接被变成了时间戳\"\"\"\n if 'datetime' in data.columns:\n data.datetime = data.datetime.apply(str)\n if 'date' in data.columns:\n data.date = data.date.apply(str)\n return json.loads(data.to_json(orient='records'))\n\n\ndef QA_util_to_json_from_numpy(data):\n pass\n\n\ndef QA_util_to_json_from_list(data):\n pass\n\n\ndef QA_util_to_list_from_pandas(data):\n \"\"\"\n explanation:\n 将pandas数据转换成列表\t\t\n\n params:\n * data ->:\n meaning: pandas数据\n type: null\n optional: [null]\n\n return:\n list\n\n demonstrate:\n Not described\n\n output:\n Not described\n \"\"\"\n\n return np.asarray(data).tolist()\n\n\ndef QA_util_to_list_from_numpy(data):\n \"\"\"\n explanation:\n 将numpy数据转换为列表\t\t\n\n params:\n * data ->:\n meaning: numpy数据\n type: null\n optional: [null]\n\n return:\n None\n\n demonstrate:\n Not described\n\n output:\n Not described\n \"\"\"\n\n return data.tolist()\n\n\ndef QA_util_to_pandas_from_json(data):\n \"\"\"\n explanation:\n 将json数据载入为pandas数据\t\t\n\n params:\n * data ->:\n meaning: json数据\n type: null\n optional: [null]\n\n return:\n DataFrame\n\n demonstrate:\n Not described\n\n output:\n Not described\n \"\"\"\n if isinstance(data, dict):\n return pd.DataFrame(data=[data, ])\n else:\n return pd.DataFrame(data=[{'value': data}])\n\n\ndef QA_util_to_pandas_from_list(data):\n \"\"\"\n explanation:\n 将列表数据转换为pandas\t\n\n params:\n * data ->:\n meaning: 列表数据\n type: list\n optional: [null]\n\n return:\n DataFrame\n\n demonstrate:\n Not described\n\n output:\n Not described\n \"\"\"\n\n if isinstance(data, list):\n return pd.DataFrame(data=data)\n", "# coding:utf-8\n#\n# The MIT License (MIT)\n#\n# Copyright (c) 2016-2019 yutiansut/QUANTAXIS\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the 
Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport pandas as pd\nimport datetime\nimport uuid\nfrom pymongo import ASCENDING, DESCENDING\nfrom QUANTAXIS.QAARP.QAPortfolio import QA_Portfolio\nfrom QUANTAXIS.QAUtil.QALogs import QA_util_log_info\nfrom QUANTAXIS.QAUtil.QARandom import QA_util_random_with_topic\nfrom QUANTAXIS.QAUtil.QASetting import QA_Setting, DATABASE\nfrom QUANTAXIS.QAUtil.QADate_trade import QA_util_get_next_day, QA_util_get_real_date\nfrom QUANTAXIS.QAUtil.QAParameter import MARKET_TYPE, FREQUENCE\n\n\nclass QA_User():\n \"\"\"QA_User \n User-->Portfolio-->Account/Strategy\n\n\n\n user ==> username / user_cookie\n ||\n portfolio ==> portfolio_cookie\n ||\n accounts ==> account_cookie\n\n :::::::::::::::::::::::::::::::::::::::::::::::::\n :: :: Portfolio 1 -- Account/Strategy 1 ::\n :: USER :: -- Account/Strategy 2 ::\n :: :: Portfolio 2 -- Account/Strategy 3 ::\n :::::::::::::::::::::::::::::::::::::::::::::::::\n\n :: 需要增加对于QA_USER的支持\n\n USER作为一个单位实体, 可以自由创建 组合Portfolio (需要被记录),修改 组合Portfolio\n\n @yutiansut \n 2018/05/08\n\n @jerryw 添加注释,和 🛠todo list\n 2018/05/16\n\n @royburns 1.根据指定的user_cookie创建user; 2.添加对应的测试代码; 3.添加注释\n 2018/05/18\n \"\"\"\n\n def __init__(\n self,\n user_cookie=None,\n username='defalut',\n phone='defalut',\n level='l1',\n utype='guests',\n password='default',\n coins=10000,\n wechat_id=None,\n money=0,\n *args,\n **kwargs\n ):\n \"\"\"[summary]\n\n Keyword Arguments:\n user_cookie {[type]} -- [description] (default: {None}) 随机初始化 user_cookie 的值 Acc+4数字id+4位大小写随机\n username {str} -- [description] (default: {'defalut'})\n phone {str} -- [description] (default: {'defalut'})\n level {str} -- [description] (default: {'l1'})\n utype {str} -- [description] (default: {'guests'})\n password {str} -- [description] (default: {'default'})\n coins {int} -- [description] (default: {10000})\n\n 关于积分系统:\n\n 积分系统用于订阅策略, 取消订阅策略是不会增加积分的\n\n \"\"\"\n\n #self.setting = QA_Setting()\n self.client = DATABASE.user\n\n ## user_cookie/ username / wechat_id\n self.client.create_index(\n [\n (\"user_cookie\",\n ASCENDING),\n (\"username\",\n ASCENDING),\n (\"wechat_id\",\n ASCENDING)\n ],\n unique=True\n )\n self.portfolio_list = []\n\n # ==============================\n self.phone = phone\n self.level = level\n self.utype = utype\n self.password = password\n self.username = username\n self.wechat_id = wechat_id\n\n if wechat_id is not None:\n\n if self.username == 'default':\n \"\"\"基于web的初始化\n \"\"\"\n\n self.username = wechat_id\n self.password = 'admin'\n else:\n \"\"\"\n 另一种 无 WECHATID 的模式, 适合本地python的调试\n @yutiansut\n \"\"\"\n if self.username == 'default':\n \"\"\"基于web的初始化\n \"\"\"\n\n self.username = 'admin'\n self.password = 'admin'\n\n self.user_cookie = QA_util_random_with_topic(\n 'USER'\n ) if user_cookie is None else user_cookie\n self.coins = coins # 积分\n self.money = money 
# 钱\n\n # ==============================\n self._subscribed_strategy = {}\n\n \"\"\"\n self._subscribed_code: {\n 'stock_cn': {\n '000001': ['1min','5min'],\n '600010': ['tick']\n },\n 'future_cn': {\n 'rb1910.SHFE':['tick','60min'],\n 'IF1909.IFFEX':['tick','1min']\n },\n 'index_cn': {\n '000300': ['1min']\n }\n }\n\n \"\"\"\n self._subscribed_code = {\n MARKET_TYPE.STOCK_CN: [],\n MARKET_TYPE.FUTURE_CN: [],\n MARKET_TYPE.INDEX_CN: [],\n MARKET_TYPE.OPTION_CN: []\n }\n self._signals = [] # 预期收到的信号\n self._cash = []\n self._history = []\n\n # ===============================\n\n self.coins_history = []\n self.coins_history_headers = [\n 'cost_coins',\n 'strategy_id',\n 'start',\n 'last',\n 'strategy_uuid',\n 'event'\n ]\n self.sync()\n\n def __repr__(self):\n return '< QA_USER {} with {} portfolio: {} >'.format(\n self.user_cookie,\n len(self.portfolio_list),\n self.portfolio_list\n )\n\n def __getitem__(self, portfolio_cookie: str):\n \"\"\"获取user下的portfolio\n\n Arguments:\n portfolio_cookie {str} -- [description]\n\n Returns:\n [type] -- [description]\n \"\"\"\n\n try:\n return self.get_portfolio(portfolio_cookie)\n except:\n return None\n\n @property\n def table(self):\n return pd.concat(\n [self.get_portfolio(po).table for po in self.portfolio_list],\n axis=1\n )\n\n def add_coins(self, coins):\n \"\"\"积分充值\n Arguments:\n coins {[type]} -- [description]\n \"\"\"\n\n self.coins += int(coins)\n\n @property\n def coins_table(self):\n return pd.DataFrame(\n self.coins_history,\n columns=self.coins_history_headers\n )\n\n def subscribe_strategy(\n self,\n strategy_id: str,\n last: int,\n today=datetime.date.today(),\n cost_coins=10\n ):\n \"\"\"订阅一个策略\n\n 会扣减你的积分\n\n Arguments:\n strategy_id {str} -- [description]\n last {int} -- [description]\n\n Keyword Arguments:\n today {[type]} -- [description] (default: {datetime.date.today()})\n cost_coins {int} -- [description] (default: {10})\n \"\"\"\n\n if self.coins > cost_coins:\n order_id = str(uuid.uuid1())\n self._subscribed_strategy[strategy_id] = {\n 'lasttime':\n last,\n 'start':\n str(today),\n 'strategy_id':\n strategy_id,\n 'end':\n QA_util_get_next_day(\n QA_util_get_real_date(str(today),\n towards=1),\n last\n ),\n 'status':\n 'running',\n 'uuid':\n order_id\n }\n self.coins -= cost_coins\n self.coins_history.append(\n [\n cost_coins,\n strategy_id,\n str(today),\n last,\n order_id,\n 'subscribe'\n ]\n )\n return True, order_id\n else:\n # return QAERROR.\n return False, 'Not Enough Coins'\n\n def unsubscribe_stratgy(self, strategy_id):\n \"\"\"取消订阅某一个策略\n\n Arguments:\n strategy_id {[type]} -- [description]\n \"\"\"\n\n today = datetime.date.today()\n order_id = str(uuid.uuid1())\n if strategy_id in self._subscribed_strategy.keys():\n self._subscribed_strategy[strategy_id]['status'] = 'canceled'\n\n self.coins_history.append(\n [0,\n strategy_id,\n str(today),\n 0,\n order_id,\n 'unsubscribe']\n )\n\n @property\n def subscribed_strategy(self):\n \"\"\"历史(包含正在订阅的)策略\n\n Returns:\n [type] -- [description]\n \"\"\"\n\n return pd.DataFrame(list(self._subscribed_strategy.values()))\n\n @property\n def subscribing_strategy(self):\n \"\"\"订阅一个策略\n\n Returns:\n [type] -- [description]\n \"\"\"\n\n res = self.subscribed_strategy.assign(\n remains=self.subscribed_strategy.end.apply(\n lambda x: pd.Timestamp(x) - pd.Timestamp(datetime.date.today())\n )\n )\n #res['left'] = res['end_time']\n # res['remains']\n res.assign(\n status=res['remains'].apply(\n lambda x: 'running'\n if x > datetime.timedelta(days=0) else 'timeout'\n )\n )\n return 
res.query('status==\"running\"')\n\n def change_wechatid(self, id):\n \"\"\"修改wechat\n\n Arguments:\n id {[type]} -- [description]\n \"\"\"\n\n self.wechat_id = id\n\n def sub_code(self, code, market_type=MARKET_TYPE.STOCK_CN):\n \"\"\"订阅某个品种\n \"\"\"\n if code not in self._subscribed_code[market_type]:\n self._subscribed_code[market_type].append(code)\n\n def unsub_code(self, code, market_type=MARKET_TYPE.STOCK_CN):\n \"\"\"取消订阅品种\n\n Arguments:\n code {[type]} -- [description]\n \"\"\"\n try:\n self._subscribed_code[market_type].remove(code)\n except:\n pass\n\n @property\n def subscribed_code(self):\n \"\"\"\n 关注的品种\n Returns:\n [type] -- [description]\n \"\"\"\n\n return self._subscribed_code\n\n def new_portfolio(self, portfolio_cookie=None):\n '''\n 根据 self.user_cookie 创建一个 portfolio\n :return:\n 如果存在 返回 新建的 QA_Portfolio\n 如果已经存在 返回 这个portfolio\n '''\n\n if portfolio_cookie not in self.portfolio_list:\n self.portfolio_list.append(portfolio_cookie)\n return QA_Portfolio(\n user_cookie=self.user_cookie,\n portfolio_cookie=portfolio_cookie\n )\n else:\n print(\n \" prortfolio with user_cookie \",\n self.user_cookie,\n \" already exist!!\"\n )\n return self.get_portfolio(portfolio_cookie)\n\n def get_account(self, portfolio_cookie: str, account_cookie: str):\n \"\"\"直接从二级目录拿到account\n\n Arguments:\n portfolio_cookie {str} -- [description]\n account_cookie {str} -- [description]\n\n Returns:\n [type] -- [description]\n \"\"\"\n # QA_Portfolio(\n # user_cookie=self.user_cookie,\n # portfolio_cookie=item\n # )\n try:\n return self.get_portfolio(portfolio_cookie).get_account(account_cookie)\n except:\n return None\n\n def get_portfolio(self, portfolio_cookie: str):\n '''\n 'get a portfolio'\n 从 portfolio_list dict字典中 根据 portfolio key 获取\n :param portfolio: QA_Portfolio类型\n :return: QA_Portfolio类型\n '''\n # return self.portfolio_list[portfolio]\n # fix here use cookie as key to find value in dict\n return QA_Portfolio(user_cookie=self.user_cookie, portfolio_cookie=portfolio_cookie)\n\n def generate_simpleaccount(self):\n \"\"\"make a simple account with a easier way\n 如果当前user中没有创建portfolio, 则创建一个portfolio,并用此portfolio创建一个account\n 如果已有一个或多个portfolio,则使用第一个portfolio来创建一个account\n \"\"\"\n if len(self.portfolio_list) < 1:\n po = self.new_portfolio()\n else:\n po = self.get_portfolio(self.portfolio_list[0])\n ac = po.new_account()\n return ac, po\n\n def register_account(self, account, portfolio_cookie=None):\n '''\n 注册一个account到portfolio组合中\n account 也可以是一个策略类,实现其 on_bar 方法\n :param account: 被注册的account\n :return:\n '''\n # 查找 portfolio\n if len(self.portfolio_list) < 1:\n po = self.new_portfolio()\n elif portfolio_cookie is not None:\n po = self.get_portfolio(portfolio_cookie)\n else:\n po = self.get_portfolio(self.portfolio_list[0])\n # 把account 添加到 portfolio中去\n po.add_account(account)\n return (po, account)\n\n @property\n def message(self):\n return {\n 'user_cookie': self.user_cookie,\n 'username': self.username,\n 'password': self.password,\n 'wechat_id': self.wechat_id,\n 'phone': self.phone,\n 'level': self.level,\n 'utype': self.utype,\n 'coins': self.coins,\n 'coins_history': self.coins_history,\n 'money': self.money,\n 'subuscribed_strategy': self._subscribed_strategy,\n 'subscribed_code': self.subscribed_code,\n 'portfolio_list': self.portfolio_list,\n 'lastupdatetime': str(datetime.datetime.now())\n }\n\n def save(self):\n \"\"\"\n 将QA_USER的信息存入数据库\n\n ATTENTION:\n\n 在save user的时候, 需要同时调用 user/portfolio/account链条上所有的实例化类 同时save\n\n \"\"\"\n if self.wechat_id is not None:\n 
self.client.update(\n {'wechat_id': self.wechat_id},\n {'$set': self.message},\n upsert=True\n )\n else:\n self.client.update(\n {\n 'username': self.username,\n 'password': self.password\n },\n {'$set': self.message},\n upsert=True\n )\n\n # user ==> portfolio 的存储\n # account的存储在 portfolio.save ==> account.save 中\n # for portfolio in list(self.portfolio_list.values()):\n # portfolio.save()\n\n def sync(self):\n \"\"\"基于账户/密码去sync数据库\n \"\"\"\n if self.wechat_id is not None:\n\n res = self.client.find_one({'wechat_id': self.wechat_id})\n else:\n res = self.client.find_one(\n {\n 'username': self.username,\n 'password': self.password\n }\n )\n if res is None:\n\n if self.client.find_one({'username': self.username}) is None:\n self.client.insert_one(self.message)\n return self\n else:\n raise RuntimeError('账户名已存在且账户密码不匹配')\n\n else:\n self.reload(res)\n\n return self\n\n # @property\n # def node_view(self):\n\n # links = [\n # {\n # 'source': self.username,\n # 'target': item\n # } for item in self.portfolio_list.keys()\n # ]\n # data = [{'name': self.username, 'symbolSize': 100, 'value': 1}]\n # for port in self.portfolio_list.values():\n # links.extend(port.node_view['links'])\n # data.append(\n # {\n # 'name': port.portfolio_cookie,\n # 'symbolSize': 80,\n # 'value': 2\n # }\n # )\n # for acc in port.accounts.values():\n # data.append(\n # {\n # 'name': acc.account_cookie,\n # 'symbolSize': 50,\n # 'value': 3\n # }\n # )\n\n # return {\n # 'node_name':\n # self.username,\n # 'sub_node':\n # [portfolio.node_view for portfolio in self.portfolio_list.values()],\n # 'links':\n # links,\n # 'data':\n # data\n # }\n\n def reload(self, message):\n \"\"\"恢复方法\n\n Arguments:\n message {[type]} -- [description]\n \"\"\"\n\n self.phone = message.get('phone')\n self.level = message.get('level')\n self.utype = message.get('utype')\n self.coins = message.get('coins')\n self.wechat_id = message.get('wechat_id')\n self.coins_history = message.get('coins_history')\n self.money = message.get('money')\n self._subscribed_strategy = message.get('subuscribed_strategy')\n subscribed_code = message.get('subscribed_code')\n if isinstance(subscribed_code, list):\n pass\n else:\n self._subscribed_code = subscribed_code\n self.username = message.get('username')\n self.password = message.get('password')\n self.user_cookie = message.get('user_cookie')\n #\n self.portfolio_list = list(set([\n item['portfolio_cookie'] for item in DATABASE.portfolio.find(\n {'user_cookie': self.user_cookie},\n {\n 'portfolio_cookie': 1,\n '_id': 0\n }\n )\n ]))\n\n # portfolio_list = message.get('portfolio_list')\n # if len(portfolio_list) > 0:\n # self.portfolio_list = dict(\n # zip(\n # portfolio_list,\n # [\n # QA_Portfolio(\n # user_cookie=self.user_cookie,\n # portfolio_cookie=item\n # ) for item in portfolio_list\n # ]\n # )\n # )\n # else:\n # self.portfolio_list = {}\n\n\nif __name__ == '__main__':\n\n # 测试不对\n user = QA_User(user_cookie='user_admin')\n folio = user.new_portfolio('folio_admin')\n ac1 = user.get_portfolio(folio).new_account('account_admin')\n\n print(user)\n print(user.get_portfolio(folio))\n print(user.get_portfolio(folio).get_account(ac1))\n", "# coding:utf-8\n#\n# The MIT License (MIT)\n#\n# Copyright (c) 2016-2019 yutiansut/QUANTAXIS\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, 
publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\nimport datetime\nimport queue\nimport time\nimport click\nfrom concurrent.futures import ThreadPoolExecutor\nfrom threading import Thread, Timer\n\nimport pandas as pd\nfrom pytdx.hq import TdxHq_API\n\nfrom QUANTAXIS.QAEngine.QAThreadEngine import QA_Thread\nfrom QUANTAXIS.QAUtil.QADate_trade import QA_util_if_tradetime\nfrom QUANTAXIS.QAUtil.QASetting import DATABASE, stock_ip_list\nfrom QUANTAXIS.QAUtil.QASql import QA_util_sql_mongo_sort_ASCENDING\nfrom QUANTAXIS.QAUtil.QATransform import QA_util_to_json_from_pandas\n\n\n\"\"\"\n准备做一个多连接的连接池执行器Executor\n当持续获取数据/批量数据的时候,可以减小服务器的压力,并且可以更快的进行并行处理\n\"\"\"\n\n\nclass QA_Tdx_Executor(QA_Thread):\n def __init__(self, thread_num=2, timeout=1, sleep_time=1, *args, **kwargs):\n super().__init__(name='QATdxExecutor')\n self.thread_num = thread_num\n self._queue = queue.Queue(maxsize=200)\n self.api_no_connection = TdxHq_API()\n self._api_worker = Thread(\n target=self.api_worker, args=(), name='API Worker')\n self._api_worker.start()\n self.timeout = timeout\n self.executor = ThreadPoolExecutor(self.thread_num)\n self.sleep_time = sleep_time\n\n def __getattr__(self, item):\n try:\n api = self.get_available()\n func = api.__getattribute__(item)\n\n def wrapper(*args, **kwargs):\n res = self.executor.submit(func, *args, **kwargs)\n self._queue.put(api)\n return res\n return wrapper\n except:\n return self.__getattr__(item)\n\n def _queue_clean(self):\n self._queue = queue.Queue(maxsize=200)\n\n def _test_speed(self, ip, port=7709):\n\n api = TdxHq_API(raise_exception=True, auto_retry=False)\n _time = datetime.datetime.now()\n # print(self.timeout)\n try:\n with api.connect(ip, port, time_out=1):\n res = api.get_security_list(0, 1)\n # print(res)\n # print(len(res))\n if len(api.get_security_list(0, 1)) > 800:\n return (datetime.datetime.now() - _time).total_seconds()\n else:\n return datetime.timedelta(9, 9, 0).total_seconds()\n except Exception as e:\n return datetime.timedelta(9, 9, 0).total_seconds()\n\n def get_market(self, code):\n code = str(code)\n if code[0] in ['5', '6', '9'] or code[:3] in [\"009\", \"126\", \"110\", \"201\", \"202\", \"203\", \"204\"]:\n return 1\n return 0\n\n def get_frequence(self, frequence):\n if frequence in ['day', 'd', 'D', 'DAY', 'Day']:\n frequence = 9\n elif frequence in ['w', 'W', 'Week', 'week']:\n frequence = 5\n elif frequence in ['month', 'M', 'm', 'Month']:\n frequence = 6\n elif frequence in ['Q', 'Quarter', 'q']:\n frequence = 10\n elif frequence in ['y', 'Y', 'year', 'Year']:\n frequence = 11\n elif str(frequence) in ['5', '5m', '5min', 'five']:\n frequence = 0\n elif str(frequence) in ['1', '1m', '1min', 'one']:\n frequence = 8\n elif str(frequence) in ['15', '15m', '15min', 'fifteen']:\n frequence = 1\n elif 
str(frequence) in ['30', '30m', '30min', 'half']:\n frequence = 2\n elif str(frequence) in ['60', '60m', '60min', '1h']:\n frequence = 3\n\n return frequence\n\n @property\n def ipsize(self):\n return len(self._queue.qsize())\n\n @property\n def api(self):\n return self.get_available()\n\n def get_available(self):\n\n if self._queue.empty() is False:\n return self._queue.get_nowait()\n else:\n Timer(0, self.api_worker).start()\n return self._queue.get()\n\n def api_worker(self):\n data = []\n if self._queue.qsize() < 80:\n for item in stock_ip_list:\n if self._queue.full():\n break\n _sec = self._test_speed(ip=item['ip'], port=item['port'])\n if _sec < self.timeout*3:\n try:\n self._queue.put(TdxHq_API(heartbeat=False).connect(\n ip=item['ip'], port=item['port'], time_out=self.timeout*2))\n except:\n pass\n else:\n self._queue_clean()\n Timer(0, self.api_worker).start()\n Timer(300, self.api_worker).start()\n\n def _singal_job(self, context, id_, time_out=0.7):\n try:\n _api = self.get_available()\n\n __data = context.append(self.api_no_connection.to_df(_api.get_security_quotes(\n [(self._select_market_code(x), x) for x in code[80 * id_:80 * (id_ + 1)]])))\n __data['datetime'] = datetime.datetime.now()\n self._queue.put(_api) # 加入注销\n return __data\n except:\n return self.singal_job(context, id_)\n\n def get_realtime(self, code):\n context = pd.DataFrame()\n\n code = [code] if isinstance(code, str) is str else code\n try:\n for id_ in range(int(len(code) / 80) + 1):\n context = self._singal_job(context, id_)\n\n data = context[['datetime', 'last_close', 'code', 'open', 'high', 'low', 'price', 'cur_vol',\n 's_vol', 'b_vol', 'vol', 'ask1', 'ask_vol1', 'bid1', 'bid_vol1', 'ask2', 'ask_vol2',\n 'bid2', 'bid_vol2', 'ask3', 'ask_vol3', 'bid3', 'bid_vol3', 'ask4',\n 'ask_vol4', 'bid4', 'bid_vol4', 'ask5', 'ask_vol5', 'bid5', 'bid_vol5']]\n data['datetime'] = data['datetime'].apply(lambda x: str(x))\n return data.set_index('code', drop=False, inplace=False)\n except:\n return None\n\n def get_realtime_concurrent(self, code):\n code = [code] if isinstance(code, str) is str else code\n\n try:\n data = {self.get_security_quotes([(self.get_market(\n x), x) for x in code[80 * pos:80 * (pos + 1)]]) for pos in range(int(len(code) / 80) + 1)}\n return (pd.concat([self.api_no_connection.to_df(i.result()) for i in data]), datetime.datetime.now())\n except:\n pass\n\n def get_security_bar_concurrent(self, code, _type, lens):\n try:\n\n data = {self.get_security_bars(self.get_frequence(_type), self.get_market(\n str(code)), str(code), 0, lens) for code in code}\n\n return [i.result() for i in data]\n\n except:\n raise Exception\n\n def _get_security_bars(self, context, code, _type, lens):\n try:\n _api = self.get_available()\n for i in range(1, int(lens / 800) + 2):\n context.extend(_api.get_security_bars(self.get_frequence(\n _type), self.get_market(str(code)), str(code), (i - 1) * 800, 800))\n print(context)\n self._queue.put(_api)\n return context\n except Exception as e:\n return self._get_security_bars(context, code, _type, lens)\n\n def get_security_bar(self, code, _type, lens):\n code = [code] if isinstance(code, str) is str else code\n context = []\n try:\n for item in code:\n context = self._get_security_bars(context, item, _type, lens)\n return context\n except Exception as e:\n raise e\n\n def save_mongo(self, data, client=DATABASE):\n database = DATABASE.get_collection(\n 'realtime_{}'.format(datetime.date.today()))\n\n database.insert_many(QA_util_to_json_from_pandas(data))\n\n def run(self):\n\n 
sleep = int(self.sleep_time)\n _time1 = datetime.datetime.now()\n database = DATABASE.get_collection(\n 'realtime_{}'.format(datetime.date.today()))\n database.create_index([('code', QA_util_sql_mongo_sort_ASCENDING)])\n database.create_index([('datetime', QA_util_sql_mongo_sort_ASCENDING)])\n\n from QUANTAXIS.QAFetch.QAQuery_Advance import QA_fetch_stock_block_adv\n code = QA_fetch_stock_block_adv().code\n\n while True:\n _time = datetime.datetime.now()\n if QA_util_if_tradetime(_time): # 如果在交易时间\n data = self.get_realtime_concurrent(code)\n \n data[0]['datetime'] = data[1]\n self.save_mongo(data[0])\n\n print('Time {}'.format(\n (datetime.datetime.now() - _time).total_seconds()))\n time.sleep(sleep)\n print('Connection Pool NOW LEFT {} Available IP'.format(\n self._queue.qsize()))\n print('Program Last Time {}'.format(\n (datetime.datetime.now() - _time1).total_seconds()))\n else:\n print('Not Trading time {}'.format(_time))\n time.sleep(sleep)\n\n\ndef get_bar(timeout=1, sleep=1):\n sleep = int(sleep)\n _time1 = datetime.datetime.now()\n from QUANTAXIS.QAFetch.QAQuery_Advance import QA_fetch_stock_block_adv\n code = QA_fetch_stock_block_adv().code\n print(len(code))\n x = QA_Tdx_Executor(timeout=float(timeout))\n print(x._queue.qsize())\n print(x.get_available())\n\n while True:\n _time = datetime.datetime.now()\n if QA_util_if_tradetime(_time): # 如果在交易时间\n data = x.get_security_bar_concurrent(code, 'day', 1)\n\n print('Time {}'.format(\n (datetime.datetime.now() - _time).total_seconds()))\n time.sleep(sleep)\n print('Connection Pool NOW LEFT {} Available IP'.format(\n x._queue.qsize()))\n print('Program Last Time {}'.format(\n (datetime.datetime.now() - _time1).total_seconds()))\n\n return data\n else:\n print('Not Trading time {}'.format(_time))\n time.sleep(sleep)\n\n\ndef get_day_once():\n\n _time1 = datetime.datetime.now()\n from QUANTAXIS.QAFetch.QAQuery_Advance import QA_fetch_stock_block_adv\n code = QA_fetch_stock_block_adv().code\n x = QA_Tdx_Executor()\n return x.get_security_bar_concurrent(code, 'day', 1)\n\n\[email protected]()\[email protected]('--timeout', default=0.2, help='timeout param')\[email protected]('--sleep', default=1, help='sleep step')\ndef bat(timeout=0.2, sleep=1):\n QA_Tdx_Executor(timeout=timeout, sleep_time=sleep).start()\n\n\nif __name__ == '__main__':\n QA_Tdx_Executor().start()\n" ]
[ [ "numpy.asarray", "pandas.DataFrame" ], [ "pandas.Timestamp", "pandas.DataFrame" ], [ "pandas.DataFrame" ] ]
google/parallel_accel
[ "b58fda1c3a22f2aaa9a97337d602cd72c49ee8be" ]
[ "parallel_accel/Analysis/benchmarks/gralt/gralt_benchmarks.py" ]
[ "# Copyright 2021 The ParallelAccel Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"\nTest the speed of GRALTool on standard benchmark acyclic_graphs.\n\nThis is deprecated code and is included for reference. New benchmarks should use the\nBenchmark and BenchmarkSuite models.\n\"\"\"\n\nimport json\nimport os\nimport time\n\nimport benchmarks.acyclic_graphs.benchmark_acyclic_graphs as acyclic_graphs\nfrom benchmarks.acyclic_graphs import pbaxisum\nimport benchmarks.gralt.settings as settings\n\nimport linear_algebra\nimport tensorflow as tf\nimport grapal_tool as gralt\n\nsample_subgraph = gralt.subgraphs.Sample()\nexpectation_subgraph = gralt.subgraphs.Expectation()\nstate_subgraph = gralt.subgraphs.State()\n\n\ndef exp_and_grad_call(\n acyclic_graph_t, symbol_names_t, symbol_values_t, ops_t, num_samples_t):\n with tf.GradientTape() as g:\n g.watch(symbol_values_t)\n exp = expectation_subgraph(\n acyclic_graph_t, symbol_names=symbol_names_t, symbol_values=symbol_values_t,\n operators=ops_t)\n grad = g.gradient(exp, symbol_values_t)\n return exp, grad\n\n\ncall_dict = {\n \"samples\": lambda acyclic_graph_t, symbol_names_t, symbol_values_t, ops_t,\n num_samples_t: sample_subgraph(\n acyclic_graph_t, symbol_names=symbol_names_t, symbol_values=symbol_values_t,\n repetitions=num_samples_t),\n \"exp\": lambda acyclic_graph_t, symbol_names_t, symbol_values_t, ops_t,\n num_samples_t: expectation_subgraph(\n acyclic_graph_t, symbol_names=symbol_names_t, symbol_values=symbol_values_t,\n operators=ops_t),\n \"exp_and_grad\": exp_and_grad_call,\n \"state\": lambda acyclic_graph_t, symbol_names_t, symbol_values_t, ops_t,\n num_samples_t: state_subgraph(\n acyclic_graph_t, symbol_names=symbol_names_t, symbol_values=symbol_values_t),\n}\n\n\nget_num_samples_dict = {\n \"samples\": lambda settings_dict:\n tf.constant([settings_dict[\"num_samples\"]]),\n \"exp\": lambda settings_dict: tf.constant([0]),\n \"exp_and_grad\": lambda settings_dict: tf.constant([0]),\n \"state\": lambda settings_dict: tf.constant([0]),\n}\n\n\nget_ops_dict = {\n \"samples\": lambda discretes: tf.constant(\"\"),\n \"exp\": lambda discretes:\n gralt.convert_to_tensor([[pbaxisum.get_random_prob_basis_axis_sum(discretes)]]),\n \"exp_and_grad\": lambda discretes:\n gralt.convert_to_tensor([[pbaxisum.get_random_prob_basis_axis_sum(discretes)]]),\n \"state\": lambda discretes: tf.constant(\"\"),\n}\n\n\ndef run_gralt_benchmarks(\n min_subgraphs, max_subgraphs, skip_subgraphs, min_discretes, max_discretes, iterations,\n num_samples, rounding_digits, acyclic_graph_type, sim_type, rel_save_dir,\n save_dir_prefix=os.getcwd()):\n\n if acyclic_graph_type == \"approxopt\":\n acyclic_graph_builder = acyclic_graphs.approxopt\n elif acyclic_graph_type == \"hea\":\n acyclic_graph_builder = acyclic_graphs.hea\n else:\n raise ValueError(acyclic_graph_type + \" is not a valid type of test acyclic_graph.\")\n\n if sim_type in 
{\"samples\", \"exp\", \"exp_and_grad\", \"state\"}:\n call_subgraph = call_dict[sim_type]\n get_num_samples = get_num_samples_dict[sim_type]\n get_ops = get_ops_dict[sim_type]\n else:\n raise ValueError(sim_type + \" is not a valid simulation types.\")\n\n # Save settings.\n full_save_dir = os.path.join(save_dir_prefix, rel_save_dir)\n settings.set_settings(\n min_subgraphs=min_subgraphs,\n max_subgraphs=max_subgraphs,\n skip_subgraphs=skip_subgraphs,\n min_discretes=min_discretes,\n max_discretes=max_discretes,\n iterations=iterations,\n num_samples=num_samples,\n rounding_digits=rounding_digits,\n acyclic_graph_type=acyclic_graph_type,\n sim_type=sim_type,\n full_save_dir=full_save_dir\n )\n settings_dict = settings.load_settings(full_save_dir)\n\n # Run benchmarks.\n num_samples_t = get_num_samples(settings_dict)\n for q in range(settings_dict[\"min_discretes\"], settings_dict[\"max_discretes\"] + 1):\n print(f\"Current discrete size: {q}\")\n benchmarks_dict = dict()\n discretes = linear_algebra.GridSpace.rect(1, q)\n ops_t = get_ops(discretes)\n for l in range(\n settings_dict[\"min_subgraphs\"], settings_dict[\"max_subgraphs\"] + 1,\n settings_dict[\"skip_subgraphs\"]):\n print(f\"Current number of subgraphs: {l}\")\n benchmarks_dict[l] = {}\n acyclic_graph, symbols = acyclic_graph_builder(discretes, l, acyclic_graph_type)\n is_acyclic_graph_compiled = False\n symbol_names_t = tf.constant([str(s) for s in symbols])\n for r in range(settings_dict[\"iterations\"]):\n symbol_values_t = tf.random.uniform(\n [1, len(symbols)], minval=-2.0, maxval=2.0)\n start = time.time()\n if not is_acyclic_graph_compiled:\n compiled_acyclic_graph = gralt.convert_to_tensor([acyclic_graph])\n is_acyclic_graph_compiled = True\n result = call_subgraph(\n compiled_acyclic_graph, symbol_names_t, symbol_values_t,\n ops_t, num_samples_t)\n stop = time.time()\n this_runtime = round(stop - start, rounding_digits)\n if r == 0:\n # First run is special because it considers the compilation time\n benchmarks_dict[l][\"initial\"] = this_runtime\n benchmarks_dict[l][\"remaining\"] = []\n print(\"initial runtime of {} seconds\".format(this_runtime))\n else:\n print(\"subsequent runtime of {} seconds\".format(this_runtime))\n benchmarks_dict[l][\"remaining\"].append(this_runtime)\n benchmarks_dict[l][\"depth\"] = len(acyclic_graph)\n\n # Checkpoint the benchmarks after each discrete number.\n benchmarks_filename = \"benchmarks_dict_{}.json\".format(q)\n benchmarks_data_file = os.path.join(full_save_dir, benchmarks_filename)\n with open(benchmarks_data_file, 'w') as datafile:\n json.dump(benchmarks_dict, datafile)\n" ]
[ [ "tensorflow.constant", "tensorflow.GradientTape" ] ]
ChuanTianML/mxnet_word_lm
[ "231b67370712a5dccae9433858dd66800005a00f" ]
[ "data.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport os, gzip\nimport sys\nimport mxnet as mx\nimport numpy as np\n\nclass Dictionary(object):\n \"\"\"字典类\n @func add_word(word): 在字典中添加单词word\n \"\"\"\n def __init__(self):\n self.word2idx = {} #单词到id\n self.idx2word = [] #id到单词\n self.word_count = [] #统计每个单词在语料中出现的次数,index为单词id\n\n def add_word(self, word): #尝试添加一个单词\n if word not in self.word2idx:\n self.idx2word.append(word)\n self.word2idx[word] = len(self.idx2word) - 1\n self.word_count.append(0)\n index = self.word2idx[word]\n self.word_count[index] += 1\n return index\n\n def __len__(self):\n return len(self.idx2word)\n\nclass Corpus(object):\n def __init__(self, path):\n \"\"\"\n @param path: 数据所在目录\n \"\"\"\n self.dictionary = Dictionary() #构造字典实例,准备根据语料构造字典\n self.train = self.tokenize(path + 'train.txt') #tokenize train/valid/test语料,同时获得字典\n self.valid = self.tokenize(path + 'valid.txt')\n self.test = self.tokenize(path + 'test.txt')\n\n def tokenize(self, path):\n \"\"\"构建词表,tokenize语料(转wordid)\n @param path: 语料文件路径\n @return: 转为wordid的语料, 形状为(token数量,)\n @notes: 1.添加了句子结束符'<eos>'\n 2.语料中所有token均被添加到字典\n 3.最后的ids怎么不分行,而是把整个语料文件存进一个长数组?\n \"\"\"\n \"\"\"Tokenizes a text file.\"\"\"\n assert os.path.exists(path)\n # Add words to the dictionary\n with open(path, 'r') as f:\n tokens = 0 #tokens记录整个文件的token数量\n for line in f:\n words = line.split() + ['<eos>']\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n\n # Tokenize file content\n with open(path, 'r') as f:\n ids = np.zeros((tokens,), dtype='int32') #ids是整个语料文件所有token的wordid\n token = 0\n for line in f:\n words = line.split() + ['<eos>']\n for word in words:\n ids[token] = self.dictionary.word2idx[word]\n token += 1\n\n return mx.nd.array(ids, dtype='int32')\n\ndef batchify(data, batch_size):\n \"\"\"\n @param data: (Corpus.[train/valid/test]) tokenize后的数据\n @param batch_size: batch size\n @return: 按batch分好的数据,形状为(batch数量,batch size)\n @notes: source corpus: [我,爱,你,们,大,家,好,啊,晚上,吃,的,什么,你,是,哪,位,今天,天气,怎么,样,不,告,诉,你]\n reshape(3,8): [[我, 爱, 你, 们, 大, 家, 好, 啊],\n [晚上, 吃, 的, 什么, 你, 是, 哪, 位],\n [今天, 天气, 怎么, 样, 不, 告, 诉, 你]]\n 即reshape((batch_size=3, nbatch=8),得到形状(batch_size, batch_num*sentence_len)\n 最清晰的数据形状应该是(batch_num, batch_size, sentence_len),因为这里仅仅保留了2个维度,所以nbatch=batch_num*sentence_len,所以上面的形状不直观\n\n T: [[我, 晚上, 今天],\n [爱, 吃, 天气],\n [你, 的, 怎么],\n [们, 什么, 样]\n [大, 你, 不]\n [家, 是, 告]\n [好, 哪, 诉]\n [啊, 位, 你]]\n 得到形状(batch_num*sentence_len, batch_size)\n\n iter_next()函数取一个batch的操作是:假设bptt=4,也就是上面每个句子的长度\n 第一次取得到: [[我, 晚上, 今天],\n [爱, 吃, 天气],\n [你, 的, 怎么],\n [们, 什么, 样]]\n 第二次取得到: [[大, 你, 不]\n [家, 是, 告]\n [好, 哪, 诉]\n [啊, 位, 你]]\n 即,在0维度上,一次取一个sentence_len,也就是去了batch_num次\n \"\"\"\n \"\"\"Reshape data into (num_example, batch_size)\"\"\"\n nbatch = data.shape[0] // 
batch_size #获取batch的数量,1.从这里的逻辑来看,batch_size单位是token而不是句子? 2.使用整数除法,尾巴舍弃不要了啊?\n data = data[:nbatch * batch_size] #两个目的吧,一是转list,二是去除尾巴,即每个batch都是满的\n data = data.reshape((batch_size, nbatch)).T #转形状,为(bptt*batch_num,batch_size)\n return data\n\nclass CorpusIter(mx.io.DataIter):\n \"\"\"数据迭代器\n \"\"\"\n \"An iterator that returns the a batch of sequence each time\"\n def __init__(self, source, batch_size, bptt):\n \"\"\"初始化数据迭代器\n @param source: (Corpus.[train/valid/test]) tokenize后的数据\n @param batch_size: batch size\n @param bptt: 句子长度\n \"\"\"\n super(CorpusIter, self).__init__()\n self.batch_size = batch_size\n self.provide_data = [('data', (bptt, batch_size), np.int32)] #一个list,只有一个tuple元素,tuple有3个元素。 输入数据的形状(bptt, batch_size)\n self.provide_label = [('label', (bptt, batch_size))] #一个list,只要一个tuple元素,tuple有2个元素。 输入label的形状(bptt, batch_size)\n self._index = 0\n self._bptt = bptt\n self._source = batchify(source, batch_size) #数据按batch分好,得到形状为(batch数量,batch size)的数据\n\n def iter_next(self):\n \"\"\"mxnet: move to the next batch\n \"\"\"\n i = self._index #记录当前取到的位置\n if i+self._bptt > self._source.shape[0] - 1:\n return False\n self._next_data = self._source[i:i+self._bptt] #得到形状(bptt, batch_size)\n self._next_label = self._source[i+1:i+1+self._bptt].astype(np.float32) #得到形状(bptt, batch_size)\n self._index += self._bptt\n return True\n\n def next(self):\n \"\"\"mxnet: get next data batch from iterator\n \"\"\"\n if self.iter_next(): #还有数据可取,则返回数据\n return mx.io.DataBatch(data=self.getdata(), label=self.getlabel()) #\n else: #数据已经取完,则抛出终止迭代错误\n raise StopIteration\n\n def reset(self):\n self._index = 0\n self._next_data = None\n self._next_label = None\n\n def getdata(self):\n \"\"\"mxnet: get data of current batch\n \"\"\"\n return [self._next_data] #形状(1, bptt, batch_size)\n\n def getlabel(self):\n \"\"\"mxnet: get label of current batch\n \"\"\"\n return [self._next_label] #形状(1, bptt, batch_size)\n" ]
[ [ "numpy.zeros" ] ]
JosephRynkiewicz/CIFAR100
[ "2eeef95480fdc8454296cbe2f90011aef660c6a8" ]
[ "utils.py" ]
[ "'''Some helper functions for PyTorch, including:\n - get_mean_and_std: calculate the mean and std value of dataset.\n - msr_init: net parameter initialization.\n - progress_bar: progress bar mimic xlua.progress.\n'''\nimport os\nimport sys\nimport time\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\n\n\ndef get_mean_and_std(dataset):\n '''Compute the mean and std value of dataset.'''\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=2)\n mean = torch.zeros(3)\n std = torch.zeros(3)\n print('==> Computing mean and std..')\n for inputs, targets in dataloader:\n for i in range(3):\n mean[i] += inputs[:,i,:,:].mean()\n std[i] += inputs[:,i,:,:].std()\n mean.div_(len(dataset))\n std.div_(len(dataset))\n return mean, std\n\n#def init_params(net):\n# '''Init layer parameters.'''\n# for m in net.modules():\n# if isinstance(m, nn.Conv2d):\n# init.kaiming_normal(m.weight, mode='fan_out')\n# if m.bias:\n# init.constant(m.bias, 0)\n# elif isinstance(m, nn.BatchNorm2d):\n# init.constant(m.weight, 1)\n# init.constant(m.bias, 0)\n# elif isinstance(m, nn.Linear):\n# init.normal(m.weight, std=1e-3)\n# if m.bias:\n# init.constant(m.bias, 0)\n\n\n_, term_width = os.popen('stty size', 'r').read().split()\nterm_width = int(term_width)\n\nTOTAL_BAR_LENGTH = 65.\nlast_time = time.time()\nbegin_time = last_time\ndef progress_bar(current, total, msg=None):\n global last_time, begin_time\n if current == 0:\n begin_time = time.time() # Reset for new bar.\n\n cur_len = int(TOTAL_BAR_LENGTH*current/total)\n rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1\n\n sys.stdout.write(' [')\n for i in range(cur_len):\n sys.stdout.write('=')\n sys.stdout.write('>')\n for i in range(rest_len):\n sys.stdout.write('.')\n sys.stdout.write(']')\n\n cur_time = time.time()\n step_time = cur_time - last_time\n last_time = cur_time\n tot_time = cur_time - begin_time\n\n L = []\n L.append(' Step: %s' % format_time(step_time))\n L.append(' | Tot: %s' % format_time(tot_time))\n if msg:\n L.append(' | ' + msg)\n\n msg = ''.join(L)\n sys.stdout.write(msg)\n for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):\n sys.stdout.write(' ')\n\n # Go back to the center of the bar.\n for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):\n sys.stdout.write('\\b')\n sys.stdout.write(' %d/%d ' % (current+1, total))\n\n if current < total-1:\n sys.stdout.write('\\r')\n else:\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\ndef format_time(seconds):\n days = int(seconds / 3600/24)\n seconds = seconds - days*3600*24\n hours = int(seconds / 3600)\n seconds = seconds - hours*3600\n minutes = int(seconds / 60)\n seconds = seconds - minutes*60\n secondsf = int(seconds)\n seconds = seconds - secondsf\n millis = int(seconds*1000)\n\n f = ''\n i = 1\n if days > 0:\n f += str(days) + 'D'\n i += 1\n if hours > 0 and i <= 2:\n f += str(hours) + 'h'\n i += 1\n if minutes > 0 and i <= 2:\n f += str(minutes) + 'm'\n i += 1\n if secondsf > 0 and i <= 2:\n f += str(secondsf) + 's'\n i += 1\n if millis > 0 and i <= 2:\n f += str(millis) + 'ms'\n i += 1\n if f == '':\n f = '0ms'\n return f\n\n\ndef get_lr(step, base_lr=0.003):\n \"\"\"Returns learning-rate for `step` or None at the end.\"\"\"\n supports = [500, 3000, 6000, 9000, 10_000]\n # Linear warmup\n if step < supports[0]:\n return base_lr * step / supports[0]\n # End of training\n elif step >= supports[-1]:\n return None\n # Staircase decays by factor of 10\n else:\n for s in supports[1:]:\n if s < step:\n base_lr /= 10\n return 
base_lr\n\n\ndef recycle(iterable):\n \"\"\"Variant of itertools.cycle that does not save iterates.\"\"\"\n while True:\n for i in iterable:\n yield i\n" ]
[ [ "torch.utils.data.DataLoader", "torch.zeros" ] ]
dmitry-vorobiev/kaggle-deepfake-detection-challenge
[ "d8b545e1944342ba25209f1f62d9ca70314ab73a" ]
[ "src/model/loss.py" ]
[ "import torch\nimport torch.nn.functional as F\nfrom torch import FloatTensor, LongTensor, Tensor\nfrom typing import Dict, Tuple\n\nfrom . import ModelOut\nfrom .ops import act, ones, zeros, reshape_as\nfrom torch import nn\n\nBatch = Tuple[FloatTensor, LongTensor]\n\n\ndef activation_loss(x: Tensor, y: LongTensor) -> Tensor:\n device = x.device\n pos = y.nonzero().reshape(-1)\n neg = (y - 1).nonzero().reshape(-1)\n x0, x1 = x[neg], x[pos]\n n0, n1 = x0.size(0), x1.size(0)\n \n a0_x0 = act(x0, zeros(n0, device))\n a1_x0 = act(x0, ones(n0, device))\n \n a1_x1 = act(x1, ones(n1, device))\n a0_x1 = act(x1, zeros(n1, device))\n \n neg_loss = (a0_x0 - 1).abs() + a1_x0\n pos_loss = (a1_x1 - 1).abs() + a0_x1\n\n return (neg_loss.sum() + pos_loss.sum()) / y.size(0)\n\n\ndef activation_loss_fixed_shape(x: Tensor, y: LongTensor) -> Tensor:\n N = y.size(0)\n device = x.device\n\n a0 = act(x, zeros(N, device))\n a1 = act(x, ones(N, device))\n\n y1 = reshape_as(y, a0)\n y0 = 1 - y1\n \n neg_loss = ((a0 - 1) * y0).abs() + a1 * y0\n pos_loss = ((a1 - 1) * y1).abs() + a0 * y1\n\n return (neg_loss.sum() + pos_loss.sum()) / y.size(0)\n\n\nclass ForensicTransferLoss(object):\n def __init__(self, act_w: int, rec_w: int):\n self.act_w = act_w\n self.rec_w = rec_w\n\n def __call__(self, model_outs: Tuple[FloatTensor, FloatTensor],\n inputs: Batch) -> Dict[str, Tensor]:\n h, x_hat = model_outs\n x, y = inputs\n act_loss = activation_loss(h, y)\n rec_loss = F.l1_loss(x_hat, x, reduction='mean')\n total_loss = act_loss * self.act_w + rec_loss * self.rec_w\n out = dict(\n loss=total_loss,\n act_loss=act_loss,\n rec_loss=rec_loss)\n return out\n\n @staticmethod\n def keys():\n return ['loss', 'act_loss', 'rec_loss']\n\n\nclass TripleLoss(ForensicTransferLoss):\n def __init__(self, act_w: int, rec_w: int, bce_w: int):\n super(TripleLoss, self).__init__(act_w, rec_w)\n self.bce_w = bce_w\n\n def __call__(self, model_outs: ModelOut, inputs: Batch) -> Dict[str, Tensor]:\n h, x_hat, y_hat = model_outs\n x, y = inputs\n out = super().__call__((h, x_hat), inputs)\n bce_loss = F.binary_cross_entropy_with_logits(\n y_hat.squeeze(1), y.float())\n out['loss'] += bce_loss * self.bce_w\n out['bce_loss'] = bce_loss\n return out\n\n @staticmethod\n def keys():\n return ['loss', 'act_loss', 'rec_loss', 'bce_loss']\n\n\nclass BCELoss(object):\n def __call__(self, model_out: Tuple[Tensor, any], batch: Batch) -> Dict[str, Tensor]:\n y_hat = model_out[0]\n x, y = batch\n bce_loss = F.binary_cross_entropy_with_logits(\n y_hat.squeeze(1), y.float())\n out = dict(loss=bce_loss)\n return out\n\n @staticmethod\n def keys():\n return ['loss']\n\n\n# https://github.com/fastai/course-v3/blob/master/nbs/dl2/exp/nb_10b.py\ndef reduce_loss(loss, reduction='mean'):\n return loss.mean() if reduction == 'mean' else loss.sum() if reduction == 'sum' else loss\n\n\n# https://github.com/fastai/course-v3/blob/8faeb66c03fc6719c5a6cf4ef5befa79a424f838/nbs/dl2/exp/nb_09.py#L127\ndef lin_comb(v1, v2, beta): return beta*v1 + (1-beta)*v2\n\n\n# https://github.com/fastai/course-v3/blob/master/nbs/dl2/exp/nb_10b.py\nclass LabelSmoothingCrossEntropy(nn.Module):\n def __init__(self, ε: float = 0.1, reduction='mean'):\n super().__init__()\n self.ε = ε\n self.reduction = reduction\n\n def forward(self, output, target):\n c = output.size()[-1]\n log_preds = F.log_softmax(output, dim=-1)\n loss = reduce_loss(-log_preds.sum(dim=-1), self.reduction)\n nll = F.nll_loss(log_preds, target, reduction=self.reduction)\n return lin_comb(loss/c, nll, self.ε)\n\n\nclass 
SmoothBCELoss(object):\n def __init__(self, eps=0.1):\n self.func = LabelSmoothingCrossEntropy(eps)\n\n def __call__(self, model_out: Tuple[Tensor, any], batch: Batch) -> Dict[str, Tensor]:\n y_hat = model_out[0]\n x, y = batch\n y_hat = torch.cat([(1 - y_hat), y_hat], dim=1)\n bce_loss = self.func(y_hat, y)\n out = dict(loss=bce_loss)\n return out\n\n @staticmethod\n def keys():\n return ['loss']\n" ]
[ [ "torch.nn.functional.log_softmax", "torch.nn.functional.nll_loss", "torch.nn.functional.l1_loss", "torch.cat" ] ]
srisharaan/tensorflow
[ "c787e6cdbbf57434599a42bbdc5e4d4df98ed045" ]
[ "tensorflow/python/compat/compat.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utilities for API compatibility between TensorFlow release versions.\n\nSee [Version\nCompatibility](https://tensorflow.org/guide/version_compat#backward_forward)\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport datetime\nimport os\n\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util.tf_export import tf_export\n\n# This value changes every day with an automatic CL. It can be modified in code\n# via `forward_compatibility_horizon()` or with the environment variable\n# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.\n_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2020, 1, 28)\n_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = \"TF_FORWARD_COMPATIBILITY_DELTA_DAYS\"\n_FORWARD_COMPATIBILITY_DATE_NUMBER = None\n\n\ndef _date_to_date_number(year, month, day):\n return (year << 9) | (month << 5) | day\n\n\ndef _update_forward_compatibility_date_number(date_to_override=None):\n \"\"\"Update the base date to compare in forward_compatible function.\"\"\"\n\n global _FORWARD_COMPATIBILITY_DATE_NUMBER\n\n if date_to_override:\n date = date_to_override\n else:\n date = _FORWARD_COMPATIBILITY_HORIZON\n delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)\n if delta_days:\n date += datetime.timedelta(days=int(delta_days))\n\n _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(\n date.year, date.month, date.day)\n\n\n_update_forward_compatibility_date_number()\n\n\n@tf_export(\"compat.forward_compatible\")\ndef forward_compatible(year, month, day):\n \"\"\"Return true if the forward compatibility window has expired.\n\n See [Version\n compatibility](https://tensorflow.org/guide/version_compat#backward_forward).\n\n Forward-compatibility refers to scenarios where the producer of a TensorFlow\n model (a GraphDef or SavedModel) is compiled against a version of the\n TensorFlow library newer than what the consumer was compiled against. The\n \"producer\" is typically a Python program that constructs and trains a model\n while the \"consumer\" is typically another program that loads and serves the\n model.\n\n TensorFlow has been supporting a 3 week forward-compatibility window for\n programs compiled from source at HEAD.\n\n For example, consider the case where a new operation `MyNewAwesomeAdd` is\n created with the intent of replacing the implementation of an existing Python\n wrapper - `tf.add`. 
The Python wrapper implementation should change from\n something like:\n\n ```python\n def add(inputs, name=None):\n return gen_math_ops.add(inputs, name)\n ```\n\n to:\n\n ```python\n from tensorflow.python.compat import compat\n\n def add(inputs, name=None):\n if compat.forward_compatible(year, month, day):\n # Can use the awesome new implementation.\n return gen_math_ops.my_new_awesome_add(inputs, name)\n # To maintain forward compatibiltiy, use the old implementation.\n return gen_math_ops.add(inputs, name)\n ```\n\n Where `year`, `month`, and `day` specify the date beyond which binaries\n that consume a model are expected to have been updated to include the\n new operations. This date is typically at least 3 weeks beyond the date\n the code that adds the new operation is committed.\n\n Args:\n year: A year (e.g., 2018). Must be an `int`.\n month: A month (1 <= month <= 12) in year. Must be an `int`.\n day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an\n `int`.\n\n Returns:\n True if the caller can expect that serialized TensorFlow graphs produced\n can be consumed by programs that are compiled with the TensorFlow library\n source code after (year, month, day).\n \"\"\"\n return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(\n year, month, day)\n\n\n@tf_export(\"compat.forward_compatibility_horizon\")\n@tf_contextlib.contextmanager\ndef forward_compatibility_horizon(year, month, day):\n \"\"\"Context manager for testing forward compatibility of generated graphs.\n\n See [Version\n compatibility](https://tensorflow.org/guide/version_compat#backward_forward).\n\n To ensure forward compatibility of generated graphs (see `forward_compatible`)\n with older binaries, new features can be gated with:\n\n ```python\n if compat.forward_compatible(year=2018, month=08, date=01):\n generate_graph_with_new_features()\n else:\n generate_graph_so_older_binaries_can_consume_it()\n ```\n\n However, when adding new features, one may want to unittest it before\n the forward compatibility window expires. This context manager enables\n such tests. For example:\n\n ```python\n from tensorflow.python.compat import compat\n\n def testMyNewFeature(self):\n with compat.forward_compatibility_horizon(2018, 08, 02):\n # Test that generate_graph_with_new_features() has an effect\n ```\n\n Args:\n year: A year (e.g., 2018). Must be an `int`.\n month: A month (1 <= month <= 12) in year. Must be an `int`.\n day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an\n `int`.\n\n Yields:\n Nothing.\n \"\"\"\n try:\n _update_forward_compatibility_date_number(datetime.date(year, month, day))\n yield\n finally:\n _update_forward_compatibility_date_number()\n" ]
[ [ "tensorflow.python.util.tf_export.tf_export" ] ]
tkipf/gym-gridworld
[ "807c88373023dc4240e8688e2744ea3dccd560bc" ]
[ "utils.py" ]
[ "\"\"\"Utility functions.\"\"\"\n\nimport h5py\nimport numpy as np\n\nfrom torch.utils import data\n\n\ndef save_dict_h5py(data, fname):\n \"\"\"Save dictionary containing numpy arrays to h5py file.\"\"\"\n with h5py.File(fname, 'w') as hf:\n for key in data.keys():\n hf.create_dataset(key, data=data[key])\n\n\ndef load_dict_h5py(fname):\n \"\"\"Restore dictionary containing numpy arrays from h5py file.\"\"\"\n data = dict()\n with h5py.File(fname, 'r') as hf:\n for key in hf.keys():\n data[key] = hf[key][:]\n return data\n\n\ndef to_float(np_array):\n \"\"\"Convert numpy array to float32.\"\"\"\n return np.array(np_array, dtype=np.float32)\n\n\nclass TrajectoryDataset(data.Dataset):\n \"\"\"Create dataset of (o_t, a_t) trajectories from replay buffer.\"\"\"\n\n def __init__(self, hdf5_file):\n \"\"\"\n Args:\n hdf5_file (string): Path to the hdf5 file that contains experience\n buffer\n \"\"\"\n self.experience_buffer = load_dict_h5py(hdf5_file)\n\n def __len__(self):\n return len(self.experience_buffer['actions'])\n\n def __getitem__(self, idx):\n sample = {\n 'obs': to_float(self.experience_buffer['observations'][idx]),\n 'action': self.experience_buffer['actions'][idx],\n }\n return sample\n" ]
[ [ "numpy.array" ] ]
sanidhya-singh/ktrain
[ "f91f703e3ecd189c035a532590e6c6ec26a733a3" ]
[ "ktrain/tests/test_chinese_text.py" ]
[ "#!/usr/bin/env python3\n\"\"\"\nTests of ktrain text classification flows\n\"\"\"\nimport sys\nimport os\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\";\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\nsys.path.insert(0,'../..')\nimport IPython\nfrom unittest import TestCase, main, skip\nimport numpy as np\nimport ktrain\nfrom ktrain import text as txt\nTEST_DOC = '还好,床很大而且很干净,前台很友好,很满意,下次还来。'\n\nclass TestTextClassification(TestCase):\n\n\n def test_fasttext_chinese(self):\n trn, val, preproc = txt.texts_from_csv('./text_data/chinese_hotel_reviews.csv',\n 'content',\n label_columns = [\"pos\", \"neg\"],\n max_features=30000, maxlen=75,\n preprocess_mode='standard', sep='|')\n model = txt.text_classifier('fasttext', train_data=trn)\n learner = ktrain.get_learner(model, train_data=trn, val_data=val, batch_size=32)\n lr = 5e-3\n hist = learner.autofit(lr, 10)\n\n # test training results\n self.assertAlmostEqual(max(hist.history['lr']), lr)\n self.assertGreater(max(hist.history['val_acc']), 0.85)\n\n\n # test top losses\n obs = learner.top_losses(n=1, val_data=None)\n self.assertIn(obs[0][0], list(range(len(val[0]))))\n learner.view_top_losses(preproc=preproc, n=1, val_data=None)\n\n # test weight decay\n self.assertEqual(len(learner.get_weight_decay()), 2)\n self.assertEqual(learner.get_weight_decay()[0], None)\n learner.set_weight_decay(1e-4)\n self.assertAlmostEqual(learner.get_weight_decay()[0], 1e-4)\n\n # test load and save model\n learner.save_model('/tmp/test_model')\n learner.load_model('/tmp/test_model')\n\n # test validate\n cm = learner.validate()\n print(cm)\n for i, row in enumerate(cm):\n self.assertEqual(np.argmax(row), i)\n\n # test predictor\n p = ktrain.get_predictor(learner.model, preproc)\n self.assertEqual(p.predict([TEST_DOC])[0], 'pos')\n p.save('/tmp/test_predictor')\n p = ktrain.load_predictor('/tmp/test_predictor')\n self.assertEqual(p.predict(TEST_DOC), 'pos')\n self.assertEqual(np.argmax(p.predict_proba([TEST_DOC])[0]), 0)\n self.assertEqual(type(p.explain(TEST_DOC)), IPython.core.display.HTML)\n #self.assertEqual(type(p.explain(TEST_DOC)), type(None))\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.argmax" ] ]
SueSu-Wish/incubator-airflow
[ "5813c0c8e1e9832d403e5a8f5783d0cb77f2748c" ]
[ "airflow/www/views.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom past.builtins import basestring, unicode\n\nimport ast\nimport datetime as dt\nimport logging\nimport os\nimport pkg_resources\nimport socket\nfrom functools import wraps\nfrom datetime import timedelta\nimport copy\nimport math\nimport json\n#import bleach\nimport pendulum\nimport codecs\nfrom collections import defaultdict\n\nimport inspect\nfrom textwrap import dedent\nimport traceback\n\nimport sqlalchemy as sqla\nfrom sqlalchemy import or_, desc, and_, union_all\n\nfrom flask import (\n abort, redirect, url_for, request, Markup, Response, current_app, render_template,\n make_response)\nfrom flask_admin import BaseView, expose, AdminIndexView\nfrom flask_admin.contrib.sqla import ModelView\nfrom flask_admin.actions import action\nfrom flask_admin.babel import lazy_gettext\nfrom flask_admin.tools import iterdecode\nfrom flask import flash\nfrom flask._compat import PY2\n\nfrom jinja2.sandbox import ImmutableSandboxedEnvironment\nfrom jinja2 import escape\n\nimport markdown\nimport nvd3\n\nfrom wtforms import (\n Form, SelectField, TextAreaField, PasswordField,\n StringField, validators)\nfrom flask_admin.form.fields import DateTimeField\n\nfrom pygments import highlight, lexers\nfrom pygments.formatters import HtmlFormatter\n\nimport airflow\nfrom airflow import configuration as conf\nfrom airflow import models\nfrom airflow import settings\nfrom airflow.api.common.experimental.mark_tasks import set_dag_run_state\nfrom airflow.exceptions import AirflowException\nfrom airflow.settings import Session\nfrom airflow.models import XCom, DagRun\nfrom airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, SCHEDULER_DEPS\n\nfrom airflow.models import BaseOperator\nfrom airflow.operators.subdag_operator import SubDagOperator\n\nfrom airflow.utils import timezone\nfrom airflow.utils.json import json_ser\nfrom airflow.utils.state import State\nfrom airflow.utils.db import create_session, provide_session\nfrom airflow.utils.helpers import alchemy_to_dict\nfrom airflow.utils.dates import infer_time_unit, scale_time_units, parse_execution_date\nfrom airflow.utils.timezone import datetime\nfrom airflow.utils.net import get_hostname\nfrom airflow.www import utils as wwwutils\nfrom airflow.www.forms import DateTimeForm, DateTimeWithNumRunsForm\nfrom airflow.www.validators import GreaterEqualThan\n\nQUERY_LIMIT = 100000\nCHART_LIMIT = 200000\n\nUTF8_READER = codecs.getreader('utf-8')\n\ndagbag = models.DagBag(settings.DAGS_FOLDER)\n\nlogin_required = airflow.login.login_required\ncurrent_user = airflow.login.current_user\nlogout_user = airflow.login.logout_user\n\nFILTER_BY_OWNER = False\n\nPAGE_SIZE = conf.getint('webserver', 'page_size')\n\nif conf.getboolean('webserver', 'FILTER_BY_OWNER'):\n # filter_by_owner if authentication is enabled and filter_by_owner is true\n FILTER_BY_OWNER = not current_app.config['LOGIN_DISABLED']\n\n\ndef dag_link(v, c, m, p):\n if m.dag_id is None:\n return Markup()\n\n #dag_id = 
bleach.clean(m.dag_id)\n dag_id = m.dag_id\n url = url_for(\n 'airflow.graph',\n dag_id=dag_id,\n execution_date=m.execution_date)\n return Markup(\n '<a href=\"{}\">{}</a>'.format(url, dag_id))\n\n\ndef log_url_formatter(v, c, m, p):\n return Markup(\n '<a href=\"{m.log_url}\">'\n ' <span class=\"glyphicon glyphicon-book\" aria-hidden=\"true\">'\n '</span></a>').format(**locals())\n\n\ndef dag_run_link(v, c, m, p):\n #dag_id = bleach.clean(m.dag_id)\n dag_id = m.dag_id\n url = url_for(\n 'airflow.graph',\n dag_id=m.dag_id,\n run_id=m.run_id,\n execution_date=m.execution_date)\n return Markup('<a href=\"{url}\">{m.run_id}</a>'.format(**locals()))\n\n\ndef task_instance_link(v, c, m, p):\n #dag_id = bleach.clean(m.dag_id)\n dag_id = m.dag_id\n #task_id = bleach.clean(m.task_id)\n task_id = m.task_id\n url = url_for(\n 'airflow.task',\n dag_id=dag_id,\n task_id=task_id,\n execution_date=m.execution_date.isoformat())\n url_root = url_for(\n 'airflow.graph',\n dag_id=dag_id,\n root=task_id,\n execution_date=m.execution_date.isoformat())\n return Markup(\n \"\"\"\n <span style=\"white-space: nowrap;\">\n <a href=\"{url}\">{task_id}</a>\n <a href=\"{url_root}\" title=\"Filter on this task and upstream\">\n <span class=\"glyphicon glyphicon-filter\" style=\"margin-left: 0px;\"\n aria-hidden=\"true\"></span>\n </a>\n </span>\n \"\"\".format(**locals()))\n\n\ndef state_token(state):\n color = State.color(state)\n return Markup(\n '<span class=\"label\" style=\"background-color:{color};\">'\n '{state}</span>'.format(**locals()))\n\n\ndef parse_datetime_f(value):\n if not isinstance(value, dt.datetime):\n return value\n\n return timezone.make_aware(value)\n\n\ndef state_f(v, c, m, p):\n return state_token(m.state)\n\n\ndef duration_f(v, c, m, p):\n if m.end_date and m.duration:\n return timedelta(seconds=m.duration)\n\n\ndef datetime_f(v, c, m, p):\n attr = getattr(m, p)\n dttm = attr.isoformat() if attr else ''\n if timezone.utcnow().isoformat()[:4] == dttm[:4]:\n dttm = dttm[5:]\n return Markup(\"<nobr>{}</nobr>\".format(dttm))\n\n\ndef nobr_f(v, c, m, p):\n return Markup(\"<nobr>{}</nobr>\".format(getattr(m, p)))\n\n\ndef label_link(v, c, m, p):\n try:\n default_params = ast.literal_eval(m.default_params)\n except:\n default_params = {}\n url = url_for(\n 'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,\n **default_params)\n return Markup(\"<a href='{url}'>{m.label}</a>\".format(**locals()))\n\n\ndef pool_link(v, c, m, p):\n url = '/admin/taskinstance/?flt1_pool_equals=' + m.pool\n return Markup(\"<a href='{url}'>{m.pool}</a>\".format(**locals()))\n\n\ndef pygment_html_render(s, lexer=lexers.TextLexer):\n return highlight(\n s,\n lexer(),\n HtmlFormatter(linenos=True),\n )\n\n\ndef render(obj, lexer):\n out = \"\"\n if isinstance(obj, basestring):\n out += pygment_html_render(obj, lexer)\n elif isinstance(obj, (tuple, list)):\n for i, s in enumerate(obj):\n out += \"<div>List item #{}</div>\".format(i)\n out += \"<div>\" + pygment_html_render(s, lexer) + \"</div>\"\n elif isinstance(obj, dict):\n for k, v in obj.items():\n out += '<div>Dict item \"{}\"</div>'.format(k)\n out += \"<div>\" + pygment_html_render(v, lexer) + \"</div>\"\n return out\n\n\ndef wrapped_markdown(s):\n return '<div class=\"rich_doc\">' + markdown.markdown(s) + \"</div>\"\n\n\nattr_renderer = {\n 'bash_command': lambda x: render(x, lexers.BashLexer),\n 'hql': lambda x: render(x, lexers.SqlLexer),\n 'sql': lambda x: render(x, lexers.SqlLexer),\n 'doc': lambda x: render(x, lexers.TextLexer),\n 'doc_json': lambda x: 
render(x, lexers.JsonLexer),\n 'doc_rst': lambda x: render(x, lexers.RstLexer),\n 'doc_yaml': lambda x: render(x, lexers.YamlLexer),\n 'doc_md': wrapped_markdown,\n 'python_callable': lambda x: render(\n inspect.getsource(x), lexers.PythonLexer),\n}\n\n\ndef data_profiling_required(f):\n \"\"\"Decorator for views requiring data profiling access\"\"\"\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if (\n current_app.config['LOGIN_DISABLED'] or\n (not current_user.is_anonymous() and current_user.data_profiling())\n ):\n return f(*args, **kwargs)\n else:\n flash(\"This page requires data profiling privileges\", \"error\")\n return redirect(url_for('admin.index'))\n\n return decorated_function\n\n\ndef fused_slots(v, c, m, p):\n url = (\n '/admin/taskinstance/' +\n '?flt1_pool_equals=' + m.pool +\n '&flt2_state_equals=running')\n return Markup(\"<a href='{0}'>{1}</a>\".format(url, m.used_slots()))\n\n\ndef fqueued_slots(v, c, m, p):\n url = (\n '/admin/taskinstance/' +\n '?flt1_pool_equals=' + m.pool +\n '&flt2_state_equals=queued&sort=10&desc=1')\n return Markup(\"<a href='{0}'>{1}</a>\".format(url, m.queued_slots()))\n\n\ndef recurse_tasks(tasks, task_ids, dag_ids, task_id_to_dag):\n if isinstance(tasks, list):\n for task in tasks:\n recurse_tasks(task, task_ids, dag_ids, task_id_to_dag)\n return\n if isinstance(tasks, SubDagOperator):\n subtasks = tasks.subdag.tasks\n dag_ids.append(tasks.subdag.dag_id)\n for subtask in subtasks:\n if subtask.task_id not in task_ids:\n task_ids.append(subtask.task_id)\n task_id_to_dag[subtask.task_id] = tasks.subdag\n recurse_tasks(subtasks, task_ids, dag_ids, task_id_to_dag)\n if isinstance(tasks, BaseOperator):\n task_id_to_dag[tasks.task_id] = tasks.dag\n\n\ndef get_chart_height(dag):\n \"\"\"\n TODO(aoen): See [AIRFLOW-1263] We use the number of tasks in the DAG as a heuristic to\n approximate the size of generated chart (otherwise the charts are tiny and unreadable\n when DAGs have a large number of tasks). Ideally nvd3 should allow for dynamic-height\n charts, that is charts that take up space based on the size of the components within.\n \"\"\"\n return 600 + len(dag.tasks) * 10\n\n\nclass Airflow(BaseView):\n def is_visible(self):\n return False\n\n @expose('/')\n @login_required\n def index(self):\n return self.render('airflow/dags.html')\n\n @expose('/chart_data')\n @data_profiling_required\n @wwwutils.gzipped\n # @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key)\n def chart_data(self):\n from airflow import macros\n import pandas as pd\n if conf.getboolean('core', 'secure_mode'):\n abort(404)\n\n with create_session() as session:\n chart_id = request.args.get('chart_id')\n csv = request.args.get('csv') == \"true\"\n chart = session.query(models.Chart).filter_by(id=chart_id).first()\n db = session.query(\n models.Connection).filter_by(conn_id=chart.conn_id).first()\n\n payload = {\n \"state\": \"ERROR\",\n \"error\": \"\"\n }\n\n # Processing templated fields\n try:\n args = ast.literal_eval(chart.default_params)\n if type(args) is not type(dict()):\n raise AirflowException('Not a dict')\n except:\n args = {}\n payload['error'] += (\n \"Default params is not valid, string has to evaluate as \"\n \"a Python dictionary. 
\")\n\n request_dict = {k: request.args.get(k) for k in request.args}\n args.update(request_dict)\n args['macros'] = macros\n sandbox = ImmutableSandboxedEnvironment()\n sql = sandbox.from_string(chart.sql).render(**args)\n label = sandbox.from_string(chart.label).render(**args)\n payload['sql_html'] = Markup(highlight(\n sql,\n lexers.SqlLexer(), # Lexer call\n HtmlFormatter(noclasses=True))\n )\n payload['label'] = label\n\n pd.set_option('display.max_colwidth', 100)\n hook = db.get_hook()\n try:\n df = hook.get_pandas_df(\n wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type))\n df = df.fillna(0)\n except Exception as e:\n payload['error'] += \"SQL execution failed. Details: \" + str(e)\n\n if csv:\n return Response(\n response=df.to_csv(index=False),\n status=200,\n mimetype=\"application/text\")\n\n if not payload['error'] and len(df) == CHART_LIMIT:\n payload['warning'] = (\n \"Data has been truncated to {0}\"\n \" rows. Expect incomplete results.\").format(CHART_LIMIT)\n\n if not payload['error'] and len(df) == 0:\n payload['error'] += \"Empty result set. \"\n elif (\n not payload['error'] and\n chart.sql_layout == 'series' and\n chart.chart_type != \"datatable\" and\n len(df.columns) < 3):\n payload['error'] += \"SQL needs to return at least 3 columns. \"\n elif (\n not payload['error'] and\n chart.sql_layout == 'columns' and\n len(df.columns) < 2):\n payload['error'] += \"SQL needs to return at least 2 columns. \"\n elif not payload['error']:\n import numpy as np\n chart_type = chart.chart_type\n\n data = None\n if chart.show_datatable or chart_type == \"datatable\":\n data = df.to_dict(orient=\"split\")\n data['columns'] = [{'title': c} for c in data['columns']]\n payload['data'] = data\n\n # Trying to convert time to something Highcharts likes\n x_col = 1 if chart.sql_layout == 'series' else 0\n if chart.x_is_date:\n try:\n # From string to datetime\n df[df.columns[x_col]] = pd.to_datetime(\n df[df.columns[x_col]])\n df[df.columns[x_col]] = df[df.columns[x_col]].apply(\n lambda x: int(x.strftime(\"%s\")) * 1000)\n except Exception as e:\n payload['error'] = \"Time conversion failed\"\n\n if chart_type == 'datatable':\n payload['state'] = 'SUCCESS'\n return wwwutils.json_response(payload)\n else:\n if chart.sql_layout == 'series':\n # User provides columns (series, x, y)\n xaxis_label = df.columns[1]\n yaxis_label = df.columns[2]\n df[df.columns[2]] = df[df.columns[2]].astype(np.float)\n df = df.pivot_table(\n index=df.columns[1],\n columns=df.columns[0],\n values=df.columns[2], aggfunc=np.sum)\n else:\n # User provides columns (x, y, metric1, metric2, ...)\n xaxis_label = df.columns[0]\n yaxis_label = 'y'\n df.index = df[df.columns[0]]\n df = df.sort(df.columns[0])\n del df[df.columns[0]]\n for col in df.columns:\n df[col] = df[col].astype(np.float)\n\n df = df.fillna(0)\n NVd3ChartClass = chart_mapping.get(chart.chart_type)\n NVd3ChartClass = getattr(nvd3, NVd3ChartClass)\n nvd3_chart = NVd3ChartClass(x_is_date=chart.x_is_date)\n\n for col in df.columns:\n nvd3_chart.add_serie(name=col, y=df[col].tolist(), x=df[col].index.tolist())\n try:\n nvd3_chart.buildcontent()\n payload['chart_type'] = nvd3_chart.__class__.__name__\n payload['htmlcontent'] = nvd3_chart.htmlcontent\n except Exception as e:\n payload['error'] = str(e)\n\n payload['state'] = 'SUCCESS'\n payload['request_dict'] = request_dict\n return wwwutils.json_response(payload)\n\n @expose('/chart')\n @data_profiling_required\n def chart(self):\n if conf.getboolean('core', 'secure_mode'):\n abort(404)\n\n with 
create_session() as session:\n chart_id = request.args.get('chart_id')\n embed = request.args.get('embed')\n chart = session.query(models.Chart).filter_by(id=chart_id).first()\n\n NVd3ChartClass = chart_mapping.get(chart.chart_type)\n if not NVd3ChartClass:\n flash(\n \"Not supported anymore as the license was incompatible, \"\n \"sorry\",\n \"danger\")\n redirect('/admin/chart/')\n\n sql = \"\"\n if chart.show_sql:\n sql = Markup(highlight(\n chart.sql,\n lexers.SqlLexer(), # Lexer call\n HtmlFormatter(noclasses=True))\n )\n return self.render(\n 'airflow/nvd3.html',\n chart=chart,\n title=\"Airflow - Chart\",\n sql=sql,\n label=chart.label,\n embed=embed)\n\n @expose('/dag_stats')\n @login_required\n @provide_session\n def dag_stats(self, session=None):\n ds = models.DagStat\n\n ds.update(\n dag_ids=[dag.dag_id for dag in dagbag.dags.values() if not dag.is_subdag]\n )\n\n qry = (\n session.query(ds.dag_id, ds.state, ds.count)\n )\n\n data = {}\n for dag_id, state, count in qry:\n if dag_id not in data:\n data[dag_id] = {}\n data[dag_id][state] = count\n\n payload = {}\n for dag in dagbag.dags.values():\n payload[dag.safe_dag_id] = []\n for state in State.dag_states:\n try:\n count = data[dag.dag_id][state]\n except Exception:\n count = 0\n d = {\n 'state': state,\n 'count': count,\n 'dag_id': dag.dag_id,\n 'color': State.color(state)\n }\n payload[dag.safe_dag_id].append(d)\n return wwwutils.json_response(payload)\n\n @expose('/task_stats')\n @login_required\n @provide_session\n def task_stats(self, session=None):\n TI = models.TaskInstance\n DagRun = models.DagRun\n Dag = models.DagModel\n\n LastDagRun = (\n session.query(DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('execution_date'))\n .join(Dag, Dag.dag_id == DagRun.dag_id)\n .filter(DagRun.state != State.RUNNING)\n .filter(Dag.is_active == True)\n .filter(Dag.is_subdag == False)\n .group_by(DagRun.dag_id)\n .subquery('last_dag_run')\n )\n RunningDagRun = (\n session.query(DagRun.dag_id, DagRun.execution_date)\n .join(Dag, Dag.dag_id == DagRun.dag_id)\n .filter(DagRun.state == State.RUNNING)\n .filter(Dag.is_active == True)\n .filter(Dag.is_subdag == False)\n .subquery('running_dag_run')\n )\n\n # Select all task_instances from active dag_runs.\n # If no dag_run is active, return task instances from most recent dag_run.\n LastTI = (\n session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))\n .join(LastDagRun, and_(\n LastDagRun.c.dag_id == TI.dag_id,\n LastDagRun.c.execution_date == TI.execution_date))\n )\n RunningTI = (\n session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))\n .join(RunningDagRun, and_(\n RunningDagRun.c.dag_id == TI.dag_id,\n RunningDagRun.c.execution_date == TI.execution_date))\n )\n\n UnionTI = union_all(LastTI, RunningTI).alias('union_ti')\n qry = (\n session.query(UnionTI.c.dag_id, UnionTI.c.state, sqla.func.count())\n .group_by(UnionTI.c.dag_id, UnionTI.c.state)\n )\n\n data = {}\n for dag_id, state, count in qry:\n if dag_id not in data:\n data[dag_id] = {}\n data[dag_id][state] = count\n session.commit()\n\n payload = {}\n for dag in dagbag.dags.values():\n payload[dag.safe_dag_id] = []\n for state in State.task_states:\n try:\n count = data[dag.dag_id][state]\n except:\n count = 0\n d = {\n 'state': state,\n 'count': count,\n 'dag_id': dag.dag_id,\n 'color': State.color(state)\n }\n payload[dag.safe_dag_id].append(d)\n return wwwutils.json_response(payload)\n\n @expose('/code')\n @login_required\n def code(self):\n dag_id = request.args.get('dag_id')\n dag = 
dagbag.get_dag(dag_id)\n title = dag_id\n try:\n with open(dag.fileloc, 'r') as f:\n code = f.read()\n html_code = highlight(\n code, lexers.PythonLexer(), HtmlFormatter(linenos=True))\n except IOError as e:\n html_code = str(e)\n\n return self.render(\n 'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,\n root=request.args.get('root'),\n demo_mode=conf.getboolean('webserver', 'demo_mode'))\n\n @expose('/dag_details')\n @login_required\n @provide_session\n def dag_details(self, session=None):\n dag_id = request.args.get('dag_id')\n dag = dagbag.get_dag(dag_id)\n title = \"DAG details\"\n\n TI = models.TaskInstance\n states = (\n session.query(TI.state, sqla.func.count(TI.dag_id))\n .filter(TI.dag_id == dag_id)\n .group_by(TI.state)\n .all()\n )\n return self.render(\n 'airflow/dag_details.html',\n dag=dag, title=title, states=states, State=State)\n\n @current_app.errorhandler(404)\n def circles(self):\n return render_template(\n 'airflow/circles.html', hostname=get_hostname()), 404\n\n @current_app.errorhandler(500)\n def show_traceback(self):\n from airflow.utils import asciiart as ascii_\n return render_template(\n 'airflow/traceback.html',\n hostname=get_hostname(),\n nukular=ascii_.nukular,\n info=traceback.format_exc()), 500\n\n @expose('/noaccess')\n def noaccess(self):\n return self.render('airflow/noaccess.html')\n\n @expose('/pickle_info')\n @login_required\n def pickle_info(self):\n d = {}\n dag_id = request.args.get('dag_id')\n dags = [dagbag.dags.get(dag_id)] if dag_id else dagbag.dags.values()\n for dag in dags:\n if not dag.is_subdag:\n d[dag.dag_id] = dag.pickle_info()\n return wwwutils.json_response(d)\n\n @expose('/login', methods=['GET', 'POST'])\n def login(self):\n return airflow.login.login(self, request)\n\n @expose('/logout')\n def logout(self):\n logout_user()\n flash('You have been logged out.')\n return redirect(url_for('admin.index'))\n\n @expose('/rendered')\n @login_required\n @wwwutils.action_logging\n def rendered(self):\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n execution_date = request.args.get('execution_date')\n dttm = pendulum.parse(execution_date)\n form = DateTimeForm(data={'execution_date': dttm})\n dag = dagbag.get_dag(dag_id)\n task = copy.copy(dag.get_task(task_id))\n ti = models.TaskInstance(task=task, execution_date=dttm)\n try:\n ti.render_templates()\n except Exception as e:\n flash(\"Error rendering template: \" + str(e), \"error\")\n title = \"Rendered Template\"\n html_dict = {}\n for template_field in task.__class__.template_fields:\n content = getattr(task, template_field)\n if template_field in attr_renderer:\n html_dict[template_field] = attr_renderer[template_field](content)\n else:\n html_dict[template_field] = (\n \"<pre><code>\" + str(content) + \"</pre></code>\")\n\n return self.render(\n 'airflow/ti_code.html',\n html_dict=html_dict,\n dag=dag,\n task_id=task_id,\n execution_date=execution_date,\n form=form,\n title=title, )\n\n @expose('/log')\n @login_required\n @wwwutils.action_logging\n @provide_session\n def log(self, session=None):\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n execution_date = request.args.get('execution_date')\n dttm = pendulum.parse(execution_date)\n form = DateTimeForm(data={'execution_date': dttm})\n dag = dagbag.get_dag(dag_id)\n ti = session.query(models.TaskInstance).filter(\n models.TaskInstance.dag_id == dag_id,\n models.TaskInstance.task_id == task_id,\n models.TaskInstance.execution_date == dttm).first()\n if ti 
is None:\n logs = [\"*** Task instance did not exist in the DB\\n\"]\n else:\n logger = logging.getLogger('airflow.task')\n task_log_reader = conf.get('core', 'task_log_reader')\n handler = next((handler for handler in logger.handlers\n if handler.name == task_log_reader), None)\n try:\n ti.task = dag.get_task(ti.task_id)\n logs = handler.read(ti)\n except AttributeError as e:\n logs = [\"Task log handler {} does not support read logs.\\n{}\\n\" \\\n .format(task_log_reader, str(e))]\n\n for i, log in enumerate(logs):\n if PY2 and not isinstance(log, unicode):\n logs[i] = log.decode('utf-8')\n\n return self.render(\n 'airflow/ti_log.html',\n logs=logs, dag=dag, title=\"Log by attempts\", task_id=task_id,\n execution_date=execution_date, form=form)\n\n @expose('/task')\n @login_required\n @wwwutils.action_logging\n def task(self):\n TI = models.TaskInstance\n\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n # Carrying execution_date through, even though it's irrelevant for\n # this context\n execution_date = request.args.get('execution_date')\n dttm = pendulum.parse(execution_date)\n form = DateTimeForm(data={'execution_date': dttm})\n dag = dagbag.get_dag(dag_id)\n\n if not dag or task_id not in dag.task_ids:\n flash(\n \"Task [{}.{}] doesn't seem to exist\"\n \" at the moment\".format(dag_id, task_id),\n \"error\")\n return redirect('/admin/')\n task = copy.copy(dag.get_task(task_id))\n task.resolve_template_files()\n ti = TI(task=task, execution_date=dttm)\n ti.refresh_from_db()\n\n ti_attrs = []\n for attr_name in dir(ti):\n if not attr_name.startswith('_'):\n attr = getattr(ti, attr_name)\n if type(attr) != type(self.task):\n ti_attrs.append((attr_name, str(attr)))\n\n task_attrs = []\n for attr_name in dir(task):\n if not attr_name.startswith('_'):\n attr = getattr(task, attr_name)\n if type(attr) != type(self.task) and \\\n attr_name not in attr_renderer:\n task_attrs.append((attr_name, str(attr)))\n\n # Color coding the special attributes that are code\n special_attrs_rendered = {}\n for attr_name in attr_renderer:\n if hasattr(task, attr_name):\n source = getattr(task, attr_name)\n special_attrs_rendered[attr_name] = attr_renderer[attr_name](source)\n\n no_failed_deps_result = [(\n \"Unknown\",\n dedent(\"\"\"\\\n All dependencies are met but the task instance is not running. In most cases this just means that the task will probably be scheduled soon unless:<br/>\n - The scheduler is down or under heavy load<br/>\n {}\n <br/>\n If this task instance does not start soon please contact your Airflow \"\"\"\n \"\"\"administrator for assistance.\"\"\"\n .format(\n \"- This task instance already ran and had its state changed \"\n \"manually (e.g. 
cleared in the UI)<br/>\"\n if ti.state == State.NONE else \"\")))]\n\n # Use the scheduler's context to figure out which dependencies are not met\n dep_context = DepContext(SCHEDULER_DEPS)\n failed_dep_reasons = [(dep.dep_name, dep.reason) for dep in\n ti.get_failed_dep_statuses(\n dep_context=dep_context)]\n\n title = \"Task Instance Details\"\n return self.render(\n 'airflow/task.html',\n task_attrs=task_attrs,\n ti_attrs=ti_attrs,\n failed_dep_reasons=failed_dep_reasons or no_failed_deps_result,\n task_id=task_id,\n execution_date=execution_date,\n special_attrs_rendered=special_attrs_rendered,\n form=form,\n dag=dag, title=title)\n\n @expose('/xcom')\n @login_required\n @wwwutils.action_logging\n @provide_session\n def xcom(self, session=None):\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n # Carrying execution_date through, even though it's irrelevant for\n # this context\n execution_date = request.args.get('execution_date')\n dttm = pendulum.parse(execution_date)\n form = DateTimeForm(data={'execution_date': dttm})\n dag = dagbag.get_dag(dag_id)\n if not dag or task_id not in dag.task_ids:\n flash(\n \"Task [{}.{}] doesn't seem to exist\"\n \" at the moment\".format(dag_id, task_id),\n \"error\")\n return redirect('/admin/')\n\n xcomlist = session.query(XCom).filter(\n XCom.dag_id == dag_id, XCom.task_id == task_id,\n XCom.execution_date == dttm).all()\n\n attributes = []\n for xcom in xcomlist:\n if not xcom.key.startswith('_'):\n attributes.append((xcom.key, xcom.value))\n\n title = \"XCom\"\n return self.render(\n 'airflow/xcom.html',\n attributes=attributes,\n task_id=task_id,\n execution_date=execution_date,\n form=form,\n dag=dag, title=title)\n\n @expose('/run')\n @login_required\n @wwwutils.action_logging\n @wwwutils.notify_owner\n def run(self):\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n origin = request.args.get('origin')\n dag = dagbag.get_dag(dag_id)\n task = dag.get_task(task_id)\n\n execution_date = request.args.get('execution_date')\n execution_date = pendulum.parse(execution_date)\n ignore_all_deps = request.args.get('ignore_all_deps') == \"true\"\n ignore_task_deps = request.args.get('ignore_task_deps') == \"true\"\n ignore_ti_state = request.args.get('ignore_ti_state') == \"true\"\n\n try:\n from airflow.executors import GetDefaultExecutor\n from airflow.executors.celery_executor import CeleryExecutor\n executor = GetDefaultExecutor()\n if not isinstance(executor, CeleryExecutor):\n flash(\"Only works with the CeleryExecutor, sorry\", \"error\")\n return redirect(origin)\n except ImportError:\n # in case CeleryExecutor cannot be imported it is not active either\n flash(\"Only works with the CeleryExecutor, sorry\", \"error\")\n return redirect(origin)\n\n ti = models.TaskInstance(task=task, execution_date=execution_date)\n ti.refresh_from_db()\n\n # Make sure the task instance can be queued\n dep_context = DepContext(\n deps=QUEUE_DEPS,\n ignore_all_deps=ignore_all_deps,\n ignore_task_deps=ignore_task_deps,\n ignore_ti_state=ignore_ti_state)\n failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))\n if failed_deps:\n failed_deps_str = \", \".join(\n [\"{}: {}\".format(dep.dep_name, dep.reason) for dep in failed_deps])\n flash(\"Could not queue task instance for execution, dependencies not met: \"\n \"{}\".format(failed_deps_str),\n \"error\")\n return redirect(origin)\n\n executor.start()\n executor.queue_task_instance(\n ti,\n ignore_all_deps=ignore_all_deps,\n 
ignore_task_deps=ignore_task_deps,\n ignore_ti_state=ignore_ti_state)\n executor.heartbeat()\n flash(\n \"Sent {} to the message queue, \"\n \"it should start any moment now.\".format(ti))\n return redirect(origin)\n\n @expose('/trigger')\n @login_required\n @wwwutils.action_logging\n @wwwutils.notify_owner\n def trigger(self):\n dag_id = request.args.get('dag_id')\n origin = request.args.get('origin') or \"/admin/\"\n dag = dagbag.get_dag(dag_id)\n\n if not dag:\n flash(\"Cannot find dag {}\".format(dag_id))\n return redirect(origin)\n\n execution_date = timezone.utcnow()\n run_id = \"manual__{0}\".format(execution_date.isoformat())\n\n dr = DagRun.find(dag_id=dag_id, run_id=run_id)\n if dr:\n flash(\"This run_id {} already exists\".format(run_id))\n return redirect(origin)\n\n run_conf = {}\n\n dag.create_dagrun(\n run_id=run_id,\n execution_date=execution_date,\n state=State.RUNNING,\n conf=run_conf,\n external_trigger=True\n )\n\n flash(\n \"Triggered {}, \"\n \"it should start any moment now.\".format(dag_id))\n return redirect(origin)\n\n def _clear_dag_tis(self, dag, start_date, end_date, origin,\n recursive=False, confirmed=False):\n if confirmed:\n count = dag.clear(\n start_date=start_date,\n end_date=end_date,\n include_subdags=recursive)\n\n flash(\"{0} task instances have been cleared\".format(count))\n return redirect(origin)\n\n tis = dag.clear(\n start_date=start_date,\n end_date=end_date,\n include_subdags=recursive,\n dry_run=True)\n if not tis:\n flash(\"No task instances to clear\", 'error')\n response = redirect(origin)\n else:\n details = \"\\n\".join([str(t) for t in tis])\n\n response = self.render(\n 'airflow/confirm.html',\n message=(\"Here's the list of task instances you are about \"\n \"to clear:\"),\n details=details)\n\n return response\n\n @expose('/clear')\n @login_required\n @wwwutils.action_logging\n @wwwutils.notify_owner\n def clear(self):\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n origin = request.args.get('origin')\n dag = dagbag.get_dag(dag_id)\n\n execution_date = request.args.get('execution_date')\n execution_date = pendulum.parse(execution_date)\n confirmed = request.args.get('confirmed') == \"true\"\n upstream = request.args.get('upstream') == \"true\"\n downstream = request.args.get('downstream') == \"true\"\n future = request.args.get('future') == \"true\"\n past = request.args.get('past') == \"true\"\n recursive = request.args.get('recursive') == \"true\"\n\n dag = dag.sub_dag(\n task_regex=r\"^{0}$\".format(task_id),\n include_downstream=downstream,\n include_upstream=upstream)\n\n end_date = execution_date if not future else None\n start_date = execution_date if not past else None\n\n return self._clear_dag_tis(dag, start_date, end_date, origin,\n recursive=recursive, confirmed=confirmed)\n\n @expose('/dagrun_clear')\n @login_required\n @wwwutils.action_logging\n @wwwutils.notify_owner\n def dagrun_clear(self):\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n origin = request.args.get('origin')\n execution_date = request.args.get('execution_date')\n confirmed = request.args.get('confirmed') == \"true\"\n\n dag = dagbag.get_dag(dag_id)\n execution_date = pendulum.parse(execution_date)\n start_date = execution_date\n end_date = execution_date\n\n return self._clear_dag_tis(dag, start_date, end_date, origin,\n recursive=True, confirmed=confirmed)\n\n @expose('/blocked')\n @login_required\n @provide_session\n def blocked(self, session=None):\n DR = models.DagRun\n dags = (\n 
session.query(DR.dag_id, sqla.func.count(DR.id))\n .filter(DR.state == State.RUNNING)\n .group_by(DR.dag_id)\n .all()\n )\n payload = []\n for dag_id, active_dag_runs in dags:\n max_active_runs = 0\n if dag_id in dagbag.dags:\n max_active_runs = dagbag.dags[dag_id].max_active_runs\n payload.append({\n 'dag_id': dag_id,\n 'active_dag_run': active_dag_runs,\n 'max_active_runs': max_active_runs,\n })\n return wwwutils.json_response(payload)\n\n @expose('/dagrun_success')\n @login_required\n @wwwutils.action_logging\n @wwwutils.notify_owner\n def dagrun_success(self):\n dag_id = request.args.get('dag_id')\n execution_date = request.args.get('execution_date')\n confirmed = request.args.get('confirmed') == 'true'\n origin = request.args.get('origin')\n\n if not execution_date:\n flash('Invalid execution date', 'error')\n return redirect(origin)\n\n execution_date = pendulum.parse(execution_date)\n dag = dagbag.get_dag(dag_id)\n\n if not dag:\n flash('Cannot find DAG: {}'.format(dag_id), 'error')\n return redirect(origin)\n\n new_dag_state = set_dag_run_state(dag, execution_date, state=State.SUCCESS,\n commit=confirmed)\n\n if confirmed:\n flash('Marked success on {} task instances'.format(len(new_dag_state)))\n return redirect(origin)\n\n else:\n details = '\\n'.join([str(t) for t in new_dag_state])\n\n response = self.render('airflow/confirm.html',\n message=(\"Here's the list of task instances you are \"\n \"about to mark as successful:\"),\n details=details)\n\n return response\n\n @expose('/success')\n @login_required\n @wwwutils.action_logging\n @wwwutils.notify_owner\n def success(self):\n dag_id = request.args.get('dag_id')\n task_id = request.args.get('task_id')\n origin = request.args.get('origin')\n dag = dagbag.get_dag(dag_id)\n task = dag.get_task(task_id)\n task.dag = dag\n\n execution_date = request.args.get('execution_date')\n execution_date = pendulum.parse(execution_date)\n confirmed = request.args.get('confirmed') == \"true\"\n upstream = request.args.get('upstream') == \"true\"\n downstream = request.args.get('downstream') == \"true\"\n future = request.args.get('future') == \"true\"\n past = request.args.get('past') == \"true\"\n\n if not dag:\n flash(\"Cannot find DAG: {}\".format(dag_id))\n return redirect(origin)\n\n if not task:\n flash(\"Cannot find task {} in DAG {}\".format(task_id, dag.dag_id))\n return redirect(origin)\n\n from airflow.api.common.experimental.mark_tasks import set_state\n\n if confirmed:\n altered = set_state(task=task, execution_date=execution_date,\n upstream=upstream, downstream=downstream,\n future=future, past=past, state=State.SUCCESS,\n commit=True)\n\n flash(\"Marked success on {} task instances\".format(len(altered)))\n return redirect(origin)\n\n to_be_altered = set_state(task=task, execution_date=execution_date,\n upstream=upstream, downstream=downstream,\n future=future, past=past, state=State.SUCCESS,\n commit=False)\n\n details = \"\\n\".join([str(t) for t in to_be_altered])\n\n response = self.render(\"airflow/confirm.html\",\n message=(\"Here's the list of task instances you are \"\n \"about to mark as successful:\"),\n details=details)\n\n return response\n\n @expose('/tree')\n @login_required\n @wwwutils.gzipped\n @wwwutils.action_logging\n @provide_session\n def tree(self, session=None):\n dag_id = request.args.get('dag_id')\n blur = conf.getboolean('webserver', 'demo_mode')\n dag = dagbag.get_dag(dag_id)\n root = request.args.get('root')\n if root:\n dag = dag.sub_dag(\n task_regex=root,\n include_downstream=False,\n 
include_upstream=True)\n\n base_date = request.args.get('base_date')\n num_runs = request.args.get('num_runs')\n num_runs = int(num_runs) if num_runs else 25\n\n if base_date:\n base_date = timezone.parse(base_date)\n else:\n base_date = dag.latest_execution_date or timezone.utcnow()\n\n dates = dag.date_range(base_date, num=-abs(num_runs))\n min_date = dates[0] if dates else datetime(2000, 1, 1)\n\n DR = models.DagRun\n dag_runs = (\n session.query(DR)\n .filter(\n DR.dag_id == dag.dag_id,\n DR.execution_date <= base_date,\n DR.execution_date >= min_date)\n .all()\n )\n dag_runs = {\n dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs}\n\n dates = sorted(list(dag_runs.keys()))\n max_date = max(dates) if dates else None\n\n tis = dag.get_task_instances(\n session, start_date=min_date, end_date=base_date)\n task_instances = {}\n for ti in tis:\n tid = alchemy_to_dict(ti)\n dr = dag_runs.get(ti.execution_date)\n tid['external_trigger'] = dr['external_trigger'] if dr else False\n task_instances[(ti.task_id, ti.execution_date)] = tid\n\n expanded = []\n # The default recursion traces every path so that tree view has full\n # expand/collapse functionality. After 5,000 nodes we stop and fall\n # back on a quick DFS search for performance. See PR #320.\n node_count = [0]\n node_limit = 5000 / max(1, len(dag.roots))\n\n def recurse_nodes(task, visited):\n visited.add(task)\n node_count[0] += 1\n\n children = [\n recurse_nodes(t, visited) for t in task.upstream_list\n if node_count[0] < node_limit or t not in visited]\n\n # D3 tree uses children vs _children to define what is\n # expanded or not. The following block makes it such that\n # repeated nodes are collapsed by default.\n children_key = 'children'\n if task.task_id not in expanded:\n expanded.append(task.task_id)\n elif children:\n children_key = \"_children\"\n\n def set_duration(tid):\n if (isinstance(tid, dict) and tid.get(\"state\") == State.RUNNING and\n tid[\"start_date\"] is not None):\n d = timezone.utcnow() - pendulum.parse(tid[\"start_date\"])\n tid[\"duration\"] = d.total_seconds()\n return tid\n\n return {\n 'name': task.task_id,\n 'instances': [\n set_duration(task_instances.get((task.task_id, d))) or {\n 'execution_date': d.isoformat(),\n 'task_id': task.task_id\n }\n for d in dates],\n children_key: children,\n 'num_dep': len(task.upstream_list),\n 'operator': task.task_type,\n 'retries': task.retries,\n 'owner': task.owner,\n 'start_date': task.start_date,\n 'end_date': task.end_date,\n 'depends_on_past': task.depends_on_past,\n 'ui_color': task.ui_color,\n }\n\n data = {\n 'name': '[DAG]',\n 'children': [recurse_nodes(t, set()) for t in dag.roots],\n 'instances': [\n dag_runs.get(d) or {'execution_date': d.isoformat()}\n for d in dates],\n }\n\n data = json.dumps(data, indent=4, default=json_ser)\n session.commit()\n\n form = DateTimeWithNumRunsForm(data={'base_date': max_date,\n 'num_runs': num_runs})\n return self.render(\n 'airflow/tree.html',\n operators=sorted(\n list(set([op.__class__ for op in dag.tasks])),\n key=lambda x: x.__name__\n ),\n root=root,\n form=form,\n dag=dag, data=data, blur=blur)\n\n @expose('/graph')\n @login_required\n @wwwutils.gzipped\n @wwwutils.action_logging\n @provide_session\n def graph(self, session=None):\n dag_id = request.args.get('dag_id')\n blur = conf.getboolean('webserver', 'demo_mode')\n dag = dagbag.get_dag(dag_id)\n if dag_id not in dagbag.dags:\n flash('DAG \"{0}\" seems to be missing.'.format(dag_id), \"error\")\n return redirect('/admin/')\n\n root = 
request.args.get('root')\n if root:\n dag = dag.sub_dag(\n task_regex=root,\n include_upstream=True,\n include_downstream=False)\n\n arrange = request.args.get('arrange', dag.orientation)\n\n nodes = []\n edges = []\n for task in dag.tasks:\n nodes.append({\n 'id': task.task_id,\n 'value': {\n 'label': task.task_id,\n 'labelStyle': \"fill:{0};\".format(task.ui_fgcolor),\n 'style': \"fill:{0};\".format(task.ui_color),\n }\n })\n\n def get_upstream(task):\n for t in task.upstream_list:\n edge = {\n 'u': t.task_id,\n 'v': task.task_id,\n }\n if edge not in edges:\n edges.append(edge)\n get_upstream(t)\n\n for t in dag.roots:\n get_upstream(t)\n\n dttm = request.args.get('execution_date')\n if dttm:\n dttm = pendulum.parse(dttm)\n else:\n dttm = dag.latest_execution_date or timezone.utcnow()\n\n DR = models.DagRun\n drs = (\n session.query(DR)\n .filter_by(dag_id=dag_id)\n .order_by(desc(DR.execution_date)).all()\n )\n dr_choices = []\n dr_state = None\n for dr in drs:\n dr_choices.append((dr.execution_date.isoformat(), dr.run_id))\n if dttm == dr.execution_date:\n dr_state = dr.state\n\n class GraphForm(Form):\n execution_date = SelectField(\"DAG run\", choices=dr_choices)\n arrange = SelectField(\"Layout\", choices=(\n ('LR', \"Left->Right\"),\n ('RL', \"Right->Left\"),\n ('TB', \"Top->Bottom\"),\n ('BT', \"Bottom->Top\"),\n ))\n\n form = GraphForm(\n data={'execution_date': dttm.isoformat(), 'arrange': arrange})\n\n task_instances = {\n ti.task_id: alchemy_to_dict(ti)\n for ti in dag.get_task_instances(session, dttm, dttm)}\n tasks = {\n t.task_id: {\n 'dag_id': t.dag_id,\n 'task_type': t.task_type,\n }\n for t in dag.tasks}\n if not tasks:\n flash(\"No tasks found\", \"error\")\n session.commit()\n doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') and dag.doc_md else ''\n\n return self.render(\n 'airflow/graph.html',\n dag=dag,\n form=form,\n width=request.args.get('width', \"100%\"),\n height=request.args.get('height', \"800\"),\n execution_date=dttm.isoformat(),\n state_token=state_token(dr_state),\n doc_md=doc_md,\n arrange=arrange,\n operators=sorted(\n list(set([op.__class__ for op in dag.tasks])),\n key=lambda x: x.__name__\n ),\n blur=blur,\n root=root or '',\n task_instances=json.dumps(task_instances, indent=2),\n tasks=json.dumps(tasks, indent=2),\n nodes=json.dumps(nodes, indent=2),\n edges=json.dumps(edges, indent=2), )\n\n @expose('/duration')\n @login_required\n @wwwutils.action_logging\n @provide_session\n def duration(self, session=None):\n dag_id = request.args.get('dag_id')\n dag = dagbag.get_dag(dag_id)\n base_date = request.args.get('base_date')\n num_runs = request.args.get('num_runs')\n num_runs = int(num_runs) if num_runs else 25\n\n if base_date:\n base_date = pendulum.parse(base_date)\n else:\n base_date = dag.latest_execution_date or timezone.utcnow()\n\n dates = dag.date_range(base_date, num=-abs(num_runs))\n min_date = dates[0] if dates else datetime(2000, 1, 1)\n\n root = request.args.get('root')\n if root:\n dag = dag.sub_dag(\n task_regex=root,\n include_upstream=True,\n include_downstream=False)\n\n chart_height = get_chart_height(dag)\n chart = nvd3.lineChart(\n name=\"lineChart\", x_is_date=True, height=chart_height, width=\"1200\")\n cum_chart = nvd3.lineChart(\n name=\"cumLineChart\", x_is_date=True, height=chart_height, width=\"1200\")\n\n y = defaultdict(list)\n x = defaultdict(list)\n cum_y = defaultdict(list)\n\n tis = dag.get_task_instances(\n session, start_date=min_date, end_date=base_date)\n TF = models.TaskFail\n ti_fails = (\n 
session\n .query(TF)\n .filter(\n TF.dag_id == dag.dag_id,\n TF.execution_date >= min_date,\n TF.execution_date <= base_date,\n TF.task_id.in_([t.task_id for t in dag.tasks]))\n .all()\n )\n\n fails_totals = defaultdict(int)\n for tf in ti_fails:\n dict_key = (tf.dag_id, tf.task_id, tf.execution_date)\n fails_totals[dict_key] += tf.duration\n\n for ti in tis:\n if ti.duration:\n dttm = wwwutils.epoch(ti.execution_date)\n x[ti.task_id].append(dttm)\n y[ti.task_id].append(float(ti.duration))\n fails_dict_key = (ti.dag_id, ti.task_id, ti.execution_date)\n fails_total = fails_totals[fails_dict_key]\n cum_y[ti.task_id].append(float(ti.duration + fails_total))\n\n # determine the most relevant time unit for the set of task instance\n # durations for the DAG\n y_unit = infer_time_unit([d for t in y.values() for d in t])\n cum_y_unit = infer_time_unit([d for t in cum_y.values() for d in t])\n # update the y Axis on both charts to have the correct time units\n chart.create_y_axis('yAxis', format='.02f', custom_format=False,\n label='Duration ({})'.format(y_unit))\n chart.axislist['yAxis']['axisLabelDistance'] = '40'\n cum_chart.create_y_axis('yAxis', format='.02f', custom_format=False,\n label='Duration ({})'.format(cum_y_unit))\n cum_chart.axislist['yAxis']['axisLabelDistance'] = '40'\n for task in dag.tasks:\n if x[task.task_id]:\n chart.add_serie(name=task.task_id, x=x[task.task_id],\n y=scale_time_units(y[task.task_id], y_unit))\n cum_chart.add_serie(name=task.task_id, x=x[task.task_id],\n y=scale_time_units(cum_y[task.task_id],\n cum_y_unit))\n\n dates = sorted(list({ti.execution_date for ti in tis}))\n max_date = max([ti.execution_date for ti in tis]) if dates else None\n\n session.commit()\n\n form = DateTimeWithNumRunsForm(data={'base_date': max_date,\n 'num_runs': num_runs})\n chart.buildcontent()\n cum_chart.buildcontent()\n s_index = cum_chart.htmlcontent.rfind('});')\n cum_chart.htmlcontent = (cum_chart.htmlcontent[:s_index] +\n \"$(function() {$( document ).trigger('chartload') })\" +\n cum_chart.htmlcontent[s_index:])\n\n return self.render(\n 'airflow/duration_chart.html',\n dag=dag,\n demo_mode=conf.getboolean('webserver', 'demo_mode'),\n root=root,\n form=form,\n chart=chart.htmlcontent,\n cum_chart=cum_chart.htmlcontent\n )\n\n @expose('/tries')\n @login_required\n @wwwutils.action_logging\n @provide_session\n def tries(self, session=None):\n dag_id = request.args.get('dag_id')\n dag = dagbag.get_dag(dag_id)\n base_date = request.args.get('base_date')\n num_runs = request.args.get('num_runs')\n num_runs = int(num_runs) if num_runs else 25\n\n if base_date:\n base_date = pendulum.parse(base_date)\n else:\n base_date = dag.latest_execution_date or timezone.utcnow()\n\n dates = dag.date_range(base_date, num=-abs(num_runs))\n min_date = dates[0] if dates else datetime(2000, 1, 1)\n\n root = request.args.get('root')\n if root:\n dag = dag.sub_dag(\n task_regex=root,\n include_upstream=True,\n include_downstream=False)\n\n chart_height = get_chart_height(dag)\n chart = nvd3.lineChart(\n name=\"lineChart\", x_is_date=True, y_axis_format='d', height=chart_height,\n width=\"1200\")\n\n for task in dag.tasks:\n y = []\n x = []\n for ti in task.get_task_instances(session, start_date=min_date,\n end_date=base_date):\n dttm = wwwutils.epoch(ti.execution_date)\n x.append(dttm)\n y.append(ti.try_number)\n if x:\n chart.add_serie(name=task.task_id, x=x, y=y)\n\n tis = dag.get_task_instances(\n session, start_date=min_date, end_date=base_date)\n tries = sorted(list({ti.try_number for ti in 
tis}))\n max_date = max([ti.execution_date for ti in tis]) if tries else None\n\n session.commit()\n\n form = DateTimeWithNumRunsForm(data={'base_date': max_date,\n 'num_runs': num_runs})\n\n chart.buildcontent()\n\n return self.render(\n 'airflow/chart.html',\n dag=dag,\n demo_mode=conf.getboolean('webserver', 'demo_mode'),\n root=root,\n form=form,\n chart=chart.htmlcontent\n )\n\n @expose('/landing_times')\n @login_required\n @wwwutils.action_logging\n @provide_session\n def landing_times(self, session=None):\n dag_id = request.args.get('dag_id')\n dag = dagbag.get_dag(dag_id)\n base_date = request.args.get('base_date')\n num_runs = request.args.get('num_runs')\n num_runs = int(num_runs) if num_runs else 25\n\n if base_date:\n base_date = pendulum.parse(base_date)\n else:\n base_date = dag.latest_execution_date or timezone.utcnow()\n\n dates = dag.date_range(base_date, num=-abs(num_runs))\n min_date = dates[0] if dates else datetime(2000, 1, 1)\n\n root = request.args.get('root')\n if root:\n dag = dag.sub_dag(\n task_regex=root,\n include_upstream=True,\n include_downstream=False)\n\n chart_height = get_chart_height(dag)\n chart = nvd3.lineChart(\n name=\"lineChart\", x_is_date=True, height=chart_height, width=\"1200\")\n y = {}\n x = {}\n for task in dag.tasks:\n y[task.task_id] = []\n x[task.task_id] = []\n for ti in task.get_task_instances(session, start_date=min_date,\n end_date=base_date):\n ts = ti.execution_date\n if dag.schedule_interval and dag.following_schedule(ts):\n ts = dag.following_schedule(ts)\n if ti.end_date:\n dttm = wwwutils.epoch(ti.execution_date)\n secs = (ti.end_date - ts).total_seconds()\n x[ti.task_id].append(dttm)\n y[ti.task_id].append(secs)\n\n # determine the most relevant time unit for the set of landing times\n # for the DAG\n y_unit = infer_time_unit([d for t in y.values() for d in t])\n # update the y Axis to have the correct time units\n chart.create_y_axis('yAxis', format='.02f', custom_format=False,\n label='Landing Time ({})'.format(y_unit))\n chart.axislist['yAxis']['axisLabelDistance'] = '40'\n for task in dag.tasks:\n if x[task.task_id]:\n chart.add_serie(name=task.task_id, x=x[task.task_id],\n y=scale_time_units(y[task.task_id], y_unit))\n\n tis = dag.get_task_instances(\n session, start_date=min_date, end_date=base_date)\n dates = sorted(list({ti.execution_date for ti in tis}))\n max_date = max([ti.execution_date for ti in tis]) if dates else None\n\n form = DateTimeWithNumRunsForm(data={'base_date': max_date,\n 'num_runs': num_runs})\n chart.buildcontent()\n return self.render(\n 'airflow/chart.html',\n dag=dag,\n chart=chart.htmlcontent,\n height=str(chart_height + 100) + \"px\",\n demo_mode=conf.getboolean('webserver', 'demo_mode'),\n root=root,\n form=form,\n )\n\n @expose('/paused', methods=['POST'])\n @login_required\n @wwwutils.action_logging\n @provide_session\n def paused(self, session=None):\n DagModel = models.DagModel\n dag_id = request.args.get('dag_id')\n orm_dag = session.query(\n DagModel).filter(DagModel.dag_id == dag_id).first()\n if request.args.get('is_paused') == 'false':\n orm_dag.is_paused = True\n else:\n orm_dag.is_paused = False\n session.merge(orm_dag)\n session.commit()\n\n dagbag.get_dag(dag_id)\n return \"OK\"\n\n @expose('/refresh')\n @login_required\n @wwwutils.action_logging\n @provide_session\n def refresh(self, session=None):\n DagModel = models.DagModel\n dag_id = request.args.get('dag_id')\n orm_dag = session.query(\n DagModel).filter(DagModel.dag_id == dag_id).first()\n\n if orm_dag:\n 
orm_dag.last_expired = timezone.utcnow()\n session.merge(orm_dag)\n session.commit()\n\n dagbag.get_dag(dag_id)\n flash(\"DAG [{}] is now fresh as a daisy\".format(dag_id))\n return redirect(request.referrer)\n\n @expose('/refresh_all')\n @login_required\n @wwwutils.action_logging\n def refresh_all(self):\n dagbag.collect_dags(only_if_updated=False)\n flash(\"All DAGs are now up to date\")\n return redirect('/')\n\n @expose('/gantt')\n @login_required\n @wwwutils.action_logging\n @provide_session\n def gantt(self, session=None):\n dag_id = request.args.get('dag_id')\n dag = dagbag.get_dag(dag_id)\n demo_mode = conf.getboolean('webserver', 'demo_mode')\n\n root = request.args.get('root')\n if root:\n dag = dag.sub_dag(\n task_regex=root,\n include_upstream=True,\n include_downstream=False)\n\n dttm = request.args.get('execution_date')\n if dttm:\n dttm = pendulum.parse(dttm)\n else:\n dttm = dag.latest_execution_date or timezone.utcnow()\n\n form = DateTimeForm(data={'execution_date': dttm})\n\n tis = [\n ti for ti in dag.get_task_instances(session, dttm, dttm)\n if ti.start_date]\n tis = sorted(tis, key=lambda ti: ti.start_date)\n\n tasks = []\n for ti in tis:\n end_date = ti.end_date if ti.end_date else timezone.utcnow()\n tasks.append({\n 'startDate': wwwutils.epoch(ti.start_date),\n 'endDate': wwwutils.epoch(end_date),\n 'isoStart': ti.start_date.isoformat()[:-4],\n 'isoEnd': end_date.isoformat()[:-4],\n 'taskName': ti.task_id,\n 'duration': \"{}\".format(end_date - ti.start_date)[:-4],\n 'status': ti.state,\n 'executionDate': ti.execution_date.isoformat(),\n })\n states = {ti.state: ti.state for ti in tis}\n data = {\n 'taskNames': [ti.task_id for ti in tis],\n 'tasks': tasks,\n 'taskStatus': states,\n 'height': len(tis) * 25 + 25,\n }\n\n session.commit()\n\n return self.render(\n 'airflow/gantt.html',\n dag=dag,\n execution_date=dttm.isoformat(),\n form=form,\n data=json.dumps(data, indent=2),\n base_date='',\n demo_mode=demo_mode,\n root=root,\n )\n\n @expose('/object/task_instances')\n @login_required\n @wwwutils.action_logging\n @provide_session\n def task_instances(self, session=None):\n dag_id = request.args.get('dag_id')\n dag = dagbag.get_dag(dag_id)\n\n dttm = request.args.get('execution_date')\n if dttm:\n dttm = pendulum.parse(dttm)\n else:\n return (\"Error: Invalid execution_date\")\n\n task_instances = {\n ti.task_id: alchemy_to_dict(ti)\n for ti in dag.get_task_instances(session, dttm, dttm)}\n\n return json.dumps(task_instances)\n\n @expose('/variables/<form>', methods=[\"GET\", \"POST\"])\n @login_required\n @wwwutils.action_logging\n def variables(self, form):\n try:\n if request.method == 'POST':\n data = request.json\n if data:\n with create_session() as session:\n var = models.Variable(key=form, val=json.dumps(data))\n session.add(var)\n session.commit()\n return \"\"\n else:\n return self.render(\n 'airflow/variables/{}.html'.format(form)\n )\n except:\n # prevent XSS\n form = escape(form)\n return (\"Error: form airflow/variables/{}.html \"\n \"not found.\").format(form), 404\n\n @expose('/varimport', methods=[\"GET\", \"POST\"])\n @login_required\n @wwwutils.action_logging\n def varimport(self):\n try:\n d = json.load(UTF8_READER(request.files['file']))\n except Exception as e:\n flash(\"Missing file or syntax error: {}.\".format(e))\n else:\n for k, v in d.items():\n models.Variable.set(k, v, serialize_json=isinstance(v, dict))\n flash(\"{} variable(s) successfully updated.\".format(len(d)))\n return redirect('/admin/variable')\n\n\nclass 
HomeView(AdminIndexView):\n @expose(\"/\")\n @login_required\n @provide_session\n def index(self, session=None):\n DM = models.DagModel\n\n # restrict the dags shown if filter_by_owner and current user is not superuser\n do_filter = FILTER_BY_OWNER and (not current_user.is_superuser())\n owner_mode = conf.get('webserver', 'OWNER_MODE').strip().lower()\n\n hide_paused_dags_by_default = conf.getboolean('webserver',\n 'hide_paused_dags_by_default')\n show_paused_arg = request.args.get('showPaused', 'None')\n\n def get_int_arg(value, default=0):\n try:\n return int(value)\n except ValueError:\n return default\n\n arg_current_page = request.args.get('page', '0')\n arg_search_query = request.args.get('search', None)\n\n dags_per_page = PAGE_SIZE\n current_page = get_int_arg(arg_current_page, default=0)\n\n if show_paused_arg.strip().lower() == 'false':\n hide_paused = True\n elif show_paused_arg.strip().lower() == 'true':\n hide_paused = False\n else:\n hide_paused = hide_paused_dags_by_default\n\n # read orm_dags from the db\n sql_query = session.query(DM)\n\n if do_filter and owner_mode == 'ldapgroup':\n sql_query = sql_query.filter(\n ~DM.is_subdag,\n DM.is_active,\n DM.owners.in_(current_user.ldap_groups)\n )\n elif do_filter and owner_mode == 'user':\n sql_query = sql_query.filter(\n ~DM.is_subdag, DM.is_active,\n DM.owners == current_user.user.username\n )\n else:\n sql_query = sql_query.filter(\n ~DM.is_subdag, DM.is_active\n )\n\n # optionally filter out \"paused\" dags\n if hide_paused:\n sql_query = sql_query.filter(~DM.is_paused)\n\n orm_dags = {dag.dag_id: dag for dag\n in sql_query\n .all()}\n\n import_errors = session.query(models.ImportError).all()\n for ie in import_errors:\n flash(\n \"Broken DAG: [{ie.filename}] {ie.stacktrace}\".format(ie=ie),\n \"error\")\n\n # get a list of all non-subdag dags visible to everyone\n # optionally filter out \"paused\" dags\n if hide_paused:\n unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if\n not dag.parent_dag and not dag.is_paused]\n\n else:\n unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if\n not dag.parent_dag]\n\n # optionally filter to get only dags that the user should see\n if do_filter and owner_mode == 'ldapgroup':\n # only show dags owned by someone in @current_user.ldap_groups\n webserver_dags = {\n dag.dag_id: dag\n for dag in unfiltered_webserver_dags\n if dag.owner in current_user.ldap_groups\n }\n elif do_filter and owner_mode == 'user':\n # only show dags owned by @current_user.user.username\n webserver_dags = {\n dag.dag_id: dag\n for dag in unfiltered_webserver_dags\n if dag.owner == current_user.user.username\n }\n else:\n webserver_dags = {\n dag.dag_id: dag\n for dag in unfiltered_webserver_dags\n }\n\n if arg_search_query:\n lower_search_query = arg_search_query.lower()\n # filter by dag_id\n webserver_dags_filtered = {\n dag_id: dag\n for dag_id, dag in webserver_dags.items()\n if (lower_search_query in dag_id.lower() or\n lower_search_query in dag.owner.lower())\n }\n\n all_dag_ids = (set([dag.dag_id for dag in orm_dags.values()\n if lower_search_query in dag.dag_id.lower() or\n lower_search_query in dag.owners.lower()]) |\n set(webserver_dags_filtered.keys()))\n\n sorted_dag_ids = sorted(all_dag_ids)\n else:\n webserver_dags_filtered = webserver_dags\n sorted_dag_ids = sorted(set(orm_dags.keys()) | set(webserver_dags.keys()))\n\n start = current_page * dags_per_page\n end = start + dags_per_page\n\n num_of_all_dags = len(sorted_dag_ids)\n page_dag_ids = sorted_dag_ids[start:end]\n 
num_of_pages = int(math.ceil(num_of_all_dags / float(dags_per_page)))\n\n auto_complete_data = set()\n for dag in webserver_dags_filtered.values():\n auto_complete_data.add(dag.dag_id)\n auto_complete_data.add(dag.owner)\n for dag in orm_dags.values():\n auto_complete_data.add(dag.dag_id)\n auto_complete_data.add(dag.owners)\n\n return self.render(\n 'airflow/dags.html',\n webserver_dags=webserver_dags_filtered,\n orm_dags=orm_dags,\n hide_paused=hide_paused,\n current_page=current_page,\n search_query=arg_search_query if arg_search_query else '',\n page_size=dags_per_page,\n num_of_pages=num_of_pages,\n num_dag_from=start + 1,\n num_dag_to=min(end, num_of_all_dags),\n num_of_all_dags=num_of_all_dags,\n paging=wwwutils.generate_pages(current_page, num_of_pages,\n search=arg_search_query,\n showPaused=not hide_paused),\n dag_ids_in_page=page_dag_ids,\n auto_complete_data=auto_complete_data)\n\n\nclass QueryView(wwwutils.DataProfilingMixin, BaseView):\n @expose('/', methods=['POST', 'GET'])\n @wwwutils.gzipped\n @provide_session\n def query(self, session=None):\n dbs = session.query(models.Connection).order_by(\n models.Connection.conn_id).all()\n session.expunge_all()\n db_choices = list(\n ((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))\n conn_id_str = request.form.get('conn_id')\n csv = request.form.get('csv') == \"true\"\n sql = request.form.get('sql')\n\n class QueryForm(Form):\n conn_id = SelectField(\"Layout\", choices=db_choices)\n sql = TextAreaField(\"SQL\", widget=wwwutils.AceEditorWidget())\n\n data = {\n 'conn_id': conn_id_str,\n 'sql': sql,\n }\n results = None\n has_data = False\n error = False\n if conn_id_str:\n db = [db for db in dbs if db.conn_id == conn_id_str][0]\n hook = db.get_hook()\n try:\n df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT, conn_type=db.conn_type))\n # df = hook.get_pandas_df(sql)\n has_data = len(df) > 0\n df = df.fillna('')\n results = df.to_html(\n classes=[\n 'table', 'table-bordered', 'table-striped', 'no-wrap'],\n index=False,\n na_rep='',\n ) if has_data else ''\n except Exception as e:\n flash(str(e), 'error')\n error = True\n\n if has_data and len(df) == QUERY_LIMIT:\n flash(\n \"Query output truncated at \" + str(QUERY_LIMIT) +\n \" rows\", 'info')\n\n if not has_data and error:\n flash('No data', 'error')\n\n if csv:\n return Response(\n response=df.to_csv(index=False),\n status=200,\n mimetype=\"application/text\")\n\n form = QueryForm(request.form, data=data)\n session.commit()\n return self.render(\n 'airflow/query.html', form=form,\n title=\"Ad Hoc Query\",\n results=results or '',\n has_data=has_data)\n\n\nclass AirflowModelView(ModelView):\n list_template = 'airflow/model_list.html'\n edit_template = 'airflow/model_edit.html'\n create_template = 'airflow/model_create.html'\n column_display_actions = True\n page_size = PAGE_SIZE\n\n\nclass ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):\n \"\"\"\n Modifying the base ModelView class for non edit, browse only operations\n \"\"\"\n named_filter_urls = True\n can_create = False\n can_edit = False\n can_delete = False\n column_display_pk = True\n\n\nclass PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):\n column_list = ('pool', 'slots', 'used_slots', 'queued_slots')\n column_formatters = dict(\n pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)\n named_filter_urls = True\n form_args = {\n 'pool': {\n 'validators': [\n validators.DataRequired(),\n ]\n }\n }\n\n\nclass SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly):\n 
verbose_name_plural = \"SLA misses\"\n verbose_name = \"SLA miss\"\n column_list = (\n 'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')\n column_formatters = dict(\n task_id=task_instance_link,\n execution_date=datetime_f,\n timestamp=datetime_f,\n dag_id=dag_link)\n named_filter_urls = True\n column_searchable_list = ('dag_id', 'task_id',)\n column_filters = (\n 'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')\n filter_converter = wwwutils.UtcFilterConverter()\n form_widget_args = {\n 'email_sent': {'disabled': True},\n 'timestamp': {'disabled': True},\n }\n\n\n@provide_session\ndef _connection_ids(session=None):\n return [\n (c.conn_id, c.conn_id)\n for c in (\n session.query(models.Connection.conn_id)\n .group_by(models.Connection.conn_id)\n )\n ]\n\n\nclass ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):\n verbose_name = \"chart\"\n verbose_name_plural = \"charts\"\n form_columns = (\n 'label',\n 'owner',\n 'conn_id',\n 'chart_type',\n 'show_datatable',\n 'x_is_date',\n 'y_log_scale',\n 'show_sql',\n 'height',\n 'sql_layout',\n 'sql',\n 'default_params',\n )\n column_list = (\n 'label',\n 'conn_id',\n 'chart_type',\n 'owner',\n 'last_modified',\n )\n column_sortable_list = (\n 'label',\n 'conn_id',\n 'chart_type',\n ('owner', 'owner.username'),\n 'last_modified',\n )\n column_formatters = dict(label=label_link, last_modified=datetime_f)\n column_default_sort = ('last_modified', True)\n create_template = 'airflow/chart/create.html'\n edit_template = 'airflow/chart/edit.html'\n column_filters = ('label', 'owner.username', 'conn_id')\n column_searchable_list = ('owner.username', 'label', 'sql')\n column_descriptions = {\n 'label': \"Can include {{ templated_fields }} and {{ macros }}\",\n 'chart_type': \"The type of chart to be displayed\",\n 'sql': \"Can include {{ templated_fields }} and {{ macros }}.\",\n 'height': \"Height of the chart, in pixels.\",\n 'conn_id': \"Source database to run the query against\",\n 'x_is_date': (\n \"Whether the X axis should be casted as a date field. Expect most \"\n \"intelligible date formats to get casted properly.\"\n ),\n 'owner': (\n \"The chart's owner, mostly used for reference and filtering in \"\n \"the list view.\"\n ),\n 'show_datatable':\n \"Whether to display an interactive data table under the chart.\",\n 'default_params': (\n 'A dictionary of {\"key\": \"values\",} that define what the '\n 'templated fields (parameters) values should be by default. '\n 'To be valid, it needs to \"eval\" as a Python dict. '\n 'The key values will show up in the url\\'s querystring '\n 'and can be altered there.'\n ),\n 'show_sql': \"Whether to display the SQL statement as a collapsible \"\n \"section in the chart page.\",\n 'y_log_scale': \"Whether to use a log scale for the Y axis.\",\n 'sql_layout': (\n \"Defines the layout of the SQL that the application should \"\n \"expect. 
Depending on the tables you are sourcing from, it may \"\n \"make more sense to pivot / unpivot the metrics.\"\n ),\n }\n column_labels = {\n 'sql': \"SQL\",\n 'height': \"Chart Height\",\n 'sql_layout': \"SQL Layout\",\n 'show_sql': \"Display the SQL Statement\",\n 'default_params': \"Default Parameters\",\n }\n form_choices = {\n 'chart_type': [\n ('line', 'Line Chart'),\n ('spline', 'Spline Chart'),\n ('bar', 'Bar Chart'),\n ('column', 'Column Chart'),\n ('area', 'Overlapping Area Chart'),\n ('stacked_area', 'Stacked Area Chart'),\n ('percent_area', 'Percent Area Chart'),\n ('datatable', 'No chart, data table only'),\n ],\n 'sql_layout': [\n ('series', 'SELECT series, x, y FROM ...'),\n ('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'),\n ],\n 'conn_id': _connection_ids()\n }\n\n def on_model_change(self, form, model, is_created=True):\n if model.iteration_no is None:\n model.iteration_no = 0\n else:\n model.iteration_no += 1\n if not model.user_id and current_user and hasattr(current_user, 'id'):\n model.user_id = current_user.id\n model.last_modified = timezone.utcnow()\n\n\nchart_mapping = (\n ('line', 'lineChart'),\n ('spline', 'lineChart'),\n ('bar', 'multiBarChart'),\n ('column', 'multiBarChart'),\n ('area', 'stackedAreaChart'),\n ('stacked_area', 'stackedAreaChart'),\n ('percent_area', 'stackedAreaChart'),\n ('datatable', 'datatable'),\n)\nchart_mapping = dict(chart_mapping)\n\n\nclass KnownEventView(wwwutils.DataProfilingMixin, AirflowModelView):\n verbose_name = \"known event\"\n verbose_name_plural = \"known events\"\n form_columns = (\n 'label',\n 'event_type',\n 'start_date',\n 'end_date',\n 'reported_by',\n 'description',\n )\n form_args = {\n 'label': {\n 'validators': [\n validators.DataRequired(),\n ],\n },\n 'event_type': {\n 'validators': [\n validators.DataRequired(),\n ],\n },\n 'start_date': {\n 'validators': [\n validators.DataRequired(),\n ],\n 'filters': [\n parse_datetime_f,\n ],\n },\n 'end_date': {\n 'validators': [\n validators.DataRequired(),\n GreaterEqualThan(fieldname='start_date'),\n ],\n 'filters': [\n parse_datetime_f,\n ]\n },\n 'reported_by': {\n 'validators': [\n validators.DataRequired(),\n ],\n }\n }\n column_list = (\n 'label',\n 'event_type',\n 'start_date',\n 'end_date',\n 'reported_by',\n )\n column_default_sort = (\"start_date\", True)\n column_sortable_list = (\n 'label',\n # todo: yes this has a spelling error\n ('event_type', 'event_type.know_event_type'),\n 'start_date',\n 'end_date',\n ('reported_by', 'reported_by.username'),\n )\n filter_converter = wwwutils.UtcFilterConverter()\n form_overrides = dict(start_date=DateTimeField, end_date=DateTimeField)\n\n\nclass KnownEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):\n pass\n\n\n# NOTE: For debugging / troubleshooting\n# mv = KnowEventTypeView(\n# models.KnownEventType,\n# Session, name=\"Known Event Types\", category=\"Manage\")\n# admin.add_view(mv)\n# class DagPickleView(SuperUserMixin, ModelView):\n# pass\n# mv = DagPickleView(\n# models.DagPickle,\n# Session, name=\"Pickles\", category=\"Manage\")\n# admin.add_view(mv)\n\n\nclass VariableView(wwwutils.DataProfilingMixin, AirflowModelView):\n verbose_name = \"Variable\"\n verbose_name_plural = \"Variables\"\n list_template = 'airflow/variable_list.html'\n\n def hidden_field_formatter(view, context, model, name):\n if wwwutils.should_hide_value_for_key(model.key):\n return Markup('*' * 8)\n try:\n return getattr(model, name)\n except AirflowException:\n return Markup('<span class=\"label 
label-danger\">Invalid</span>')\n\n form_columns = (\n 'key',\n 'val',\n )\n column_list = ('key', 'val', 'is_encrypted',)\n column_filters = ('key', 'val')\n column_searchable_list = ('key', 'val')\n column_default_sort = ('key', False)\n form_widget_args = {\n 'is_encrypted': {'disabled': True},\n 'val': {\n 'rows': 20,\n }\n }\n form_args = {\n 'key': {\n 'validators': {\n validators.DataRequired(),\n },\n },\n }\n column_sortable_list = (\n 'key',\n 'val',\n 'is_encrypted',\n )\n column_formatters = {\n 'val': hidden_field_formatter,\n }\n\n # Default flask-admin export functionality doesn't handle serialized json\n @action('varexport', 'Export', None)\n @provide_session\n def action_varexport(self, ids, session=None):\n V = models.Variable\n qry = session.query(V).filter(V.id.in_(ids)).all()\n\n var_dict = {}\n d = json.JSONDecoder()\n for var in qry:\n val = None\n try:\n val = d.decode(var.val)\n except:\n val = var.val\n var_dict[var.key] = val\n\n response = make_response(json.dumps(var_dict, sort_keys=True, indent=4))\n response.headers[\"Content-Disposition\"] = \"attachment; filename=variables.json\"\n return response\n\n def on_form_prefill(self, form, id):\n if wwwutils.should_hide_value_for_key(form.key.data):\n form.val.data = '*' * 8\n\n\nclass XComView(wwwutils.SuperUserMixin, AirflowModelView):\n verbose_name = \"XCom\"\n verbose_name_plural = \"XComs\"\n\n form_columns = (\n 'key',\n 'value',\n 'execution_date',\n 'task_id',\n 'dag_id',\n )\n\n form_extra_fields = {\n 'value': StringField('Value'),\n }\n\n form_args = {\n 'execution_date': {\n 'filters': [\n parse_datetime_f,\n ]\n }\n }\n\n column_filters = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')\n column_searchable_list = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')\n filter_converter = wwwutils.UtcFilterConverter()\n form_overrides = dict(execution_date=DateTimeField)\n\n\nclass JobModelView(ModelViewOnly):\n verbose_name_plural = \"jobs\"\n verbose_name = \"job\"\n column_display_actions = False\n column_default_sort = ('start_date', True)\n column_filters = (\n 'job_type', 'dag_id', 'state',\n 'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')\n column_formatters = dict(\n start_date=datetime_f,\n end_date=datetime_f,\n hostname=nobr_f,\n state=state_f,\n latest_heartbeat=datetime_f)\n filter_converter = wwwutils.UtcFilterConverter()\n\n\nclass DagRunModelView(ModelViewOnly):\n verbose_name_plural = \"DAG Runs\"\n can_edit = True\n can_create = True\n column_editable_list = ('state',)\n verbose_name = \"dag run\"\n column_default_sort = ('execution_date', True)\n form_choices = {\n 'state': [\n ('success', 'success'),\n ('running', 'running'),\n ('failed', 'failed'),\n ],\n }\n form_args = dict(\n dag_id=dict(validators=[validators.DataRequired()])\n )\n column_list = (\n 'state', 'dag_id', 'execution_date', 'run_id', 'external_trigger')\n column_filters = column_list\n filter_converter = wwwutils.UtcFilterConverter()\n column_searchable_list = ('dag_id', 'state', 'run_id')\n column_formatters = dict(\n execution_date=datetime_f,\n state=state_f,\n start_date=datetime_f,\n dag_id=dag_link,\n run_id=dag_run_link\n )\n\n @action('new_delete', \"Delete\", \"Are you sure you want to delete selected records?\")\n @provide_session\n def action_new_delete(self, ids, session=None):\n deleted = set(session.query(models.DagRun)\n .filter(models.DagRun.id.in_(ids))\n .all())\n session.query(models.DagRun) \\\n .filter(models.DagRun.id.in_(ids)) \\\n 
.delete(synchronize_session='fetch')\n session.commit()\n dirty_ids = []\n for row in deleted:\n dirty_ids.append(row.dag_id)\n models.DagStat.update(dirty_ids, dirty_only=False, session=session)\n\n @action('set_running', \"Set state to 'running'\", None)\n def action_set_running(self, ids):\n self.set_dagrun_state(ids, State.RUNNING)\n\n @action('set_failed', \"Set state to 'failed'\", None)\n def action_set_failed(self, ids):\n self.set_dagrun_state(ids, State.FAILED)\n\n @action('set_success', \"Set state to 'success'\", None)\n def action_set_success(self, ids):\n self.set_dagrun_state(ids, State.SUCCESS)\n\n @provide_session\n def set_dagrun_state(self, ids, target_state, session=None):\n try:\n DR = models.DagRun\n count = 0\n dirty_ids = []\n for dr in session.query(DR).filter(DR.id.in_(ids)).all():\n dirty_ids.append(dr.dag_id)\n count += 1\n dr.state = target_state\n if target_state == State.RUNNING:\n dr.start_date = timezone.utcnow()\n else:\n dr.end_date = timezone.utcnow()\n session.commit()\n models.DagStat.update(dirty_ids, session=session)\n flash(\n \"{count} dag runs were set to '{target_state}'\".format(**locals()))\n except Exception as ex:\n if not self.handle_view_exception(ex):\n raise Exception(\"Ooops\")\n flash('Failed to set state', 'error')\n\n\nclass LogModelView(ModelViewOnly):\n verbose_name_plural = \"logs\"\n verbose_name = \"log\"\n column_display_actions = False\n column_default_sort = ('dttm', True)\n column_filters = ('dag_id', 'task_id', 'execution_date')\n filter_converter = wwwutils.UtcFilterConverter()\n column_formatters = dict(\n dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link)\n\n\nclass TaskInstanceModelView(ModelViewOnly):\n verbose_name_plural = \"task instances\"\n verbose_name = \"task instance\"\n column_filters = (\n 'state', 'dag_id', 'task_id', 'execution_date', 'hostname',\n 'queue', 'pool', 'operator', 'start_date', 'end_date')\n filter_converter = wwwutils.UtcFilterConverter()\n named_filter_urls = True\n column_formatters = dict(\n log_url=log_url_formatter,\n task_id=task_instance_link,\n hostname=nobr_f,\n state=state_f,\n execution_date=datetime_f,\n start_date=datetime_f,\n end_date=datetime_f,\n queued_dttm=datetime_f,\n dag_id=dag_link,\n run_id=dag_run_link,\n duration=duration_f)\n column_searchable_list = ('dag_id', 'task_id', 'state')\n column_default_sort = ('job_id', True)\n form_choices = {\n 'state': [\n ('success', 'success'),\n ('running', 'running'),\n ('failed', 'failed'),\n ],\n }\n column_list = (\n 'state', 'dag_id', 'task_id', 'execution_date', 'operator',\n 'start_date', 'end_date', 'duration', 'job_id', 'hostname',\n 'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number',\n 'pool', 'log_url')\n page_size = PAGE_SIZE\n\n @action('set_running', \"Set state to 'running'\", None)\n def action_set_running(self, ids):\n self.set_task_instance_state(ids, State.RUNNING)\n\n @action('set_failed', \"Set state to 'failed'\", None)\n def action_set_failed(self, ids):\n self.set_task_instance_state(ids, State.FAILED)\n\n @action('set_success', \"Set state to 'success'\", None)\n def action_set_success(self, ids):\n self.set_task_instance_state(ids, State.SUCCESS)\n\n @action('set_retry', \"Set state to 'up_for_retry'\", None)\n def action_set_retry(self, ids):\n self.set_task_instance_state(ids, State.UP_FOR_RETRY)\n\n @provide_session\n @action('clear',\n lazy_gettext('Clear'),\n lazy_gettext(\n 'Are you sure you want to clear the state of the selected task instance(s)'\n ' and set their dagruns 
to the running state?'))\n def action_clear(self, ids, session=None):\n try:\n TI = models.TaskInstance\n\n dag_to_task_details = {}\n dag_to_tis = {}\n\n # Collect dags upfront as dagbag.get_dag() will reset the session\n for id_str in ids:\n task_id, dag_id, execution_date = id_str.split(',')\n dag = dagbag.get_dag(dag_id)\n task_details = dag_to_task_details.setdefault(dag, [])\n task_details.append((task_id, execution_date))\n\n for dag, task_details in dag_to_task_details.items():\n for task_id, execution_date in task_details:\n execution_date = parse_execution_date(execution_date)\n\n ti = session.query(TI).filter(TI.task_id == task_id,\n TI.dag_id == dag.dag_id,\n TI.execution_date == execution_date).one()\n\n tis = dag_to_tis.setdefault(dag, [])\n tis.append(ti)\n\n for dag, tis in dag_to_tis.items():\n models.clear_task_instances(tis, session, dag=dag)\n\n session.commit()\n\n flash(\"{0} task instances have been cleared\".format(len(ids)))\n\n except Exception as ex:\n if not self.handle_view_exception(ex):\n raise Exception(\"Ooops\")\n flash('Failed to clear task instances', 'error')\n\n @provide_session\n def set_task_instance_state(self, ids, target_state, session=None):\n try:\n TI = models.TaskInstance\n count = len(ids)\n for id in ids:\n task_id, dag_id, execution_date = id.split(',')\n execution_date = parse_execution_date(execution_date)\n\n ti = session.query(TI).filter(TI.task_id == task_id,\n TI.dag_id == dag_id,\n TI.execution_date == execution_date).one()\n ti.state = target_state\n session.commit()\n flash(\n \"{count} task instances were set to '{target_state}'\".format(**locals()))\n except Exception as ex:\n if not self.handle_view_exception(ex):\n raise Exception(\"Ooops\")\n flash('Failed to set state', 'error')\n\n def get_one(self, id):\n \"\"\"\n As a workaround for AIRFLOW-252, this method overrides Flask-Admin's ModelView.get_one().\n\n TODO: this method should be removed once the below bug is fixed on Flask-Admin side.\n https://github.com/flask-admin/flask-admin/issues/1226\n \"\"\"\n task_id, dag_id, execution_date = iterdecode(id)\n execution_date = pendulum.parse(execution_date)\n return self.session.query(self.model).get((task_id, dag_id, execution_date))\n\n\nclass ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):\n create_template = 'airflow/conn_create.html'\n edit_template = 'airflow/conn_edit.html'\n list_template = 'airflow/conn_list.html'\n form_columns = (\n 'conn_id',\n 'conn_type',\n 'host',\n 'schema',\n 'login',\n 'password',\n 'port',\n 'extra',\n 'extra__jdbc__drv_path',\n 'extra__jdbc__drv_clsname',\n 'extra__google_cloud_platform__project',\n 'extra__google_cloud_platform__key_path',\n 'extra__google_cloud_platform__keyfile_dict',\n 'extra__google_cloud_platform__scope',\n )\n verbose_name = \"Connection\"\n verbose_name_plural = \"Connections\"\n column_default_sort = ('conn_id', False)\n column_list = ('conn_id', 'conn_type', 'host', 'port', 'is_encrypted', 'is_extra_encrypted',)\n form_overrides = dict(_password=PasswordField, _extra=TextAreaField)\n form_widget_args = {\n 'is_extra_encrypted': {'disabled': True},\n 'is_encrypted': {'disabled': True},\n }\n # Used to customized the form, the forms elements get rendered\n # and results are stored in the extra field as json. All of these\n # need to be prefixed with extra__ and then the conn_type ___ as in\n # extra__{conn_type}__name. 
You can also hide form elements and rename\n # others from the connection_form.js file\n form_extra_fields = {\n 'extra__jdbc__drv_path': StringField('Driver Path'),\n 'extra__jdbc__drv_clsname': StringField('Driver Class'),\n 'extra__google_cloud_platform__project': StringField('Project Id'),\n 'extra__google_cloud_platform__key_path': StringField('Keyfile Path'),\n 'extra__google_cloud_platform__keyfile_dict': PasswordField('Keyfile JSON'),\n 'extra__google_cloud_platform__scope': StringField('Scopes (comma separated)'),\n\n }\n form_choices = {\n 'conn_type': models.Connection._types\n }\n\n def on_model_change(self, form, model, is_created):\n formdata = form.data\n if formdata['conn_type'] in ['jdbc', 'google_cloud_platform']:\n extra = {\n key: formdata[key]\n for key in self.form_extra_fields.keys() if key in formdata}\n model.extra = json.dumps(extra)\n\n @classmethod\n def alert_fernet_key(cls):\n fk = None\n try:\n fk = conf.get('core', 'fernet_key')\n except:\n pass\n return fk is None\n\n @classmethod\n def is_secure(cls):\n \"\"\"\n Used to display a message in the Connection list view making it clear\n that the passwords and `extra` field can't be encrypted.\n \"\"\"\n is_secure = False\n try:\n import cryptography\n conf.get('core', 'fernet_key')\n is_secure = True\n except:\n pass\n return is_secure\n\n def on_form_prefill(self, form, id):\n try:\n d = json.loads(form.data.get('extra', '{}'))\n except Exception:\n d = {}\n\n for field in list(self.form_extra_fields.keys()):\n value = d.get(field, '')\n if value:\n field = getattr(form, field)\n field.data = value\n\n\nclass UserModelView(wwwutils.SuperUserMixin, AirflowModelView):\n verbose_name = \"User\"\n verbose_name_plural = \"Users\"\n column_default_sort = 'username'\n\n\nclass VersionView(wwwutils.SuperUserMixin, BaseView):\n @expose('/')\n def version(self):\n # Look at the version from setup.py\n try:\n airflow_version = pkg_resources.require(\"apache-airflow\")[0].version\n except Exception as e:\n airflow_version = None\n logging.error(e)\n\n # Get the Git repo and git hash\n git_version = None\n try:\n with open(os.path.join(*[settings.AIRFLOW_HOME, 'airflow', 'git_version'])) as f:\n git_version = f.readline()\n except Exception as e:\n logging.error(e)\n\n # Render information\n title = \"Version Info\"\n return self.render('airflow/version.html',\n title=title,\n airflow_version=airflow_version,\n git_version=git_version)\n\n\nclass ConfigurationView(wwwutils.SuperUserMixin, BaseView):\n @expose('/')\n def conf(self):\n raw = request.args.get('raw') == \"true\"\n title = \"Airflow Configuration\"\n subtitle = conf.AIRFLOW_CONFIG\n if conf.getboolean(\"webserver\", \"expose_config\"):\n with open(conf.AIRFLOW_CONFIG, 'r') as f:\n config = f.read()\n table = [(section, key, value, source)\n for section, parameters in conf.as_dict(True, True).items()\n for key, (value, source) in parameters.items()]\n\n else:\n config = (\n \"# Your Airflow administrator chose not to expose the \"\n \"configuration, most likely for security reasons.\")\n table = None\n if raw:\n return Response(\n response=config,\n status=200,\n mimetype=\"application/text\")\n else:\n code_html = Markup(highlight(\n config,\n lexers.IniLexer(), # Lexer call\n HtmlFormatter(noclasses=True))\n )\n return self.render(\n 'airflow/config.html',\n pre_subtitle=settings.HEADER + \" v\" + airflow.__version__,\n code_html=code_html, title=title, subtitle=subtitle,\n table=table)\n\n\nclass DagModelView(wwwutils.SuperUserMixin, ModelView):\n 
column_list = ('dag_id', 'owners')\n column_editable_list = ('is_paused',)\n form_excluded_columns = ('is_subdag', 'is_active')\n column_searchable_list = ('dag_id',)\n column_filters = (\n 'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',\n 'last_scheduler_run', 'last_expired')\n filter_converter = wwwutils.UtcFilterConverter()\n form_widget_args = {\n 'last_scheduler_run': {'disabled': True},\n 'fileloc': {'disabled': True},\n 'is_paused': {'disabled': True},\n 'last_pickled': {'disabled': True},\n 'pickle_id': {'disabled': True},\n 'last_loaded': {'disabled': True},\n 'last_expired': {'disabled': True},\n 'pickle_size': {'disabled': True},\n 'scheduler_lock': {'disabled': True},\n 'owners': {'disabled': True},\n }\n column_formatters = dict(\n dag_id=dag_link,\n )\n can_delete = False\n can_create = False\n page_size = PAGE_SIZE\n list_template = 'airflow/list_dags.html'\n named_filter_urls = True\n\n def get_query(self):\n \"\"\"\n Default filters for model\n \"\"\"\n return (\n super(DagModelView, self)\n .get_query()\n .filter(or_(models.DagModel.is_active, models.DagModel.is_paused))\n .filter(~models.DagModel.is_subdag)\n )\n\n def get_count_query(self):\n \"\"\"\n Default filters for model\n \"\"\"\n return (\n super(DagModelView, self)\n .get_count_query()\n .filter(models.DagModel.is_active)\n .filter(~models.DagModel.is_subdag)\n )\n" ]
[ [ "pandas.set_option", "pandas.to_datetime" ] ]
emanuele-albini/emutils
[ "d5e3939da8a14b629879f06d87d4bd371e7117ab", "d5e3939da8a14b629879f06d87d4bd371e7117ab", "d5e3939da8a14b629879f06d87d4bd371e7117ab" ]
[ "src/emutils/tf/math.py", "src/emutils/dsutils/utils.py", "snippets/reduction.py" ]
[ "import tensorflow as tf\n\n\ndef cov(x):\n mean_x = tf.reduce_mean(x, axis=0, keepdims=True)\n mx = tf.matmul(tf.transpose(mean_x), mean_x)\n vx = tf.matmul(tf.transpose(x), x) / tf.cast(tf.shape(x)[0], tf.float64)\n cov_xx = vx - mx\n return cov_xx\n\n\ndef inv_cov(x):\n return tf.linalg.inv(cov(x))\n", "import numpy as np\nimport pandas as pd\n\nfrom typing import Union, Iterable, Dict\n\nfrom ..mlutils import MultiColumnLabelEncoderDecoder\n\n\ndef generate_combinatorial_dataset(names: Dict[str, Union[np.ndarray, list]], variables: Iterable[str] = None):\n \"\"\"\n Generate the combinatorial dataset for a list of variables\n\n names : a dictionary\n names.key = name of the variable\n names.value = values that the variable can assume\n variables : the iterables with the list of variables/columns for which to generate the dataset\n If variables = None, all the names.keys() will be used\n\n Returns\n -----------------------\n pd.DataFrame\n\n A dataframe with the combinatorial dataset\n \"\"\"\n variables = np.array(variables) if variables is not None else np.array(list(names.keys()))\n generator = [names[node] for node in variables]\n return pd.DataFrame(np.array(np.meshgrid(*generator)).T.reshape(-1, len(variables)), columns=variables)\n\n\ndef number_of_combinations_encoder(encdec: MultiColumnLabelEncoderDecoder, variables: Iterable = None):\n if variables is None:\n variables = encdec.get_encoder().keys()\n\n combs = 1\n for var in variables:\n combs *= len(encdec.get_encoder()[var])\n return combs\n\n\ndef random_combinatorial_sample(encdec: MultiColumnLabelEncoderDecoder, size: int, variables: Iterable = None, replace=True):\n if variables is None:\n variables = encdec.get_encoder().keys()\n if not replace:\n size = min(size, number_of_combinations_encoder(encdec, variables))\n sample = pd.DataFrame()\n while len(sample) < size:\n tempsample = pd.DataFrame()\n for var in variables:\n var_bowl = np.array(list(encdec.get_encoder()[var].keys()))\n tempsample[var] = np.random.choice(var_bowl, size - len(sample), replace=True)\n sample = pd.concat([sample, tempsample], axis=0)\n if not replace:\n sample.drop_duplicates(inplace=True)\n sample.reset_index(inplace=True, drop=True)\n return sample\n\n\ndef get_features_encodings(X: pd.DataFrame):\n \"\"\"\n Return the uniques values for each feature\n \"\"\"\n feature_encodings = {}\n for f in X.columns:\n u = X[f].unique()\n feature_encodings[f] = sorted(u)\n return feature_encodings\n\n\ndef number_of_combinations_categorical(X: [pd.DataFrame, MultiColumnLabelEncoderDecoder]):\n \"\"\"\n Return the number of combinations of inputs\n \"\"\"\n combs = 1\n feature_encodings = get_features_encodings(X)\n for f, encods in feature_encodings.items():\n combs *= len(encods)\n return combs", "import plotly.express as px\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nfrom sklearn.manifold import TSNE\r\nfrom sklearn.manifold import LocallyLinearEmbedding\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.random_projection import johnson_lindenstrauss_min_dim, GaussianRandomProjection\r\n\r\n\r\ndef dim_reduce(\r\n points,\r\n technique='tsne',\r\n random_state=2021,\r\n n_components=2,\r\n):\r\n if technique == 'tsne':\r\n tsne = TSNE(\r\n n_components=n_components,\r\n perplexity=30.0,\r\n early_exaggeration=12.0,\r\n learning_rate=200.0,\r\n n_iter=1000,\r\n n_iter_without_progress=300,\r\n min_grad_norm=1e-07,\r\n metric='euclidean',\r\n init='random',\r\n verbose=100,\r\n random_state=random_state,\r\n method='barnes_hut',\r\n 
angle=0.5,\r\n n_jobs=None,\r\n )\r\n transformed_points = tsne.fit_transform(points)\r\n elif technique == 'jlt':\r\n print(\r\n 'Minimum JL components (eps = .99): ',\r\n johnson_lindenstrauss_min_dim(len(points), eps=1 - np.finfo(float).eps)\r\n )\r\n grp = GaussianRandomProjection(n_components=n_components, eps=.99)\r\n transformed_points = grp.fit_transform(points)\r\n elif technique == 'pca':\r\n pca = PCA(\r\n n_components=n_components,\r\n copy=True,\r\n whiten=False,\r\n svd_solver='auto',\r\n tol=0.0,\r\n iterated_power='auto',\r\n random_state=random_state,\r\n )\r\n transformed_points = pca.fit_transform(points)\r\n\r\n elif technique == 'lle':\r\n lle = LocallyLinearEmbedding(\r\n n_components=n_components,\r\n random_state=random_state,\r\n )\r\n return lle.fit_transform(points)\r\n\r\n else:\r\n raise ValueError('Invalid technique.')\r\n\r\n return transformed_points\r\n\r\n\r\ndef plot_points_in_clusters(\r\n points,\r\n clusters=None,\r\n axis_names='x',\r\n clusters_name='group',\r\n title='Points in clusters',\r\n):\r\n # Get number of components and names\r\n n_components = len(points[0])\r\n assert n_components == 2 or n_components == 3\r\n\r\n if isinstance(axis_names, str):\r\n axis_names = [axis_names + f'_{i}' for i in range(n_components)]\r\n\r\n df = pd.DataFrame(points, columns=axis_names)\r\n kwargs = dict(\r\n x=axis_names[0],\r\n y=axis_names[1],\r\n title=title,\r\n )\r\n if clusters is not None:\r\n df[clusters_name] = clusters\r\n kwargs['color'] = clusters_name\r\n\r\n if n_components > 2:\r\n kwargs['z'] = axis_names[2]\r\n\r\n if n_components == 2:\r\n px.scatter(df.sample(frac=1), **kwargs).show()\r\n else:\r\n px.scatter_3d(df.sample(frac=1), **kwargs).show()\r\n" ]
[ [ "tensorflow.transpose", "tensorflow.shape", "tensorflow.reduce_mean" ], [ "pandas.concat", "numpy.array", "pandas.DataFrame", "numpy.meshgrid" ], [ "sklearn.random_projection.GaussianRandomProjection", "sklearn.manifold.LocallyLinearEmbedding", "pandas.DataFrame", "numpy.finfo", "sklearn.manifold.TSNE", "sklearn.decomposition.PCA" ] ]
lite-david/polymath
[ "cf1addc75e203fa606ebc6d32bc552fb3975ea99" ]
[ "polymath/srdfg/base.py" ]
[ "\nfrom polymath import UNSET_SHAPE, DEFAULT_SHAPES\nimport builtins\nimport operator\nfrom collections import OrderedDict, Mapping, Sequence, deque\nimport functools\nfrom numbers import Integral, Rational, Real\nimport contextlib\nimport traceback\nimport uuid\nimport numpy as np\nimport importlib\nfrom .graph import Graph\nfrom .domain import Domain\nfrom .util import _noop_callback, _flatten_iterable, node_hash, \\\n _is_node_type_instance, is_iterable\n\nclass Node(object):\n \"\"\"\n Base class for nodes.\n\n Parameters\n ----------\n args : tuple\n Positional arguments passed to the `_evaluate` method.\n name : str or None\n Name of the node or `None` to use a random, unique identifier.\n shape : tuple or None\n Shape of the output for a node. This can be a tuple of integers or parameter node names.\n graph : Node or None\n Parent graph of this node. If graph is `None`, this is the top-level graph.\n op_name : str\n Operation name which describes the node functionality.\n value : Any or None\n If a node has a default value to use for execution, it can be set using `value`.\n kwargs : dict\n Keyword arguments passed to the `_evaluate` method.\n \"\"\"\n _graph_stack = deque([None])\n _eval_stack = []\n stack_size = 5\n evaluated_nodes = 0\n def __init__(self, *args,\n name=None,\n shape=None,\n graph=None,\n dependencies=None,\n op_name=None,\n value=None,\n **kwargs):\n self.nodes = Graph()\n self.value = value\n self.dependencies = []\n self._args = []\n self._predeecessors = []\n self._succesors = []\n self.args = args\n if \"name\" in kwargs:\n kwargs.pop(\"name\")\n self.added_attrs = []\n # TODO: CHange this to underscore private variable\n self.kwargs = kwargs\n self.graph = graph\n self._shape = OrderedDict()\n self.shape = shape or tuple([])\n\n\n # Get a list of all dependencies relevant to this node\n self.dependencies = [] if dependencies is None else dependencies\n if self.graph:\n self.dependencies.extend(self.graph.dependencies)\n # Choose a name for the node and add the node to the graph\n self._name = None\n self.name = name or uuid.uuid4().hex\n self._op_name = None\n self.op_name = op_name\n # Get the stack context so we can report where the node was defined\n self._stack = traceback.extract_stack(limit=1)\n\n\n @property\n def graph(self):\n \"\"\"\n polymath.srdfg.graph.Graph : Parent graph of this node. If graph is `None`, this is the top-level graph.\n \"\"\"\n return self._graph\n\n def preds(self):\n return self._preds\n\n def succs(self):\n return self._preds\n\n def add_predecessor(self, pred):\n if isinstance(pred, Node):\n self._predecessors.append(pred.gname)\n else:\n self._predecessors.append(pred)\n\n def add_successor(self, succ):\n if isinstance(succ, Node):\n self._succesors.append(succ.gname)\n else:\n self._succesors.append(succ)\n\n def set_edges(self):\n for e in self.args:\n self.add_predecessor(e)\n if isinstance(e, Node):\n e.add_successor(self)\n\n @property\n def domain(self):\n return Domain(tuple([]))\n\n @property\n def args(self):\n \"\"\"\n tuple : Positional arguments which are used for executing this node.\n \"\"\"\n return tuple(self._args)\n\n @property\n def argnames(self):\n return [a.name if isinstance(a, Node) else a for a in self.args]\n\n @property\n def shape(self):\n \"\"\"\n tuple : Shape of the output for a node. 
This can be a tuple of integers or parameter node names.\n \"\"\"\n return self._shape\n\n @property\n def var(self):\n return self\n\n @property\n def name(self):\n \"\"\"str : Unique name of the node\"\"\"\n return self._name\n\n @property\n def op_name(self):\n \"\"\"\n str : Operation name which describes the node functionality.\n\n \"\"\"\n return self._op_name\n\n @op_name.setter\n def op_name(self, op_name):\n\n if op_name:\n self._op_name = op_name\n elif self.__class__.__name__ == \"Node\":\n self._op_name = self.name\n else:\n self._op_name = self.__class__.__name__\n\n @name.setter\n def name(self, name):\n self.set_name(name)\n\n @args.setter\n def args(self, args):\n new_args = []\n for arg in args:\n if isinstance(arg, Node):\n if self.__class__.__name__ == \"Node\":\n self.nodes[arg.name] = self.graph[arg.name]\n new_args.append(arg)\n self._args = tuple(new_args)\n\n @shape.setter\n def shape(self, shape):\n self.set_shape(shape, init=True)\n\n @graph.setter\n def graph(self, graph):\n self._graph = Node.get_active_graph(graph)\n\n @property\n def gname(self):\n scope_names = [self.name]\n cgraph = self.graph\n while cgraph:\n scope_names.append(cgraph.name)\n cgraph = cgraph.graph\n return \"/\".join(list(reversed(scope_names)))\n\n def __enter__(self):\n Node._graph_stack.append(self)\n return self\n\n def __exit__(self, *args):\n assert self == Node._graph_stack.pop()\n\n def __repr__(self):\n return \"<node '%s'>\" % self.name\n\n def add_attribute(self, key, value):\n self.added_attrs.append(key)\n self.kwargs[key] = value\n\n def is_shape_finalized(self):\n if self.shape == UNSET_SHAPE:\n return False\n for s in self.shape:\n if not isinstance(s, Integral):\n return False\n return True\n\n def set_shape(self, shape=None, init=False):\n if isinstance(shape, float):\n self._shape = tuple([np.int(shape)])\n elif isinstance(shape, Integral):\n self._shape = tuple([shape])\n elif isinstance(shape, Node):\n self._shape = tuple([shape])\n elif not shape or len(shape) == 0:\n # TODO: Change in order to enable \"is shape finalized\" to work\n self._shape = UNSET_SHAPE\n else:\n shapes = []\n for dim in shape:\n if isinstance(dim, (Node, Integral)):\n shapes.append(dim)\n elif isinstance(dim, float):\n shapes.append(int(dim))\n else:\n raise TypeError(f\"Shape value must be placeholder or integer value for {self.name}\\n\"\n f\"\\tDim: {dim}\"\n f\"\\n\\t{self.kwargs} \")\n self._shape = tuple(shapes)\n\n @staticmethod\n def get_active_graph(graph=None):\n \"\"\"\n Obtain the currently active graph instance by returning the explicitly given graph or using\n the default graph.\n\n Parameters\n ----------\n graph : Node or None\n Graph to return or `None` to use the default graph.\n\n Raises\n ------\n ValueError\n If no `Graph` instance can be obtained.\n \"\"\"\n\n graph = graph or Node._graph_stack[-1]\n return graph\n\n def instantiate_node(self, node): # pylint:disable=W0621\n \"\"\"\n Instantiate nodes by retrieving the node object associated with the node name.\n\n Parameters\n ----------\n node : Node or str\n Node instance or name of an node.\n\n Returns\n -------\n instantiated_node : Node\n Node instance.\n\n Raises\n ------\n ValueError\n If `node` is not an `Node` instance or an node name.\n RuntimeError\n If `node` is an `Node` instance but does not belong to this graph.\n \"\"\"\n if isinstance(node, str):\n return self.nodes[node]\n\n if isinstance(node, Node):\n if node.name not in self.nodes and (node.graph != self):\n raise RuntimeError(f\"node '{node}' does 
not belong to {self} graph, instead belongs to\"\n f\" {node.graph}\")\n return node\n\n raise ValueError(f\"'{node}' is not an `Node` instance or node name\")\n\n def instantiate_graph(self, context, **kwargs):\n \"\"\"\n Instantiate a graph by replacing all node names with node instances.\n\n .. note::\n This function modifies the context in place. Use :code:`context=context.copy()` to avoid\n the context being modified.\n\n Parameters\n ----------\n context : dict[Node or str, object]\n Context whose keys are node instances or names.\n kwargs : dict[str, object]\n Additional context information keyed by variable name.\n\n Returns\n -------\n normalized_context : dict[Node, object]\n Normalized context whose keys are node instances.\n\n Raises\n ------\n ValueError\n If the context specifies more than one value for any node.\n ValueError\n If `context` is not a mapping.\n \"\"\"\n if context is None:\n context = {}\n elif not isinstance(context, Mapping):\n raise ValueError(\"`context` must be a mapping.\")\n\n nodes = list(context)\n # Add the keyword arguments\n for node in nodes: # pylint:disable=W0621\n value = context.pop(node)\n node = self.instantiate_node(node)\n if node in context:\n raise ValueError(f\"duplicate unequal value for node '{node}'\")\n context[node] = value\n if node.op_name in [\"placeholder\", \"state\", \"input\", \"output\", \"temp\"] and not node.is_shape_finalized():\n context[node] = node.evaluate(context)\n\n for name, value in kwargs.items():\n node = self.nodes[name]\n if node in context:\n raise ValueError(f\"duplicate value for node '{node}'\")\n context[node] = value\n if node.op_name in [\"placeholder\", \"state\", \"input\", \"output\", \"temp\"] and not node.is_shape_finalized():\n context[node] = node.evaluate(context)\n\n return context\n\n def run(self, fetches, context=None, *, callback=None, **kwargs):\n \"\"\"\n Evaluate one or more nodes given a dictionary of node names with their values.\n\n .. note::\n This function modifies the context in place. 
Use :code:`context=context.copy()` to avoid\n the context being modified.\n\n Parameters\n ----------\n fetches : list[str or Node] or str or Node\n One or more `Node` instances or names to evaluate.\n context : dict or None\n Context in which to evaluate the nodes.\n callback : callable or None\n Callback to be evaluated when an node is evaluated.\n kwargs : dict\n Additional context information keyed by variable name.\n\n Returns\n -------\n values : Node or tuple[object]\n Output of the nodes given the context.\n\n Raises\n ------\n ValueError\n If `fetches` is not an `Node` instance, node name, or a sequence thereof.\n \"\"\"\n if isinstance(fetches, (str, Node)):\n fetches = [fetches]\n single = True\n elif isinstance(fetches, Sequence):\n single = False\n else:\n raise ValueError(\"`fetches` must be an `Node` instance, node name, or a \"\n \"sequence thereof.\")\n fetches = [self.instantiate_node(node) for node in fetches]\n context = self.instantiate_graph(context, **kwargs)\n for c in context:\n if c in fetches and c.op_name in [\"output\", \"state\", \"temp\"]:\n write_name = \"/\".join([f\"{i}{c.write_count-1}\" for i in c.name.split(\"/\")]) if c.write_count > 0 else c.name\n fetches[fetches.index(c)] = c.graph.nodes[write_name]\n\n values = [fetch.evaluate_node(fetch, context, callback=callback) for fetch in fetches]\n\n return values[0] if single else tuple(values)\n\n def __getstate__(self):\n return self.__dict__\n\n def __setstate__(self, data):\n self.__dict__.update(data)\n\n def set_name(self, name):\n \"\"\"\n Set the name of the node and update the graph.\n\n Parameters\n ----------\n value : str\n Unique name of the node.\n\n Returns\n -------\n self : Node\n This node.\n\n Raises\n ------\n ValueError\n If an node with `value` already exists in the associated graph.\n KeyError\n If the current name of the node cannot be found in the associated graph.\n \"\"\"\n\n name = name or uuid.uuid4().hex\n # TODO: Need a way to check if the existing node is not equal to the current ndoe as ewll\n if self.graph and name in self.graph.nodes:\n raise ValueError(f\"duplicate name '{name}' in {self.graph.name}:\\n\\t\"\n f\"Existing: {self.graph.nodes[name].args}\\n\\t\"\n f\"New: {self.args}\")\n\n if self.graph:\n graph = self.graph\n if self._name and self._name in graph.nodes:\n graph.update_graph_key(self._name, name)\n else:\n graph.nodes[name] = self\n\n self._name = name\n return self\n\n def evaluate_dependencies(self, context, callback=None):\n \"\"\"\n Evaluate the dependencies of this node and discard the values.\n\n Parameters\n ----------\n context : dict\n Normalised context in which to evaluate the node.\n callback : callable or None\n Callback to be evaluated when an node is evaluated.\n \"\"\"\n for node in self.dependencies:\n node.evaluate(context, callback)\n\n def evaluate(self, context, callback=None):\n \"\"\"\n Evaluate the node given a context.\n\n Parameters\n ----------\n context : dict\n Normalised context in which to evaluate the node.\n callback : callable or None\n Callback to be evaluated when an node is evaluated.\n\n Returns\n -------\n value : object\n Output of the node given the context.\n \"\"\"\n # Evaluate all explicit dependencies first\n\n self.evaluate_dependencies(context, callback)\n\n if self in context:\n return context[self]\n\n # Evaluate the parents\n partial = functools.partial(self.evaluate_node, context=context, callback=callback)\n\n args = [partial(arg) for arg in self.args]\n kwargs = {key: partial(value) for key, value in 
self.kwargs.items() if key not in self.added_attrs}\n # Evaluate the node\n callback = callback or _noop_callback\n with callback(self, context):\n if self.__class__.__name__ == \"Node\":\n context[self] = self.value = self._evaluate(*args, context=context, **kwargs)\n else:\n context[self] = self.value = self._evaluate(*args, **kwargs)\n return self.value\n\n def _evaluate(self, *args, context=None, **kwargs):\n \"\"\"\n Inheriting nodes should implement this function to evaluate the node.\n \"\"\"\n return self(*args, context, **kwargs)\n\n @classmethod\n def evaluate_node(cls, node, context, **kwargs):\n \"\"\"\n Evaluate an node or constant given a context.\n \"\"\"\n Node.evaluated_nodes += 1\n try:\n if isinstance(node, Node):\n Node._eval_stack.append(node.name)\n return node.evaluate(context, **kwargs)\n partial = functools.partial(cls.evaluate_node, context=context, **kwargs)\n if isinstance(node, tuple):\n return tuple(partial(element) for element in node)\n if isinstance(node, list):\n return [partial(element) for element in node]\n if isinstance(node, dict):\n return {partial(key): partial(value) for key, value in node.items()}\n if isinstance(node, slice):\n return slice(*[partial(getattr(node, attr))\n for attr in ['start', 'stop', 'step']])\n return node\n except Exception as ex: # pragma: no cover\n messages = []\n interactive = False\n if isinstance(node, Node) or not is_iterable(node):\n node = [node]\n\n for n in node:\n stack = []\n if isinstance(n, Node):\n\n for frame in reversed(n._stack): # pylint: disable=protected-access\n # Do not capture any internal stack traces\n fname = frame.filename\n if 'polymath' in fname:\n continue\n # Stop tracing at the last interactive cell\n if interactive and not fname.startswith('<'):\n break # pragma: no cover\n interactive = fname.startswith('<')\n stack.append(frame)\n stack = \"\".join(traceback.format_list(reversed(stack)))\n message = \"Failed to evaluate node `%s` defined at:\\n\\n%s\" % (n, stack)\n messages.append(message)\n raise ex from EvaluationError(\"\".join(messages))\n\n\n @classmethod\n def init_from_args(cls, *args,\n name=None,\n shape=None,\n graph=None,\n dependencies=None,\n op_name=None,\n value=None,\n **kwargs):\n if len(args) == 0:\n n = cls(name=name,\n shape=shape,\n graph=graph,\n op_name=op_name,\n dependencies=dependencies,\n value=value,\n **kwargs)\n else:\n n = cls(*args,\n name=name,\n shape=shape,\n graph=graph,\n op_name=op_name,\n dependencies=dependencies,\n value=value,\n **kwargs)\n return n\n\n def __bool__(self):\n return True\n\n def __hash__(self):\n return id(self)\n\n def func_hash(self):\n \"\"\"\n This returns the functional hash of a particular node. The default hash returns an object id, whereas this function\n returns a hash of all attributes and subgraphs of a node.\n \"\"\"\n return node_hash(self)\n\n def find_node(self, name):\n g = self.graph\n while g is not None and name not in g.nodes:\n g = g.graph\n if name in g.nodes:\n return g.nodes[name]\n raise RuntimeError(f\"Cannot find {name} in graph nodes. 
Graph: {self.graph}\")\n\n def __len__(self):\n #TODO: Update this to check for finalzied shape\n if self.shape == UNSET_SHAPE:\n raise TypeError(f'`shape` must be specified explicitly for nodes {self}')\n return self.shape[0]\n\n def __iter__(self):\n num = len(self)\n for i in range(num):\n yield self[i]\n\n def __eq__(self, other):\n return hash(self) == hash(other)\n\n def __getattr__(self, name):\n return getattr_(self, name, graph=self.graph)\n\n def __getitem__(self, key):\n if self.__class__.__name__ != \"Node\":\n if isinstance(key, (slice, Integral)):\n return getitem(self, key, graph=self.graph)\n else:\n if isinstance(key, (list)):\n return var_index(self, key, graph=self)\n elif isinstance(key, tuple):\n return var_index(self, list(key), graph=self)\n else:\n return var_index(self, [key], graph=self)\n else:\n return self.nodes[key]\n\n def __add__(self, other):\n return add(self, other, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__radd__(self)\n\n def __radd__(self, other):\n return add(other, self, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__add__(self)\n\n def __sub__(self, other):\n return sub(self, other, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__rsub__(self)\n\n def __rsub__(self, other):\n return sub(other, self, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__sub__(self)\n\n def __pow__(self, other):\n return pow_(self, other, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__rpow__(self)\n\n def __rpow__(self, other):\n return pow_(other, self, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__rpow__(self)\n\n def __matmul__(self, other):\n return matmul(self, other, graph=self.graph)\n\n def __rmatmul__(self, other):\n return matmul(other, self, graph=self.graph)\n\n def __mul__(self, other):\n return mul(self, other, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__rmul__(self)\n\n def __rmul__(self, other):\n return mul(other, self, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__mul__(self)\n\n def __truediv__(self, other):\n return truediv(self, other, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__truediv__(self)\n\n def __rtruediv__(self, other):\n return truediv(other, self, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__rtruediv__(self)\n\n def __floordiv__(self, other):\n return floordiv(self, other, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__rfloordiv__(self)\n\n def __rfloordiv__(self, other):\n return floordiv(other, self, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__floordiv__(self)\n\n def __mod__(self, other):\n return mod(self, other, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__rmod__(self)\n\n def __rmod__(self, other):\n return mod(other, self, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", 
\"index\")) else other.__mod__(self)\n\n def __lshift__(self, other):\n return lshift(self, other, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__rlshift__(self)\n\n def __rlshift__(self, other):\n return lshift(other, self, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__lshift__(self)\n\n def __rshift__(self, other):\n return rshift(self, other, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__rrshift__(self)\n\n def __rrshift__(self, other):\n return rshift(other, self, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__rshift__(self)\n\n def __and__(self, other):\n return and_(self, other, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__rand__(self)\n\n def __rand__(self, other):\n return and_(other, self, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__and__(self)\n\n def __or__(self, other):\n return or_(self, other, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__ror__(self)\n\n def __ror__(self, other):\n return or_(other, self, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__or__(self)\n\n def __xor__(self, other):\n return xor(self, other, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__rxor__(self)\n\n def __rxor__(self, other):\n return xor(other, self, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__xor__(self)\n\n def __lt__(self, other):\n return lt(self, other, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__gt__(self)\n\n def __le__(self, other):\n return le(self, other, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__ge__(self)\n\n\n def __ne__(self, other):\n return ne(self, other, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__ne__(self)\n\n def __gt__(self, other):\n return gt(self, other, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__lt__(self)\n\n def __ge__(self, other):\n return ge(self, other, graph=self.graph) if not _is_node_type_instance(other, (\"slice_op\", \"var_index\", \"index\")) else other.__le__(self)\n\n def __invert__(self):\n return inv(self, graph=self.graph)\n\n def __neg__(self):\n return neg(self, graph=self.graph)\n\n def __abs__(self):\n return abs_(self, graph=self.graph)\n\n def __pos__(self):\n return pos(self, graph=self.graph)\n\n def __reversed__(self):\n return reversed_(self, graph=self.graph)\n\n\n def update_graph_key(self, old_key, new_key):\n n = list(map(lambda k: (new_key, self.nodes[k]) if k == old_key else (k, self.nodes[k]), self.nodes.keys()))\n self.nodes = Graph(n)\n\n def insert_node(self, node, idx):\n node_list = list(self.nodes.items())\n node_list.insert(idx, (node.name, node))\n self.nodes = Graph(node_list)\n\n def __call__(self, *args, **kwargs):\n return self.run(*args, **kwargs)\n\nclass EvaluationError(RuntimeError):\n \"\"\"\n Failed to evaluate an node.\n 
\"\"\"\n\n\nclass var_index(Node): # pylint: disable=C0103,W0223\n \"\"\"\n Node representing values of a variable corresponding to input index values.\n\n Parameters\n ----------\n var : Node\n The multi-dimensional variable used for indexing into.\n idx : tuple\n Tuple of either integer values or index/index_op nodes.\n \"\"\"\n def __init__(self, var, idx, name=None, **kwargs): # pylint: disable=W0235\n if \"domain\" in kwargs:\n domain = tuple(kwargs.pop(\"domain\")) if isinstance(kwargs[\"domain\"], list) else kwargs.pop(\"domain\")\n else:\n domain = Domain(idx)\n\n super(var_index, self).__init__(var, idx, name=name, domain=domain, **kwargs)\n\n @property\n def domain(self):\n return self.kwargs[\"domain\"]\n\n @property\n def var(self):\n var, index_list = self.args\n return var\n\n def set_name(self, name):\n \"\"\"\n Set the name for a variable index, making sure to replicate the new name with\n a unique stringwhich corresponds to the variable, index combination.\n\n Parameters\n ----------\n value : str\n Unique name of the node.\n\n Returns\n -------\n self : Node\n This node.\n\n Raises\n ------\n ValueError\n If an node with `value` already exists in the associated graph.\n KeyError\n If the current name of the node cannot be found in the associated graph.\n \"\"\"\n\n # TODO: Need a way to check if the existing node is not equal to the current ndoe as ewll\n if self.graph and name in self.graph.nodes:\n raise ValueError(f\"duplicate name '{name}' in {self.graph.name}:\"\n f\"Existing: {self.graph.nodes[name].args}\\n\"\n f\"New: {self.args}\")\n\n if self.graph:\n graph = self.graph\n if self._name is not None and self._name in graph.nodes:\n graph.update_graph_key(self._name, name)\n else:\n graph.nodes[name] = self\n\n self._name = name\n return self\n\n def __getitem__(self, key):\n if self.is_shape_finalized() and len(self.nodes) >= np.prod(self.shape):\n if isinstance(key, Integral):\n key = tuple([key])\n idx = np.ravel_multi_index(key, dims=self.shape, order='C')\n ret = self.nodes.item_by_index(idx)\n return ret\n else:\n if isinstance(key, (list)):\n ret = var_index(self.var, tuple(key), graph=self)\n elif isinstance(key, tuple):\n ret = var_index(self.var, key, graph=self)\n else:\n ret = var_index(self.var, tuple([key]), graph=self)\n return ret\n\n def is_scalar(self, val=None):\n if val is not None and (not isinstance(val, np.ndarray) or (len(val.shape) == 1 and val.shape[0] == 1)):\n if self.var.shape != DEFAULT_SHAPES[0] and (len(self.var.shape) == 1 and not isinstance(self.var.shape[0],Node)):\n raise ValueError(f\"Invalid shape var for var index {self} with variable shape {self.var.shape}\")\n return True\n else:\n return self.var.shape == DEFAULT_SHAPES[0]\n\n def scalar_result(self):\n return all([isinstance(v, int) for v in self.args[1]])\n\n def _evaluate(self, var, indices, **kwargs):\n\n if self.is_scalar(var):\n out_shape = (1,)\n indices = (0,)\n single = True\n else:\n out_shape = self.domain.shape_from_indices(indices)\n indices = self.domain.compute_pairs()\n single = False\n if isinstance(var, (Integral, Real, str)):\n var = np.asarray([var])\n elif not isinstance(var, (np.ndarray, list)):\n raise TypeError(f\"Variable {var} with type {type(var)} is not a list or numpy array, and cannot be sliced for {self.name}\")\n elif isinstance(var, list):\n var = np.asarray(var)\n if len(var.shape) != len(out_shape) and np.prod(var.shape) == np.prod(out_shape):\n if len(out_shape) > len(var.shape):\n for i in range(len(out_shape)):\n if out_shape[i] == 1:\n 
var = np.expand_dims(var, axis=i)\n else:\n var = np.squeeze(var)\n\n if len(var.shape) != len(out_shape) and np.prod(var.shape) != np.prod(out_shape):\n raise ValueError(f\"Index list does not match {var.shape} in {self.var.name} - {self.var.op_name}\"\n f\"dimensions for slice {self.args[0].name} with {out_shape}.\\n\"\n f\"Domain: {self.domain}\\n\"\n f\"Eval Stack: {Node._eval_stack}\")\n\n if not single and not all([(idx_val - 1) >= indices[-1][idx] for idx, idx_val in enumerate(var.shape)]):\n\n raise ValueError(f\"var_index {self.name} has indices which are greater than the variable shape:\\n\"\n f\"\\tArgs: {self.args}\\n\"\n f\"\\tVar shape: {var.shape}\\n\"\n f\"\\tNode shape: {self.var.shape}\\n\"\n f\"\\tIndex Upper bounds: {indices[-1]}\")\n\n indices = list(map(lambda x: x.tolist() if isinstance(x, np.ndarray) else x, indices))\n\n res = var[indices] if single else np.asarray([var[idx] for idx in indices]).reshape(out_shape)\n if out_shape == (1,) and len(indices) == 1:\n res = res[0]\n self.domain.set_computed(out_shape, indices)\n\n return res\n\n def __add__(self, other):\n return slice_op(operator.add, self, other, graph=self.graph)\n\n def __radd__(self, other):\n return slice_op(operator.add, other, self, graph=self.graph)\n\n def __sub__(self, other):\n return slice_op(operator.sub, self, other, graph=self.graph)\n\n def __rsub__(self, other):\n return slice_op(operator.sub, other, self, graph=self.graph)\n\n def __pow__(self, other):\n return slice_op(builtins.pow, self, other, graph=self.graph)\n\n def __rpow__(self, other):\n return slice_op(builtins.pow, other, self, graph=self.graph)\n\n def __mul__(self, other):\n return slice_op(operator.mul, self, other, graph=self.graph)\n\n def __rmul__(self, other):\n return slice_op(operator.mul, other, self, graph=self.graph)\n\n def __truediv__(self, other):\n return slice_op(operator.truediv, self, other, graph=self.graph)\n\n def __rtruediv__(self, other):\n return slice_op(operator.truediv, other, self, graph=self.graph)\n\n def __floordiv__(self, other):\n return slice_op(operator.floordiv, self, other, graph=self.graph)\n\n def __rfloordiv__(self, other):\n return slice_op(operator.floordiv, other, self, graph=self.graph)\n\n def __mod__(self, other):\n return slice_op(operator.mod, self, other, graph=self.graph)\n\n def __rmod__(self, other):\n return slice_op(operator.mod, other, self, graph=self.graph)\n\n def __lshift__(self, other):\n return slice_op(operator.lshift, self, other, graph=self.graph)\n\n def __rlshift__(self, other):\n return slice_op(operator.lshift, other, self, graph=self.graph)\n\n def __rshift__(self, other):\n return slice_op(operator.rshift, self, other, graph=self.graph)\n\n def __rrshift__(self, other):\n return slice_op(operator.rshift, other, self, graph=self.graph)\n\n def __and__(self, other):\n return slice_op(operator.and_, self, other, graph=self.graph)\n\n def __rand__(self, other):\n return slice_op(operator.and_, other, self, graph=self.graph)\n\n def __or__(self, other):\n return slice_op(operator.or_, self, other, graph=self.graph)\n\n def __ror__(self, other):\n return slice_op(operator.or_, other, self, graph=self.graph)\n\n def __xor__(self, other):\n return slice_op(operator.xor, self, other, graph=self.graph)\n\n def __rxor__(self, other):\n return slice_op(operator.xor, other, self, graph=self.graph)\n\n def __lt__(self, other):\n return slice_op(operator.lt, self, other, graph=self.graph)\n\n def __le__(self, other):\n return slice_op(operator.lt, other, self, 
graph=self.graph)\n\n def __ne__(self, other):\n return slice_op(operator.ne, self, other, graph=self.graph)\n\n def __gt__(self, other):\n return slice_op(operator.gt, self, other, graph=self.graph)\n\n def __ge__(self, other):\n return slice_op(operator.ge, self, other, graph=self.graph)\n\n def __repr__(self):\n return \"<var_index name=%s, index=%s>\" % (self.name, self.args)\n\nclass slice_op(Node):\n \"\"\"\n Node representing multi-dimensional operations performed on a node.\n\n Parameters\n ----------\n target : cal\n The multi-dimensional variable used for indexing into.\n idx : tuple\n Tuple of either integer values or index/index_op nodes.\n \"\"\"\n def __init__(self, target, *args, **kwargs):\n\n if \"domain\" in kwargs:\n domain = tuple(kwargs.pop(\"domain\")) if isinstance(kwargs[\"domain\"], list) else kwargs.pop(\"domain\")\n else:\n all_args = _flatten_iterable(args)\n slice1_var, slice1_idx, slice2_var, slice2_idx = self.get_index_nodes(all_args[0], all_args[1])\n domain = slice1_idx.combine_set_domains(slice2_idx)\n\n if \"op_name\" in kwargs:\n kwargs.pop(\"op_name\")\n\n target_name = f\"{target.__module__}.{target.__name__}\"\n super(slice_op, self).__init__(*args, target=target_name, domain=domain, op_name=f\"slice_{target.__name__}\", **kwargs)\n self.target = target\n\n\n @property\n def domain(self):\n return self.kwargs[\"domain\"]\n\n def __getitem__(self, key):\n\n if isinstance(key, (tuple, list, np.ndarray)) and len(key) == 0:\n return self\n elif self.is_shape_finalized() and len(self.nodes) > 0:\n if isinstance(key, (int, Node)):\n key = tuple([key])\n if len(key) != len(self.shape):\n raise KeyError(f\"Invalid key shape for {self.name}:\\n\"\n f\"Shape: {self.shape}\\n\"\n f\"Key: {key}\")\n if isinstance(key, list):\n key = tuple(key)\n name = f\"{self.name}{key}\"\n if name not in self.nodes.keys():\n raise KeyError(f\"{name} not in {self.name} keys:\\n\"\n f\"Node keys: {list(self.nodes.keys())}\")\n ret = self.nodes[name]\n return ret\n else:\n name = []\n if isinstance(key, Node):\n name.append(key.name)\n elif hasattr(key, \"__len__\") and not isinstance(key, str):\n for k in key:\n if isinstance(k, Node):\n name.append(k.name)\n else:\n name.append(k)\n\n else:\n name.append(key)\n name = tuple(name)\n name = self.var.name + str(name)\n if name in self.graph.nodes:\n return self.graph.nodes[name]\n elif isinstance(key, (list)):\n return var_index(self, key, name=name, graph=self.graph)\n elif isinstance(key, tuple):\n return var_index(self, list(key), name=name, graph=self.graph)\n else:\n return var_index(self, [key], name=name, graph=self.graph)\n\n def set_shape(self, shape=None, init=False):\n s = []\n assert isinstance(shape, (tuple, list))\n if all([isinstance(sv, Integral) for sv in shape]) and len(self.domain) == np.product(shape) and len(shape) > 0:\n self._shape = shape if isinstance(shape, tuple) else tuple(shape)\n else:\n\n for idx, d in enumerate(self.domain.dom_set):\n if shape and isinstance(shape[idx], (func_op, Integral)):\n s.append(shape[idx])\n elif shape and isinstance(shape[idx], float):\n s.append(int(shape[idx]))\n elif isinstance(d, float):\n s.append(int(d))\n elif isinstance(d, var_index):\n s.append(d.domain)\n else:\n s.append(d)\n\n self._shape = tuple(s)\n\n def is_scalar(self, val):\n return not isinstance(val, np.ndarray) or (len(val.shape) == 1 and val.shape[0] == 1)\n\n def scalar_result(self):\n return False\n\n def _evaluate(self, op1, op2, context=None, **kwargs):\n if self.is_scalar(op1) or 
self.is_scalar(op2):\n value = self.target(op1, op2)\n else:\n arg0_dom = self.args[0].domain\n arg1_dom = self.args[1].domain\n op1_idx = self.domain.map_sub_domain(arg0_dom) if isinstance(self.args[0], Node) else tuple([])\n op2_idx = self.domain.map_sub_domain(arg1_dom) if isinstance(self.args[1], Node) else tuple([])\n op1 = np.asarray(list(map(lambda x: op1[x], op1_idx))).reshape(self.domain.computed_shape)\n op2 = np.asarray(list(map(lambda x: op2[x], op2_idx))).reshape(self.domain.computed_shape)\n value = self.target(op1, op2)\n return value\n\n\n\n def get_index_nodes(self, slice1_var=None, slice2_var=None):\n if slice1_var is None and slice2_var is None:\n slice1_var, slice2_var = self.args\n\n if isinstance(slice1_var, (slice_op, var_index)) or _is_node_type_instance(slice1_var, \"GroupNode\"):\n slice1_idx = slice1_var.domain\n elif _is_node_type_instance(slice1_var, \"index\"):\n slice1_idx = slice1_var.domain\n else:\n slice1_idx = Domain(tuple([]))\n\n if isinstance(slice2_var, (slice_op, var_index)) or _is_node_type_instance(slice2_var, \"GroupNode\"):\n slice2_idx = slice2_var.domain\n elif _is_node_type_instance(slice2_var, \"index\"):\n slice2_idx = slice2_var.domain\n else:\n slice2_idx = Domain(tuple([]))\n return slice1_var, slice1_idx, slice2_var, slice2_idx\n\n def __add__(self, other):\n return slice_op(operator.add, self, other, graph=self.graph)\n\n def __radd__(self, other):\n return slice_op(operator.add, other, self, graph=self.graph)\n\n def __sub__(self, other):\n return slice_op(operator.sub, self, other, graph=self.graph)\n\n def __rsub__(self, other):\n return slice_op(operator.sub, other, self, graph=self.graph)\n\n def __pow__(self, other):\n return slice_op(builtins.pow, self, other, graph=self.graph)\n\n def __rpow__(self, other):\n return slice_op(builtins.pow, other, self, graph=self.graph)\n\n def __mul__(self, other):\n return slice_op(operator.mul, self, other, graph=self.graph)\n\n def __rmul__(self, other):\n return slice_op(operator.mul, other, self, graph=self.graph)\n\n def __truediv__(self, other):\n return slice_op(operator.truediv, self, other, graph=self.graph)\n\n def __rtruediv__(self, other):\n return slice_op(operator.truediv, other, self, graph=self.graph)\n\n def __floordiv__(self, other):\n return slice_op(operator.floordiv, self, other, graph=self.graph)\n\n def __rfloordiv__(self, other):\n return slice_op(operator.floordiv, other, self, graph=self.graph)\n\n def __mod__(self, other):\n return slice_op(operator.mod, self, other, graph=self.graph)\n\n def __rmod__(self, other):\n return slice_op(operator.mod, other, self, graph=self.graph)\n\n def __lshift__(self, other):\n return slice_op(operator.lshift, self, other, graph=self.graph)\n\n def __rlshift__(self, other):\n return slice_op(operator.lshift, other, self, graph=self.graph)\n\n def __rshift__(self, other):\n return slice_op(operator.rshift, self, other, graph=self.graph)\n\n def __rrshift__(self, other):\n return slice_op(operator.rshift, other, self, graph=self.graph)\n\n def __and__(self, other):\n return slice_op(operator.and_, self, other, graph=self.graph)\n\n def __rand__(self, other):\n return slice_op(operator.and_, other, self, graph=self.graph)\n\n def __or__(self, other):\n return slice_op(operator.or_, self, other, graph=self.graph)\n\n def __ror__(self, other):\n return slice_op(operator.or_, other, self, graph=self.graph)\n\n def __xor__(self, other):\n return slice_op(operator.xor, self, other, graph=self.graph)\n\n def __rxor__(self, other):\n return 
slice_op(operator.xor, other, self, graph=self.graph)\n\n def __lt__(self, other):\n return slice_op(operator.lt, self, other, graph=self.graph)\n\n def __le__(self, other):\n return slice_op(operator.lt, other, self, graph=self.graph)\n\n def __ne__(self, other):\n return slice_op(operator.ne, self, other, graph=self.graph)\n\n def __gt__(self, other):\n return slice_op(operator.gt, self, other, graph=self.graph)\n\n def __ge__(self, other):\n return slice_op(operator.ge, self, other, graph=self.graph)\n\n def __repr__(self):\n return \"<slice_%s '%s'>\" % (self.target.__name__, self.name)\n\n\nclass func_op(Node): # pylint: disable=C0103,R0903\n \"\"\"\n Node wrapper for stateless functions.\n\n Parameters\n ----------\n target : callable\n function to evaluate the node\n args : tuple\n positional arguments passed to the target\n kwargs : dict\n keywoard arguments passed to the target\n \"\"\"\n def __init__(self, target, *args, **kwargs):\n kwargs[\"op_name\"] = kwargs[\"op_name\"] if \"op_name\" in kwargs \\\n else f\"{target.__name__}\"\n if \"domain\" in kwargs:\n domain = tuple(kwargs.pop(\"domain\")) if isinstance(kwargs[\"domain\"], list) else kwargs.pop(\"domain\")\n elif len(args) == 2:\n all_args = _flatten_iterable(args)\n slice1_var, slice1_idx, slice2_var, slice2_idx = self.get_index_nodes(all_args[0], all_args[1])\n domain = slice1_idx.combine_set_domains(slice2_idx)\n else:\n domain = Domain(tuple([]))\n self._target = None\n super(func_op, self).__init__(*args, target=f\"{target.__module__}.{target.__name__}\", domain=domain, **kwargs)\n self.target = target\n self.added_attrs += [\"domain\", \"target\"]\n\n @property\n def target(self):\n return self._target\n\n @target.setter\n def target(self, fnc):\n self._target = fnc\n self.op_name = f\"{fnc.__name__}\"\n self.kwargs[\"target\"] = f\"{fnc.__module__}.{fnc.__name__}\"\n\n def __getitem__(self, key):\n return self\n\n @property\n def domain(self):\n return self.kwargs[\"domain\"]\n\n def get_index_nodes(self, slice1_var=None, slice2_var=None):\n if slice1_var is None and slice2_var is None:\n slice1_var, slice2_var = self.args\n\n if isinstance(slice1_var, (slice_op, var_index)) or _is_node_type_instance(slice1_var, \"GroupNode\"):\n slice1_idx = slice1_var.domain\n else:\n slice1_idx = Domain(tuple([]))\n\n if isinstance(slice2_var, (slice_op, var_index)) or _is_node_type_instance(slice2_var, \"GroupNode\"):\n slice2_idx = slice2_var.domain\n else:\n slice2_idx = Domain(tuple([]))\n return slice1_var, slice1_idx, slice2_var, slice2_idx\n\n def _evaluate(self, *args, **kwargs):\n\n for aa in list(kwargs.keys()):\n if aa in self.added_attrs:\n kwargs.pop(aa)\n return self.target(*args, **kwargs)\n\n def __call__(self, *args, **kwargs):\n return call(self, *args, **kwargs)\n\n def __repr__(self):\n return \"<func_op '%s' target=%s args=<%d items>>\" % \\\n (self.name, self.kwargs[\"target\"], len(self.args))\n\ndef nodeop(target=None, **kwargs):\n \"\"\"\n Decorator for creating nodes from functions.\n \"\"\"\n # This is called when the decorator is used with arguments\n if target is None:\n return functools.partial(nodeop, **kwargs)\n\n # This is called when the decorator is used without arguments\n @functools.wraps(target)\n def _wrapper(*args, **kwargs_inner):\n return func_op(target, *args, **kwargs_inner, **kwargs)\n return _wrapper\n\n\n@nodeop\ndef call(func, *args, **kwargs):\n \"\"\"\n Call `func` with positional arguments `args` and keyword arguments `kwargs`.\n\n Parameters\n ----------\n func : callable\n 
Function to call when the node is executed.\n args : list\n Sequence of positional arguments passed to `func`.\n kwargs : dict\n Mapping of keyword arguments passed to `func`.\n \"\"\"\n return func(*args, **kwargs)\n\[email protected]\ndef control_dependencies(dependencies, graph=None):\n \"\"\"\n Ensure that all `dependencies` are executed before any nodes in this scope.\n\n Parameters\n ----------\n dependencies : list\n Sequence of nodes to be evaluted before evaluating any nodes defined in this\n scope.\n \"\"\"\n # Add dependencies to the graph\n graph = Node.get_active_graph(graph)\n graph.dependencies.extend(dependencies)\n yield\n # Remove dependencies from the graph\n del graph.dependencies[-len(dependencies):]\n\n#pylint: disable=C0103\nabs_ = nodeop(builtins.abs)\ndict_ = nodeop(builtins.dict)\nhelp_ = nodeop(builtins.help)\nmin_ = nodeop(builtins.min)\nsetattr_ = nodeop(builtins.setattr)\nall_ = nodeop(builtins.all)\ndir_ = nodeop(builtins.dir)\nhex_ = nodeop(builtins.hex)\nnext_ = nodeop(builtins.next)\nslice_ = nodeop(builtins.slice)\nany_ = nodeop(builtins.any)\ndivmod_ = nodeop(builtins.divmod)\nid_ = nodeop(builtins.id)\nobject_ = nodeop(builtins.object)\nsorted_ = nodeop(builtins.sorted)\nascii_ = nodeop(builtins.ascii)\nenumerate_ = nodeop(builtins.enumerate)\ninput_ = nodeop(builtins.input)\noct_ = nodeop(builtins.oct)\nstaticmethod_ = nodeop(builtins.staticmethod)\nbin_ = nodeop(builtins.bin)\neval_ = nodeop(builtins.eval)\nint_ = nodeop(builtins.int)\nopen_ = nodeop(builtins.open)\nstr_ = nodeop(builtins.str)\nbool_ = nodeop(builtins.bool)\nexec_ = nodeop(builtins.exec)\nisinstance_ = nodeop(builtins.isinstance)\nord_ = nodeop(builtins.ord)\nsum_ = nodeop(builtins.sum)\nbytearray_ = nodeop(builtins.bytearray)\nfilter_ = nodeop(builtins.filter)\nissubclass_ = nodeop(builtins.issubclass)\npow_ = nodeop(builtins.pow)\nsuper_ = nodeop(builtins.super)\nbytes_ = nodeop(builtins.bytes)\nfloat_ = nodeop(builtins.float)\niter_ = nodeop(builtins.iter)\nprint_ = nodeop(builtins.print)\ntuple_ = nodeop(builtins.tuple)\ncallable_ = nodeop(builtins.callable)\nformat_ = nodeop(builtins.format)\nlen_ = nodeop(builtins.len)\nproperty_ = nodeop(builtins.property)\ntype_ = nodeop(builtins.type)\nchr_ = nodeop(builtins.chr)\nfrozenset_ = nodeop(builtins.frozenset)\nlist_ = nodeop(builtins.list)\nrange_ = nodeop(builtins.range)\nvars_ = nodeop(builtins.vars)\nclassmethod_ = nodeop(builtins.classmethod)\ngetattr_ = nodeop(builtins.getattr)\nlocals_ = nodeop(builtins.locals)\nrepr_ = nodeop(builtins.repr)\nzip_ = nodeop(builtins.zip)\ncompile_ = nodeop(builtins.compile)\nglobals_ = nodeop(builtins.globals)\nmap_ = nodeop(builtins.map)\nreversed_ = nodeop(builtins.reversed)\ncomplex_ = nodeop(builtins.complex)\nhasattr_ = nodeop(builtins.hasattr)\nmax_ = nodeop(builtins.max)\nround_ = nodeop(builtins.round)\ndelattr_ = nodeop(builtins.delattr)\nhash_ = nodeop(builtins.hash)\nmemoryview_ = nodeop(builtins.memoryview)\nset_ = nodeop(builtins.set)\nadd = nodeop(operator.add)\nand_ = nodeop(operator.and_)\nattrgetter = nodeop(operator.attrgetter)\nconcat = nodeop(operator.concat)\ncontains = nodeop(operator.contains)\ncountOf = nodeop(operator.countOf)\ndelitem = nodeop(operator.delitem)\neq = nodeop(operator.eq)\nfloordiv = nodeop(operator.floordiv)\nge = nodeop(operator.ge)\ngetitem = nodeop(operator.getitem)\ngt = nodeop(operator.gt)\nindex = nodeop(operator.index)\nindexOf = nodeop(operator.indexOf)\ninv = nodeop(operator.inv)\ninvert = nodeop(operator.invert)\nior = 
nodeop(operator.ior)\nipow = nodeop(operator.ipow)\nirshift = nodeop(operator.irshift)\nis_ = nodeop(operator.is_)\nis_not = nodeop(operator.is_not)\nitemgetter = nodeop(operator.itemgetter)\nle = nodeop(operator.le)\nlength_hint = nodeop(operator.length_hint)\nlshift = nodeop(operator.lshift)\nlt = nodeop(operator.lt)\nmatmul = nodeop(operator.matmul)\nmethodcaller = nodeop(operator.methodcaller)\nmod = nodeop(operator.mod)\nmul = nodeop(operator.mul)\nne = nodeop(operator.ne)\nneg = nodeop(operator.neg)\nnot_ = nodeop(operator.not_)\nor_ = nodeop(operator.or_)\npos = nodeop(operator.pos)\nrshift = nodeop(operator.rshift)\nsetitem = nodeop(operator.setitem)\nsub = nodeop(operator.sub)\ntruediv = nodeop(operator.truediv)\ntruth = nodeop(operator.truth)\nxor = nodeop(operator.xor)\nimport_ = nodeop(importlib.import_module)\n\n\n\n\n\n" ]
[ [ "numpy.product", "numpy.expand_dims", "numpy.asarray", "numpy.squeeze", "numpy.int", "numpy.prod", "numpy.ravel_multi_index" ] ]
luanagbmartins/cavia
[ "91f093af9d6f463ee651db533f6c2acc637c7e9f" ]
[ "rl/envs/mujoco/ant.py" ]
[ "import numpy as np\r\nfrom gym.envs.mujoco import AntEnv as AntEnv_\r\n\r\n\r\nclass AntEnv(AntEnv_):\r\n @property\r\n def action_scaling(self):\r\n if (not hasattr(self, 'action_space')) or (self.action_space is None):\r\n return 1.0\r\n if self._action_scaling is None:\r\n lb, ub = self.action_space.low, self.action_space.high\r\n self._action_scaling = 0.5 * (ub - lb)\r\n return self._action_scaling\r\n\r\n def _get_obs(self):\r\n return np.concatenate([\r\n self.sim.data.qpos.flat,\r\n self.sim.data.qvel.flat,\r\n np.clip(self.sim.data.cfrc_ext, -1, 1).flat,\r\n self.sim.data.get_body_xmat(\"torso\").flat,\r\n self.get_body_com(\"torso\").flat,\r\n ]).astype(np.float32).flatten()\r\n\r\n def viewer_setup(self):\r\n camera_id = self.model.camera_name2id('track')\r\n self.viewer.cam.type = 2\r\n self.viewer.cam.fixedcamid = camera_id\r\n self.viewer.cam.distance = self.model.stat.extent * 0.35\r\n # Hide the overlay\r\n self.viewer._hide_overlay = True\r\n\r\n def render(self, mode='human'):\r\n if mode == 'rgb_array':\r\n self._get_viewer().render()\r\n # window size used for old mujoco-py:\r\n width, height = 500, 500\r\n data = self._get_viewer().read_pixels(width, height, depth=False)\r\n return data\r\n elif mode == 'human':\r\n self._get_viewer().render()\r\n\r\n\r\nclass AntVelEnv(AntEnv):\r\n \"\"\"Ant environment with target velocity, as described in [1]. The \r\n code is adapted from\r\n https://github.com/cbfinn/maml_rl/blob/9c8e2ebd741cb0c7b8bf2d040c4caeeb8e06cc95/rllab/envs/mujoco/ant_env_rand.py\r\n\r\n The ant follows the dynamics from MuJoCo [2], and receives at each \r\n time step a reward composed of a control cost, a contact cost, a survival \r\n reward, and a penalty equal to the difference between its current velocity \r\n and the target velocity. 
The tasks are generated by sampling the target \r\n velocities from the uniform distribution on [0, 3].\r\n\r\n [1] Chelsea Finn, Pieter Abbeel, Sergey Levine, \"Model-Agnostic \r\n Meta-Learning for Fast Adaptation of Deep Networks\", 2017 \r\n (https://arxiv.org/abs/1703.03400)\r\n [2] Emanuel Todorov, Tom Erez, Yuval Tassa, \"MuJoCo: A physics engine for \r\n model-based control\", 2012 \r\n (https://homes.cs.washington.edu/~todorov/papers/TodorovIROS12.pdf)\r\n \"\"\"\r\n\r\n def __init__(self, task={}):\r\n self._task = task\r\n self._goal_vel = task.get('velocity', 0.0)\r\n self._action_scaling = None\r\n super(AntVelEnv, self).__init__()\r\n\r\n def step(self, action):\r\n xposbefore = self.get_body_com(\"torso\")[0]\r\n self.do_simulation(action, self.frame_skip)\r\n xposafter = self.get_body_com(\"torso\")[0]\r\n\r\n forward_vel = (xposafter - xposbefore) / self.dt\r\n forward_reward = -1.0 * np.abs(forward_vel - self._goal_vel) + 1.0\r\n survive_reward = 0.05\r\n\r\n ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / self.action_scaling))\r\n contact_cost = 0.5 * 1e-3 * np.sum(\r\n np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))\r\n\r\n observation = self._get_obs()\r\n reward = forward_reward - ctrl_cost - contact_cost + survive_reward\r\n state = self.state_vector()\r\n notdone = np.isfinite(state).all() \\\r\n and state[2] >= 0.2 and state[2] <= 1.0\r\n done = not notdone\r\n infos = dict(reward_forward=forward_reward, reward_ctrl=-ctrl_cost,\r\n reward_contact=-contact_cost, reward_survive=survive_reward,\r\n task=self._task)\r\n return (observation, reward, done, infos)\r\n\r\n def sample_tasks(self, num_tasks):\r\n velocities = self.np_random.uniform(0.0, 3.0, size=(num_tasks,))\r\n tasks = [{'velocity': velocity} for velocity in velocities]\r\n return tasks\r\n\r\n def reset_task(self, task):\r\n self._task = task\r\n self._goal_vel = task['velocity']\r\n\r\n\r\nclass AntDirEnv(AntEnv):\r\n \"\"\"Ant environment with target direction, as described in [1]. The \r\n code is adapted from\r\n https://github.com/cbfinn/maml_rl/blob/9c8e2ebd741cb0c7b8bf2d040c4caeeb8e06cc95/rllab/envs/mujoco/ant_env_rand_direc.py\r\n\r\n The ant follows the dynamics from MuJoCo [2], and receives at each \r\n time step a reward composed of a control cost, a contact cost, a survival \r\n reward, and a reward equal to its velocity in the target direction. 
The \r\n tasks are generated by sampling the target directions from a Bernoulli \r\n distribution on {-1, 1} with parameter 0.5 (-1: backward, +1: forward).\r\n\r\n [1] Chelsea Finn, Pieter Abbeel, Sergey Levine, \"Model-Agnostic \r\n Meta-Learning for Fast Adaptation of Deep Networks\", 2017 \r\n (https://arxiv.org/abs/1703.03400)\r\n [2] Emanuel Todorov, Tom Erez, Yuval Tassa, \"MuJoCo: A physics engine for \r\n model-based control\", 2012 \r\n (https://homes.cs.washington.edu/~todorov/papers/TodorovIROS12.pdf)\r\n \"\"\"\r\n\r\n def __init__(self, task={}):\r\n self._task = task\r\n self._goal_dir = task.get('direction', 1)\r\n self._action_scaling = None\r\n super(AntDirEnv, self).__init__()\r\n\r\n def step(self, action):\r\n xposbefore = self.get_body_com(\"torso\")[0]\r\n self.do_simulation(action, self.frame_skip)\r\n xposafter = self.get_body_com(\"torso\")[0]\r\n\r\n forward_vel = (xposafter - xposbefore) / self.dt\r\n forward_reward = self._goal_dir * forward_vel\r\n survive_reward = 0.05\r\n\r\n ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / self.action_scaling))\r\n contact_cost = 0.5 * 1e-3 * np.sum(\r\n np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))\r\n\r\n observation = self._get_obs()\r\n reward = forward_reward - ctrl_cost - contact_cost + survive_reward\r\n state = self.state_vector()\r\n notdone = np.isfinite(state).all() \\\r\n and state[2] >= 0.2 and state[2] <= 1.0\r\n done = not notdone\r\n infos = dict(reward_forward=forward_reward, reward_ctrl=-ctrl_cost,\r\n reward_contact=-contact_cost, reward_survive=survive_reward,\r\n task=self._task)\r\n return (observation, reward, done, infos)\r\n\r\n def sample_tasks(self, num_tasks):\r\n directions = 2 * self.np_random.binomial(1, p=0.5, size=(num_tasks,)) - 1\r\n tasks = [{'direction': direction} for direction in directions]\r\n return tasks\r\n\r\n def reset_task(self, task):\r\n self._task = task\r\n self._goal_dir = task['direction']\r\n\r\n\r\nclass AntPosEnv(AntEnv):\r\n \"\"\"Ant environment with target position. The code is adapted from\r\n https://github.com/cbfinn/maml_rl/blob/9c8e2ebd741cb0c7b8bf2d040c4caeeb8e06cc95/rllab/envs/mujoco/ant_env_rand_goal.py\r\n\r\n The ant follows the dynamics from MuJoCo [1], and receives at each \r\n time step a reward composed of a control cost, a contact cost, a survival \r\n reward, and a penalty equal to its L1 distance to the target position. 
The \r\n tasks are generated by sampling the target positions from the uniform \r\n distribution on [-3, 3]^2.\r\n\r\n [1] Emanuel Todorov, Tom Erez, Yuval Tassa, \"MuJoCo: A physics engine for \r\n model-based control\", 2012 \r\n (https://homes.cs.washington.edu/~todorov/papers/TodorovIROS12.pdf)\r\n \"\"\"\r\n\r\n def __init__(self, task={}):\r\n self._task = task\r\n self._goal_pos = task.get('position', np.zeros((2,), dtype=np.float32))\r\n self._action_scaling = None\r\n super(AntPosEnv, self).__init__()\r\n\r\n def step(self, action):\r\n self.do_simulation(action, self.frame_skip)\r\n xyposafter = self.get_body_com(\"torso\")[:2]\r\n\r\n goal_reward = -np.sum(np.abs(xyposafter - self._goal_pos)) + 4.0\r\n survive_reward = 0.05\r\n\r\n ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / self.action_scaling))\r\n contact_cost = 0.5 * 1e-3 * np.sum(\r\n np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))\r\n\r\n observation = self._get_obs()\r\n reward = goal_reward - ctrl_cost - contact_cost + survive_reward\r\n state = self.state_vector()\r\n notdone = np.isfinite(state).all() \\\r\n and state[2] >= 0.2 and state[2] <= 1.0\r\n done = not notdone\r\n infos = dict(reward_goal=goal_reward, reward_ctrl=-ctrl_cost,\r\n reward_contact=-contact_cost, reward_survive=survive_reward,\r\n task=self._task)\r\n return (observation, reward, done, infos)\r\n\r\n def sample_tasks(self, num_tasks):\r\n positions = self.np_random.uniform(-3.0, 3.0, size=(num_tasks, 2))\r\n tasks = [{'position': position} for position in positions]\r\n return tasks\r\n\r\n def reset_task(self, task):\r\n self._task = task\r\n self._goal_pos = task['position']\r\n" ]
[ [ "numpy.square", "numpy.abs", "numpy.isfinite", "numpy.clip", "numpy.zeros" ] ]
leniel/DataMining
[ "f249f636ede67a29de986b8f34c9cbe75b680f47", "f249f636ede67a29de986b8f34c9cbe75b680f47", "f249f636ede67a29de986b8f34c9cbe75b680f47" ]
[ "Classification/Work 2/NaiveBayes/naivebayes_crossvalidation.py", "Classification/Work 2/NeuralNetwork/neuralnet.py", "Classification/Work 2/SVM/svm_crossvalidation.py" ]
[ "'''\n\b Created on Sat Nov 05 2016\n\b\n\b Copyright (c) 2016 Leniel Macaferi's Consulting\n'''\n\nimport os\nimport pandas as pd\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.model_selection import cross_val_score\n\npath = os.path.realpath('..')\n\n# Loading the data used to train\ntrainingSet = pd.read_csv(os.path.join(path, '../Data/classification-training.csv'), sep=',', header = None)\n\nclasses = trainingSet[trainingSet.columns[22]] # Last column\nfeatures = trainingSet[trainingSet.columns[1:22]] # Columns between indexes 1 to 22\n\n#pd.set_option('display.max_columns', 23)\n#print(features)\n\nclassifier = GaussianNB()\n\nscores = cross_val_score(classifier, features, classes, cv = 5)\n\nprint(scores)\n\nprint(\"Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))", "'''\n\b Created on Sat Nov 05 2016\n\b\n\b Copyright (c) 2016 Leniel Macaferi's Consulting\n'''\n\nimport os\nimport numpy as np\nimport pandas as pd\nfrom sklearn.neural_network import MLPClassifier\n\npath = os.path.realpath('..')\n\n# Loading the Training set...\ntrainingSet = pd.read_csv(os.path.join(\n path, '../Data/classification-training.csv'), sep=',', header=None)\n\nclasses = trainingSet[trainingSet.columns[22]] # Last column\n# Columns between indexes 1 to 22\nfeatures = trainingSet[trainingSet.columns[1:22]]\n\n#pd.set_option('display.max_columns', 23)\n# print(features)\n\n# Neural Network classifier\nclassifier = MLPClassifier(solver='lbfgs', random_state=1)\n\nclassifier.fit(features, classes)\n\n# Loading the Test set...\ntestSet = pd.read_csv(os.path.join(\n path, '../Data/classification-test.csv'), sep=',', header=None)\n\n# Getting the ids that are used only to output the result\nids = testSet[testSet.columns[0]]\n\n# Using the trained classifier to predict the test data\npredictions = classifier.predict(testSet[testSet.columns[1:22]])\n\n# Write to save predictions to disk\nwriter = csv.writer(open(os.path.join(path, 'nn-predictions.csv'), 'w'))\n\nfor prediction, id in zip(predictions, ids):\n data = [id, prediction]\n\n #print(\"{0} = {1}\\n\".format(id, prediction))\n\n writer.writerow(data)\n", "'''\n\b Created on Sat Nov 05 2016\n\b\n\b Copyright (c) 2016 Leniel Macaferi's Consulting\n'''\n\nimport os\nimport pandas as pd\nfrom sklearn import svm\nfrom sklearn.model_selection import cross_val_score\n\npath = os.path.realpath('..')\n\n# Loading the data used to train\ntrainingSet = pd.read_csv(os.path.join(\n path, '../Data/classification-training.csv'), sep=',', header=None)\n\nclasses = trainingSet[trainingSet.columns[22]] # Last column\n# Columns between indexes 1 to 22\nfeatures = trainingSet[trainingSet.columns[1:22]]\n\n#pd.set_option('display.max_columns', 23)\n# print(features)\n\n# SVM classifier\nclassifier = svm.SVC(kernel='linear')\n\nscores = cross_val_score(classifier, features, classes, cv=5)\n\nprint(scores)\n\nprint(\"Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))\n" ]
[ [ "sklearn.naive_bayes.GaussianNB", "sklearn.model_selection.cross_val_score" ], [ "sklearn.neural_network.MLPClassifier" ], [ "sklearn.model_selection.cross_val_score", "sklearn.svm.SVC" ] ]
hieuhoang/parSentExtract
[ "9e7aa4c0f0f93934d7f6986d655195bf5bd8e03d" ]
[ "train.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport time\n\nfrom six.moves import xrange\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"1\"\n\nimport numpy as np\nimport tensorflow as tf\n\nimport utils\nfrom model import Config, BiRNN\n\n\ntf.flags.DEFINE_string(\"source_train_path\", \"\",\n \"Path to the file containing the source sentences to \"\n \"train the model.\")\n\ntf.flags.DEFINE_string(\"target_train_path\", \"\",\n \"Path to the file containing the target sentences to \"\n \"train the model.\")\n\ntf.flags.DEFINE_string(\"source_valid_path\", \"\",\n \"Path to the file containing the source sentences to \"\n \"evaluate the model.\")\n\ntf.flags.DEFINE_string(\"target_valid_path\", \"\",\n \"Path to the file containing the target sentences to \"\n \"evaluate the model.\")\n\ntf.flags.DEFINE_string(\"checkpoint_dir\", \"./tflogs\",\n \"Directory to save checkpoints and summaries of the model.\")\n\ntf.flags.DEFINE_integer(\"source_vocab_size\", 100000,\n \"Number of the most frequent words to keep in the source \"\n \"vocabulary.\")\n\ntf.flags.DEFINE_integer(\"target_vocab_size\", 100000,\n \"Number of the most frequent words to keep in target \"\n \"vocabulary.\")\n\ntf.flags.DEFINE_float(\"learning_rate\", 2e-4,\n \"Learning rate.\")\n\ntf.flags.DEFINE_float(\"max_gradient_norm\", 5.0,\n \"Clip gradient to this norm.\")\n\ntf.flags.DEFINE_float(\"decision_threshold\", 0.99,\n \"Decision threshold to predict a positive label.\")\n\ntf.flags.DEFINE_integer(\"embedding_size\", 300,\n \"Size of each word embedding.\")\n\ntf.flags.DEFINE_integer(\"state_size\", 300,\n \"Size of the recurrent state in the BiRNN encoder.\")\n\ntf.flags.DEFINE_integer(\"hidden_size\", 128,\n \"Size of the hidden layer in the feed-forward neural \"\n \"network.\")\n\ntf.flags.DEFINE_integer(\"num_layers\", 1,\n \"Number of layers in the BiRNN encoder.\")\n\ntf.flags.DEFINE_string(\"source_embeddings_path\", None,\n \"Pretrained embeddings to initialize the source embeddings \"\n \"matrix.\")\n\ntf.flags.DEFINE_string(\"target_embeddings_path\", None,\n \"Pretrained embeddings to initialize the target embeddings \"\n \"matrix.\")\n\ntf.flags.DEFINE_boolean(\"fix_pretrained\", False,\n \"If true fix pretrained embeddings.\")\n\ntf.flags.DEFINE_boolean(\"use_lstm\", False,\n \"If true use LSTM cells. 
Otherwise use GRU cells.\")\n\ntf.flags.DEFINE_boolean(\"use_mean_pooling\", False,\n \"If true use mean pooling for final sentence representation.\")\n\ntf.flags.DEFINE_boolean(\"use_max_pooling\", False,\n \"If true use max pooling for final sentence representation.\")\n\ntf.flags.DEFINE_integer(\"batch_size\", 128,\n \"Batch size to use during training.\")\n\ntf.flags.DEFINE_integer(\"num_epochs\", 15,\n \"Number of epochs to train the model.\")\n\ntf.flags.DEFINE_integer(\"num_negative\", 5,\n \"Number of negative examples to sample per pair of \"\n \"parallel sentences in training dataset.\")\n\ntf.flags.DEFINE_float(\"keep_prob_input\", 0.8,\n \"Keep probability for dropout applied at the embedding layer.\")\n\ntf.flags.DEFINE_float(\"keep_prob_output\", 0.7,\n \"Keep probability for dropout applied at the prediction layer.\")\n\ntf.flags.DEFINE_integer(\"steps_per_checkpoint\", 200,\n \"Number of steps to save a model checkpoint.\")\n\n\nFLAGS = tf.flags.FLAGS\n\n\ndef eval_epoch(sess, model, data_iterator, summary_writer):\n \"\"\"Evaluate model for one epoch.\"\"\"\n sess.run(tf.local_variables_initializer())\n num_iter = int(np.ceil(data_iterator.size / FLAGS.batch_size))\n epoch_loss = 0\n for step in xrange(num_iter):\n source, target, label = data_iterator.next_batch(FLAGS.batch_size)\n source_len = utils.sequence_length(source)\n target_len = utils.sequence_length(target)\n feed_dict = {model.x_source: source,\n model.x_target: target,\n model.labels: label,\n model.source_seq_length: source_len,\n model.target_seq_length: target_len,\n model.decision_threshold: FLAGS.decision_threshold}\n loss_value, epoch_accuracy,\\\n epoch_precision, epoch_recall = sess.run([model.mean_loss,\n model.accuracy[1],\n model.precision[1],\n model.recall[1]],\n feed_dict=feed_dict)\n epoch_loss += loss_value\n if step % FLAGS.steps_per_checkpoint == 0:\n summary = sess.run(model.summaries, feed_dict=feed_dict)\n summary_writer.add_summary(summary, global_step=data_iterator.global_step)\n epoch_loss /= step\n epoch_f1 = utils.f1_score(epoch_precision, epoch_recall)\n print(\" Testing: Loss = {:.6f}, Accuracy = {:.4f}, \"\n \"Precision = {:.4f}, Recall = {:.4f}, F1 = {:.4f}\"\n .format(epoch_loss, epoch_accuracy,\n epoch_precision, epoch_recall, epoch_f1))\n\n\ndef main(_):\n assert FLAGS.source_train_path, (\"--source_train_path is required.\")\n assert FLAGS.target_train_path, (\"--target_train_path is required.\")\n assert FLAGS.source_valid_path, (\"--source_valid_path is required.\")\n assert FLAGS.target_valid_path, (\"--target_valid_path is required.\")\n\n # Create vocabularies.\n source_vocab_path = os.path.join(os.path.dirname(FLAGS.source_train_path),\n \"vocabulary.source\")\n target_vocab_path = os.path.join(os.path.dirname(FLAGS.source_train_path),\n \"vocabulary.target\")\n utils.create_vocabulary(source_vocab_path, FLAGS.source_train_path, FLAGS.source_vocab_size)\n utils.create_vocabulary(target_vocab_path, FLAGS.target_train_path, FLAGS.target_vocab_size)\n\n # Read vocabularies.\n source_vocab, rev_source_vocab = utils.initialize_vocabulary(source_vocab_path)\n #print(\"source_vocab\", source_vocab)\n #print(\"rev_source_vocab\", rev_source_vocab)\n\n target_vocab, rev_target_vocab = utils.initialize_vocabulary(target_vocab_path)\n #print(\"target_vocab\", target_vocab)\n\n # Read parallel sentences.\n parallel_data = utils.read_data(FLAGS.source_train_path, FLAGS.target_train_path,\n source_vocab, target_vocab)\n print(\"parallel_data\", type(parallel_data), 
len(parallel_data))\n print(\"parallel_data[0]\", type(parallel_data[0]), len(parallel_data[0]), parallel_data[0])\n\n # Read validation data set.\n if FLAGS.source_valid_path and FLAGS.target_valid_path:\n valid_data = utils.read_data(FLAGS.source_valid_path, FLAGS.target_valid_path,\n source_vocab, target_vocab)\n\n # Initialize BiRNN.\n config = Config(len(source_vocab),\n len(target_vocab),\n FLAGS.embedding_size,\n FLAGS.state_size,\n FLAGS.hidden_size,\n FLAGS.num_layers,\n FLAGS.learning_rate,\n FLAGS.max_gradient_norm,\n FLAGS.use_lstm,\n FLAGS.use_mean_pooling,\n FLAGS.use_max_pooling,\n FLAGS.source_embeddings_path,\n FLAGS.target_embeddings_path,\n FLAGS.fix_pretrained)\n\n model = BiRNN(config)\n\n # Build graph.\n model.build_graph()\n\n # Train model.\n with tf.Session() as sess:\n\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n\n train_iterator = utils.TrainingIterator(parallel_data, FLAGS.num_negative)\n train_summary_writer = tf.summary.FileWriter(os.path.join(FLAGS.checkpoint_dir, \"train\"), sess.graph)\n\n if FLAGS.source_valid_path and FLAGS.target_valid_path:\n valid_iterator = utils.EvalIterator(valid_data)\n valid_summary_writer = tf.summary.FileWriter(os.path.join(FLAGS.checkpoint_dir, \"valid\"), sess.graph)\n\n epoch_loss = 0\n epoch_completed = 0\n batch_completed = 0\n\n num_iter = int(np.ceil(train_iterator.size / FLAGS.batch_size * FLAGS.num_epochs))\n start_time = time.time()\n print(\"Training model on {} sentence pairs per epoch:\".\n format(train_iterator.size, valid_iterator.size))\n\n for step in xrange(num_iter):\n source, target, label = train_iterator.next_batch(FLAGS.batch_size)\n source_len = utils.sequence_length(source)\n target_len = utils.sequence_length(target)\n feed_dict = {model.x_source: source,\n model.x_target: target,\n model.labels: label,\n model.source_seq_length: source_len,\n model.target_seq_length: target_len,\n model.input_dropout: FLAGS.keep_prob_input,\n model.output_dropout: FLAGS.keep_prob_output,\n model.decision_threshold: FLAGS.decision_threshold}\n\n _, loss_value, epoch_accuracy,\\\n epoch_precision, epoch_recall = sess.run([model.train_op,\n model.mean_loss,\n model.accuracy[1],\n model.precision[1],\n model.recall[1]],\n feed_dict=feed_dict)\n epoch_loss += loss_value\n batch_completed += 1\n # Write the model's training summaries.\n if step % FLAGS.steps_per_checkpoint == 0:\n summary = sess.run(model.summaries, feed_dict=feed_dict)\n train_summary_writer.add_summary(summary, global_step=step)\n # End of current epoch.\n if train_iterator.epoch_completed > epoch_completed:\n epoch_time = time.time() - start_time\n epoch_loss /= batch_completed\n epoch_f1 = utils.f1_score(epoch_precision, epoch_recall)\n epoch_completed += 1\n print(\"Epoch {} in {:.0f} sec\\n\"\n \" Training: Loss = {:.6f}, Accuracy = {:.4f}, \"\n \"Precision = {:.4f}, Recall = {:.4f}, F1 = {:.4f}\"\n .format(epoch_completed, epoch_time,\n epoch_loss, epoch_accuracy,\n epoch_precision, epoch_recall, epoch_f1))\n # Save a model checkpoint.\n checkpoint_path = os.path.join(FLAGS.checkpoint_dir, \"model.ckpt\")\n model.saver.save(sess, checkpoint_path, global_step=step)\n # Evaluate model on the validation set.\n if FLAGS.source_valid_path and FLAGS.target_valid_path:\n eval_epoch(sess, model, valid_iterator, valid_summary_writer)\n # Initialize local variables for new epoch.\n batch_completed = 0\n epoch_loss = 0\n sess.run(tf.local_variables_initializer())\n start_time = time.time()\n\n print(\"Training 
done with {} steps.\".format(num_iter))\n train_summary_writer.close()\n valid_summary_writer.close()\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n" ]
[ [ "tensorflow.flags.DEFINE_boolean", "tensorflow.local_variables_initializer", "tensorflow.flags.DEFINE_string", "numpy.ceil", "tensorflow.global_variables_initializer", "tensorflow.Session", "tensorflow.flags.DEFINE_float", "tensorflow.flags.DEFINE_integer", "tensorflow.app.run" ] ]
jamesrosstwo/idr-dif
[ "e900af5f440b943a7a46134a5afe7a81dd888a05", "e900af5f440b943a7a46134a5afe7a81dd888a05", "e900af5f440b943a7a46134a5afe7a81dd888a05" ]
[ "code/utils/general.py", "code/utils/plots.py", "code/training/exp_storage.py" ]
[ "import os\nfrom glob import glob\nfrom pathlib import Path\n\nimport torch\n\n\n_root_dir = os.path.dirname(os.path.abspath(__file__))\nROOT_PATH = Path(_root_dir).parent.parent\n\ndef mkdir_ifnotexists(directory):\n if not os.path.exists(directory):\n os.mkdir(directory)\n\ndef get_class(kls):\n parts = kls.split('.')\n module = \".\".join(parts[:-1])\n m = __import__(module)\n for comp in parts[1:]:\n m = getattr(m, comp)\n return m\n\ndef glob_imgs(path):\n imgs = []\n for ext in ['*.png', '*.jpg', '*.JPEG', '*.JPG']:\n imgs.extend(glob(os.path.join(path, ext)))\n return imgs\n\ndef split_input(model_input, total_pixels):\n '''\n Split the input to fit Cuda memory for large resolution.\n Can decrease the value of n_pixels in case of cuda out of memory error.\n '''\n n_pixels = 10000\n split = []\n for i, indx in enumerate(torch.split(torch.arange(total_pixels).cuda(), n_pixels, dim=0)):\n data = model_input.copy()\n data['uv'] = torch.index_select(model_input['uv'], 1, indx)\n data['object_mask'] = torch.index_select(model_input['object_mask'], 1, indx)\n split.append(data)\n return split\n\ndef merge_output(res, total_pixels, batch_size):\n ''' Merge the split output. '''\n\n model_outputs = {}\n for entry in res[0]:\n if res[0][entry] is None:\n continue\n if len(res[0][entry].shape) == 1:\n model_outputs[entry] = torch.cat([r[entry].reshape(batch_size, -1, 1) for r in res],\n 1).reshape(batch_size * total_pixels)\n else:\n model_outputs[entry] = torch.cat([r[entry].reshape(batch_size, -1, r[entry].shape[-1]) for r in res],\n 1).reshape(batch_size * total_pixels, -1)\n\n return model_outputs", "import plotly.graph_objs as go\nimport plotly.offline as offline\nimport numpy as np\nimport torch\nfrom skimage import measure\nimport torchvision\nimport trimesh\nfrom PIL import Image\nfrom utils import rend_util\n\n\ndef plot(model, indices, model_outputs, pose, rgb_gt, path, epoch, img_res, plot_nimgs, max_depth, resolution, lat_vec,\n hypo_params):\n # arrange data to plot\n batch_size, num_samples, _ = rgb_gt.shape\n\n network_object_mask = model_outputs['network_object_mask']\n points = model_outputs['points'].reshape(batch_size, num_samples, 3)\n rgb_eval = model_outputs['rgb_values']\n rgb_eval = rgb_eval.reshape(batch_size, num_samples, 3)\n\n depth = torch.ones(batch_size * num_samples).cuda().float() * max_depth\n depth[network_object_mask] = rend_util.get_depth(points, pose).reshape(-1)[network_object_mask]\n depth = depth.reshape(batch_size, num_samples, 1)\n network_object_mask = network_object_mask.reshape(batch_size, -1)\n\n cam_loc, cam_dir = rend_util.get_camera_for_plot(pose)\n\n # plot rendered images\n plot_images(rgb_eval, rgb_gt, path, epoch, plot_nimgs, img_res)\n\n # plot depth maps\n plot_depth_maps(depth, path, epoch, plot_nimgs, img_res)\n\n data = []\n\n def sdf_plot(x):\n return model.implicit_network(x, hypo_params, lat_vec, model.should_deform)[:, 0]\n\n # plot surface\n surface_traces = get_surface_trace(path=path,\n epoch=epoch,\n sdf=sdf_plot,\n resolution=resolution\n )\n try:\n data.append(surface_traces[0])\n except TypeError:\n return\n\n # plot cameras locations\n for i, loc, dir in zip(indices, cam_loc, cam_dir):\n data.append(get_3D_quiver_trace(loc.unsqueeze(0), dir.unsqueeze(0), name='camera_{0}'.format(i)))\n\n for i, p, m in zip(indices, points, network_object_mask):\n p = p[m]\n sampling_idx = torch.randperm(p.shape[0])[:2048]\n p = p[sampling_idx, :]\n\n val = model.implicit_network(p, hypo_params, lat_vec, model.should_deform)\n caption = 
[\"sdf: {0} \".format(v[0].item()) for v in val]\n\n data.append(get_3D_scatter_trace(p.detach(), name='intersection_points_{0}'.format(i), caption=caption))\n\n fig = go.Figure(data=data)\n scene_dict = dict(xaxis=dict(range=[-3, 3], autorange=False),\n yaxis=dict(range=[-3, 3], autorange=False),\n zaxis=dict(range=[-3, 3], autorange=False),\n aspectratio=dict(x=1, y=1, z=1))\n fig.update_layout(scene=scene_dict, width=1400, height=1400, showlegend=True)\n filename = '{0}/surface_{1}.html'.format(path, epoch)\n offline.plot(fig, filename=filename, auto_open=False)\n\n\ndef get_3D_scatter_trace(points, name='', size=3, caption=None):\n assert points.shape[1] == 3, \"3d scatter plot input points are not correctely shaped \"\n assert len(points.shape) == 2, \"3d scatter plot input points are not correctely shaped \"\n\n trace = go.Scatter3d(\n x=points[:, 0].cpu(),\n y=points[:, 1].cpu(),\n z=points[:, 2].cpu(),\n mode='markers',\n name=name,\n marker=dict(\n size=size,\n line=dict(\n width=2,\n ),\n opacity=1.0,\n ), text=caption)\n\n return trace\n\n\ndef get_3D_quiver_trace(points, directions, color='#bd1540', name=''):\n assert points.shape[1] == 3, \"3d cone plot input points are not correctely shaped \"\n assert len(points.shape) == 2, \"3d cone plot input points are not correctely shaped \"\n assert directions.shape[1] == 3, \"3d cone plot input directions are not correctely shaped \"\n assert len(directions.shape) == 2, \"3d cone plot input directions are not correctely shaped \"\n\n trace = go.Cone(\n name=name,\n x=points[:, 0].cpu(),\n y=points[:, 1].cpu(),\n z=points[:, 2].cpu(),\n u=directions[:, 0].cpu(),\n v=directions[:, 1].cpu(),\n w=directions[:, 2].cpu(),\n sizemode='absolute',\n sizeref=0.125,\n showscale=False,\n colorscale=[[0, color], [1, color]],\n anchor=\"tail\"\n )\n\n return trace\n\n\ndef get_surface_trace(path, epoch, sdf, resolution=100, return_mesh=False):\n grid = get_grid_uniform(resolution)\n points = grid['grid_points']\n\n z = []\n for i, pnts in enumerate(torch.split(points, 100000, dim=0)):\n z.append(sdf(pnts).detach().cpu().numpy())\n z = np.concatenate(z, axis=0)\n\n if (not (np.min(z) > 0 or np.max(z) < 0)):\n\n z = z.astype(np.float32)\n\n try:\n verts, faces, normals, values = measure.marching_cubes_lewiner(\n volume=z.reshape(grid['xyz'][1].shape[0], grid['xyz'][0].shape[0],\n grid['xyz'][2].shape[0]).transpose([1, 0, 2]),\n level=0,\n spacing=(grid['xyz'][0][2] - grid['xyz'][0][1],\n grid['xyz'][0][2] - grid['xyz'][0][1],\n grid['xyz'][0][2] - grid['xyz'][0][1]))\n except RuntimeError:\n return None\n\n verts = verts + np.array([grid['xyz'][0][0], grid['xyz'][1][0], grid['xyz'][2][0]])\n\n I, J, K = faces.transpose()\n\n traces = [go.Mesh3d(x=verts[:, 0], y=verts[:, 1], z=verts[:, 2],\n i=I, j=J, k=K, name='implicit_surface',\n opacity=1.0)]\n\n meshexport = trimesh.Trimesh(verts, faces, normals)\n meshexport.export('{0}/surface_{1}.ply'.format(path, epoch), 'ply')\n\n if return_mesh:\n return meshexport\n return traces\n return None\n\n\ndef get_surface_high_res_mesh(sdf, resolution=100):\n # get low res mesh to sample point cloud\n grid = get_grid_uniform(100)\n z = []\n points = grid['grid_points']\n\n for i, pnts in enumerate(torch.split(points, 100000, dim=0)):\n z.append(sdf(pnts).detach().cpu().numpy())\n z = np.concatenate(z, axis=0)\n\n z = z.astype(np.float32)\n\n verts, faces, normals, values = measure.marching_cubes_lewiner(\n volume=z.reshape(grid['xyz'][1].shape[0], grid['xyz'][0].shape[0],\n grid['xyz'][2].shape[0]).transpose([1, 
0, 2]),\n level=0,\n spacing=(grid['xyz'][0][2] - grid['xyz'][0][1],\n grid['xyz'][0][2] - grid['xyz'][0][1],\n grid['xyz'][0][2] - grid['xyz'][0][1]))\n\n verts = verts + np.array([grid['xyz'][0][0], grid['xyz'][1][0], grid['xyz'][2][0]])\n\n mesh_low_res = trimesh.Trimesh(verts, faces, normals)\n components = mesh_low_res.split(only_watertight=False)\n areas = np.array([c.area for c in components], dtype=np.float)\n mesh_low_res = components[areas.argmax()]\n\n recon_pc = trimesh.sample.sample_surface(mesh_low_res, 10000)[0]\n recon_pc = torch.from_numpy(recon_pc).float().cuda()\n\n # Center and align the recon pc\n s_mean = recon_pc.mean(dim=0)\n s_cov = recon_pc - s_mean\n s_cov = torch.mm(s_cov.transpose(0, 1), s_cov)\n vecs = torch.eig(s_cov, True)[1].transpose(0, 1)\n if torch.det(vecs) < 0:\n vecs = torch.mm(torch.tensor([[1, 0, 0], [0, 0, 1], [0, 1, 0]]).cuda().float(), vecs)\n helper = torch.bmm(vecs.unsqueeze(0).repeat(recon_pc.shape[0], 1, 1),\n (recon_pc - s_mean).unsqueeze(-1)).squeeze()\n\n grid_aligned = get_grid(helper.cpu(), resolution)\n\n grid_points = grid_aligned['grid_points']\n\n g = []\n for i, pnts in enumerate(torch.split(grid_points, 100000, dim=0)):\n g.append(torch.bmm(vecs.unsqueeze(0).repeat(pnts.shape[0], 1, 1).transpose(1, 2),\n pnts.unsqueeze(-1)).squeeze() + s_mean)\n grid_points = torch.cat(g, dim=0)\n\n # MC to new grid\n points = grid_points\n z = []\n for i, pnts in enumerate(torch.split(points, 100000, dim=0)):\n z.append(sdf(pnts).detach().cpu().numpy())\n z = np.concatenate(z, axis=0)\n\n meshexport = None\n if (not (np.min(z) > 0 or np.max(z) < 0)):\n z = z.astype(np.float32)\n\n verts, faces, normals, values = measure.marching_cubes_lewiner(\n volume=z.reshape(grid_aligned['xyz'][1].shape[0], grid_aligned['xyz'][0].shape[0],\n grid_aligned['xyz'][2].shape[0]).transpose([1, 0, 2]),\n level=0,\n spacing=(grid_aligned['xyz'][0][2] - grid_aligned['xyz'][0][1],\n grid_aligned['xyz'][0][2] - grid_aligned['xyz'][0][1],\n grid_aligned['xyz'][0][2] - grid_aligned['xyz'][0][1]))\n\n verts = torch.from_numpy(verts).cuda().float()\n verts = torch.bmm(vecs.unsqueeze(0).repeat(verts.shape[0], 1, 1).transpose(1, 2),\n verts.unsqueeze(-1)).squeeze()\n verts = (verts + grid_points[0]).cpu().numpy()\n\n meshexport = trimesh.Trimesh(verts, faces, normals)\n\n return meshexport\n\n\ndef get_grid_uniform(resolution):\n x = np.linspace(-1.0, 1.0, resolution)\n y = x\n z = x\n\n xx, yy, zz = np.meshgrid(x, y, z)\n grid_points = torch.tensor(np.vstack([xx.ravel(), yy.ravel(), zz.ravel()]).T, dtype=torch.float)\n\n return {\"grid_points\": grid_points.cuda(),\n \"shortest_axis_length\": 2.0,\n \"xyz\": [x, y, z],\n \"shortest_axis_index\": 0}\n\n\ndef get_grid(points, resolution):\n eps = 0.2\n input_min = torch.min(points, dim=0)[0].squeeze().numpy()\n input_max = torch.max(points, dim=0)[0].squeeze().numpy()\n\n bounding_box = input_max - input_min\n shortest_axis = np.argmin(bounding_box)\n if (shortest_axis == 0):\n x = np.linspace(input_min[shortest_axis] - eps,\n input_max[shortest_axis] + eps, resolution)\n length = np.max(x) - np.min(x)\n y = np.arange(input_min[1] - eps, input_max[1] + length / (x.shape[0] - 1) + eps, length / (x.shape[0] - 1))\n z = np.arange(input_min[2] - eps, input_max[2] + length / (x.shape[0] - 1) + eps, length / (x.shape[0] - 1))\n elif (shortest_axis == 1):\n y = np.linspace(input_min[shortest_axis] - eps,\n input_max[shortest_axis] + eps, resolution)\n length = np.max(y) - np.min(y)\n x = np.arange(input_min[0] - eps, input_max[0] + 
length / (y.shape[0] - 1) + eps, length / (y.shape[0] - 1))\n z = np.arange(input_min[2] - eps, input_max[2] + length / (y.shape[0] - 1) + eps, length / (y.shape[0] - 1))\n elif (shortest_axis == 2):\n z = np.linspace(input_min[shortest_axis] - eps,\n input_max[shortest_axis] + eps, resolution)\n length = np.max(z) - np.min(z)\n x = np.arange(input_min[0] - eps, input_max[0] + length / (z.shape[0] - 1) + eps, length / (z.shape[0] - 1))\n y = np.arange(input_min[1] - eps, input_max[1] + length / (z.shape[0] - 1) + eps, length / (z.shape[0] - 1))\n\n xx, yy, zz = np.meshgrid(x, y, z)\n grid_points = torch.tensor(np.vstack([xx.ravel(), yy.ravel(), zz.ravel()]).T, dtype=torch.float).cuda()\n return {\"grid_points\": grid_points,\n \"shortest_axis_length\": length,\n \"xyz\": [x, y, z],\n \"shortest_axis_index\": shortest_axis}\n\n\ndef plot_depth_maps(depth_maps, path, epoch, plot_nrow, img_res):\n depth_maps_plot = lin2img(depth_maps, img_res)\n\n tensor = torchvision.utils.make_grid(depth_maps_plot.repeat(1, 3, 1, 1),\n scale_each=True,\n normalize=True,\n nrow=plot_nrow).cpu().detach().numpy()\n tensor = tensor.transpose(1, 2, 0)\n scale_factor = 255\n tensor = (tensor * scale_factor).astype(np.uint8)\n\n img = Image.fromarray(tensor)\n img.save('{0}/depth_{1}.png'.format(path, epoch))\n\n\ndef plot_images(rgb_points, ground_true, path, epoch, plot_nrow, img_res):\n ground_true = (ground_true.cuda() + 1.) / 2.\n rgb_points = (rgb_points + 1.) / 2.\n\n output_vs_gt = torch.cat((rgb_points, ground_true), dim=0)\n output_vs_gt_plot = lin2img(output_vs_gt, img_res)\n\n tensor = torchvision.utils.make_grid(output_vs_gt_plot,\n scale_each=False,\n normalize=False,\n nrow=plot_nrow).cpu().detach().numpy()\n\n tensor = tensor.transpose(1, 2, 0)\n scale_factor = 255\n tensor = (tensor * scale_factor).astype(np.uint8)\n\n img = Image.fromarray(tensor)\n img.save('{0}/rendering_{1}.png'.format(path, epoch))\n\n\ndef lin2img(tensor, img_res):\n batch_size, num_samples, channels = tensor.shape\n return tensor.permute(0, 2, 1).view(batch_size, channels, img_res[0], img_res[1])\n", "import pickle\nfrom abc import ABC, abstractmethod\nfrom pathlib import Path\nfrom typing import Any, List, Union, Dict\n\nimport pandas as pd\nimport torch\n\n\nclass DataCache(ABC):\n def __init__(self):\n self._entries: Dict[str, List[Any]] = dict()\n\n def add_entry(self, key: str, val: Any):\n if key not in self._entries:\n self._entries[key] = list()\n self._entries[key].append(val)\n\n @abstractmethod\n def peek_all(self):\n pass\n\n def pop_all(self) -> Dict[str, Any]:\n out = self.peek_all()\n self._entries = dict()\n return out\n\n\nclass TensorStackCache(DataCache):\n def __init__(self, cuda=True):\n self._stack_fn = lambda v: torch.stack(v)\n if cuda:\n self._stack_fn = lambda v: torch.stack(v).cuda()\n super().__init__()\n\n def add_entry(self, key: str, val: torch.Tensor):\n assert isinstance(val, torch.Tensor)\n super().add_entry(key, val)\n\n def peek_all(self) -> Dict[str, torch.Tensor]:\n return {k: self._stack_fn(v) for k, v in self._entries.items()}\n\n\nclass AvgCache(DataCache):\n def add_entry(self, key: str, val: Union[float, int, torch.Tensor]):\n assert isinstance(val, float) or isinstance(val, int) or isinstance(val, torch.Tensor)\n super().add_entry(key, val)\n\n def peek_all(self):\n return {k: sum(v) / len(v) for k, v in self._entries.items()}\n\nclass ExpStorage:\n def __init__(self, out_location: Path, entries=None):\n self.loc: Path = out_location\n self._entries = dict() if entries is None 
else entries\n self.metadata = dict()\n self._cache = AvgCache()\n\n @classmethod\n def load(cls, load_path: Path):\n with open(str(load_path), \"rb\") as handle:\n return cls(load_path, pickle.load(handle))\n\n @property\n def keys(self):\n return list(self._entries.keys())\n\n def cache(self, key: str, val: float):\n self._cache.add_entry(key, val)\n\n def store(self, key: str, entry: Any):\n if key not in self._entries:\n self._entries[key] = list()\n self._entries[key].append(entry)\n\n def get_all(self, *keys) -> Union[List[Any], Dict[str, List[Any]]]:\n assert len(keys) > 0\n assert all([key in self._entries for key in keys])\n if len(keys) == 1:\n return self._entries[keys[0]]\n return {key: self._entries[key] for key in keys}\n\n def get_latest(self, *keys):\n if len(keys) == 1:\n return self.get_all(*keys)[-1]\n return {k: v[-1] for k, v in self.get_all(*keys).items()}\n\n def get_latest_with_default(self, key, default=None):\n if key in self.keys:\n return self._entries[key][-1]\n return default\n\n def to_df(self, *keys) -> pd.DataFrame:\n return pd.DataFrame.from_dict({k: self._entries[k] for k in keys})\n\n def delete_after(self, idx: int, *keys):\n assert all([key in self._entries for key in keys])\n for key in keys:\n self._entries[key] = self._entries[key][:idx]\n\n def pop_cache(self):\n for k, v in self._cache.pop_all().items():\n self.store(k, v)\n\n def peek_cache(self):\n for k, v in self._cache.peek_all().items():\n self.store(k, v)\n\n def change_path(self, new_path: Path):\n self.loc = new_path\n\n def save(self):\n with open(str(self.loc), \"wb\") as handle:\n pickle.dump(self._entries, handle)\n" ]
[ [ "torch.index_select", "torch.arange" ], [ "torch.ones", "torch.max", "numpy.linspace", "torch.cat", "torch.randperm", "torch.det", "numpy.arange", "numpy.min", "torch.min", "torch.from_numpy", "torch.tensor", "numpy.concatenate", "numpy.max", "numpy.argmin", "torch.eig", "torch.split", "numpy.array", "numpy.meshgrid" ], [ "torch.stack", "pandas.DataFrame.from_dict" ] ]
usimarit/selfattention-segan
[ "563a86e825f1e4067ec1fd3bed36e89e11434388" ]
[ "sasegan/datasets/test_dataset.py" ]
[ "# Copyright 2020 Huy Le Nguyen (@usimarit)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport tensorflow as tf\n\nfrom tensorflow_asr.featurizers.speech_featurizers import read_raw_audio\n\nfrom .train_dataset import SeganAugTrainDataset, SeganTrainDataset\nfrom ..featurizers.speech_featurizer import SpeechFeaturizer\n\n\nclass SeganAugTestDataset(SeganAugTrainDataset):\n def __init__(self,\n speech_featurizer: SpeechFeaturizer,\n clean_dir: str,\n noises_config: dict):\n super(SeganAugTestDataset, self).__init__(\n stage=\"test\", speech_featurizer=speech_featurizer, clean_dir=clean_dir, noises_config=noises_config)\n\n def parse(self, clean_wav):\n noisy_wav = self.noises.augment(clean_wav)\n noisy_slices = self.speech_featurizer.extract(noisy_wav)\n clean_slices = self.speech_featurizer.extract(clean_wav)\n return clean_slices, noisy_slices\n\n def create(self):\n def _gen_data():\n for clean_wav_path in self.data_paths:\n clean_wav = read_raw_audio(clean_wav_path, sample_rate=self.speech_featurizer.sample_rate)\n clean_slices, noisy_slices = self.parse(clean_wav)\n yield clean_wav_path, clean_slices, noisy_slices\n\n dataset = tf.data.Dataset.from_generator(\n _gen_data,\n output_types=(tf.string, tf.float32),\n output_shapes=(\n tf.TensorShape([]),\n tf.TensorShape([None, *self.speech_featurizer.shape]),\n tf.TensorShape([None, *self.speech_featurizer.shape])\n )\n )\n # Prefetch to improve speed of input length\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n return dataset\n\n\nclass SeganTestDataset(SeganTrainDataset):\n def __init__(self,\n speech_featurizer: SpeechFeaturizer,\n clean_dir: str,\n noisy_dir: str):\n super(SeganTestDataset, self).__init__(\n stage=\"test\", speech_featurizer=speech_featurizer, clean_dir=clean_dir, noisy_dir=noisy_dir)\n\n def parse(self, clean_wav, noisy_wav):\n clean_slices = self.speech_featurizer.extract(clean_wav)\n noisy_slices = self.speech_featurizer.extract(noisy_wav)\n return clean_slices, noisy_slices\n\n def create(self):\n def _gen_data():\n for clean_wav_path in self.data_paths:\n clean_wav = read_raw_audio(clean_wav_path, sample_rate=self.speech_featurizer.sample_rate)\n noisy_wav_path = clean_wav_path.replace(self.clean_dir, self.noisy_dir)\n noisy_wav = read_raw_audio(noisy_wav_path, sample_rate=self.speech_featurizer.sample_rate)\n clean_slices, noisy_slices = self.parse(clean_wav, noisy_wav)\n yield clean_wav_path, clean_slices, noisy_slices\n\n dataset = tf.data.Dataset.from_generator(\n _gen_data,\n output_types=(tf.string, tf.float32),\n output_shapes=(\n tf.TensorShape([]),\n tf.TensorShape([None, *self.speech_featurizer.shape]),\n tf.TensorShape([None, *self.speech_featurizer.shape])\n )\n )\n # Prefetch to improve speed of input length\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n return dataset\n" ]
[ [ "tensorflow.TensorShape" ] ]
mwussow/pytorch_geometric
[ "01c68f9b58c94d9efd1f6e39b9c85177aae521bb", "01c68f9b58c94d9efd1f6e39b9c85177aae521bb" ]
[ "test/nn/conv/test_sg_conv.py", "test/nn/glob/test_sort.py" ]
[ "import torch\nfrom torch_geometric.nn import SGConv\n\n\ndef test_sg_conv():\n in_channels, out_channels = (16, 32)\n edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])\n num_nodes = edge_index.max().item() + 1\n x = torch.randn((num_nodes, in_channels))\n\n conv = SGConv(in_channels, out_channels, K=10, cached=True)\n assert conv.__repr__() == 'SGConv(16, 32, K=10)'\n assert conv(x, edge_index).size() == (num_nodes, out_channels)\n assert conv(x, edge_index).size() == (num_nodes, out_channels)\n\n conv = SGConv(in_channels, out_channels, K=10, cached=False)\n assert conv.__repr__() == 'SGConv(16, 32, K=10)'\n assert conv(x, edge_index).size() == (num_nodes, out_channels)\n assert conv(x, edge_index).size() == (num_nodes, out_channels)\n", "import torch\nfrom torch_geometric.nn import global_sort_pool\n\n\ndef test_global_sort_pool():\n N_1, N_2 = 4, 6\n x = torch.randn(N_1 + N_2, 4)\n batch = torch.tensor([0 for _ in range(N_1)] + [1 for _ in range(N_2)])\n\n out = global_sort_pool(x, batch, k=5)\n assert out.size() == (2, 5 * 4)\n out = out.view(2, 5, 4)\n\n # Features are individually sorted.\n expected = torch.arange(4).view(1, 1, 4).expand_as(out)\n assert out.argsort(dim=2).tolist() == expected.tolist()\n\n # First graph output has been filled up with zeros.\n assert out[0, -1].tolist() == [0, 0, 0, 0]\n\n # Nodes are sorted.\n expected = 3 - torch.arange(4)\n assert out[0, :4, -1].argsort().tolist() == expected.tolist()\n\n expected = 4 - torch.arange(5)\n assert out[1, :, -1].argsort().tolist() == expected.tolist()\n\n\ndef test_global_sort_pool_smaller_than_k():\n N_1, N_2 = 4, 6\n x = torch.randn(N_1 + N_2, 4)\n batch = torch.tensor([0 for _ in range(N_1)] + [1 for _ in range(N_2)])\n\n # Set k which is bigger than both N_1=4 and N_2=6.\n out = global_sort_pool(x, batch, k=10)\n assert out.size() == (2, 10 * 4)\n out = out.view(2, 10, 4)\n\n # Features are individually sorted.\n expected = torch.arange(4).view(1, 1, 4).expand_as(out)\n assert out.argsort(dim=2).tolist() == expected.tolist()\n\n # Both graph outputs have been filled up with zeros.\n assert out[0, -1].tolist() == [0, 0, 0, 0]\n assert out[1, -1].tolist() == [0, 0, 0, 0]\n\n # Nodes are sorted.\n expected = 3 - torch.arange(4)\n assert out[0, :4, -1].argsort().tolist() == expected.tolist()\n\n expected = 5 - torch.arange(6)\n assert out[1, :6, -1].argsort().tolist() == expected.tolist()\n" ]
[ [ "torch.randn", "torch.tensor" ], [ "torch.randn", "torch.arange" ] ]
Hung-Jia-Jun/yolo_keras_camera_realtime
[ "d74ea9a95ed625337765f4fea9e6f8881ee0a9cf" ]
[ "test_yolo_2.py" ]
[ "#! /usr/bin/env python\n\"\"\"Run a YOLO_v2 style detection model on test images.\"\"\"\nimport argparse\nimport colorsys\nimport imghdr\nimport os\nimport random\n\nimport numpy as np\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom PIL import Image, ImageDraw, ImageFont\n\nfrom yad2k.models.keras_yolo import yolo_eval, yolo_head\n\n# parser = argparse.ArgumentParser(\n# description='Run a YOLO_v2 style detection model on test images..')\n# parser.add_argument(\n# 'model_path',\n# help='path to h5 model file containing body of a YOLO_v2 model',\n# default = \"model_data\\\\yolo.h5\"\n# )\n# parser.add_argument(\n# '-a',\n# '--anchors_path',\n# help='path to anchors file, defaults to yolo_anchors.txt',\n# default='model_data/yolo_anchors.txt')\n# parser.add_argument(\n# '-c',\n# '--classes_path',\n# help='path to classes file, defaults to coco_classes.txt',\n# default='model_data/coco_classes.txt')\n# parser.add_argument(\n# '-t',\n# '--test_path',\n# help='path to directory of test images, defaults to images/',\n# default='images')\n# parser.add_argument(\n# '-o',\n# '--output_path',\n# help='path to output test images, defaults to images/out',\n# default='images\\\\out')\n# parser.add_argument(\n# '-s',\n# '--score_threshold',\n# type=float,\n# help='threshold for bounding box scores, default .3',\n# default=.3)\n# parser.add_argument(\n# '-iou',\n# '--iou_threshold',\n# type=float,\n# help='threshold for non max suppression IOU, default .5',\n# default=.5)\n\n\ndef _main():\n score_threshold = .3\n iou_threshold = .5\n model_path = \"model_data\\\\yolo.h5\"\n# model_path = os.path.expanduser(args.model_path)\n assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'\n \n anchors_path = \"model_data/yolo_anchors.txt\"\n# anchors_path = os.path.expanduser(args.anchors_path)\n\n# classes_path = os.path.expanduser(args.classes_path)\n classes_path = 'model_data/coco_classes.txt'\n# test_path = os.path.expanduser(args.test_path)\n test_path = 'images'\n# output_path = os.path.expanduser(args.output_path)\n output_path = \"images\\out\"\n if not os.path.exists(output_path):\n print('Creating output path {}'.format(output_path))\n os.mkdir(output_path)\n\n sess = K.get_session() # TODO: Remove dependence on Tensorflow session.\n\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n\n with open(anchors_path) as f:\n anchors = f.readline()\n anchors = [float(x) for x in anchors.split(',')]\n anchors = np.array(anchors).reshape(-1, 2)\n\n yolo_model = load_model(model_path)\n\n # Verify model, anchors, and classes are compatible\n num_classes = len(class_names)\n num_anchors = len(anchors)\n # TODO: Assumes dim ordering is channel last\n model_output_channels = yolo_model.layers[-1].output_shape[-1]\n assert model_output_channels == num_anchors * (num_classes + 5), \\\n 'Mismatch between model and given anchor and class sizes. 
' \\\n 'Specify matching anchors and classes with --anchors_path and ' \\\n '--classes_path flags.'\n print('{} model, anchors, and classes loaded.'.format(model_path))\n\n # Check if model is fully convolutional, assuming channel last order.\n model_image_size = yolo_model.layers[0].input_shape[1:3]\n is_fixed_size = model_image_size != (None, None)\n\n # Generate colors for drawing bounding boxes.\n hsv_tuples = [(x / len(class_names), 1., 1.)\n for x in range(len(class_names))]\n colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n colors = list(\n map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),\n colors))\n random.seed(10101) # Fixed seed for consistent colors across runs.\n random.shuffle(colors) # Shuffle colors to decorrelate adjacent classes.\n random.seed(None) # Reset seed to default.\n\n # Generate output tensor targets for filtered bounding boxes.\n # TODO: Wrap these backend operations with Keras layers.\n yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))\n input_image_shape = K.placeholder(shape=(2, ))\n boxes, scores, classes = yolo_eval(\n yolo_outputs,\n input_image_shape,\n score_threshold=score_threshold,\n iou_threshold=iou_threshold)\n\n for image_file in os.listdir(test_path):\n # try:\n # image_type = imghdr.what(os.path.join(test_path, image_file))\n # if not image_type:\n # continue\n # except IsADirectoryError:\n # continue\n\n image = Image.open(os.path.join(test_path, image_file))\n if is_fixed_size: # TODO: When resizing we can use minibatch input.\n resized_image = image.resize(\n tuple(reversed(model_image_size)), Image.BICUBIC)\n image_data = np.array(resized_image, dtype='float32')\n else:\n # Due to skip connection + max pooling in YOLO_v2, inputs must have\n # width and height as multiples of 32.\n new_image_size = (image.width - (image.width % 32),\n image.height - (image.height % 32))\n resized_image = image.resize(new_image_size, Image.BICUBIC)\n image_data = np.array(resized_image, dtype='float32')\n print(image_data.shape)\n\n image_data /= 255.\n image_data = np.expand_dims(image_data, 0) # Add batch dimension.\n\n out_boxes, out_scores, out_classes = sess.run(\n [boxes, scores, classes],\n feed_dict={\n yolo_model.input: image_data,\n input_image_shape: [image.size[1], image.size[0]],\n K.learning_phase(): 0\n })\n print('Found {} boxes for {}'.format(len(out_boxes), image_file))\n\n font = ImageFont.truetype(\n font='font/FiraMono-Medium.otf',\n size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))\n thickness = (image.size[0] + image.size[1]) // 300\n\n for i, c in reversed(list(enumerate(out_classes))):\n predicted_class = class_names[c]\n box = out_boxes[i]\n score = out_scores[i]\n\n label = '{} {:.2f}'.format(predicted_class, score)\n\n draw = ImageDraw.Draw(image)\n label_size = draw.textsize(label, font)\n\n top, left, bottom, right = box\n top = max(0, np.floor(top + 0.5).astype('int32'))\n left = max(0, np.floor(left + 0.5).astype('int32'))\n bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))\n right = min(image.size[0], np.floor(right + 0.5).astype('int32'))\n print(label, (left, top), (right, bottom))\n\n if top - label_size[1] >= 0:\n text_origin = np.array([left, top - label_size[1]])\n else:\n text_origin = np.array([left, top + 1])\n\n # My kingdom for a good redistributable image drawing library.\n for i in range(thickness):\n draw.rectangle(\n [left + i, top + i, right - i, bottom - i],\n outline=colors[c])\n draw.rectangle(\n [tuple(text_origin), 
tuple(text_origin + label_size)],\n fill=colors[c])\n draw.text(text_origin, label, fill=(0, 0, 0), font=font)\n del draw\n\n image.save(os.path.join(output_path, image_file), quality=90)\n sess.close()\n\n\nif __name__ == '__main__':\n _main()\n " ]
[ [ "numpy.array", "numpy.expand_dims", "numpy.floor" ] ]
leonmkim/gym-kuka-mujoco
[ "ed45ae74d10e69f4e51439de2d1d0c0811623b6b", "a8a40bb08a1a1a269a2386ca0d102d62d8384206" ]
[ "gym_kuka_mujoco/controllers/impedance_controller_v2.py", "examples/attic/sweep_learning.py" ]
[ "import os\n\nimport numpy as np\nfrom gym import spaces\nimport mujoco_py\n\nfrom gym_kuka_mujoco.envs.assets import kuka_asset_dir\nfrom gym_kuka_mujoco.utils.quaternion import identity_quat, subQuat, quatAdd, mat2Quat\nfrom gym_kuka_mujoco.utils.kinematics import forwardKinSite, forwardKinJacobianSite\nfrom .base_controller import BaseController\nfrom . import register_controller\nfrom gym_kuka_mujoco.utils.mujoco_utils import get_qpos_indices, get_qvel_indices, get_actuator_indices, get_joint_indices \n\n\nclass ImpedanceControllerV2(BaseController):\n '''\n An inverse dynamics controller that used PD gains to compute a desired acceleration.\n '''\n\n def __init__(self,\n sim,\n pos_scale=1.0,\n rot_scale=0.3,\n pos_limit=1.0,\n rot_limit=1.0,\n model_path='full_kuka_no_collision_no_gravity.xml',\n site_name='ee_site',\n stiffness=None,\n damping='auto',\n null_space_damping=1.0,\n controlled_joints=None,\n in_ee_frame=False):\n super(ImpedanceControllerV2, self).__init__(sim)\n\n # Create a model for control\n print('Controller model imported from: {}'.format(model_path))\n\n model_path = os.path.join(kuka_asset_dir(), model_path)\n self.model = mujoco_py.load_model_from_path(model_path)\n\n self.in_ee_frame = in_ee_frame\n\n # Construct the action space.\n high_pos = pos_limit*np.ones(3)\n low_pos = -high_pos\n\n high_rot = rot_limit*np.ones(3)\n low_rot = -high_rot\n\n high = np.concatenate((high_pos, high_rot))\n low = np.concatenate((low_pos, low_rot))\n self.action_space = spaces.Box(low, high, dtype=np.float32)\n\n # Controller parameters.\n self.scale = np.ones(6)\n self.scale[:3] *= pos_scale\n self.scale[3:6] *= rot_scale\n\n self.site_name = site_name\n self.pos_set = np.zeros(3)\n self.quat_set = identity_quat.copy()\n\n if stiffness is None:\n self.stiffness = np.array([1.0, 1.0, 1.0, 0.3, 0.3, 0.3])\n else:\n self.stiffness = np.ones(6)*stiffness\n\n if damping=='auto':\n self.damping = 2*np.sqrt(self.stiffness)\n else:\n self.damping = np.ones(6)*damping\n\n self.null_space_damping = null_space_damping\n\n # Get the position, velocity, and actuator indices for the model.\n if controlled_joints is not None:\n self.sim_qpos_idx = get_qpos_indices(sim.model, controlled_joints)\n self.sim_qvel_idx = get_qvel_indices(sim.model, controlled_joints)\n self.sim_actuators_idx = get_actuator_indices(sim.model, controlled_joints)\n self.sim_joint_idx = get_joint_indices(sim.model, controlled_joints)\n\n self.self_qpos_idx = get_qpos_indices(self.model, controlled_joints)\n self.self_qvel_idx = get_qvel_indices(self.model, controlled_joints)\n self.self_actuators_idx = get_actuator_indices(self.model, controlled_joints)\n else:\n assert self.model.nv == self.model.nu, \"if the number of degrees of freedom is different than the number of actuators you must specify the controlled_joints\"\n self.sim_qpos_idx = range(self.model.nq)\n self.sim_qvel_idx = range(self.model.nv)\n self.sim_actuators_idx = range(self.model.nu)\n self.sim_joint_idx = range(self.model.nu)\n\n self.self_qpos_idx = range(self.model.nq)\n self.self_qvel_idx = range(self.model.nv)\n self.self_actuators_idx = range(self.model.nu)\n\n def set_action(self, action):\n '''\n Set the setpoint.\n '''\n action = action * self.scale\n\n dx = action[0:3].astype(np.float64)\n dr = action[3:6].astype(np.float64)\n\n # print('dx is: {} and dr is: {}'.format(dx, dr) )\n\n pos, mat = forwardKinSite(self.sim, self.site_name, recompute=False)\n quat = mat2Quat(mat)\n \n if self.in_ee_frame:\n dx = mat.dot(dx)\n\n self.pos_set = 
pos + dx\n self.quat_set = quatAdd(quat, dr)\n\n # print('pos setpoint updated: {}'.format(self.pos_set))\n\n\n def get_torque(self):\n '''\n Update the impedance control setpoint and compute the torque.\n '''\n # Compute the pose difference.\n pos, mat = forwardKinSite(self.sim, self.site_name, recompute=False)\n quat = mat2Quat(mat)\n dx = self.pos_set - pos\n dr = subQuat(self.quat_set, quat) # Original\n dframe = np.concatenate((dx,dr))\n\n # Compute generalized forces from a virtual external force.\n jpos, jrot = forwardKinJacobianSite(self.sim, self.site_name, recompute=False)\n J = np.vstack((jpos[:,self.sim_qvel_idx], jrot[:,self.sim_qvel_idx]))\n cartesian_acc_des = self.stiffness*dframe - self.damping*J.dot(self.sim.data.qvel[self.sim_qvel_idx])\n impedance_acc_des = J.T.dot(np.linalg.solve(J.dot(J.T) + 1e-6*np.eye(6), cartesian_acc_des))\n\n # Add damping in the null space of the the Jacobian\n projection_matrix = J.T.dot(np.linalg.solve(J.dot(J.T), J))\n projection_matrix = np.eye(projection_matrix.shape[0]) - projection_matrix\n null_space_vel = projection_matrix.dot(self.sim.data.qvel[self.sim_qvel_idx])\n impedance_acc_des += -self.null_space_damping*null_space_vel # null space damping\n\n # Cancel other dynamics and add virtual damping using inverse dynamics.\n acc_des = np.zeros(self.sim.model.nv)\n acc_des[self.sim_qvel_idx] = impedance_acc_des\n self.sim.data.qacc[:] = acc_des\n mujoco_py.functions.mj_inverse(self.model, self.sim.data)\n id_torque = self.sim.data.qfrc_inverse[self.sim_actuators_idx].copy()\n \n return id_torque\n\nregister_controller(ImpedanceControllerV2, \"ImpedanceControllerV2\")", "import os \nfrom datetime import datetime\n\nimport numpy as np\n\nimport gym\nimport gym_kuka_mujoco\n\nfrom mpi4py import MPI\n\nfrom stable_baselines import PPO2\nfrom stable_baselines.common import set_global_seeds\nfrom stable_baselines.common.cmd_util import make_mujoco_env\nfrom stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv, VecNormalize\nfrom stable_baselines.common.policies import MlpPolicy, MlpLstmPolicy\n\ndef make_mujoco_env(env_id, seed, allow_early_resets=True):\n \"\"\"\n Create a wrapped, monitored gym.Env for MuJoCo.\n :param env_id: (str) the environment ID\n :param seed: (int) the inital seed for RNG\n :param allow_early_resets: (bool) allows early reset of the environment\n :return: (Gym Environment) The mujoco environment\n \"\"\"\n rank = MPI.COMM_WORLD.Get_rank()\n set_global_seeds(seed + 10000 * rank)\n env = gym.make(env_id)\n env.seed(seed)\n return env\n\nnorm_obs = True\nnorm_reward = False\n\nfor lr in np.logspace(-4.5,-2.5,3):\n # env_name = 'CartPole-v1'\n env_name = 'PegInsertionBigHole-v0'\n num_env = 8\n # num_env = 4\n\n description = 'lr={},norm_obs={},norm_reward={}'.format(lr,norm_obs,norm_reward)\n\n print(description)\n\n date, time = datetime.now().isoformat().split('T')\n tensorboard_logdir = os.path.join(\n os.environ['OPENAI_LOGDIR'],\n date,\n time,\n 'parameter_sweep')\n\n actor_options = {\n 'learning_rate': lr,\n 'gamma':1.,\n 'verbose':0,\n 'n_steps':100,\n 'ent_coef':0.,\n 'max_grad_norm':1e2,\n }\n\n description = ','.join(['{}={}'.format(k,v) for k, v in actor_options.items()])\n description += ',num_env={},norm_obs={},norm_reward={}'.format(num_env, norm_obs, norm_reward)\n\n learning_options = {\n 'total_timesteps': int(1e6)\n }\n\n # Wrap in a try statement to close the environment properly in case of keyboard interrupt.\n try:\n envs = [make_mujoco_env(env_name, 2) for _ in range(num_env)]\n # env 
= DummyVecEnv([lambda: env for env in envs])\n env = SubprocVecEnv([lambda: env for env in envs])\n env = VecNormalize(env, norm_obs=norm_obs, norm_reward=norm_reward)\n \n # Create the actor and learn\n actor_options['tensorboard_log'] = os.path.join(tensorboard_logdir,env_name)\n model = PPO2(MlpPolicy, env, **actor_options)\n # model = PPO2(MlpLstmPolicy, env, **actor_options)\n model.learn(**learning_options, tb_log_name=description)\n finally:\n env.close()" ]
[ [ "numpy.sqrt", "numpy.eye", "numpy.ones", "numpy.concatenate", "numpy.array", "numpy.zeros", "numpy.vstack" ], [ "numpy.logspace" ] ]
surisdi/DPC
[ "ce6fe25938c1bebb7f654d0c8f8479bf92ab4054", "ce6fe25938c1bebb7f654d0c8f8479bf92ab4054" ]
[ "utils/utils.py", "utils/pairwise_hyp_cone.py" ]
[ "import torch\nimport numpy as np\nimport os\nfrom datetime import datetime\nimport glob\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\nfrom collections import deque\nfrom torchvision import transforms\n\n\ndef save_checkpoint(state, is_best=0, gap=1, filename='models/checkpoint.pth.tar', keep_all=False):\n torch.save(state, filename)\n last_epoch_path = os.path.join(os.path.dirname(filename),\n 'epoch%s.pth.tar' % str(state['epoch']-gap))\n if not keep_all:\n try: os.remove(last_epoch_path)\n except: pass\n if is_best:\n past_best = glob.glob(os.path.join(os.path.dirname(filename), 'model_best_*.pth.tar'))\n for i in past_best:\n try: os.remove(i)\n except: pass\n path_best = os.path.join(os.path.dirname(filename), 'model_best_epoch%s.pth.tar' % str(state['epoch']))\n torch.save(state, path_best)\n print(f'Updating best model: {path_best}')\n\n\ndef write_log(content, epoch, filename):\n if not os.path.exists(filename):\n log_file = open(filename, 'w')\n else:\n log_file = open(filename, 'a')\n log_file.write('## Epoch %d:\\n' % epoch)\n log_file.write('time: %s\\n' % str(datetime.now()))\n log_file.write(content + '\\n\\n')\n log_file.close()\n\n\ndef calc_topk_accuracy(output, target, topk=(1,)):\n '''\n Modified from: https://gist.github.com/agermanidis/275b23ad7a10ee89adccf021536bb97e\n Given predicted and ground truth labels, \n calculate top-k accuracies.\n '''\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(1 / batch_size))\n return res\n\n\ndef calc_accuracy(output, target):\n '''output: (B, N); target: (B)'''\n target = target.squeeze()\n _, pred = torch.max(output, 1)\n return torch.mean((pred == target).float())\n\n\ndef calc_accuracy_binary(output, target):\n '''output, target: (B, N), output is logits, before sigmoid '''\n pred = output > 0\n acc = torch.mean((pred == target.byte()).float())\n del pred, output, target\n return acc\n\n\ndef denorm(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):\n assert len(mean)==len(std)==3\n inv_mean = [-mean[i]/std[i] for i in range(3)]\n inv_std = [1/i for i in std]\n return transforms.Normalize(mean=inv_mean, std=inv_std)\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n self.local_history = deque([])\n self.local_avg = 0\n self.history = []\n self.dict = {} # save all data values here\n self.save_dict = {} # save mean and std here, for summary table\n self.avg_expanded = None\n\n def update(self, val, n=1, history=0, step=5):\n is_array = False\n if type(val) == torch.Tensor:\n if len(val.shape) > 0 and val.shape[0] > 1:\n is_array = True\n val = val.view(-1).cpu().data.detach().numpy()\n else:\n val = val.mean().item()\n elif type(val) == np.ndarray:\n if len(val.shape) > 0 and val.shape[0] > 1:\n is_array = True\n val = val.reshape(-1)\n elif type(val) == float:\n pass\n else:\n raise TypeError(f'{type(val)} type not supported in AverageMeter')\n\n if type(n) == torch.Tensor:\n n = n.float().mean().item()\n self.val = np.mean(val)\n self.sum += val * n\n self.count += n\n # self.avg = self.sum / self.count\n if is_array:\n self.avg_expanded = self.sum / self.count\n self.avg = self.avg_expanded.mean()\n else:\n self.avg 
= self.sum / self.count\n self.avg_expanded = np.array([self.avg])\n if history:\n self.history.append(val.mean())\n if step > 0:\n self.local_history.append(val)\n if len(self.local_history) > step:\n self.local_history.popleft()\n self.local_avg = np.average(self.local_history)\n\n def dict_update(self, val, key):\n if key in self.dict.keys():\n self.dict[key].append(val)\n else:\n self.dict[key] = [val]\n\n def __len__(self):\n return self.count\n\n\nclass AccuracyTable(object):\n '''compute accuracy for each class'''\n def __init__(self):\n self.dict = {}\n\n def update(self, pred, tar):\n pred = torch.squeeze(pred)\n tar = torch.squeeze(tar)\n for i, j in zip(pred, tar):\n i = int(i)\n j = int(j)\n if j not in self.dict.keys():\n self.dict[j] = {'count':0,'correct':0}\n self.dict[j]['count'] += 1\n if i == j:\n self.dict[j]['correct'] += 1\n\n def print_table(self, label):\n for key in self.dict.keys():\n acc = self.dict[key]['correct'] / self.dict[key]['count']\n print('%s: %2d, accuracy: %3d/%3d = %0.6f' \\\n % (label, key, self.dict[key]['correct'], self.dict[key]['count'], acc))\n\n\ndef neq_load_customized(args, model, pretrained_dict,\n parts=['backbone', 'agg', 'network_pred', 'hyperbolic_linear', 'network-class'],\n size_diff=False):\n '''\n load pre-trained model in a not-equal way, when new model has been partially modified\n size_diff: some parameters may have the same name but different size. Cannot load these, but do not throw error, and\n load all the rest\n '''\n model_dict = model.state_dict()\n tmp = {}\n print_r(args, '\\n=======Check Weights Loading======')\n print_r(args, ('loading the following parts:', ', '.join(parts)))\n if parts == 'all':\n if size_diff:\n for k, v in pretrained_dict.items():\n if k in model.state_dict() and model.state_dict()[k].shape == v.shape:\n tmp[k] = v\n else:\n print_r(args, f'{k} not loaded')\n else:\n tmp = pretrained_dict\n else:\n for part in parts:\n print_r(args, ('loading:', part))\n print_r(args, '\\n=======Check Weights Loading======')\n print_r(args, 'Weights not used from pretrained file:')\n for k, v in pretrained_dict.items():\n if part in k:\n if k in model_dict:\n if not (size_diff and model.state_dict()[k].shape != v.shape):\n tmp[k] = v\n else:\n print_r(args, k)\n print_r(args, '---------------------------')\n print_r(args, 'Weights not loaded into new model:')\n for k, v in model_dict.items():\n if part in k:\n if k not in pretrained_dict:\n print_r(args, k)\n print_r(args, '===================================\\n')\n\n del pretrained_dict\n if 'time_index.weight' in tmp and \\\n 'time_index' in [a[0].split('.')[0] for a in list(model.named_parameters())] and \\\n model.time_index.weight.shape[0] < tmp['time_index.weight'].shape[0]:\n tmp['time_index.weight'].data = tmp['time_index.weight'][:model.time_index.weight.shape[0]].data\n model.load_state_dict(tmp, strict=False)\n return model\n\n\ndef print_r(args, text, print_no_verbose=False):\n \"\"\" Print only when the local rank is <=0 (only once)\"\"\"\n if args.local_rank <= 0 and (args.verbose or print_no_verbose):\n if type(text) == tuple:\n print(*text)\n else:\n print(text)\n \n \nclass ConfusionMeter(object):\n '''compute and show confusion matrix'''\n def __init__(self, num_class):\n self.num_class = num_class\n self.mat = np.zeros((num_class, num_class))\n self.precision = []\n self.recall = []\n\n def update(self, pred, tar):\n pred, tar = pred.cpu().numpy(), tar.cpu().numpy()\n pred = np.squeeze(pred)\n tar = np.squeeze(tar)\n for p,t in zip(pred.flat, 
tar.flat):\n self.mat[p][t] += 1\n\n def print_mat(self):\n print('Confusion Matrix: (target in columns)')\n print(self.mat)\n\n def plot_mat(self, path, dictionary=None, annotate=False):\n plt.figure(dpi=600)\n plt.imshow(self.mat,\n cmap=plt.cm.jet,\n interpolation=None,\n extent=(0.5, np.shape(self.mat)[0]+0.5, np.shape(self.mat)[1]+0.5, 0.5))\n width, height = self.mat.shape\n if annotate:\n for x in range(width):\n for y in range(height):\n plt.annotate(str(int(self.mat[x][y])), xy=(y+1, x+1),\n horizontalalignment='center',\n verticalalignment='center',\n fontsize=8)\n\n if dictionary is not None:\n plt.xticks([i+1 for i in range(width)],\n [dictionary[i] for i in range(width)],\n rotation='vertical')\n plt.yticks([i+1 for i in range(height)],\n [dictionary[i] for i in range(height)])\n plt.xlabel('Ground Truth')\n plt.ylabel('Prediction')\n plt.colorbar()\n plt.tight_layout()\n plt.savefig(path, format='svg')\n plt.clf()\n\n # for i in range(width):\n # if np.sum(self.mat[i,:]) != 0:\n # self.precision.append(self.mat[i,i] / np.sum(self.mat[i,:]))\n # if np.sum(self.mat[:,i]) != 0:\n # self.recall.append(self.mat[i,i] / np.sum(self.mat[:,i]))\n # print('Average Precision: %0.4f' % np.mean(self.precision))\n # print('Average Recall: %0.4f' % np.mean(self.recall))\n\n\n\n\n", "import torch\nimport numpy as np\n\nclass PairwiseHypConeDist():\n def __init__(self, K=0.1, fp64_hyper=True):\n self.K = K\n self.fp64_hyper = fp64_hyper\n def __call__(self, x, y):\n '''\n scale up embedding if it's smaller than the threshold radius K\n \n Note: this step potentially contains a lot of in-place operation,\n which is not legal for torch.autograd. Need to make clone of\n the variable every step of the way\n '''\n N_pred, D = x.shape\n N_gt, D = y.shape\n \n # scaling up x when it's too small\n x_norm = torch.norm(x, p=2, dim=-1)\n x_small = x.transpose(dim0=-1, dim1=-2)\n scale_factor = ((0.1 + 1e-7) / x_norm)\n x_small = (x_small * scale_factor).clone()\n x = torch.where(x_norm < (0.1 + 1e-7), x_small, x.transpose(dim0=-1, dim1=-2)).transpose(dim0=-1, dim1=-2)\n \n # neccessary components\n x_square = self.square_norm(x).unsqueeze(dim=1).expand(N_pred, N_gt)\n y_square = self.square_norm(y).unsqueeze(dim=0).expand(N_pred, N_gt)\n x_norm = torch.sqrt(x_square)\n xy_square = self.pairwise_distances(x, y)\n xy_norm = torch.sqrt(xy_square)\n xy_prod = self.pairwise_mul(x, y)\n \n # Xi\n num = xy_prod * (1 + x_square) - x_square * (1 + y_square)\n denom = x_norm * xy_norm * torch.sqrt(1 + x_square * y_square - 2.0 * xy_prod)\n Xi = torch.acos(num / denom)\n\n # Phi\n Phi = torch.asin(self.K * (1 - x_square) / x_norm)\n \n return Xi - Phi\n \n def square_norm(self, x):\n \"\"\"\n Helper function returning square of the euclidean norm.\n Also here we clamp it since it really likes to die to zero.\n \"\"\"\n norm = torch.norm(x,dim=-1,p=2)**2\n return torch.clamp(norm, min=0.0)\n\n def pairwise_mul(self, x, y):\n \"\"\"\n Helper function returning pairwise vector product.\n Also here we clamp it since it really likes to die to zero.\n \"\"\"\n y_t = torch.transpose(y, 0, 1)\n prod = torch.mm(x, y_t)\n return prod\n\n def pairwise_distances(self, x, y=None):\n '''\n Input: x is a Nxd matrix\n y is an optional Mxd matirx\n Output: dist is a NxM matrix where dist[i,j] is the square norm between x[i,:] and y[j,:]\n if y is not given then use 'y=x'.\n i.e. 
dist[i,j] = ||x[i,:]-y[j,:]||^2\n '''\n x_norm = (x**2).sum(1).view(-1, 1)\n if y is not None:\n y_t = torch.transpose(y, 0, 1)\n y_norm = (y**2).sum(1).view(1, -1)\n else:\n y_t = torch.transpose(x, 0, 1)\n y_norm = x_norm.view(1, -1)\n\n dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)\n # Ensure diagonal is zero if x=y\n # if y is None:\n # dist = dist - torch.diag(dist.diag)\n return torch.clamp(dist, 0.0, np.inf)" ]
[ [ "matplotlib.pyplot.tight_layout", "torch.max", "matplotlib.pyplot.switch_backend", "numpy.squeeze", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.clf", "numpy.mean", "numpy.shape", "numpy.average", "matplotlib.pyplot.xlabel", "numpy.array", "numpy.zeros", "torch.squeeze", "torch.save" ], [ "torch.norm", "torch.transpose", "torch.mm", "torch.sqrt", "torch.acos", "torch.asin", "torch.clamp" ] ]
JunweiPan3013/featuretools
[ "b0c8478f9bf8f46217726e3a32de51e083d98351" ]
[ "featuretools/computational_backends/pandas_backend.py" ]
[ "import cProfile\nimport logging\nimport os\nimport pstats\nimport sys\nimport warnings\nfrom datetime import datetime\n\nimport numpy as np\nimport pandas as pd\nimport pandas.api.types as pdtypes\nfrom future import standard_library\n\nfrom .base_backend import ComputationalBackend\nfrom .feature_tree import FeatureTree\n\nfrom featuretools import variable_types\nfrom featuretools.entityset.relationship import Relationship\nfrom featuretools.exceptions import UnknownFeature\nfrom featuretools.primitives import (\n AggregationPrimitive,\n DirectFeature,\n IdentityFeature,\n TransformPrimitive\n)\nfrom featuretools.utils.gen_utils import make_tqdm_iterator\n\nstandard_library.install_aliases()\nwarnings.simplefilter('ignore', np.RankWarning)\nwarnings.simplefilter(\"ignore\", category=RuntimeWarning)\nlogger = logging.getLogger('featuretools.computational_backend')\nROOT_DIR = os.path.expanduser(\"~\")\n\n\nclass PandasBackend(ComputationalBackend):\n\n def __init__(self, entityset, features):\n assert len(set(f.entity.id for f in features)) == 1, \\\n \"Features must all be defined on the same entity\"\n\n self.entityset = entityset\n self.target_eid = features[0].entity.id\n self.features = features\n self.feature_tree = FeatureTree(entityset, features)\n\n def __sizeof__(self):\n return self.entityset.__sizeof__()\n\n def calculate_all_features(self, instance_ids, time_last,\n training_window=None, profile=False,\n precalculated_features=None, ignored=None,\n verbose=False):\n \"\"\"\n Given a list of instance ids and features with a shared time window,\n generate and return a mapping of instance -> feature values.\n\n Args:\n instance_ids (list): List of instance id for which to build features.\n\n time_last (pd.Timestamp): Last allowed time. Data from exactly this\n time not allowed.\n\n training_window (Timedelta, optional): Data older than\n time_last by more than this will be ignored.\n\n profile (bool): Enable profiler if True.\n\n verbose (bool): Print output progress if True.\n\n Returns:\n pd.DataFrame : Pandas DataFrame of calculated feature values.\n Indexed by instance_ids. 
Columns in same order as features\n passed in.\n\n \"\"\"\n assert len(instance_ids) > 0, \"0 instance ids provided\"\n self.instance_ids = instance_ids\n\n self.time_last = time_last\n if self.time_last is None:\n self.time_last = datetime.now()\n\n # For debugging\n if profile:\n pr = cProfile.Profile()\n pr.enable()\n\n if precalculated_features is None:\n precalculated_features = {}\n # Access the index to get the filtered data we need\n target_entity = self.entityset[self.target_eid]\n if ignored:\n # TODO: Just want to remove entities if don't have any (sub)features defined\n # on them anymore, rather than recreating\n ordered_entities = FeatureTree(self.entityset, self.features, ignored=ignored).ordered_entities\n else:\n ordered_entities = self.feature_tree.ordered_entities\n\n necessary_columns = self.feature_tree.necessary_columns\n eframes_by_filter = \\\n self.entityset.get_pandas_data_slice(filter_entity_ids=ordered_entities,\n index_eid=self.target_eid,\n instances=instance_ids,\n entity_columns=necessary_columns,\n time_last=time_last,\n training_window=training_window,\n verbose=verbose)\n large_eframes_by_filter = None\n if any([f.uses_full_entity for f in self.feature_tree.all_features]):\n large_necessary_columns = self.feature_tree.necessary_columns_for_all_values_features\n large_eframes_by_filter = \\\n self.entityset.get_pandas_data_slice(filter_entity_ids=ordered_entities,\n index_eid=self.target_eid,\n instances=None,\n entity_columns=large_necessary_columns,\n time_last=time_last,\n training_window=training_window,\n verbose=verbose)\n\n # Handle an empty time slice by returning a dataframe with defaults\n if eframes_by_filter is None:\n return self.generate_default_df(instance_ids=instance_ids)\n\n finished_entity_ids = []\n # Populate entity_frames with precalculated features\n if len(precalculated_features) > 0:\n for entity_id, precalc_feature_values in precalculated_features.items():\n if entity_id in eframes_by_filter:\n frame = eframes_by_filter[entity_id][entity_id]\n eframes_by_filter[entity_id][entity_id] = pd.merge(frame,\n precalc_feature_values,\n left_index=True,\n right_index=True)\n else:\n # Only features we're taking from this entity\n # are precomputed\n # Make sure the id variable is a column as well as an index\n entity_id_var = self.entityset[entity_id].index\n precalc_feature_values[entity_id_var] = precalc_feature_values.index.values\n eframes_by_filter[entity_id] = {entity_id: precalc_feature_values}\n finished_entity_ids.append(entity_id)\n\n # Iterate over the top-level entities (filter entities) in sorted order\n # and calculate all relevant features under each one.\n if verbose:\n total_groups_to_compute = sum(len(group)\n for group in self.feature_tree.ordered_feature_groups.values())\n\n pbar = make_tqdm_iterator(total=total_groups_to_compute,\n desc=\"Computing features\",\n unit=\"feature group\")\n if verbose:\n pbar.update(0)\n\n for filter_eid in ordered_entities:\n entity_frames = eframes_by_filter[filter_eid]\n large_entity_frames = None\n if large_eframes_by_filter is not None:\n large_entity_frames = large_eframes_by_filter[filter_eid]\n\n # update the current set of entity frames with the computed features\n # from previously finished entities\n for eid in finished_entity_ids:\n # only include this frame if it's not from a descendent entity:\n # descendent entity frames will have to be re-calculated.\n # TODO: this check might not be necessary, depending on our\n # constraints\n if not 
self.entityset.find_backward_path(start_entity_id=filter_eid,\n goal_entity_id=eid):\n entity_frames[eid] = eframes_by_filter[eid][eid]\n # TODO: look this over again\n # precalculated features will only be placed in entity_frames,\n # and it's possible that that they are the only features computed\n # for an entity. In this case, the entity won't be present in\n # large_eframes_by_filter. The relevant lines that this case passes\n # through are 136-143\n if (large_eframes_by_filter is not None and\n eid in large_eframes_by_filter and eid in large_eframes_by_filter[eid]):\n large_entity_frames[eid] = large_eframes_by_filter[eid][eid]\n\n if filter_eid in self.feature_tree.ordered_feature_groups:\n for group in self.feature_tree.ordered_feature_groups[filter_eid]:\n if verbose:\n pbar.set_postfix({'running': 0})\n\n test_feature = group[0]\n entity_id = test_feature.entity.id\n\n input_frames_type = self.feature_tree.input_frames_type(test_feature)\n\n input_frames = large_entity_frames\n if input_frames_type == \"subset_entity_frames\":\n input_frames = entity_frames\n\n handler = self._feature_type_handler(test_feature)\n result_frame = handler(group, input_frames)\n\n output_frames_type = self.feature_tree.output_frames_type(test_feature)\n if output_frames_type in ['full_and_subset_entity_frames', 'subset_entity_frames']:\n index = entity_frames[entity_id].index\n # If result_frame came from a uses_full_entity feature,\n # and the input was large_entity_frames,\n # then it's possible it doesn't contain some of the features\n # in the output entity_frames\n # We thus need to concatenate the existing frame with the result frame,\n # making sure not to duplicate any columns\n _result_frame = result_frame.reindex(index)\n cols_to_keep = [c for c in _result_frame.columns\n if c not in entity_frames[entity_id].columns]\n entity_frames[entity_id] = pd.concat([entity_frames[entity_id],\n _result_frame[cols_to_keep]],\n axis=1)\n\n if output_frames_type in ['full_and_subset_entity_frames', 'full_entity_frames']:\n index = large_entity_frames[entity_id].index\n _result_frame = result_frame.reindex(index)\n cols_to_keep = [c for c in _result_frame.columns\n if c not in large_entity_frames[entity_id].columns]\n large_entity_frames[entity_id] = pd.concat([large_entity_frames[entity_id],\n _result_frame[cols_to_keep]],\n axis=1)\n\n if verbose:\n pbar.update(1)\n\n finished_entity_ids.append(filter_eid)\n\n if verbose:\n pbar.set_postfix({'running': 0})\n pbar.refresh()\n sys.stdout.flush()\n pbar.close()\n\n # debugging\n if profile:\n pr.disable()\n prof_folder_path = os.path.join(ROOT_DIR, 'prof')\n if not os.path.exists(prof_folder_path):\n os.mkdir(prof_folder_path)\n with open(os.path.join(prof_folder_path, 'inst-%s.log' %\n list(instance_ids)[0]), 'w') as f:\n pstats.Stats(pr, stream=f).strip_dirs().sort_stats(\"cumulative\", \"tottime\").print_stats()\n\n df = eframes_by_filter[self.target_eid][self.target_eid]\n\n # fill in empty rows with default values\n missing_ids = [i for i in instance_ids if i not in\n df[target_entity.index]]\n if missing_ids:\n default_df = self.generate_default_df(instance_ids=missing_ids,\n extra_columns=df.columns)\n df = df.append(default_df, sort=True)\n\n df.index.name = self.entityset[self.target_eid].index\n return df[[feat.get_name() for feat in self.features]]\n\n def generate_default_df(self, instance_ids, extra_columns=None):\n index_name = self.features[0].entity.index\n default_row = [f.default_value for f in self.features]\n default_cols = 
[f.get_name() for f in self.features]\n default_matrix = [default_row] * len(instance_ids)\n default_df = pd.DataFrame(default_matrix,\n columns=default_cols,\n index=instance_ids)\n default_df.index.name = index_name\n if extra_columns is not None:\n for c in extra_columns:\n if c not in default_df.columns:\n default_df[c] = [np.nan] * len(instance_ids)\n return default_df\n\n def _feature_type_handler(self, f):\n if isinstance(f, TransformPrimitive):\n return self._calculate_transform_features\n elif isinstance(f, DirectFeature):\n return self._calculate_direct_features\n elif isinstance(f, AggregationPrimitive):\n return self._calculate_agg_features\n elif isinstance(f, IdentityFeature):\n return self._calculate_identity_features\n else:\n raise UnknownFeature(u\"{} feature unknown\".format(f.__class__))\n\n def _calculate_identity_features(self, features, entity_frames):\n entity_id = features[0].entity.id\n assert (entity_id in entity_frames and\n features[0].get_name() in entity_frames[entity_id].columns)\n return entity_frames[entity_id]\n\n def _calculate_transform_features(self, features, entity_frames):\n entity_id = features[0].entity.id\n assert len(set([f.entity.id for f in features])) == 1, \\\n \"features must share base entity\"\n assert entity_id in entity_frames\n\n frame = entity_frames[entity_id]\n for f in features:\n # handle when no data\n if frame.shape[0] == 0:\n set_default_column(frame, f)\n continue\n\n # collect only the variables we need for this transformation\n variable_data = [frame[bf.get_name()].values\n for bf in f.base_features]\n\n feature_func = f.get_function()\n # apply the function to the relevant dataframe slice and add the\n # feature row to the results dataframe.\n if f.uses_calc_time:\n values = feature_func(*variable_data, time=self.time_last)\n else:\n values = feature_func(*variable_data)\n\n if isinstance(values, pd.Series):\n values = values.values\n frame[f.get_name()] = list(values)\n return frame\n\n def _calculate_direct_features(self, features, entity_frames):\n entity_id = features[0].entity.id\n parent_entity_id = features[0].parent_entity.id\n\n assert entity_id in entity_frames and parent_entity_id in entity_frames\n\n path = self.entityset.find_forward_path(entity_id, parent_entity_id)\n assert len(path) == 1, \\\n \"Error calculating DirectFeatures, len(path) > 1\"\n\n parent_df = entity_frames[parent_entity_id]\n child_df = entity_frames[entity_id]\n merge_var = path[0].child_variable.id\n\n # generate a mapping of old column names (in the parent entity) to\n # new column names (in the child entity) for the merge\n col_map = {path[0].parent_variable.id: merge_var}\n index_as_feature = None\n for f in features:\n if f.base_features[0].get_name() == path[0].parent_variable.id:\n index_as_feature = f\n # Sometimes entityset._add_multigenerational_links adds link variables\n # that would ordinarily get calculated as direct features,\n # so we make sure not to attempt to calculate again\n if f.get_name() in child_df.columns:\n continue\n col_map[f.base_features[0].get_name()] = f.get_name()\n\n # merge the identity feature from the parent entity into the child\n merge_df = parent_df[list(col_map.keys())].rename(columns=col_map)\n if index_as_feature is not None:\n merge_df.set_index(index_as_feature.get_name(), inplace=True,\n drop=False)\n else:\n merge_df.set_index(merge_var, inplace=True)\n\n new_df = pd.merge(left=child_df, right=merge_df,\n left_on=merge_var, right_index=True,\n how='left')\n\n return new_df\n\n def 
_calculate_agg_features(self, features, entity_frames):\n test_feature = features[0]\n entity = test_feature.entity\n child_entity = test_feature.base_features[0].entity\n\n assert entity.id in entity_frames and child_entity.id in entity_frames\n\n frame = entity_frames[entity.id]\n base_frame = entity_frames[child_entity.id]\n # Sometimes approximate features get computed in a previous filter frame\n # and put in the current one dynamically,\n # so there may be existing features here\n features = [f for f in features if f.get_name()\n not in frame.columns]\n if not len(features):\n return frame\n\n # handle where\n where = test_feature.where\n if where is not None and not base_frame.empty:\n base_frame = base_frame.loc[base_frame[where.get_name()]]\n\n # when no child data, just add all the features to frame with nan\n if base_frame.empty:\n for f in features:\n frame[f.get_name()] = np.nan\n else:\n relationship_path = self.entityset.find_backward_path(entity.id,\n child_entity.id)\n\n groupby_var = Relationship._get_link_variable_name(relationship_path)\n\n # if the use_previous property exists on this feature, include only the\n # instances from the child entity included in that Timedelta\n use_previous = test_feature.use_previous\n if use_previous and not base_frame.empty:\n # Filter by use_previous values\n time_last = self.time_last\n if use_previous.is_absolute():\n time_first = time_last - use_previous\n ti = child_entity.time_index\n if ti is not None:\n base_frame = base_frame[base_frame[ti] >= time_first]\n else:\n n = use_previous.value\n\n def last_n(df):\n return df.iloc[-n:]\n\n base_frame = base_frame.groupby(groupby_var, observed=True, sort=False).apply(last_n)\n\n to_agg = {}\n agg_rename = {}\n to_apply = set()\n # apply multivariable and time-dependent features as we find them, and\n # save aggregable features for later\n for f in features:\n if _can_agg(f):\n variable_id = f.base_features[0].get_name()\n\n if variable_id not in to_agg:\n to_agg[variable_id] = []\n\n func = f.get_function()\n funcname = func\n if callable(func):\n funcname = func.__name__\n\n to_agg[variable_id].append(func)\n # this is used below to rename columns that pandas names for us\n agg_rename[u\"{}-{}\".format(variable_id, funcname)] = f.get_name()\n continue\n\n to_apply.add(f)\n\n # Apply the non-aggregable functions generate a new dataframe, and merge\n # it with the existing one\n if len(to_apply):\n wrap = agg_wrapper(to_apply, self.time_last)\n # groupby_var can be both the name of the index and a column,\n # to silence pandas warning about ambiguity we explicitly pass\n # the column (in actuality grouping by both index and group would\n # work)\n to_merge = base_frame.groupby(base_frame[groupby_var], observed=True, sort=False).apply(wrap)\n frame = pd.merge(left=frame, right=to_merge,\n left_index=True,\n right_index=True, how='left')\n\n # Apply the aggregate functions to generate a new dataframe, and merge\n # it with the existing one\n if len(to_agg):\n # groupby_var can be both the name of the index and a column,\n # to silence pandas warning about ambiguity we explicitly pass\n # the column (in actuality grouping by both index and group would\n # work)\n to_merge = base_frame.groupby(base_frame[groupby_var],\n observed=True, sort=False).agg(to_agg)\n # rename columns to the correct feature names\n to_merge.columns = [agg_rename[\"-\".join(x)] for x in to_merge.columns.ravel()]\n to_merge = to_merge[list(agg_rename.values())]\n\n # workaround for pandas bug where categories are in 
the wrong order\n # see: https://github.com/pandas-dev/pandas/issues/22501\n if pdtypes.is_categorical_dtype(frame.index):\n categories = pdtypes.CategoricalDtype(categories=frame.index.categories)\n to_merge.index = to_merge.index.astype(object).astype(categories)\n\n frame = pd.merge(left=frame, right=to_merge,\n left_index=True, right_index=True, how='left')\n\n # Handle default values\n # 1. handle non scalar default values\n iterfeats = [f for f in features\n if hasattr(f.default_value, '__iter__')]\n for f in iterfeats:\n nulls = pd.isnull(frame[f.get_name()])\n for ni in nulls[nulls].index:\n frame.at[ni, f.get_name()] = f.default_value\n\n # 2. handle scalars default values\n fillna_dict = {f.get_name(): f.default_value for f in features\n if f not in iterfeats}\n frame.fillna(fillna_dict, inplace=True)\n\n # convert boolean dtypes to floats as appropriate\n # pandas behavior: https://github.com/pydata/pandas/issues/3752\n for f in features:\n if (not f.expanding and\n f.variable_type == variable_types.Numeric and\n frame[f.get_name()].dtype.name in ['object', 'bool']):\n frame[f.get_name()] = frame[f.get_name()].astype(float)\n\n return frame\n\n\ndef _can_agg(feature):\n assert isinstance(feature, AggregationPrimitive)\n base_features = feature.base_features\n if feature.where is not None:\n base_features = [bf.get_name() for bf in base_features\n if bf.get_name() != feature.where.get_name()]\n\n if feature.uses_calc_time:\n return False\n\n return len(base_features) == 1 and not feature.expanding\n\n\ndef agg_wrapper(feats, time_last):\n def wrap(df):\n d = {}\n for f in feats:\n func = f.get_function()\n variable_ids = [bf.get_name() for bf in f.base_features]\n args = [df[v] for v in variable_ids]\n\n if f.uses_calc_time:\n d[f.get_name()] = func(*args, time=time_last)\n else:\n d[f.get_name()] = func(*args)\n\n return pd.Series(d)\n return wrap\n\n\ndef set_default_column(frame, f):\n default = f.default_value\n if hasattr(default, '__iter__'):\n length = frame.shape[0]\n default = [f.default_value] * length\n frame[f.get_name()] = default\n" ]
[ [ "pandas.api.types.is_categorical_dtype", "pandas.merge", "pandas.concat", "pandas.api.types.CategoricalDtype", "pandas.Series", "pandas.DataFrame" ] ]
wdzhong/ASTGCN-PyTorch
[ "4f76d2302b6fd4227c4846e06ff11560d8a8237b" ]
[ "lib/utils.py" ]
[ "# -*- coding:utf-8 -*-\n# pylint: disable=no-member\n\nimport csv\nimport numpy as np\nfrom scipy.sparse.linalg import eigs\n\nfrom .metrics import mean_absolute_error, mean_squared_error, masked_mape_np\n\n\ndef search_data(sequence_length, num_of_batches, label_start_idx,\n num_for_predict, units, points_per_hour):\n '''\n Parameters\n ----------\n sequence_length: int, length of all history data\n\n num_of_batches: int, the number of batches will be used for training\n\n label_start_idx: int, the first index of predicting target\n\n num_for_predict: int,\n the number of points will be predicted for each sample\n\n units: int, week: 7 * 24, day: 24, recent(hour): 1\n\n points_per_hour: int, number of points per hour, depends on data\n\n Returns\n ----------\n list[(start_idx, end_idx)]\n '''\n\n if points_per_hour < 0:\n raise ValueError(\"points_per_hour should be greater than 0!\")\n\n if label_start_idx + num_for_predict > sequence_length:\n return None\n\n x_idx = []\n for i in range(1, num_of_batches + 1):\n start_idx = label_start_idx - points_per_hour * units * i\n end_idx = start_idx + num_for_predict # wd: this could overlap with 'label_start_index', e.g. when num_for_predict is larger than 12 (one hour)\n if start_idx >= 0:\n x_idx.append((start_idx, end_idx))\n else:\n return None\n\n if len(x_idx) != num_of_batches:\n return None\n\n return x_idx[::-1]\n\n\ndef get_sample_indices(data_sequence, num_of_weeks, num_of_days, num_of_hours,\n label_start_idx, num_for_predict, points_per_hour=12):\n \"\"\"\n Parameters\n ----------\n data_sequence: np.ndarray\n shape is (sequence_length, num_of_vertices, num_of_features)\n\n num_of_weeks, num_of_days, num_of_hours: int\n\n label_start_idx: int, the first index of predicting target\n\n num_for_predict: int,\n the number of points will be predicted for each sample\n\n points_per_hour: int, default 12, number of points per hour\n\n Returns\n ----------\n week_sample: np.ndarray\n shape is (num_of_weeks * points_per_hour, # wd: points_per_hour should be num_for_predict??\n num_of_vertices, num_of_features)\n\n day_sample: np.ndarray\n shape is (num_of_days * points_per_hour,\n num_of_vertices, num_of_features)\n\n hour_sample: np.ndarray\n shape is (num_of_hours * points_per_hour,\n num_of_vertices, num_of_features)\n\n target: np.ndarray\n shape is (num_for_predict, num_of_vertices, num_of_features)\n \"\"\"\n week_indices = search_data(data_sequence.shape[0], num_of_weeks,\n label_start_idx, num_for_predict,\n 7 * 24, points_per_hour)\n if not week_indices:\n return None\n\n day_indices = search_data(data_sequence.shape[0], num_of_days,\n label_start_idx, num_for_predict,\n 24, points_per_hour)\n if not day_indices:\n return None\n\n hour_indices = search_data(data_sequence.shape[0], num_of_hours,\n label_start_idx, num_for_predict,\n 1, points_per_hour)\n if not hour_indices:\n return None\n\n week_sample = np.concatenate([data_sequence[i: j]\n for i, j in week_indices], axis=0)\n day_sample = np.concatenate([data_sequence[i: j]\n for i, j in day_indices], axis=0)\n hour_sample = np.concatenate([data_sequence[i: j]\n for i, j in hour_indices], axis=0)\n target = data_sequence[label_start_idx: label_start_idx + num_for_predict]\n\n return week_sample, day_sample, hour_sample, target\n\n\ndef get_adjacency_matrix(distance_df_filename, num_of_vertices):\n '''\n Parameters\n ----------\n distance_df_filename: str, path of the csv file contains edges information\n\n num_of_vertices: int, the number of vertices\n\n Returns\n 
----------\n A: np.ndarray, adjacency matrix\n\n '''\n\n with open(distance_df_filename, 'r') as f:\n reader = csv.reader(f)\n header = f.__next__()\n edges = [(int(i[0]), int(i[1])) for i in reader]\n\n A = np.zeros((int(num_of_vertices), int(num_of_vertices)),\n dtype=np.float32)\n\n for i, j in edges:\n A[i, j] = 1\n\n return A\n\n\ndef scaled_Laplacian(W):\n '''\n compute \\tilde{L}\n\n Parameters\n ----------\n W: np.ndarray, shape is (N, N), N is the num of vertices\n\n Returns\n ----------\n scaled_Laplacian: np.ndarray, shape (N, N)\n\n '''\n\n assert W.shape[0] == W.shape[1]\n\n D = np.diag(np.sum(W, axis=1))\n\n L = D - W\n\n lambda_max = eigs(L, k=1, which='LR')[0].real\n\n return (2 * L) / lambda_max - np.identity(W.shape[0])\n\n\ndef cheb_polynomial(L_tilde, K):\n '''\n compute a list of chebyshev polynomials from T_0 to T_{K-1}\n\n Parameters\n ----------\n L_tilde: scaled Laplacian, np.ndarray, shape (N, N)\n\n K: the maximum order of chebyshev polynomials\n\n Returns\n ----------\n cheb_polynomials: list[np.ndarray], length: K, from T_0 to T_{K-1}\n\n '''\n\n N = L_tilde.shape[0]\n\n cheb_polynomials = [np.identity(N), L_tilde.copy()]\n\n for i in range(2, K):\n cheb_polynomials.append(\n 2 * L_tilde * cheb_polynomials[i - 1] - cheb_polynomials[i - 2])\n\n return cheb_polynomials\n\n\ndef compute_val_loss(net, val_loader, loss_function, sw, epoch, device):\n \"\"\"\n compute mean loss on validation set\n\n Parameters\n ----------\n net: model\n\n val_loader: DataLoader\n\n loss_function: func\n\n sw: SummaryWriter. TODO: to be implemented\n\n epoch: int, current epoch\n\n \"\"\"\n val_loader_length = len(val_loader)\n tmp = []\n for index, (val_w, val_d, val_r, val_t) in enumerate(val_loader):\n val_w = val_w.to(device)\n val_d = val_d.to(device)\n val_r = val_r.to(device)\n val_t = val_t.to(device)\n output = net([val_w, val_d, val_r])\n l = loss_function(output, val_t) # l is a tensor, with single value\n tmp.append(l.item())\n print('validation batch %s / %s, loss: %.2f' % (\n index + 1, val_loader_length, l.item()))\n\n validation_loss = sum(tmp) / len(tmp)\n\n if sw:\n sw.add_scalar(tag='validation_loss',\n value=validation_loss,\n global_step=epoch)\n\n print('epoch: %s, validation loss: %.2f' % (epoch, validation_loss))\n\n\ndef predict(net, test_loader, device):\n \"\"\"\n predict\n\n Parameters\n ----------\n net: model\n\n test_loader: DataLoader\n\n Returns\n ----------\n prediction: np.ndarray,\n shape is (num_of_samples, num_of_vertices, num_for_predict)\n\n \"\"\"\n\n test_loader_length = len(test_loader)\n prediction = []\n for index, (test_w, test_d, test_r, _) in enumerate(test_loader):\n test_w = test_w.to(device)\n test_d = test_d.to(device)\n test_r = test_r.to(device)\n prediction.append(net([test_w, test_d, test_r]).cpu().numpy())\n print('predicting testing set batch %s / %s' % (index + 1, test_loader_length))\n prediction = np.concatenate(prediction, 0)\n return prediction\n\n\ndef evaluate(net, test_loader, true_value, num_of_vertices, sw, epoch, device):\n \"\"\"\n compute MAE, RMSE, MAPE scores of the prediction\n for 3, 6, 12 points on testing set\n\n Parameters\n ----------\n net: model\n\n test_loader: DataLoader\n\n true_value: np.ndarray, all ground truth of testing set\n shape is (num_of_samples, num_for_predict, num_of_vertices)\n\n num_of_vertices: int, number of vertices\n\n sw: SummaryWriter. 
TODO: to be implemented.\n\n epoch: int, current epoch\n\n \"\"\"\n prediction = predict(net, test_loader, device)\n prediction = (prediction.transpose((0, 2, 1))\n .reshape(prediction.shape[0], -1))\n for i in [3, 6, 12]:\n print('current epoch: %s, predict %s points' % (epoch, i))\n\n mae = mean_absolute_error(true_value[:, : i * num_of_vertices],\n prediction[:, : i * num_of_vertices])\n rmse = mean_squared_error(true_value[:, : i * num_of_vertices],\n prediction[:, : i * num_of_vertices]) ** 0.5\n mape = masked_mape_np(true_value[:, : i * num_of_vertices],\n prediction[:, : i * num_of_vertices], 0)\n\n print('MAE: %.2f' % (mae))\n print('RMSE: %.2f' % (rmse))\n print('MAPE: %.2f' % (mape))\n print()\n if sw:\n sw.add_scalar(tag='MAE_%s_points' % (i),\n value=mae,\n global_step=epoch)\n sw.add_scalar(tag='RMSE_%s_points' % (i),\n value=rmse,\n global_step=epoch)\n sw.add_scalar(tag='MAPE_%s_points' % (i),\n value=mape,\n global_step=epoch)\n" ]
[ [ "numpy.concatenate", "numpy.identity", "numpy.sum", "scipy.sparse.linalg.eigs" ] ]
JacopoBugini/SpotMask
[ "0be6c35283b89d5bbddcdb2b65a67a59fac4d264" ]
[ "utils/utils.py" ]
[ "import numpy as np\nimport cv2\nimport tensorflow.keras as keras\nfrom tensorflow.keras.preprocessing import image\nimport numpy as np\n\n# -------------------------------------------------------------------\n# Load models\n# -------------------------------------------------------------------\n\n# Load the trained model\nmask_net = keras.models.load_model('models/facemask-correctness/mask_correctness_model.h5')\nprint(\"Model Check Mask imported correctly\")\n\ndetect_net = keras.models.load_model('models/mask-detection/mask_detection_model.h5')\nprint(\"Model Detect Mask imported correctly\")\nprint(\"*********************************************\")\n\nsuggest_net = keras.models.load_model('models/suggestions-detection/suggestions_model.h5')\nprint(\"Model Detect Mask imported correctly\")\nprint(\"*********************************************\")\n\n# -------------------------------------------------------------------\n# Parameters\n# -------------------------------------------------------------------\n\nCONF_THRESHOLD = 0.5\nNMS_THRESHOLD = 0.4\nIMG_WIDTH = 416\nIMG_HEIGHT = 416\n\n\n# -------------------------------------------------------------------\n# Help functions\n# -------------------------------------------------------------------\n\n# Get the names of the output layers\ndef get_outputs_names(net):\n # Get the names of all the layers in the network\n layers_names = net.getLayerNames()\n\n # Get the names of the output layers, i.e. the layers with unconnected\n return [layers_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\ndef process_frame(frame, outs, conf_threshold, nms_threshold, mode):\n frame_height = frame.shape[0]\n frame_width = frame.shape[1]\n\n # Scan through all the bounding boxes output from the network and keep only the ones with high confidence scores.\n confidences = []\n boxes = []\n final_boxes = []\n for out in outs:\n for detection in out:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n if confidence > conf_threshold:\n center_x = int(detection[0] * frame_width)\n center_y = int(detection[1] * frame_height)\n width = int(detection[2] * frame_width)\n height = int(detection[3] * frame_height)\n left = int(center_x - width / 2)\n top = int(center_y - height / 2)\n confidences.append(float(confidence))\n boxes.append([left, top, width, height])\n\n # Perform non maximum suppression to eliminate redundant overlapping boxes with lower confidences.\n indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold,\n nms_threshold)\n\n for i in indices:\n i = i[0]\n box = boxes[i]\n left = box[0]\n top = box[1]\n width = box[2]\n height = box[3]\n final_boxes.append(box)\n \n colour_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n face_img_crop = colour_frame[top-30:top+height+30, left-30:left+width+30]\n\n img_array = prepare_frame(face_img_crop)\n\n output_mask, colour, mask_result = detect_mask_usage(img_array, mode)\n\n cv2.rectangle(frame, (left, top), (left+width, top+height), colour, 3)\n cv2.putText(frame, output_mask, (left, top-10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, colour, 2)\n\n return final_boxes\n\ndef prepare_frame(img, size=[150,150]):\n\n img_reshaped = cv2.resize(img, (size[0],size[1]))\n img_array = image.img_to_array(img_reshaped)\n img_array = img_array.astype('float32')\n img_array /= 255.0\n img_array = img_array.reshape((1,) + img_array.shape)\n\n return img_array\n\ndef detect_mask_usage(img_array, mode):\n\n # predict mask presence: Detect Mask\n mask_result = 
detect_net.predict_on_batch(img_array)\n\n # Predict Mask Correctness: Mask Correctness\n mask_is_proper = mask_net.predict_on_batch(img_array)\n\n # Predict Mask Suggestions: Mask Suggestions\n suggestions = suggest_net.predict_on_batch(img_array)\n \n # Elaborate scores based on prediction values\n # get mask presence results\n score=np.amax(mask_result[0], axis=0)\n list_scores = list(mask_result[0])\n mask_detection_result_index = list_scores.index(score)\n \n # get mask correctness results\n score_2=np.amax(mask_is_proper[0], axis=0)\n list_scores_2 = list(mask_is_proper[0])\n correctness_result_index = list_scores_2.index(score_2)\n\n # get mask suggestions results\n score_3=np.amax(suggestions[0], axis=0)\n list_scores_3 = list(suggestions[0])\n suggestions_result_index = list_scores_3.index(score_3)\n\n \n if mask_detection_result_index == 1:\n output_mask = 'Wear a Mask!' \n colour = (0,0,255)\n\n else: \n\n if mode == 'simple':\n\n if correctness_result_index == 1:\n output_mask = 'Good!'\n colour = (0,255,0)\n else:\n output_mask = 'Wear it correctly!'\n colour = (0,152,232)\n \n elif mode == 'suggestions':\n\n if suggestions_result_index == 0:\n output_mask = 'Adjust on Chin!'\n colour = (0,152,232)\n elif suggestions_result_index == 1:\n output_mask = 'Cover your Nose!'\n colour = (0,152,232)\n elif suggestions_result_index == 2:\n output_mask = 'Cover Mouth and Nose!'\n colour = (0,152,232)\n elif suggestions_result_index == 3:\n output_mask = 'Good!'\n colour = (0,255,0)\n\n else:\n print('Mode not recongized. Please consider giving --mode \"suggestions\" or --mode \"simple\"')\n \n return output_mask, colour, mask_result\n" ]
[ [ "tensorflow.keras.models.load_model", "numpy.amax", "numpy.argmax", "tensorflow.keras.preprocessing.image.img_to_array" ] ]
visatish/ray
[ "dc76e51a60652b3210c93f81df6dafcf461d4431", "dc76e51a60652b3210c93f81df6dafcf461d4431", "dc76e51a60652b3210c93f81df6dafcf461d4431" ]
[ "python/ray/tune/logger.py", "python/ray/rllib/utils/tf_run_builder.py", "python/ray/rllib/tuned_examples/regression_tests/regression_test.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport csv\nimport json\nimport logging\nimport numpy as np\nimport os\nimport yaml\n\nfrom ray.tune.log_sync import get_syncer\nfrom ray.tune.result import NODE_IP, TRAINING_ITERATION, TIME_TOTAL_S, \\\n TIMESTEPS_TOTAL\n\nlogger = logging.getLogger(__name__)\n\ntry:\n import tensorflow as tf\nexcept ImportError:\n tf = None\n logger.warning(\"Couldn't import TensorFlow - \"\n \"disabling TensorBoard logging.\")\n\n\nclass Logger(object):\n \"\"\"Logging interface for ray.tune; specialized implementations follow.\n\n By default, the UnifiedLogger implementation is used which logs results in\n multiple formats (TensorBoard, rllab/viskit, plain json) at once.\n \"\"\"\n\n def __init__(self, config, logdir, upload_uri=None):\n self.config = config\n self.logdir = logdir\n self.uri = upload_uri\n self._init()\n\n def _init(self):\n pass\n\n def on_result(self, result):\n \"\"\"Given a result, appends it to the existing log.\"\"\"\n\n raise NotImplementedError\n\n def close(self):\n \"\"\"Releases all resources used by this logger.\"\"\"\n\n pass\n\n def flush(self):\n \"\"\"Flushes all disk writes to storage.\"\"\"\n\n pass\n\n\nclass UnifiedLogger(Logger):\n \"\"\"Unified result logger for TensorBoard, rllab/viskit, plain json.\n\n This class also periodically syncs output to the given upload uri.\"\"\"\n\n def _init(self):\n self._loggers = []\n for cls in [_JsonLogger, _TFLogger, _VisKitLogger]:\n if cls is _TFLogger and tf is None:\n logger.info(\"TF not installed - \"\n \"cannot log with {}...\".format(cls))\n continue\n self._loggers.append(cls(self.config, self.logdir, self.uri))\n self._log_syncer = get_syncer(self.logdir, self.uri)\n\n def on_result(self, result):\n for logger in self._loggers:\n logger.on_result(result)\n self._log_syncer.set_worker_ip(result.get(NODE_IP))\n self._log_syncer.sync_if_needed()\n\n def close(self):\n for logger in self._loggers:\n logger.close()\n self._log_syncer.sync_now(force=True)\n\n def flush(self):\n for logger in self._loggers:\n logger.flush()\n self._log_syncer.sync_now(force=True)\n self._log_syncer.wait()\n\n\nclass NoopLogger(Logger):\n def on_result(self, result):\n pass\n\n\nclass _JsonLogger(Logger):\n def _init(self):\n config_out = os.path.join(self.logdir, \"params.json\")\n with open(config_out, \"w\") as f:\n json.dump(self.config, f, sort_keys=True, cls=_SafeFallbackEncoder)\n local_file = os.path.join(self.logdir, \"result.json\")\n self.local_out = open(local_file, \"w\")\n\n def on_result(self, result):\n json.dump(result, self, cls=_SafeFallbackEncoder)\n self.write(\"\\n\")\n\n def write(self, b):\n self.local_out.write(b)\n self.local_out.flush()\n\n def close(self):\n self.local_out.close()\n\n\ndef to_tf_values(result, path):\n values = []\n for attr, value in result.items():\n if value is not None:\n if type(value) in [int, float, np.float32, np.float64, np.int32]:\n values.append(\n tf.Summary.Value(\n tag=\"/\".join(path + [attr]), simple_value=value))\n elif type(value) is dict:\n values.extend(to_tf_values(value, path + [attr]))\n return values\n\n\nclass _TFLogger(Logger):\n def _init(self):\n self._file_writer = tf.summary.FileWriter(self.logdir)\n\n def on_result(self, result):\n tmp = result.copy()\n for k in [\n \"config\", \"pid\", \"timestamp\", TIME_TOTAL_S, TRAINING_ITERATION\n ]:\n del tmp[k] # not useful to tf log these\n values = to_tf_values(tmp, [\"ray\", \"tune\"])\n train_stats = 
tf.Summary(value=values)\n t = result.get(TIMESTEPS_TOTAL) or result[TRAINING_ITERATION]\n self._file_writer.add_summary(train_stats, t)\n iteration_value = to_tf_values({\n \"training_iteration\": result[TRAINING_ITERATION]\n }, [\"ray\", \"tune\"])\n iteration_stats = tf.Summary(value=iteration_value)\n self._file_writer.add_summary(iteration_stats, t)\n self._file_writer.flush()\n\n def flush(self):\n self._file_writer.flush()\n\n def close(self):\n self._file_writer.close()\n\n\nclass _VisKitLogger(Logger):\n def _init(self):\n \"\"\"CSV outputted with Headers as first set of results.\"\"\"\n # Note that we assume params.json was already created by JsonLogger\n self._file = open(os.path.join(self.logdir, \"progress.csv\"), \"w\")\n self._csv_out = None\n\n def on_result(self, result):\n if self._csv_out is None:\n self._csv_out = csv.DictWriter(self._file, result.keys())\n self._csv_out.writeheader()\n self._csv_out.writerow(result.copy())\n\n def close(self):\n self._file.close()\n\n\nclass _SafeFallbackEncoder(json.JSONEncoder):\n def __init__(self, nan_str=\"null\", **kwargs):\n super(_SafeFallbackEncoder, self).__init__(**kwargs)\n self.nan_str = nan_str\n\n def default(self, value):\n try:\n if np.isnan(value):\n return None\n if np.issubdtype(value, float):\n return float(value)\n if np.issubdtype(value, int):\n return int(value)\n except Exception:\n return str(value) # give up, just stringify it (ok for logs)\n\n\ndef pretty_print(result):\n result = result.copy()\n result.update(config=None) # drop config from pretty print\n out = {}\n for k, v in result.items():\n if v is not None:\n out[k] = v\n\n cleaned = json.dumps(out, cls=_SafeFallbackEncoder)\n return yaml.safe_dump(json.loads(cleaned), default_flow_style=False)\n", "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport time\n\nimport tensorflow as tf\nfrom tensorflow.python.client import timeline\n\n\nclass TFRunBuilder(object):\n \"\"\"Used to incrementally build up a TensorFlow run.\n\n This is particularly useful for batching ops from multiple different\n policies in the multi-agent setting.\n \"\"\"\n\n def __init__(self, session, debug_name):\n self.session = session\n self.debug_name = debug_name\n self.feed_dict = {}\n self.fetches = []\n self._executed = None\n\n def add_feed_dict(self, feed_dict):\n assert not self._executed\n for k in feed_dict:\n assert k not in self.feed_dict\n self.feed_dict.update(feed_dict)\n\n def add_fetches(self, fetches):\n assert not self._executed\n base_index = len(self.fetches)\n self.fetches.extend(fetches)\n return list(range(base_index, len(self.fetches)))\n\n def get(self, to_fetch):\n if self._executed is None:\n try:\n self._executed = run_timeline(\n self.session, self.fetches, self.debug_name,\n self.feed_dict, os.environ.get(\"TF_TIMELINE_DIR\"))\n except Exception as e:\n print(\"Error fetching: {}, feed_dict={}\".format(\n self.fetches, self.feed_dict))\n raise e\n if isinstance(to_fetch, int):\n return self._executed[to_fetch]\n elif isinstance(to_fetch, list):\n return [self.get(x) for x in to_fetch]\n elif isinstance(to_fetch, tuple):\n return tuple(self.get(x) for x in to_fetch)\n else:\n raise ValueError(\"Unsupported fetch type: {}\".format(to_fetch))\n\n\n_count = 0\n\n\ndef run_timeline(sess, ops, debug_name, feed_dict={}, timeline_dir=None):\n if timeline_dir:\n run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata()\n start = 
time.time()\n fetches = sess.run(\n ops,\n options=run_options,\n run_metadata=run_metadata,\n feed_dict=feed_dict)\n trace = timeline.Timeline(step_stats=run_metadata.step_stats)\n global _count\n outf = os.path.join(\n timeline_dir, \"timeline-{}-{}-{}.json\".format(\n debug_name, os.getpid(), _count))\n _count += 1\n trace_file = open(outf, \"w\")\n print(\"Wrote tf timeline ({} s) to {}\".format(time.time() - start,\n os.path.abspath(outf)))\n trace_file.write(trace.generate_chrome_trace_format())\n else:\n fetches = sess.run(ops, feed_dict=feed_dict)\n return fetches\n", "#!/usr/bin/env python\n\"\"\"\nThis class runs the regression YAMLs in the ASV format.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import defaultdict\nimport numpy as np\nimport os\nimport yaml\n\nimport ray\nfrom ray import tune\n\nCONFIG_DIR = os.path.dirname(os.path.abspath(__file__))\n\n\ndef _evaulate_config(filename):\n with open(os.path.join(CONFIG_DIR, filename)) as f:\n experiments = yaml.load(f)\n for _, config in experiments.items():\n config[\"num_samples\"] = 3\n ray.init()\n trials = tune.run_experiments(experiments)\n results = defaultdict(list)\n for t in trials:\n results[\"time_total_s\"] += [t.last_result[\"time_total_s\"]]\n results[\"episode_reward_mean\"] += [\n t.last_result[\"episode_reward_mean\"]\n ]\n results[\"training_iteration\"] += [t.last_result[\"training_iteration\"]]\n\n return {k: np.median(v) for k, v in results.items()}\n\n\nclass Regression():\n def setup_cache(self):\n # We need to implement this in separate classes\n # below so that ASV will register the setup/class\n # as a separate test.\n raise NotImplementedError\n\n def teardown(self, *args):\n ray.shutdown()\n\n def track_time(self, result):\n return result[\"time_total_s\"]\n\n def track_reward(self, result):\n return result[\"episode_reward_mean\"]\n\n def track_iterations(self, result):\n return result[\"training_iteration\"]\n" ]
[ [ "numpy.isnan", "tensorflow.Summary", "numpy.issubdtype", "tensorflow.summary.FileWriter" ], [ "tensorflow.RunOptions", "tensorflow.python.client.timeline.Timeline", "tensorflow.RunMetadata" ], [ "numpy.median" ] ]
h-mayorquin/time_series_basic
[ "654fb67ef6258b3f200c15a2b8068ab9300401d7" ]
[ "hdf5_loading_three_bumps.py" ]
[ "import h5py\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\n\nfrom signals.aux_functions import gaussian_bump\nimport nexa.loading as load\nfrom visualization.sensors import visualize_SLM_hdf5\nfrom visualization.sensors import visualize_STDM_hdf5\nfrom visualization.sensor_clustering import visualize_cluster_matrix_hdf5\n\n# Load the database\nlocation = './results_database/three_bumps_distance.hdf5'\ndatabase = h5py.File(location, 'r')\n\n# Time \nTmax = 1000\ndt = 1.0\ntime = np.arange(0, Tmax, dt)\n\n# Parameters that the bumpbs share\nmax_rate = 100\nbase = 10\nvalue = 50\nattenuation = 2\n\n# Define three arangments for the values of the gaussian bumpbs\ncenter1 = 100\ncenter2 = 500\ncenter3 = 700\n\n# Now create the guassian bumps\ngb1 = gaussian_bump(time, center1, max_rate, base, value, attenuation)\ngb2 = gaussian_bump(time, center2, max_rate, base, value * 2, attenuation)\ngb3 = gaussian_bump(time, center3, max_rate, base, value * 0.5, attenuation)\n\n# Database extraction\nrun_name = str(center1) + '-'\nrun_name += str(center2) + '-'\nrun_name += str(center3)\n\nnexa_arrangement = '3-4-3'\nr = database[run_name]\n\n# Load everything\nSLM = load.get_SLM_hdf5(database, run_name)\nSTDM = load.get_STDM_hdf5(database, run_name, nexa_arrangement)\ncluster_to_index = load.get_cluster_to_index_hdf5(database, run_name, nexa_arrangement)\nindex_to_cluster = load.get_index_to_cluster_hdf5(database, run_name, nexa_arrangement)\ncluster_to_time_centers = load.get_cluster_to_time_centers_hdf5(database, run_name, nexa_arrangement)\n\n# Now visualize the signals and the SLM\nif False:\n fig = plt.figure()\n gs = gridspec.GridSpec(3, 2)\n\n ax1 = fig.add_subplot(gs[0, 0])\n ax1.plot(time, gb1)\n ax2 = fig.add_subplot(gs[1, 0])\n ax2.plot(time,gb2)\n ax3 = fig.add_subplot(gs[2, 0])\n ax3.plot(time, gb3)\n\n ax4 = fig.add_subplot(gs[:, 1])\n visualize_SLM_hdf5(database, run_name, ax=ax4)\n\n plt.show()\n\n# Now the signals and the STDM\nif False:\n fig = plt.figure()\n gs = gridspec.GridSpec(3, 2)\n\n ax1 = fig.add_subplot(gs[0, 0])\n ax1.plot(time, gb1)\n ax2 = fig.add_subplot(gs[1, 0])\n ax2.plot(time,gb2)\n ax3 = fig.add_subplot(gs[2, 0])\n ax3.plot(time, gb3)\n\n ax4 = fig.add_subplot(gs[:, 1])\n visualize_STDM_hdf5(database, run_name, nexa_arrangement, ax= ax4)\n\n plt.show()\n\n \n# Now visualize the SLM and STDM\nif False:\n fig = plt.figure()\n gs = gridspec.GridSpec(2, 2)\n ax1 = fig.add_subplot(gs[:, 0])\n visualize_SLM_hdf5(database, run_name, ax=ax1)\n ax2 = fig.add_subplot(gs[:, 1])\n visualize_STDM_hdf5(database, run_name, nexa_arrangement, ax= ax2)\n fig.show()\n plt.close(fig)\n\n# Now visualize the signals and the cluster matrix \nif True:\n fig = plt.figure()\n gs = gridspec.GridSpec(3, 2)\n\n ax1 = fig.add_subplot(gs[0, 0])\n ax1.plot(time, gb1)\n ax2 = fig.add_subplot(gs[1, 0])\n ax2.plot(time, gb2)\n ax3 = fig.add_subplot(gs[2, 0])\n ax3.plot(time, gb3)\n\n ax4 = fig.add_subplot(gs[:, 1])\n visualize_cluster_matrix_hdf5(database, run_name, nexa_arrangement, ax=ax4)\n\n plt.show()\n\n" ]
[ [ "numpy.arange", "matplotlib.gridspec.GridSpec", "matplotlib.pyplot.close", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
sun-yitao/GrabAIChallenge
[ "05946339e5a478216d7a9234e29e9bd7af5b3492" ]
[ "ws-dan/utils.py" ]
[ "import numpy as np\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy@k for the specified values of k\"\"\"\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100. / batch_size))\n\n return np.array(res, dtype='float')\n" ]
[ [ "numpy.array" ] ]
cinmoy98/neural-network-visualizer
[ "bbb8a5237fe60ee552e3f343ab03707d381895dc" ]
[ "app.py" ]
[ "\nimport streamlit as st\nimport json\nimport requests\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nURI = 'http://neural-net-viz-flask.herokuapp.com'\n\nst.title('Nural Network Visualizer')\nst.sidebar.markdown('## Input Image')\n\nif st.button('Get Random Prediction'):\n response = requests.post(URI, data={})\n response = json.loads(response.text)\n preds = response.get('prediction')\n image = response.get('image')\n image = np.reshape(image, (28, 28))\n \n st.sidebar.image(image, width=150)\n \n for layer, p in enumerate(preds):\n numbers = np.squeeze(np.array(p))\n \n plt.figure(figsize=(32, 4))\n \n if layer == 2:\n row = 1\n col = 10\n else:\n row = 2\n col = 16\n \n for i, number in enumerate(numbers):\n plt.subplot(row, col, i+1)\n plt.imshow(number * np.ones((8, 8, 3)).astype('float32'))\n plt.xticks([])\n plt.yticks([])\n \n if layer == 2:\n plt.xlabel(str(i), fontsize=40)\n plt.subplots_adjust(wspace=0.05, hspace=0.05)\n plt.tight_layout()\n st.text('Layer {}'.format(layer + 1))\n st.pyplot()\n \n" ]
[ [ "matplotlib.pyplot.yticks", "matplotlib.pyplot.tight_layout", "numpy.reshape", "numpy.ones", "matplotlib.pyplot.subplot", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.xticks", "numpy.array", "matplotlib.pyplot.figure" ] ]
stanford-oval/word-language-model
[ "3be3f65a198b518b66e22a910f28f83324db3825" ]
[ "data.py" ]
[ "import os\nimport torch\n\nclass Dictionary(object):\n \"\"\"Build word2idx and idx2word from Corpus(train/val/test)\"\"\"\n def __init__(self):\n self.word2idx = {} # word: index\n self.idx2word = [] # position(index): word\n\n def add_word(self, word):\n \"\"\"Create/Update word2idx and idx2word\"\"\"\n if word not in self.word2idx:\n self.idx2word.append(word)\n self.word2idx[word] = len(self.idx2word) - 1\n return self.word2idx[word]\n\n def __len__(self):\n return len(self.idx2word)\n\n\nclass Corpus(object):\n \"\"\"Corpus Tokenizer\"\"\"\n def __init__(self, path):\n self.dictionary = Dictionary()\n self.train = self.tokenize(os.path.join(path, 'train.txt'))\n self.valid = self.tokenize(os.path.join(path, 'valid.txt'))\n self.test = self.tokenize(os.path.join(path, 'test.txt'))\n\n def tokenize(self, path):\n \"\"\"Tokenizes a text file.\"\"\"\n assert os.path.exists(path)\n # Add words to the dictionary\n with open(path, 'r') as f:\n tokens = 0\n for line in f:\n # line to list of token + eos\n words = line.split() + ['<eos>']\n tokens += len(words)\n for word in words:\n self.dictionary.add_word(word)\n\n # Tokenize file content\n with open(path, 'r') as f:\n ids = torch.LongTensor(tokens)\n token = 0\n for line in f:\n words = line.split() + ['<eos>']\n for word in words:\n ids[token] = self.dictionary.word2idx[word]\n token += 1\n\n return ids" ]
[ [ "torch.LongTensor" ] ]
KexianShen/acados
[ "2981d29dc6ecdaabdb39cd6c0d784724704afe4a", "2981d29dc6ecdaabdb39cd6c0d784724704afe4a" ]
[ "interfaces/acados_template/acados_template/acados_sim.py", "examples/acados_python/getting_started/mhe/minimal_example_mhe.py" ]
[ "# -*- coding: future_fstrings -*-\n#\n# Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,\n# Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,\n# Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,\n# Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl\n#\n# This file is part of acados.\n#\n# The 2-Clause BSD License\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.;\n#\n\nimport numpy as np\nimport casadi as ca\nimport os\nfrom .acados_model import AcadosModel\nfrom .utils import get_acados_path, get_lib_ext\n\nclass AcadosSimDims:\n \"\"\"\n Class containing the dimensions of the model to be simulated.\n \"\"\"\n def __init__(self):\n self.__nx = None\n self.__nu = None\n self.__nz = 0\n self.__np = 0\n\n @property\n def nx(self):\n \"\"\":math:`n_x` - number of states. Type: int > 0\"\"\"\n return self.__nx\n\n @property\n def nz(self):\n \"\"\":math:`n_z` - number of algebraic variables. Type: int >= 0\"\"\"\n return self.__nz\n\n @property\n def nu(self):\n \"\"\":math:`n_u` - number of inputs. Type: int >= 0\"\"\"\n return self.__nu\n\n @property\n def np(self):\n \"\"\":math:`n_p` - number of parameters. 
Type: int >= 0\"\"\"\n return self.__np\n\n @nx.setter\n def nx(self, nx):\n if isinstance(nx, int) and nx > 0:\n self.__nx = nx\n else:\n raise Exception('Invalid nx value, expected positive integer.')\n\n @nz.setter\n def nz(self, nz):\n if isinstance(nz, int) and nz > -1:\n self.__nz = nz\n else:\n raise Exception('Invalid nz value, expected nonnegative integer.')\n\n @nu.setter\n def nu(self, nu):\n if isinstance(nu, int) and nu > -1:\n self.__nu = nu\n else:\n raise Exception('Invalid nu value, expected nonnegative integer.')\n\n @np.setter\n def np(self, np):\n if isinstance(np, int) and np > -1:\n self.__np = np\n else:\n raise Exception('Invalid np value, expected nonnegative integer.')\n\n def set(self, attr, value):\n setattr(self, attr, value)\n\n\nclass AcadosSimOpts:\n \"\"\"\n class containing the solver options\n \"\"\"\n def __init__(self):\n self.__integrator_type = 'ERK'\n self.__collocation_type = 'GAUSS_LEGENDRE'\n self.__Tsim = None\n # ints\n self.__sim_method_num_stages = 1\n self.__sim_method_num_steps = 1\n self.__sim_method_newton_iter = 3\n # bools\n self.__sens_forw = True\n self.__sens_adj = False\n self.__sens_algebraic = False\n self.__sens_hess = False\n self.__output_z = False\n self.__sim_method_jac_reuse = 0\n\n @property\n def integrator_type(self):\n \"\"\"Integrator type. Default: 'ERK'.\"\"\"\n return self.__integrator_type\n\n @property\n def num_stages(self):\n \"\"\"Number of stages in the integrator. Default: 1\"\"\"\n return self.__sim_method_num_stages\n\n @property\n def num_steps(self):\n \"\"\"Number of steps in the integrator. Default: 1\"\"\"\n return self.__sim_method_num_steps\n\n @property\n def newton_iter(self):\n \"\"\"Number of Newton iterations in simulation method. Default: 3\"\"\"\n return self.__sim_method_newton_iter\n\n @property\n def sens_forw(self):\n \"\"\"Boolean determining if forward sensitivities are computed. Default: True\"\"\"\n return self.__sens_forw\n\n @property\n def sens_adj(self):\n \"\"\"Boolean determining if adjoint sensitivities are computed. Default: False\"\"\"\n return self.__sens_adj\n\n @property\n def sens_algebraic(self):\n \"\"\"Boolean determining if sensitivities wrt algebraic variables are computed. Default: False\"\"\"\n return self.__sens_algebraic\n\n @property\n def sens_hess(self):\n \"\"\"Boolean determining if hessians are computed. Default: False\"\"\"\n return self.__sens_hess\n\n @property\n def output_z(self):\n \"\"\"Boolean determining if values for algebraic variables (corresponding to start of simulation interval) are computed. Default: False\"\"\"\n return self.__output_z\n\n @property\n def sim_method_jac_reuse(self):\n \"\"\"Integer determining if jacobians are reused (0 or 1). Default: 0\"\"\"\n return self.__sim_method_jac_reuse\n\n @property\n def T(self):\n \"\"\"Time horizon\"\"\"\n return self.__Tsim\n\n @property\n def collocation_type(self):\n \"\"\"Collocation type: relevant for implicit integrators\n -- string in {GAUSS_RADAU_IIA, GAUSS_LEGENDRE}\n\n Default: GAUSS_LEGENDRE\n \"\"\"\n return self.__collocation_type\n\n @integrator_type.setter\n def integrator_type(self, integrator_type):\n integrator_types = ('ERK', 'IRK', 'GNSF')\n if integrator_type in integrator_types:\n self.__integrator_type = integrator_type\n else:\n raise Exception('Invalid integrator_type value. 
Possible values are:\\n\\n' \\\n + ',\\n'.join(integrator_types) + '.\\n\\nYou have: ' + integrator_type + '.\\n\\n')\n\n @collocation_type.setter\n def collocation_type(self, collocation_type):\n collocation_types = ('GAUSS_RADAU_IIA', 'GAUSS_LEGENDRE')\n if collocation_type in collocation_types:\n self.__collocation_type = collocation_type\n else:\n raise Exception('Invalid collocation_type value. Possible values are:\\n\\n' \\\n + ',\\n'.join(collocation_types) + '.\\n\\nYou have: ' + collocation_type + '.\\n\\n')\n\n @T.setter\n def T(self, T):\n self.__Tsim = T\n\n @num_stages.setter\n def num_stages(self, num_stages):\n if isinstance(num_stages, int):\n self.__sim_method_num_stages = num_stages\n else:\n raise Exception('Invalid num_stages value. num_stages must be an integer.')\n\n @num_steps.setter\n def num_steps(self, num_steps):\n if isinstance(num_steps, int):\n self.__sim_method_num_steps = num_steps\n else:\n raise Exception('Invalid num_steps value. num_steps must be an integer.')\n\n @newton_iter.setter\n def newton_iter(self, newton_iter):\n if isinstance(newton_iter, int):\n self.__sim_method_newton_iter = newton_iter\n else:\n raise Exception('Invalid newton_iter value. newton_iter must be an integer.')\n\n @sens_forw.setter\n def sens_forw(self, sens_forw):\n if sens_forw in (True, False):\n self.__sens_forw = sens_forw\n else:\n raise Exception('Invalid sens_forw value. sens_forw must be a Boolean.')\n\n @sens_adj.setter\n def sens_adj(self, sens_adj):\n if sens_adj in (True, False):\n self.__sens_adj = sens_adj\n else:\n raise Exception('Invalid sens_adj value. sens_adj must be a Boolean.')\n\n @sens_hess.setter\n def sens_hess(self, sens_hess):\n if sens_hess in (True, False):\n self.__sens_hess = sens_hess\n else:\n raise Exception('Invalid sens_hess value. sens_hess must be a Boolean.')\n\n @sens_algebraic.setter\n def sens_algebraic(self, sens_algebraic):\n if sens_algebraic in (True, False):\n self.__sens_algebraic = sens_algebraic\n else:\n raise Exception('Invalid sens_algebraic value. sens_algebraic must be a Boolean.')\n\n @output_z.setter\n def output_z(self, output_z):\n if output_z in (True, False):\n self.__output_z = output_z\n else:\n raise Exception('Invalid output_z value. output_z must be a Boolean.')\n\n @sim_method_jac_reuse.setter\n def sim_method_jac_reuse(self, sim_method_jac_reuse):\n if sim_method_jac_reuse in (0, 1):\n self.__sim_method_jac_reuse = sim_method_jac_reuse\n else:\n raise Exception('Invalid sim_method_jac_reuse value. sim_method_jac_reuse must be 0 or 1.')\n\nclass AcadosSim:\n \"\"\"\n The class has the following properties that can be modified to formulate a specific simulation problem, see below:\n\n :param acados_path: string with the path to acados. 
It is used to generate the include and lib paths.\n\n - :py:attr:`dims` of type :py:class:`acados_template.acados_ocp.AcadosSimDims` - are automatically detected from model\n - :py:attr:`model` of type :py:class:`acados_template.acados_model.AcadosModel`\n - :py:attr:`solver_options` of type :py:class:`acados_template.acados_sim.AcadosSimOpts`\n\n - :py:attr:`acados_include_path` (set automatically)\n - :py:attr:`shared_lib_ext` (set automatically)\n - :py:attr:`acados_lib_path` (set automatically)\n - :py:attr:`parameter_values` - used to initialize the parameters (can be changed)\n\n \"\"\"\n def __init__(self, acados_path=''):\n if acados_path == '':\n acados_path = get_acados_path()\n self.dims = AcadosSimDims()\n \"\"\"Dimension definitions, automatically detected from :py:attr:`model`. Type :py:class:`acados_template.acados_sim.AcadosSimDims`\"\"\"\n self.model = AcadosModel()\n \"\"\"Model definitions, type :py:class:`acados_template.acados_model.AcadosModel`\"\"\"\n self.solver_options = AcadosSimOpts()\n \"\"\"Solver Options, type :py:class:`acados_template.acados_sim.AcadosSimOpts`\"\"\"\n\n self.acados_include_path = os.path.join(acados_path, 'include').replace(os.sep, '/') # the replace part is important on Windows for CMake\n \"\"\"Path to acados include directory (set automatically), type: `string`\"\"\"\n self.acados_lib_path = os.path.join(acados_path, 'lib').replace(os.sep, '/') # the replace part is important on Windows for CMake\n \"\"\"Path to where acados library is located (set automatically), type: `string`\"\"\"\n\n self.code_export_directory = 'c_generated_code'\n \"\"\"Path to where code will be exported. Default: `c_generated_code`.\"\"\"\n self.shared_lib_ext = get_lib_ext()\n\n self.cython_include_dirs = ''\n self.__parameter_values = np.array([])\n\n @property\n def parameter_values(self):\n \"\"\":math:`p` - initial values for parameter - can be updated\"\"\"\n return self.__parameter_values\n\n @parameter_values.setter\n def parameter_values(self, parameter_values):\n if isinstance(parameter_values, np.ndarray):\n self.__parameter_values = parameter_values\n else:\n raise Exception('Invalid parameter_values value. ' +\n f'Expected numpy array, got {type(parameter_values)}.')\n\n def set(self, attr, value):\n # tokenize string\n tokens = attr.split('_', 1)\n if len(tokens) > 1:\n setter_to_call = getattr(getattr(self, tokens[0]), 'set')\n else:\n setter_to_call = getattr(self, 'set')\n\n setter_to_call(tokens[1], value)\n\n return\n", "# -*- coding: future_fstrings -*-\n#\n# Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,\n# Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,\n# Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,\n# Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl\n#\n# This file is part of acados.\n#\n# The 2-Clause BSD License\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. 
Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.;\n#\n\nimport sys\nsys.path.insert(0, '../common')\n\nfrom pendulum_model import export_pendulum_ode_model\nfrom export_mhe_ode_model import export_mhe_ode_model\n\nfrom export_ocp_solver import export_ocp_solver\nfrom export_mhe_solver import export_mhe_solver\n\nimport numpy as np\nfrom scipy.linalg import block_diag\n\nfrom utils import plot_pendulum\n\n# general\n\nTf = 1.0\nN = 20\nh = Tf/N\nFmax = 80\n\n# ocp model and solver\nmodel = export_pendulum_ode_model()\n\nnx = model.x.size()[0]\nnu = model.u.size()[0]\n\nQ_ocp = np.diag([1e3, 1e3, 1e-2, 1e-2])\nR_ocp = 1e-2 *np.eye(1)\n\nacados_solver_ocp = export_ocp_solver(model, N, h, Q_ocp, R_ocp, Fmax)\n\n# mhe model and solver\nmodel_mhe = export_mhe_ode_model()\n\nnx = model_mhe.x.size()[0]\nnw = model_mhe.u.size()[0]\nny = nx\n\nQ0_mhe = 100*np.eye((nx))\nQ_mhe = 0.1*np.eye(nx)\nR_mhe = 0.1*np.eye(nx)\n# Q_mhe = np.zeros((nx, nx))\n# Q0_mhe = np.diag([0.01, 1, 1, 1])\n# R_mhe = np.diag([0.1, 10, 10, 10])\nacados_solver_mhe = export_mhe_solver(model_mhe, N, h, Q_mhe, Q0_mhe, R_mhe)\n\n# simulation\nv_stds = [0.1, 0.01, 0.01, 0.01]\nv_stds = [0, 0, 0, 0]\n\nsimX = np.ndarray((N+1, nx))\nsimU = np.ndarray((N, nu))\nsimY = np.ndarray((N+1, nx))\n\nsimXest = np.zeros((N+1, nx))\nsimWest = np.zeros((N, nx))\n\n# arrival cost mean\nx0_bar = np.array([0.0, np.pi, 0.0, 0.0])\n\n# solve ocp problem\nstatus = acados_solver_ocp.solve()\n\nif status != 0:\n raise Exception(f'acados returned status {status}.')\n\n# get solution\nfor i in range(N):\n simX[i,:] = acados_solver_ocp.get(i, \"x\")\n simU[i,:] = acados_solver_ocp.get(i, \"u\")\n simY[i,:] = simX[i,:] + np.transpose(np.diag(v_stds) @ np.random.standard_normal((nx, 1)))\n\nsimX[N,:] = acados_solver_ocp.get(N, \"x\")\nsimY[N,:] = simX[N,:] + np.transpose(np.diag(v_stds) @ np.random.standard_normal((nx, 1)))\n\n# set measurements and controls\nyref_0 = np.zeros((3*nx, ))\nyref_0[:nx] = simY[0, :]\nyref_0[2*nx:] = x0_bar\nacados_solver_mhe.set(0, \"yref\", yref_0)\nacados_solver_mhe.set(0, \"p\", simU[0,:])\n#acados_solver_mhe.set(0, \"x\", simX[0,:])\n\nyref = np.zeros((2*nx, ))\nfor j in range(1,N):\n yref[:nx] = simY[j, :]\n acados_solver_mhe.set(j, \"yref\", yref)\n acados_solver_mhe.set(j, \"p\", simU[j,:])\n # acados_solver_mhe.set(j, \"x\", simX[j,:])\n\n\n# solve mhe problem\nstatus = acados_solver_mhe.solve()\n\nif status != 0 and status != 2:\n raise Exception(f'acados returned status {status}.')\n\n# get solution\nfor i in range(N):\n simXest[i,:] = acados_solver_mhe.get(i, \"x\")\n simWest[i,:] 
= acados_solver_mhe.get(i, \"u\")\n\nsimXest[N, :] = acados_solver_mhe.get(N, \"x\")\n\nprint('difference |x0_est - x0_bar|', np.linalg.norm(x0_bar - simXest[0, :]))\nprint('difference |x_est - x_true|', np.linalg.norm(simXest - simX))\n\nplot_pendulum(np.linspace(0, Tf, N+1), Fmax, simU, simX, simXest, simY, latexify=False)\n" ]
[ [ "numpy.array" ], [ "numpy.diag", "numpy.linspace", "numpy.random.standard_normal", "numpy.eye", "numpy.ndarray", "numpy.linalg.norm", "numpy.array", "numpy.zeros" ] ]
dmytrov/gaussianprocess
[ "7044bd2d66f44e10656fee17e94fdee0c24c70bb" ]
[ "code/utils/numerical/ssyrk.py" ]
[ "import os\nimport sys\nimport time\nimport numpy\nfrom numpy import zeros\nfrom numpy.random import randn\nfrom scipy.linalg import blas\n\n\ndef run_ssyrk(N, l):\n\n A = randn(N, N).astype('float32', order='F')\n C = zeros((N, N), dtype='float32', order='F')\n\n start = time.time()\n for i in range(0, l):\n blas.ssyrk(1.0, A, c=C, overwrite_c=True)\n end = time.time()\n\n timediff = (end - start)\n mflops = (N * N * N) * l / timediff\n mflops *= 1e-6\n\n size = \"%dx%d\" % (N, N)\n print(\"%14s :\\t%20f MFlops\\t%20f sec\" % (size, mflops, timediff))\n\n\nif __name__ == \"__main__\":\n N = 128\n NMAX = 2048\n NINC = 128\n LOOPS = 1\n\n z = 0\n for arg in sys.argv:\n if z == 1:\n N = int(arg)\n elif z == 2:\n NMAX = int(arg)\n elif z == 3:\n NINC = int(arg)\n elif z == 4:\n LOOPS = int(arg)\n\n z = z + 1\n\n if 'OPENBLAS_LOOPS' in os.environ:\n p = os.environ['OPENBLAS_LOOPS']\n if p:\n LOOPS = int(p)\n\n print(\"From: %d To: %d Step=%d Loops=%d\" % (N, NMAX, NINC, LOOPS))\n print(\"\\tSIZE\\t\\t\\tFlops\\t\\t\\t\\t\\tTime\")\n\n for i in range(N, NMAX + NINC, NINC):\n run_ssyrk(i, LOOPS)\n\n " ]
[ [ "scipy.linalg.blas.ssyrk", "numpy.zeros", "numpy.random.randn" ] ]
nadaved1/hailo_model_zoo
[ "42b716f337dde4ec602022a34d6a07a1bbd45539", "42b716f337dde4ec602022a34d6a07a1bbd45539" ]
[ "hailo_model_zoo/utils/data.py", "hailo_model_zoo/core/postprocessing/detection/efficientdet.py" ]
[ "from builtins import object\nimport os\nimport cv2\nimport numpy as np\nimport tensorflow as tf\n\nfrom hailo_model_zoo.core.datasets import dataset_factory\nfrom hailo_model_zoo.utils.video_utils import VideoCapture\n\n\ndef _open_image_file(img_path):\n image = tf.io.read_file(img_path)\n image = tf.cast(tf.image.decode_jpeg(image, channels=3), tf.uint8)\n image_name = tf.compat.v1.string_split([img_path], os.path.sep).values[-1]\n return image, {\n 'img_orig': image,\n 'image_name': image_name,\n }\n\n\ndef _read_npz(item):\n img_name = item.decode()\n data = np.load(img_name, allow_pickle=True)\n base_name = os.path.basename(img_name).replace('.npz', '')\n data = {key: data[key].item() for key in data}\n image_info = data[base_name]['image_info']\n rpn_boxes = image_info['rpn_proposals']\n num_rpn_boxes = image_info['num_rpn_proposals']\n return data[base_name]['logits'], rpn_boxes, num_rpn_boxes, image_info['image_name'], \\\n image_info['image_id']\n\n\ndef _open_featuremap(img_path):\n featuremap, rpn_boxes, num_rpn_boxes, \\\n image_name, image_id = tf.compat.v1.py_func(_read_npz, [img_path], [tf.float32, tf.float32,\n tf.int64, tf.string, tf.int32])\n return featuremap, {\"rpn_proposals\": rpn_boxes,\n \"num_rpn_boxes\": num_rpn_boxes,\n \"image_name\": image_name,\n \"image_id\": image_id}\n\n\ndef _parse_video_frame(image, name):\n image = tf.cast(image, tf.uint8)\n return image, {'label_index': tf.cast(0, tf.float32),\n 'img_orig': image,\n 'image_name': name,\n 'is_same': tf.cast(0, tf.float32),\n 'mask': tf.image.rgb_to_grayscale(image)}\n\n\ndef _video_generator(video_path):\n def _video_generator_implementation():\n filename = os.path.basename(video_path)\n base, _ = os.path.splitext(filename)\n with VideoCapture(video_path) as cap:\n total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n required_digits = len(str(total_frames))\n number_format = '{{:0{}d}}'.format(required_digits)\n name_format = '{}_frame_' + number_format + '.png'\n frame_count = 0\n success = True\n while success:\n success, image = cap.read()\n if success:\n image_name = name_format.format(base, frame_count)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n yield image, image_name\n frame_count += 1\n return _video_generator_implementation\n\n\nclass DataFeed(object):\n \"\"\"DataFeed class. 
Use this class to handle input data\"\"\"\n\n def __init__(self, preprocessing_callback, batch_size=8):\n self._preproc_callback = preprocessing_callback\n self._batch_size = batch_size\n\n @property\n def iterator(self):\n return tf.compat.v1.data.make_initializable_iterator(self._dataset)\n\n\nclass TFRecordFeed(DataFeed):\n def __init__(self, preprocessing_callback, batch_size, tfrecord_file, dataset_name):\n super().__init__(preprocessing_callback, batch_size=batch_size)\n parse_func = dataset_factory.get_dataset_parse_func(dataset_name)\n dataset = tf.data.TFRecordDataset([str(tfrecord_file)]).map(parse_func)\n if self._preproc_callback:\n dataset = dataset.map(self._preproc_callback)\n self._dataset = dataset.batch(self._batch_size)\n\n\ndef _dataset_from_folder(folder_path):\n all_files = []\n for root, dirs, files in os.walk(folder_path, topdown=False):\n for name in files:\n if os.path.splitext(name)[-1].lower() in ['.jpg', '.jpeg', '.png', '.npz']:\n all_files.append(os.path.join(root, name))\n all_files.sort()\n all_files = tf.convert_to_tensor(all_files, dtype=tf.string)\n dataset = tf.data.Dataset.from_tensor_slices(all_files)\n return dataset\n\n\nclass ImageFeed(DataFeed):\n def __init__(self, preprocessing_callback, batch_size, folder_path):\n super().__init__(preprocessing_callback, batch_size)\n\n dataset = _dataset_from_folder(folder_path).map(_open_image_file)\n if self._preproc_callback:\n dataset = dataset.map(self._preproc_callback)\n self._dataset = dataset.batch(self._batch_size)\n\n\nclass RegionProposalFeed(DataFeed):\n def __init__(self, preprocessing_callback, batch_size, folder_path):\n super().__init__(preprocessing_callback, batch_size)\n\n dataset = _dataset_from_folder(folder_path).map(_open_featuremap)\n if self._preproc_callback:\n dataset = dataset.map(self._preproc_callback)\n dataset = dataset.apply(tf.data.experimental.unbatch())\n self._dataset = dataset.batch(self._batch_size)\n\n\nclass VideoFeed(DataFeed):\n def __init__(self, preprocessing_callback, batch_size, file_path):\n super().__init__(preprocessing_callback, batch_size=batch_size)\n\n dataset = tf.data.Dataset.from_generator(_video_generator(file_path), (tf.float32, tf.string))\n dataset = dataset.map(_parse_video_frame)\n if self._preproc_callback:\n dataset = dataset.map(self._preproc_callback)\n self._dataset = dataset.batch(self._batch_size)\n", "import tensorflow as tf\nimport numpy as np\n\nfrom .ssd import collect_box_class_predictions\nfrom tensorflow.image import combined_non_max_suppression\n\n\nclass EfficientDetPostProc(object):\n\n def __init__(self, img_dims, nms_iou_thresh, score_threshold, anchors,\n classes, labels_offset, max_detections=100, **kwargs):\n self._image_dims = img_dims\n self._nms_iou_thresh = nms_iou_thresh\n self._score_threshold = score_threshold\n self._num_classes = classes\n self._max_detections = max_detections\n self._label_offset = labels_offset\n if anchors is None:\n raise ValueError('Missing detection anchors metadata')\n self._anchors_input = tf.reshape(tf.compat.v1.py_func(self.anchors_for_shape,\n [img_dims, anchors[\"aspect_ratios\"],\n anchors[\"scales\"],\n anchors[\"sizes\"], anchors[\"strides\"]],\n ['float32'])[0], (1, -1, 4))\n\n def bbox_transform_inv(self, deltas):\n cxa = (self._anchors_input[..., 0] + self._anchors_input[..., 2]) / 2\n cya = (self._anchors_input[..., 1] + self._anchors_input[..., 3]) / 2\n wa = self._anchors_input[..., 2] - self._anchors_input[..., 0]\n ha = self._anchors_input[..., 3] - self._anchors_input[..., 
1]\n ty, tx, th, tw = deltas[..., 0], deltas[..., 1], deltas[..., 2], deltas[..., 3]\n w = tf.math.exp(tw) * wa\n h = tf.math.exp(th) * ha\n cy = ty * ha + cya\n cx = tx * wa + cxa\n ymin = cy - h / 2.\n xmin = cx - w / 2.\n ymax = cy + h / 2.\n xmax = cx + w / 2.\n return tf.stack([xmin, ymin, xmax, ymax], axis=-1)\n\n def clip_boxes(self, boxes):\n height, width = self._image_dims\n x1 = tf.clip_by_value(boxes[:, :, 0], 0, width - 1) / width\n y1 = tf.clip_by_value(boxes[:, :, 1], 0, height - 1) / height\n x2 = tf.clip_by_value(boxes[:, :, 2], 0, width - 1) / width\n y2 = tf.clip_by_value(boxes[:, :, 3], 0, height - 1) / height\n return tf.stack([y1, x1, y2, x2], axis=2)\n\n def postprocessing(self, endnodes, **kwargs):\n with tf.compat.v1.name_scope('Postprocessor'):\n regression, classification = collect_box_class_predictions(endnodes, self._num_classes)\n classification = tf.sigmoid(classification)\n boxes = self.bbox_transform_inv(regression)\n boxes = self.clip_boxes(boxes)\n (nmsed_boxes, nmsed_scores, nmsed_classes, num_detections) = \\\n combined_non_max_suppression(boxes=tf.expand_dims(boxes, axis=[2]),\n scores=classification,\n score_threshold=self._score_threshold,\n iou_threshold=self._nms_iou_thresh,\n max_output_size_per_class=self._max_detections,\n max_total_size=self._max_detections)\n nmsed_classes = tf.cast(tf.add(nmsed_classes, self._label_offset), tf.int16)\n return {'detection_boxes': nmsed_boxes,\n 'detection_scores': nmsed_scores,\n 'detection_classes': nmsed_classes,\n 'num_detections': num_detections}\n\n def shift(self, feature_map_shape, stride, anchors):\n # create a grid starting from half stride from the top left corner\n shift_x = (np.arange(0, feature_map_shape[1]) + 0.5) * stride\n shift_y = (np.arange(0, feature_map_shape[0]) + 0.5) * stride\n shift_x, shift_y = np.meshgrid(shift_x, shift_y)\n shifts = np.vstack((\n shift_x.ravel(), shift_y.ravel(),\n shift_x.ravel(), shift_y.ravel()\n )).transpose()\n A = anchors.shape[0]\n K = shifts.shape[0]\n all_anchors = (anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2)))\n all_anchors = np.array(all_anchors.reshape((K * A, 4)), np.float32)\n return all_anchors\n\n def generate_anchors(self, base_size, aspect_ratio, scales):\n num_anchors = len(aspect_ratio) * len(scales)\n\n # initialize output anchors\n anchors = np.zeros((num_anchors, 4))\n anchors[:, 2:] = base_size * np.tile(np.repeat(scales, len(aspect_ratio))[None], (2, 1)).T\n areas = anchors[:, 2] * anchors[:, 3]\n\n # correct for ratios\n anchors[:, 2] = np.sqrt(areas / np.tile(aspect_ratio, len(scales)))\n anchors[:, 3] = anchors[:, 2] * np.tile(aspect_ratio, len(scales))\n anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T\n anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T\n\n return anchors\n\n def anchors_for_shape(self, image_dims, aspect_ratio, scales, sizes, strides):\n pyramid_levels = [3, 4, 5, 6, 7]\n feature_map_shapes = [(image_dims + 2 ** x - 1) // (2 ** x) for x in pyramid_levels]\n all_anchors = np.zeros((0, 4), dtype=np.float32)\n for idx, p in enumerate(pyramid_levels):\n anchors = self.generate_anchors(sizes[idx], aspect_ratio, scales)\n shifted_anchors = self.shift(feature_map_shapes[idx], strides[idx], anchors)\n all_anchors = np.append(all_anchors, shifted_anchors, axis=0)\n return all_anchors\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.data.experimental.unbatch", "tensorflow.compat.v1.string_split", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.cast", "tensorflow.compat.v1.py_func", "tensorflow.compat.v1.data.make_initializable_iterator", "tensorflow.image.rgb_to_grayscale", "tensorflow.io.read_file", "numpy.load", "tensorflow.image.decode_jpeg" ], [ "tensorflow.clip_by_value", "tensorflow.stack", "numpy.arange", "tensorflow.sigmoid", "numpy.tile", "tensorflow.expand_dims", "tensorflow.math.exp", "tensorflow.compat.v1.py_func", "numpy.append", "tensorflow.add", "numpy.meshgrid", "numpy.zeros", "tensorflow.compat.v1.name_scope" ] ]
xenron/coco
[ "e318d534127b769612716c05d40e3d5b090eb5a3" ]
[ "package/ml/regression/logistic.py" ]
[ "from sklearn import linear_model\nfrom ml.regression.base import Regression\n\n\nclass LogisticRegression(Regression):\n\n def __init__(self):\n Regression.__init__(self)\n self._name = \"Logistic\"\n self._model = linear_model.LogisticRegression(C=1e5)\n\n def predict_proba(self, data):\n return self._model.predict_proba(data)\n" ]
[ [ "sklearn.linear_model.LogisticRegression" ] ]
POSTECH-CVLab/Geometric-Primitives
[ "e4b16d8930f4a9d1c906d06255988d02f54a6deb" ]
[ "geometric_primitives/rules/rules_mixed_22_12.py" ]
[ "import numpy as np\n\n\nPROBS_CONTACTS = np.array([4.0, 2.0, 4.0, 2.0])\nPROBS_CONTACTS /= np.sum(PROBS_CONTACTS)\n\nRULE_CONTACTS = [\n # [0.5, 1.0] -> 4\n {\n 'num_contacts': 1,\n 'translations': [[0.5, 1.0], [-0.5, 1.0], [0.5, -1.0], [-0.5, -1.0]],\n 'direction': 0\n },\n # [0.5, 0.0] -> 2\n {\n 'num_contacts': 2,\n 'translations': [[0.5, 0.0], [-0.5, 0.0]],\n 'direction': 0\n },\n # [1.0, 0.5] -> 4\n {\n 'num_contacts': 1,\n 'translations': [[1.0, 0.5], [-1.0, 0.5], [1.0, -0.5], [-1.0, -0.5]],\n 'direction': 1\n },\n # [0.0, 0.5] -> 2\n {\n 'num_contacts': 2,\n 'translations': [[0.0, 0.5], [0.0, -0.5]],\n 'direction': 1\n },\n]\n\nLIST_RULES = []\nind = 1\nfor rule in RULE_CONTACTS:\n cur_direction = rule['direction']\n cur_num_contacts = rule['num_contacts']\n\n for translation in rule['translations']:\n cur_rule = [ind, [cur_direction, translation, cur_num_contacts]]\n LIST_RULES.append(cur_rule)\n\n ind += 1\n" ]
[ [ "numpy.array", "numpy.sum" ] ]
wakky927/Computational-Engineering-B
[ "3720d96668a32dc73f38ed0bc8afe4705452de9e" ]
[ "src/lecture9/solver.py" ]
[ "import numpy as np\n\nimport condition\n\n\ndef solve_matrix(p, Ap, Ae, Aw, An, As, bb, m, n):\n md = 101\n nd = 101\n\n p_old = np.zeros((md, nd))\n\n ''' SOR algorithm '''\n iter_max = 300 # SOR max iteration steps\n relax_factor = 1.8 # SOR relaxation factor\n\n for iter_i in range(1, iter_max):\n error = 0.0\n\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n p_old[i][j] = p[i][j]\n\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n p[i][j] = (bb[i][j] - Ae[i][j] * p_old[i + 1][j] - Aw[i][j]\n * p[i - 1][j] - An[i][j] * p_old[i][j + 1]\n - As[i][j] * p[i][j - 1]) / Ap[i][j]\\\n * relax_factor + p_old[i][j] * (1 - relax_factor)\n a = np.abs(p[i][j] - p_old[i][j])\n e = max(error, a)\n error = e\n\n # print(f\"iteration no. {iter_i} -- error = {error}\")\n\n return\n\n\ndef solve_p(p, u, v, u_old, v_old, nue, density, dx, dy, dt, m, n):\n md = 101\n nd = 101\n\n Ap = np.zeros((md, nd))\n Ae = np.zeros((md, nd))\n Aw = np.zeros((md, nd))\n An = np.zeros((md, nd))\n As = np.zeros((md, nd))\n bb = np.zeros((md, nd))\n\n u_stg = 0.0\n v_stg = 0.0\n\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n ''' velocity u '''\n # convection_x (1st upwind scheme)\n u[i][j] = u_old[i][j]\\\n - dt * max(u_old[i][j], 0.0)\\\n * (u_old[i][j] - u_old[i - 1][j]) / dx\\\n - dt * min(u_old[i][j], 0.0)\\\n * (u_old[i + 1][j] - u_old[i][j]) / dx\n\n # convection_y\n # v_stg = 0.25 * (v_old[i][j] + v_old[i + 1][j] + v_old[i][j - 1]\n # + v_old[i + 1][j - 1]) # Staggered grid\n u[i][j] = u[i][j]\\\n - dt * max(v_old[i][j], 0.0)\\\n * (u_old[i][j] - u_old[i][j - 1]) / dy\\\n - dt * min(v_old[i][j], 0.0)\\\n * (u_old[i][j + 1] - u_old[i][j]) / dy\n\n # diffusion_x\n u[i][j] = u[i][j]\\\n + dt * nue * (u_old[i + 1][j] - 2 * u_old[i][j]\n + u_old[i - 1][j]) / dx**2\n\n # diffusion_y\n u[i][j] = u[i][j] \\\n + dt * nue * (u_old[i][j + 1] - 2 * u_old[i][j]\n + u_old[i][j - 1]) / dy**2\n\n ''' velocity v '''\n # convection_x (1st upwind scheme)\n # u_stg = 0.25 * (u_old[i][j] + u_old[i - 1][j] + u_old[i][j + 1]\n # + u_old[i - 1][j + 1]) # Staggered grid\n v[i][j] = v_old[i][j] \\\n - dt * max(u_old[i][j], 0.0) \\\n * (v_old[i][j] - v_old[i - 1][j]) / dx \\\n - dt * min(u_old[i][j], 0.0) \\\n * (v_old[i + 1][j] - v_old[i][j]) / dx\n\n # convection_y\n v[i][j] = v[i][j] \\\n - dt * max(v_old[i][j], 0.0) \\\n * (v_old[i][j] - v_old[i][j - 1]) / dy \\\n - dt * min(v_old[i][j], 0.0) \\\n * (v_old[i][j + 1] - v_old[i][j]) / dy\n\n # diffusion_x\n v[i][j] = v[i][j] \\\n + dt * nue * (v_old[i + 1][j] - 2 * v_old[i][j]\n + v_old[i - 1][j]) / dx**2\n\n # diffusion_y\n v[i][j] = v[i][j] \\\n + dt * nue * (v_old[i][j + 1] - 2 * v_old[i][j]\n + v_old[i][j - 1]) / dy**2\n\n ''' matrix solution '''\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n Ae[i][j] = dt / density / dx**2\n Aw[i][j] = dt / density / dx**2\n An[i][j] = dt / density / dy**2\n As[i][j] = dt / density / dy**2\n Ap[i][j] = - Ae[i][j] - Aw[i][j] - An[i][j] - As[i][j]\n\n # bb[i][j] = (u[i][j] - u[i - 1][j]) / dx\\\n # + (v[i][j] - v[i][j - 1]) / dy\n bb[i][j] = (u[i + 1][j] - u[i - 1][j]) / dx / 2 \\\n + (v[i][j + 1] - v[i][j - 1]) / dy / 2\n\n condition.matrix_c(p, Ap, Ae, Aw, An, As, bb, m, n)\n solve_matrix(p, Ap, Ae, Aw, An, As, bb, m, n)\n\n return\n\n\ndef solve_u(p, u, density, dx, dt, m, n):\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n # convection_x (1st upwind scheme) -> already calculated in solve_p\n # convection_y -> already calculated in solve_p\n # diffusion_x -> already calculated in solve_p\n # 
diffusion_y -> already calculated in solve_p\n # pressure\n # u[i][j] = u[i][j]\\\n # - dt / density * (p[i + 1][j] - p[i][j]) / dx\n u[i][j] = u[i][j] \\\n - dt / density * (p[i + 1][j] - p[i - 1][j]) / dx / 2\n\n\ndef solve_v(p, v, density, dy, dt, m, n):\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n # convection_x (1st upwind scheme) -> already calculated in solve_p\n # convection_y -> already calculated in solve_p\n # diffusion_x -> already calculated in solve_p\n # diffusion_y -> already calculated in solve_p\n # pressure\n # v[i][j] = v[i][j] \\\n # - dt / density * (p[i][j + 1] - p[i][j]) / dy\n v[i][j] = v[i][j] \\\n - dt / density * (p[i][j + 1] - p[i][j - 1]) / dy / 2\n" ]
[ [ "numpy.zeros", "numpy.abs" ] ]
tigerwlin/vel
[ "00e4fbb7b612e888e2cbb5d8455146664638cd0b", "00e4fbb7b612e888e2cbb5d8455146664638cd0b", "00e4fbb7b612e888e2cbb5d8455146664638cd0b", "00e4fbb7b612e888e2cbb5d8455146664638cd0b", "00e4fbb7b612e888e2cbb5d8455146664638cd0b", "00e4fbb7b612e888e2cbb5d8455146664638cd0b" ]
[ "vel/openai/baselines/common/vec_env/dummy_vec_env.py", "vel/rl/models/backbone/nature_cnn_two_tower.py", "vel/models/rnn/multilayer_sequence_classification_gru.py", "vel/rl/algo/policy_gradient/trpo.py", "vel/rl/modules/deterministic_action_head.py", "vel/api/base/source.py" ]
[ "import numpy as np\n# from gym import spaces\nfrom bc_gym_planning_env.envs.base import spaces\nfrom collections import OrderedDict\nfrom . import VecEnv\n\nclass DummyVecEnv(VecEnv):\n def __init__(self, env_fns):\n self.envs = [fn() for fn in env_fns]\n env = self.envs[0]\n VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)\n shapes, dtypes = {}, {}\n self.keys = []\n obs_space = env.observation_space\n\n if isinstance(obs_space, spaces.Dict):\n assert isinstance(obs_space.spaces, OrderedDict)\n subspaces = obs_space.spaces\n else:\n subspaces = {None: obs_space}\n\n for key, box in subspaces.items():\n shapes[key] = box.shape\n dtypes[key] = box.dtype\n self.keys.append(key)\n\n self.buf_obs = { k: np.zeros((self.num_envs,) + tuple(shapes[k]), dtype=dtypes[k]) for k in self.keys }\n self.buf_dones = np.zeros((self.num_envs,), dtype=np.bool)\n self.buf_rews = np.zeros((self.num_envs,), dtype=np.float32)\n self.buf_infos = [{} for _ in range(self.num_envs)]\n self.actions = None\n\n def step_async(self, actions):\n self.actions = actions\n\n def step_wait(self):\n for e in range(self.num_envs):\n obs, self.buf_rews[e], self.buf_dones[e], self.buf_infos[e] = self.envs[e].step(self.actions)\n if self.buf_dones[e]:\n obs = self.envs[e].reset()\n self._save_obs(e, obs)\n return (self._obs_from_buf(), np.copy(self.buf_rews), np.copy(self.buf_dones),\n self.buf_infos.copy())\n\n def reset(self):\n for e in range(self.num_envs):\n obs = self.envs[e].reset()\n self._save_obs(e, obs)\n return self._obs_from_buf()\n\n def close(self):\n return\n\n def render(self, mode='human'):\n return [e.render(mode=mode) for e in self.envs]\n\n def _save_obs(self, e, obs):\n for k in self.keys:\n if k is None:\n self.buf_obs[k][e] = obs\n else:\n self.buf_obs[k][e] = obs[k]\n\n def _obs_from_buf(self):\n if self.keys==[None]:\n return self.buf_obs[None]\n else:\n return self.buf_obs\n", "\"\"\"\nCode based loosely on implementation:\nhttps://github.com/openai/baselines/blob/master/baselines/ppo2/policies.py\n\nUnder MIT license.\n\"\"\"\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F\n\nimport vel.util.network as net_util\nfrom vel.rl.models.backbone.CoordConv import CoordConv\n\nfrom vel.api.base import LinearBackboneModel, ModelFactory\n\n\nclass NatureCnnTwoTower(LinearBackboneModel):\n \"\"\" Neural network as defined in the paper 'Human-level control through deep reinforcement learning' \"\"\"\n def __init__(self, input_width, input_height, input_channels, output_dim=512):\n super().__init__()\n\n self._output_dim = output_dim\n\n # self.conv1 = nn.Conv2d(\n # in_channels=input_channels,\n # out_channels=32,\n # kernel_size=(8, 8),\n # stride=2\n # )\n self.conv1 = CoordConv(x_dim=133, y_dim=133, with_r=False,\n in_channels=input_channels+2,\n out_channels=32,\n kernel_size=(8, 8),\n stride=2\n )\n\n self.conv2 = nn.Conv2d(\n in_channels=32,\n out_channels=64,\n kernel_size=(4, 4),\n stride=2\n )\n\n self.conv3 = nn.Conv2d(\n in_channels=64,\n out_channels=64,\n kernel_size=(3, 3),\n stride=1\n )\n\n self.linear1 = nn.Linear(5, 1024)\n self.linear2 = nn.Linear(1024, 512)\n\n\n self.final_width = net_util.convolutional_layer_series(input_width, [\n (8, 0, 2),\n (4, 0, 2),\n (3, 0, 1)\n ])\n\n self.final_height = net_util.convolutional_layer_series(input_height, [\n (8, 0, 2),\n (4, 0, 2),\n (3, 0, 1)\n ])\n\n self.linear_layer1 = nn.Linear(\n self.final_width * self.final_height * 64*1 + 512, # 64 is the 
number of channels of the last conv layer\n 1024\n )\n self.linear_layer2 = nn.Linear(1024, self.output_dim)\n\n @property\n def output_dim(self) -> int:\n \"\"\" Final dimension of model output \"\"\"\n return self._output_dim\n\n def reset_weights(self):\n \"\"\" Call proper initializers for the weights \"\"\"\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n # init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n init.orthogonal_(m.weight, gain=np.sqrt(2))\n init.constant_(m.bias, 0.0)\n elif isinstance(m, nn.Linear):\n # init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n init.orthogonal_(m.weight, gain=np.sqrt(2))\n init.constant_(m.bias, 0.0)\n\n def forward(self, image):\n input1 = image['environment']\n input2 = image['goal'].float()\n result1 = input1.permute(0, 3, 1, 2).contiguous().type(torch.float) / 255.0\n result1 = F.relu(self.conv1(result1))\n result1 = F.relu(self.conv2(result1))\n result1 = F.relu(self.conv3(result1))\n\n result2 = input2.view(input2.size(0), -1)\n result2 = F.leaky_relu(self.linear1(result2))\n result2 = F.leaky_relu(self.linear2(result2))\n\n flattened1 = result1.view(result1.size(0), -1)\n flattened2 = result2.view(result2.size(0), -1)\n flattened = torch.cat((flattened1, flattened2), 1)\n\n result = F.leaky_relu(self.linear_layer1(flattened))\n result = F.leaky_relu(self.linear_layer2(result))\n return result\n\n\ndef create(input_width, input_height, input_channels=1, output_dim=512):\n def instantiate(**_):\n return NatureCnnTwoTower(\n input_width=input_width, input_height=input_height, input_channels=input_channels,\n output_dim=output_dim\n )\n\n return ModelFactory.generic(instantiate)\n\n\n# Add this to make nicer scripting interface\nNatureCnnTwoTowerFactory = create\n\n", "import typing\n\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\n\nfrom vel.api.base import SupervisedModel, ModelFactory, LinearBackboneModel\nfrom vel.metrics.accuracy import Accuracy\nfrom vel.metrics.loss_metric import Loss\n\n\nclass MultilayerSequenceClassificationGRU(SupervisedModel):\n \"\"\" Multilayer GRU network for sequence modeling (n:1) \"\"\"\n\n def __init__(self, input_block: LinearBackboneModel, output_dim: int,\n rnn_layers: typing.List[int], rnn_dropout: float=0.0, bidirectional: bool=False,\n linear_layers: typing.List[int]=None, linear_dropout: float=0.0):\n super().__init__()\n\n self.output_dim = output_dim\n\n self.rnn_layers_sizes = rnn_layers\n self.rnn_dropout = rnn_dropout\n self.linear_layers_sizes = linear_layers\n self.linear_dropout = linear_dropout\n\n self.bidirectional = bidirectional\n self.input_block = input_block\n\n current_dim = self.input_block.output_dim\n\n self.rnn_layers = []\n self.rnn_dropout_layers = []\n\n bidirectional_multiplier = 1\n\n for idx, current_layer in enumerate(rnn_layers, 1):\n gru = nn.GRU(\n input_size=current_dim * bidirectional_multiplier,\n hidden_size=current_layer,\n bidirectional=bidirectional,\n batch_first=True,\n )\n\n self.add_module('gru{:02}'.format(idx), gru)\n self.rnn_layers.append(gru)\n\n if self.rnn_dropout > 0.0:\n dropout_layer = nn.Dropout(p=self.rnn_dropout)\n\n self.add_module('rnn_dropout{:02}'.format(idx), dropout_layer)\n self.rnn_dropout_layers.append(dropout_layer)\n\n current_dim = current_layer\n\n if self.bidirectional:\n bidirectional_multiplier = 2\n else:\n bidirectional_multiplier = 1\n\n self.linear_layers = []\n self.linear_dropout_layers = []\n\n for idx, current_layer in enumerate(linear_layers, 1):\n 
linear_layer = nn.Linear(current_dim * bidirectional_multiplier, current_layer)\n\n self.add_module('linear{:02}'.format(idx), linear_layer)\n self.linear_layers.append(linear_layer)\n\n if self.linear_dropout > 0.0:\n dropout_layer = nn.Dropout(p=self.linear_dropout)\n\n self.add_module('linear_dropout{:02}'.format(idx), dropout_layer)\n self.linear_dropout_layers.append(dropout_layer)\n\n bidirectional_multiplier = 1\n current_dim = current_layer\n\n if self.bidirectional:\n self.output_layer = nn.Linear(bidirectional_multiplier * current_dim, output_dim)\n else:\n self.output_layer = nn.Linear(current_dim, output_dim)\n\n self.output_activation = nn.LogSoftmax(dim=1)\n\n def reset_weights(self):\n self.input_block.reset_weights()\n\n for layer in self.linear_layers:\n nn.init.kaiming_normal_(layer.weight, nonlinearity='relu')\n nn.init.zeros_(layer.bias)\n\n nn.init.kaiming_normal_(self.output_layer.weight, nonlinearity='relu')\n nn.init.zeros_(self.output_layer.bias)\n\n def forward(self, sequence):\n \"\"\" Forward propagate batch of sequences through the network, without accounting for the state \"\"\"\n data = self.input_block(sequence)\n\n for idx in range(len(self.rnn_layers)):\n data, _ = self.rnn_layers[idx](data)\n\n if self.rnn_dropout_layers:\n data = self.rnn_dropout_layers[idx](data)\n\n # We are interested only in the last element of the sequence\n if self.bidirectional:\n last_hidden_size = self.rnn_layers_sizes[-1]\n data = torch.cat([data[:, -1, :last_hidden_size], data[:, 0, last_hidden_size:]], dim=1)\n else:\n data = data[:, -1]\n\n for idx in range(len(self.linear_layers_sizes)):\n data = F.relu(self.linear_layers[idx](data))\n\n if self.linear_dropout_layers:\n data = self.linear_dropout_layers[idx](data)\n\n data = self.output_layer(data)\n\n return self.output_activation(data)\n\n # def forward_state(self, sequence, state=None):\n # \"\"\" Forward propagate a sequence through the network accounting for the state \"\"\"\n # if state is None:\n # state = self.initial_state(sequence.size(0))\n #\n # data = self.input_layer(sequence)\n #\n # state_outputs = []\n #\n # # for layer_length, layer in zip(self.hidden_layers, self.gru_layers):\n # for idx in range(len(self.gru_layers)):\n # layer_length = self.hidden_layers[idx]\n #\n # # Partition hidden state, for each layer we have layer_length of h state and layer_length of c state\n # current_state = state[:, :, :layer_length]\n # state = state[:, :, layer_length:]\n #\n # # Propagate through the GRU state\n # data, new_h = self.gru_layers[idx](data, current_state)\n #\n # if self.dropout_layers:\n # data = self.dropout_layers[idx](data)\n #\n # state_outputs.append(new_h)\n #\n # output_data = self.output_activation(self.output_layer(data))\n #\n # concatenated_hidden_output = torch.cat(state_outputs, dim=2)\n #\n # return output_data, concatenated_hidden_output\n\n def get_layer_groups(self):\n return [\n self.input_block,\n self.rnn_layers,\n self.linear_layers,\n self.output_layer\n ]\n\n def initial_state(self, batch_size):\n \"\"\" Initial state of the network \"\"\"\n return torch.zeros(batch_size, 1, sum(self.rnn_layers_sizes))\n\n def loss_value(self, x_data, y_true, y_pred):\n \"\"\" Calculate a value of loss function \"\"\"\n return F.nll_loss(y_pred, y_true)\n\n def metrics(self) -> list:\n \"\"\" Set of metrics for this model \"\"\"\n return [Loss(), Accuracy()]\n\n\ndef create(input_block: LinearBackboneModel, output_dim: int,\n rnn_layers: typing.List[int], rnn_dropout: float=0.0, bidirectional: 
bool=False,\n linear_layers: typing.List[int]=None, linear_dropout: float=0.0):\n \"\"\" Vel creation function \"\"\"\n if linear_layers is None:\n linear_layers = []\n\n def instantiate(**_):\n return MultilayerSequenceClassificationGRU(\n input_block=input_block, output_dim=output_dim,\n rnn_layers=rnn_layers, rnn_dropout=rnn_dropout, bidirectional=bidirectional,\n linear_layers=linear_layers, linear_dropout=linear_dropout\n )\n\n return ModelFactory.generic(instantiate)\n", "import numpy as np\nimport torch\nimport torch.autograd as autograd\nimport torch.nn.functional as F\nimport torch.nn.utils\n\nfrom vel.api.metrics.averaging_metric import AveragingNamedMetric\nfrom vel.math.functions import explained_variance\nfrom vel.rl.api.base import AlgoBase\n\n\ndef p2v(params):\n \"\"\" Parameters to vector - shorthand utility version \"\"\"\n return torch.nn.utils.parameters_to_vector(params)\n\n\ndef v2p(vector, params):\n \"\"\" Vector to parameters - shorthand utility version \"\"\"\n return torch.nn.utils.vector_to_parameters(vector, params)\n\n\ndef conjugate_gradient_method(matrix_vector_operator, loss_gradient, nsteps, rdotr_tol=1e-10):\n \"\"\" Conjugate gradient algorithm \"\"\"\n x = torch.zeros_like(loss_gradient)\n\n r = loss_gradient.clone()\n p = loss_gradient.clone()\n\n rdotr = torch.dot(r, r)\n\n for i in range(nsteps):\n Avp = matrix_vector_operator(p)\n alpha = rdotr / torch.dot(p, Avp)\n\n x += alpha * p\n r -= alpha * Avp\n\n new_rdotr = torch.dot(r, r)\n betta = new_rdotr / rdotr\n p = r + betta * p\n rdotr = new_rdotr\n\n if rdotr < rdotr_tol:\n break\n\n return x\n\n\nclass TrpoPolicyGradient(AlgoBase):\n \"\"\" Trust Region Policy Optimization - https://arxiv.org/abs/1502.05477 \"\"\"\n\n def __init__(self, max_kl, cg_iters, line_search_iters, cg_damping, entropy_coef, vf_iters,\n improvement_acceptance_ratio, max_grad_norm):\n self.mak_kl = max_kl\n self.cg_iters = cg_iters\n self.line_search_iters = line_search_iters\n self.cg_damping = cg_damping\n self.entropy_coef = entropy_coef\n self.vf_iters = vf_iters\n self.improvement_acceptance_ratio = improvement_acceptance_ratio\n self.max_grad_norm = max_grad_norm\n\n def optimizer_step(self, batch_info, device, model, rollout):\n \"\"\" Single optimization step for a model \"\"\"\n rollout = rollout.to_transitions()\n\n # This algorithm makes quote strong assumptions about how does the model look\n # so it does not make that much sense to switch to the evaluator interface\n # As it would be more of a problem than actual benefit\n\n observations = rollout.batch_tensor('observations')\n returns = rollout.batch_tensor('estimated_returns')\n\n # Evaluate model on the observations\n policy_params = model.policy(observations)\n policy_entropy = torch.mean(model.entropy(policy_params))\n\n policy_loss = self.calc_policy_loss(model, policy_params, policy_entropy, rollout)\n policy_grad = p2v(autograd.grad(policy_loss, model.policy_parameters(), retain_graph=True)).detach()\n\n # Calculate gradient of KL divergence of model with fixed version of itself\n # Value of kl_divergence will be 0, but what we need is the gradient, actually the 2nd derivarive\n kl_divergence = torch.mean(model.kl_divergence(policy_params.detach(), policy_params))\n kl_divergence_gradient = p2v(torch.autograd.grad(kl_divergence, model.policy_parameters(), create_graph=True))\n\n step_direction = conjugate_gradient_method(\n matrix_vector_operator=lambda x: self.fisher_vector_product(x, kl_divergence_gradient, model),\n # Because we want to decrease the 
loss, we want to go into the direction of -gradient\n loss_gradient=-policy_grad,\n nsteps=self.cg_iters\n )\n\n shs = 0.5 * step_direction @ self.fisher_vector_product(step_direction, kl_divergence_gradient, model)\n lm = torch.sqrt(shs / self.mak_kl)\n full_step = step_direction / lm\n\n # Because we want to decrease the loss, we want to go into the direction of -gradient\n expected_improvement = (-policy_grad) @ full_step\n original_parameter_vec = p2v(model.policy_parameters()).detach_()\n\n policy_optimization_success, ratio, policy_loss_improvement, new_policy_loss, kl_divergence_step = self.line_search(\n model, rollout, policy_loss, policy_params, original_parameter_vec, full_step, expected_improvement\n )\n\n gradient_norms = []\n\n for i in range(self.vf_iters):\n batch_info.optimizer.zero_grad()\n value_loss = self.value_loss(model, observations, returns)\n\n value_loss.backward()\n\n # Gradient clipping\n if self.max_grad_norm is not None:\n grad_norm = torch.nn.utils.clip_grad_norm_(\n filter(lambda p: p.requires_grad, model.parameters()),\n max_norm=self.max_grad_norm\n )\n\n gradient_norms.append(grad_norm)\n\n batch_info.optimizer.step(closure=None)\n\n if gradient_norms:\n gradient_norm = np.mean(gradient_norms)\n else:\n gradient_norm = 0.0\n\n # noinspection PyUnboundLocalVariable\n return {\n 'new_policy_loss': new_policy_loss.item(),\n 'policy_entropy': policy_entropy.item(),\n 'value_loss': value_loss.item(),\n 'policy_optimization_success': float(policy_optimization_success),\n 'policy_improvement_ratio': ratio.item(),\n 'kl_divergence_step': kl_divergence_step.item(),\n 'policy_loss_improvement': policy_loss_improvement.item(),\n 'grad_norm': gradient_norm,\n 'advantage_norm': torch.norm(rollout.batch_tensor('estimated_advantages')).item(),\n 'explained_variance': explained_variance(returns, rollout.batch_tensor('estimated_values'))\n }\n\n def line_search(self, model, rollout, original_policy_loss, original_policy_params, original_parameter_vec,\n full_step, expected_improvement_full):\n \"\"\" Find the right stepsize to make sure policy improves \"\"\"\n current_parameter_vec = original_parameter_vec.clone()\n\n for idx in range(self.line_search_iters):\n stepsize = 0.5 ** idx\n\n new_parameter_vec = current_parameter_vec + stepsize * full_step\n\n # Update model parameters\n v2p(new_parameter_vec, model.policy_parameters())\n\n # Calculate new loss\n with torch.no_grad():\n policy_params = model.policy(rollout.batch_tensor('observations'))\n policy_entropy = torch.mean(model.entropy(policy_params))\n kl_divergence = torch.mean(model.kl_divergence(original_policy_params, policy_params))\n\n new_loss = self.calc_policy_loss(model, policy_params, policy_entropy, rollout)\n\n actual_improvement = original_policy_loss - new_loss\n expected_improvement = expected_improvement_full * stepsize\n\n ratio = actual_improvement / expected_improvement\n\n if kl_divergence.item() > self.mak_kl * 1.5:\n # KL divergence bound exceeded\n continue\n elif ratio < expected_improvement:\n # Not enough loss improvement\n continue\n else:\n # Optimization successful\n return True, ratio, actual_improvement, new_loss, kl_divergence\n\n # Optimization failed, revert to initial parameters\n v2p(original_parameter_vec, model.policy_parameters())\n return False, torch.tensor(0.0), torch.tensor(0.0), torch.tensor(0.0), torch.tensor(0.0)\n\n def fisher_vector_product(self, vector, kl_divergence_gradient, model):\n \"\"\" Calculate product Hessian @ vector \"\"\"\n assert not 
vector.requires_grad, \"Vector must not propagate gradient\"\n dot_product = vector @ kl_divergence_gradient\n\n # at least one dimension spans across two contiguous subspaces\n double_gradient = torch.autograd.grad(dot_product, model.policy_parameters(), retain_graph=True)\n fvp = p2v(x.contiguous() for x in double_gradient)\n\n return fvp + vector * self.cg_damping\n\n def value_loss(self, model, observations, discounted_rewards):\n \"\"\" Loss of value estimator \"\"\"\n value_outputs = model.value(observations)\n value_loss = 0.5 * F.mse_loss(value_outputs, discounted_rewards)\n return value_loss\n\n def calc_policy_loss(self, model, policy_params, policy_entropy, rollout):\n \"\"\"\n Policy gradient loss - calculate from probability distribution\n\n Calculate surrogate loss - advantage * policy_probability / fixed_initial_policy_probability\n\n Because we operate with logarithm of -probability (neglogp) we do\n - advantage * exp(fixed_neglogps - model_neglogps)\n \"\"\"\n actions = rollout.batch_tensor('actions')\n advantages = rollout.batch_tensor('estimated_advantages')\n fixed_logprobs = rollout.batch_tensor('action:logprobs')\n\n model_logprobs = model.logprob(actions, policy_params)\n\n # Normalize advantages\n advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)\n\n # We put - in front because we want to maximize the surrogate objective\n policy_loss = -advantages * torch.exp(model_logprobs - fixed_logprobs)\n\n return policy_loss.mean() - policy_entropy * self.entropy_coef\n\n def metrics(self) -> list:\n \"\"\" List of metrics to track for this learning process \"\"\"\n return [\n AveragingNamedMetric(\"new_policy_loss\"),\n AveragingNamedMetric(\"policy_entropy\"),\n AveragingNamedMetric(\"value_loss\"),\n AveragingNamedMetric(\"policy_optimization_success\"),\n AveragingNamedMetric(\"policy_improvement_ratio\"),\n AveragingNamedMetric(\"kl_divergence_step\"),\n AveragingNamedMetric(\"policy_loss_improvement\"),\n AveragingNamedMetric(\"grad_norm\"),\n AveragingNamedMetric(\"advantage_norm\"),\n AveragingNamedMetric(\"explained_variance\")\n ]\n\n\ndef create(max_kl, cg_iters, line_search_iters, cg_damping, entropy_coef, vf_iters, improvement_acceptance_ratio=0.1,\n max_grad_norm=0.5):\n return TrpoPolicyGradient(\n max_kl, int(cg_iters), int(line_search_iters), cg_damping, entropy_coef, vf_iters, improvement_acceptance_ratio,\n max_grad_norm=max_grad_norm\n )\n", "import numpy as np\nimport gym.spaces as spaces\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\n\n\nclass DeterministicActionHead(nn.Module):\n \"\"\"\n Network head for action determination. 
Returns deterministic action depending on the inputs\n \"\"\"\n\n def __init__(self, input_dim, action_space):\n super().__init__()\n\n self.action_space = action_space\n\n assert isinstance(action_space, spaces.Box)\n assert len(action_space.shape) == 1\n\n assert (np.abs(action_space.low) == action_space.high).all() # we assume symmetric actions.\n self.register_buffer('max_action', torch.from_numpy(action_space.high))\n\n self.linear_layer = nn.Linear(input_dim, action_space.shape[0])\n\n def forward(self, input_data):\n return torch.tanh(self.linear_layer(input_data)) * self.max_action\n\n def sample(self, params, **_):\n \"\"\" Sample from a probability space of all actions \"\"\"\n return {\n 'actions': self(params)\n }\n\n def reset_weights(self):\n \"\"\" Initialize weights to sane defaults \"\"\"\n init.orthogonal_(self.linear_layer.weight, gain=0.01)\n init.constant_(self.linear_layer.bias, 0.0)\n", "import torch.utils.data as data\n\n\nclass Source:\n \"\"\" Source of data for supervised learning algorithms \"\"\"\n def __init__(self):\n pass\n\n def train_loader(self):\n \"\"\" PyTorch loader of training data \"\"\"\n raise NotImplementedError\n\n def val_loader(self):\n \"\"\" PyTorch loader of validation data \"\"\"\n raise NotImplementedError\n\n def train_dataset(self):\n \"\"\" Return the training dataset \"\"\"\n raise NotImplementedError\n\n def val_dataset(self):\n \"\"\" Return the validation dataset \"\"\"\n raise NotImplementedError\n\n def train_iterations_per_epoch(self):\n \"\"\" Return number of iterations per epoch \"\"\"\n raise NotImplementedError\n\n def val_iterations_per_epoch(self):\n \"\"\" Return number of iterations per epoch - validation \"\"\"\n raise NotImplementedError\n\n\nclass TextData(Source):\n \"\"\" An NLP torchtext data source \"\"\"\n def __init__(self, train_source, val_source, train_iterator, val_iterator, data_field, target_field):\n super().__init__()\n\n self.train_source = train_source\n self.val_source = val_source\n self.train_iterator = train_iterator\n self.val_iterator = val_iterator\n self.data_field = data_field\n self.target_field = target_field\n\n def train_loader(self):\n \"\"\" PyTorch loader of training data \"\"\"\n return self.train_iterator\n\n def val_loader(self):\n \"\"\" PyTorch loader of validation data \"\"\"\n return self.val_iterator\n\n def train_dataset(self):\n \"\"\" Return the training dataset \"\"\"\n return self.train_source\n\n def val_dataset(self):\n \"\"\" Return the validation dataset \"\"\"\n return self.val_source\n\n def train_iterations_per_epoch(self):\n \"\"\" Return number of iterations per epoch \"\"\"\n return len(self.train_iterator)\n\n def val_iterations_per_epoch(self):\n \"\"\" Return number of iterations per epoch - validation \"\"\"\n return len(self.val_iterator)\n\n\nclass TrainingData(Source):\n \"\"\" Most common source of data combining a basic datasource and sampler \"\"\"\n def __init__(self, train_source, val_source, num_workers, batch_size, augmentations=None):\n import vel.api.data as vel_data\n\n super().__init__()\n\n self.train_source = train_source\n self.val_source = val_source\n\n self.num_workers = num_workers\n self.batch_size = batch_size\n\n self.augmentations = augmentations\n\n # Derived values\n self.train_ds = vel_data.DataFlow(self.train_source, augmentations, tag='train')\n self.val_ds = vel_data.DataFlow(self.val_source, augmentations, tag='val')\n\n self._train_loader = data.DataLoader(\n self.train_ds, batch_size=batch_size, shuffle=True, 
num_workers=num_workers\n )\n\n self._val_loader = data.DataLoader(\n self.val_ds, batch_size=batch_size, shuffle=False, num_workers=num_workers\n )\n\n def train_loader(self):\n \"\"\" PyTorch loader of training data \"\"\"\n return self._train_loader\n\n def val_loader(self):\n \"\"\" PyTorch loader of validation data \"\"\"\n return self._val_loader\n\n def train_dataset(self):\n \"\"\" Return the training dataset \"\"\"\n return self.train_ds\n\n def val_dataset(self):\n \"\"\" Return the validation dataset \"\"\"\n return self.val_ds\n\n def train_iterations_per_epoch(self):\n \"\"\" Return number of iterations per epoch \"\"\"\n return len(self._train_loader)\n\n def val_iterations_per_epoch(self):\n \"\"\" Return number of iterations per epoch - validation \"\"\"\n return len(self._val_loader)\n" ]
[ [ "numpy.copy", "numpy.zeros" ], [ "numpy.sqrt", "torch.cat", "torch.nn.init.constant_", "torch.nn.Conv2d", "torch.nn.Linear" ], [ "torch.nn.Dropout", "torch.nn.LogSoftmax", "torch.nn.functional.nll_loss", "torch.cat", "torch.nn.GRU", "torch.nn.Linear", "torch.nn.init.zeros_", "torch.nn.init.kaiming_normal_" ], [ "torch.nn.utils.vector_to_parameters", "torch.sqrt", "torch.nn.utils.parameters_to_vector", "torch.zeros_like", "torch.tensor", "torch.exp", "torch.nn.functional.mse_loss", "numpy.mean", "torch.no_grad", "torch.dot" ], [ "numpy.abs", "torch.nn.init.constant_", "torch.from_numpy", "torch.nn.Linear", "torch.nn.init.orthogonal_" ], [ "torch.utils.data.DataLoader" ] ]
Smithsonian/Mass-Georeferencing
[ "bb7d81cd82684900003d3049764cd2d243325248" ]
[ "old/shiny/match_localities/match_SI_GBIF.py" ]
[ "#!/usr/bin/env python3\n#\n# Match SI GBIF records without coordinates to other GBIF records for the species/genus\n#\nimport psycopg2, os, logging, sys, locale, psycopg2.extras\nimport pandas as pd\nfrom time import localtime, strftime\nfrom fuzzywuzzy import fuzz\nimport pycountry\n\n\n#Import settings\nimport settings\n\n#Set locale for number format\nlocale.setlocale(locale.LC_ALL, 'en_US.UTF-8')\n\n\n\n#Get current time\ncurrent_time = strftime(\"%Y%m%d_%H%M%S\", localtime())\n\n# Set Logging\nif not os.path.exists('logs'):\n os.makedirs('logs')\n\nlogfile_name = 'logs/{}.log'.format(current_time)\n# from http://stackoverflow.com/a/9321890\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%m-%d %H:%M:%S',\n filename=logfile_name,\n filemode='a')\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.INFO)\nformatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\nconsole.setFormatter(formatter)\nlogging.getLogger('').addHandler(console)\nlogger1 = logging.getLogger(\"si_georef\")\n\n\n#search_fuzzy(record['locality'], record['stateprovince'], candidates, method = 'set', threshold = 80)\ndef search_fuzzy(locality, stateprovince, data, filter_stateprovince = True, method = 'partial', threshold = 80):\n \"\"\"Search localities in the databases for matches using fuzzywuzzy.\"\"\"\n try:\n int(threshold)\n except:\n print('invalid threshold value')\n sys.exit(1)\n #Check results\n if method == 'partial':\n data['score1'] = data.apply(lambda row : fuzz.partial_ratio(locality, row['name']), axis = 1)\n if filter_stateprovince == True:\n data['score2'] = data.apply(lambda row : fuzz.partial_ratio(stateprovince, row['stateprovince']), axis = 1)\n data['score'] = (data['score1'] + data['score2'])/2\n results = data.drop(columns = ['score1', 'score2'])\n else:\n data['score'] = data['score1']\n results = data.drop(columns = ['score1']) \n elif method == 'set':\n data['score1'] = data.apply(lambda row : fuzz.token_set_ratio(locality, row['name']), axis = 1)\n if filter_stateprovince == True:\n data['score2'] = data.apply(lambda row : fuzz.token_set_ratio(stateprovince, row['stateprovince']), axis = 1)\n data['score'] = (data['score1'] + data['score2'])/2\n results = data.drop(columns = ['score1', 'score2'])\n else:\n data['score'] = data['score1']\n results = data.drop(columns = ['score1']) \n results = results[results.score > threshold]\n #print(results)\n return results\n\n\n\n#Connect to the dpogis database\ntry:\n logger1.info(\"Connecting to the database.\")\n conn = psycopg2.connect(host = settings.pg_host, database = settings.pg_db, user = settings.pg_user, connect_timeout = 60)\nexcept:\n print(\" ERROR: Could not connect to server.\")\n sys.exit(1)\n\nconn.autocommit = True\ncur = conn.cursor(cursor_factory = psycopg2.extras.RealDictCursor)\n\n\nif len(sys.argv) > 1:\n arg = sys.argv[1]\n if arg == \"plants\":\n sel_species = \"SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND phylum = 'Tracheophyta' GROUP BY species, kingdom, phylum, class, _order, family, genus\"\n #sel_species = \"SELECT DISTINCT species FROM gbif_si WHERE species != '' AND ((decimallatitude is null and decimallongitude is null) OR (georeferenceprotocol LIKE '%%unknown%%') OR (locality != '')) AND phylum = 'Tracheophyta'\"\n elif arg == \"birds\":\n sel_species = \"SELECT species, kingdom, phylum, class, 
_order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND class = 'Aves' GROUP BY species, kingdom, phylum, class, _order, family, genus\"\n elif arg == \"mammals\":\n sel_species = \"SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND class = 'Mammalia' GROUP BY species, kingdom, phylum, class, _order, family, genus\"\n elif arg == \"reptiles\":\n sel_species = \"SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND class = 'Reptilia' GROUP BY species, kingdom, phylum, class, _order, family, genus\"\n elif arg == \"amphibians\":\n sel_species = \"SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND class = 'Amphibia' GROUP BY species, kingdom, phylum, class, _order, family, genus\"\n elif arg == \"bivalves\":\n sel_species = \"SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND basisofrecord = 'FOSSIL_SPECIMEN' AND class = 'Bivalvia' GROUP BY species, kingdom, phylum, class, _order, family, genus\"\n elif arg == \"gastropods\":\n sel_species = \"SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND basisofrecord = 'FOSSIL_SPECIMEN' AND class = 'Gastropoda' GROUP BY species, kingdom, phylum, class, _order, family, genus\"\n elif arg == \"crabs\":\n sel_species = \"SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND basisofrecord = 'FOSSIL_SPECIMEN' AND class = 'Malacostraca' GROUP BY species, kingdom, phylum, class, _order, family, genus\"\n elif arg == \"echinoids\":\n sel_species = \"SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND basisofrecord = 'FOSSIL_SPECIMEN' AND class = 'Echinoidea' GROUP BY species, kingdom, phylum, class, _order, family, genus\"\n elif arg == \"iz\":\n sel_species = \"SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND family = 'Unionidae' GROUP BY species, kingdom, phylum, class, _order, family, genus\"\n else:\n print(\"Invalid argument\")\n sys.exit(1)\n\n\n\n#Select species\ncur.execute(sel_species)\nlogger1.debug(cur.query)\nscinames = cur.fetchall()\n\n\n\nfor sciname in scinames:\n cur.execute(\"DELETE FROM gbif_si_matches WHERE species = %s\", (sciname['species'],))\n logger1.debug(cur.query)\n cur.execute(\"DELETE FROM gbif_si_summary WHERE species = %(species)s AND kingdom = %(kingdom)s AND phylum = %(phylum)s AND class = %(class)s AND _order = %(_order)s AND family = %(family)s AND genus = %(genus)s\", {'species': sciname['species'], 'kingdom': sciname['kingdom'], 'phylum': sciname['phylum'], 'class': sciname['class'], '_order': sciname['_order'], 'family': sciname['family'], 'genus': sciname['genus']})\n 
logger1.debug(cur.query)\n\n\n\n#search_fuzzy(locality, scientificname, countrycode, db, cur, rank = 'species', method = 'partial', threshold = 80):\n#Loop the species\nfor sciname in scinames:\n logger1.info(\"sciname: {}\".format(sciname['species']))\n \n #Get countries\n cur.execute(\"SELECT countrycode FROM gbif_si WHERE species = %s AND decimallatitude is null and decimallongitude is null AND lower(locality) != 'unknown' AND locality != '' GROUP BY countrycode\", (sciname['species'],))\n logger1.debug(cur.query)\n countries = cur.fetchall()\n for country in countries:\n #Get records for the country\n cur.execute(\"SELECT MAX(gbifid::bigint)::text as gbifid, countrycode, stateprovince, locality, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species = %(species)s AND countrycode = %(countrycode)s AND decimallatitude is null and decimallongitude is null AND lower(locality) != 'unknown' AND locality != '' GROUP BY countrycode, stateprovince, locality, kingdom, phylum, class, _order, family, genus\", {'species': sciname['species'], 'countrycode': country['countrycode']})\n logger1.debug(cur.query)\n records = pd.DataFrame(cur.fetchall())\n ################\n #Get candidates\n ################\n #GBIF - species\n logger1.info(\"GBIF: {}\".format(country['countrycode']))\n query_template = \"SELECT MAX(gbifid::bigint)::text as uid, locality as name, count(*) as no_records, countrycode, trim(leading ', ' from replace(municipality || ', ' || county || ', ' || stateprovince || ', ' || countrycode, ', , ', '')) as located_at, stateprovince, recordedBy FROM gbif WHERE {rank} = '{scientificname}' AND lower(locality) != 'unknown' AND countrycode = '{countrycode}' GROUP BY countrycode, locality, municipality, county, stateprovince, recordedBy\"\n cur.execute(query_template.format(rank = 'species', scientificname = sciname['species'], countrycode = country['countrycode']))\n logger1.debug(cur.query)\n candidates = pd.DataFrame(cur.fetchall())\n logger1.info(\"No. of GBIF candidates: {}\".format(len(candidates)))\n if len(candidates) > 0:\n #Iterate each record\n for index, record in records.iterrows():\n logger1.info(\"record gbifid: {}\".format(record['gbifid']))\n logger1.info(\"locality: {}, {}, {}\".format(record['locality'], record['stateprovince'], record['countrycode']))\n if record['stateprovince'] == '':\n data = search_fuzzy(record['locality'], record['stateprovince'], candidates, filter_stateprovince = False, method = 'set', threshold = 80)\n else:\n data = search_fuzzy(record['locality'], record['stateprovince'], candidates, filter_stateprovince = True, method = 'set', threshold = 80)\n logger1.info(\"No. 
of possible matches: {}\".format(len(data)))\n if len(data) > 0:\n for index, row in data.iterrows():\n cur.execute(\"\"\"INSERT INTO gbif_si_matches (gbifid, source, no_records, species, match, score, located_at, timestamp) VALUES \n (%(gbifid)s, %(source)s, %(no_records)s, %(species)s, %(match)s, %(score)s, %(located_at)s, NOW())\"\"\", {'gbifid': record['gbifid'], 'source': 'gbif.species', 'no_records': str(row['no_records']), 'species': sciname['species'], 'match': str(row['uid']), 'score': row['score'], 'located_at': row['located_at']})\n logger1.debug(cur.query)\n #GBIF - genus\n logger1.info(\"GBIF genus: {}\".format(country['countrycode']))\n query_template = \"SELECT MAX(gbifid::bigint)::text as uid, locality as name, count(*) as no_records, countrycode, trim(leading ', ' from replace(municipality || ', ' || county || ', ' || stateprovince || ', ' || countrycode, ', , ', '')) as located_at, stateprovince, recordedBy FROM gbif WHERE {rank} = '{genus}' AND species != '{scientificname}' AND lower(locality) != 'unknown' AND countrycode = '{countrycode}' GROUP BY countrycode, locality, municipality, county, stateprovince, recordedBy\"\n cur.execute(query_template.format(rank = 'genus', genus = sciname['genus'], scientificname = sciname['species'], countrycode = country['countrycode']))\n logger1.debug(cur.query)\n candidates = pd.DataFrame(cur.fetchall())\n logger1.info(\"No. of GBIF candidates: {}\".format(len(candidates)))\n if len(candidates) > 0:\n #Iterate each record\n for index, record in records.iterrows():\n logger1.info(\"record gbifid: {}\".format(record['gbifid']))\n logger1.info(\"locality: {}, {}, {}\".format(record['locality'], record['stateprovince'], record['countrycode']))\n if record['stateprovince'] == '':\n data = search_fuzzy(record['locality'], record['stateprovince'], candidates, filter_stateprovince = False, method = 'set', threshold = 80)\n else:\n data = search_fuzzy(record['locality'], record['stateprovince'], candidates, filter_stateprovince = True, method = 'set', threshold = 80)\n logger1.info(\"No. 
of possible matches: {}\".format(len(data)))\n if len(data) > 0:\n for index, row in data.iterrows():\n cur.execute(\"\"\"INSERT INTO gbif_si_matches (gbifid, source, no_records, species, match, score, located_at, timestamp) VALUES \n (%(gbifid)s, %(source)s, %(no_records)s, %(species)s, %(match)s, %(score)s, %(located_at)s, NOW())\"\"\", {'gbifid': record['gbifid'], 'source': 'gbif.genus', 'no_records': str(row['no_records']), 'species': sciname['species'], 'match': str(row['uid']), 'score': row['score'], 'located_at': row['located_at']})\n logger1.debug(cur.query)\n ######################\n #WDPA\n logger1.info(\"WDPA: {}\".format(country['countrycode']))\n if pycountry.countries.get(alpha_2 = record['countrycode']) != None:\n iso = pycountry.countries.get(alpha_2 = record['countrycode']).alpha_3\n query_template = \"\"\"\n SELECT uid, name, gadm2 as stateprovince, 'wdpa_polygons' as source FROM wdpa_polygons WHERE parent_iso = '{iso}' AND lower(name) != 'unknown'\n UNION \n SELECT uid, orig_name AS name, gadm2 as stateprovince, 'wdpa_polygons' as source FROM wdpa_polygons WHERE parent_iso = '{iso}' AND lower(name) != 'unknown'\n UNION \n SELECT uid, name, gadm2 as stateprovince, 'wdpa_points' as source FROM wdpa_points WHERE parent_iso = '{iso}' AND lower(name) != 'unknown'\n UNION \n SELECT uid, orig_name AS name, gadm2 as stateprovince, 'wdpa_points' as source FROM wdpa_points WHERE parent_iso = '{iso}' AND lower(name) != 'unknown'\n \"\"\"\n cur.execute(query_template.format(iso = iso))\n logger1.debug(cur.query)\n candidates = pd.DataFrame(cur.fetchall())\n logger1.info(\"No. of WDPA candidates: {}\".format(len(candidates)))\n if len(candidates) > 0:\n #Iterate each record\n for index, record in records.iterrows():\n logger1.info(\"record gbifid: {}\".format(record['gbifid']))\n logger1.info(\"locality: {}, {}, {}\".format(record['locality'], record['stateprovince'], record['countrycode']))\n data = search_fuzzy(record['locality'], record['stateprovince'], candidates, method = 'set', threshold = 80)\n logger1.info(\"No. 
of possible matches: {}\".format(len(data)))\n if len(data) > 0:\n for index, row in data.iterrows():\n cur.execute(\"\"\"INSERT INTO gbif_si_matches (gbifid, source, species, match, score, located_at, timestamp) VALUES \n (%(gbifid)s, %(source)s, %(species)s, %(match)s, %(score)s, %(stateprovince)s, NOW())\"\"\", {'gbifid': record['gbifid'], 'source': row['source'], 'species': sciname['species'], 'match': str(row['uid']), 'score': row['score'], 'stateprovince': row['stateprovince']})\n logger1.debug(cur.query)\n ######################\n #GADM\n logger1.info(\"GADM: {}\".format(country['countrycode']))\n if pycountry.countries.get(alpha_2 = record['countrycode']) != None:\n country = pycountry.countries.get(alpha_2 = record['countrycode']).name\n #GADM1\n query_template = \"SELECT uid, name_1 as name, name_0 as stateprovince, 'gadm1' as source FROM gadm1 WHERE name_0 = '{country}' UNION SELECT uid, varname_1 as name, name_0 as stateprovince, 'gadm1' as source FROM gadm1 WHERE name_0 = '{country}' AND varname_1 IS NOT NULL\"\n cur.execute(query_template.format(country = country.replace(\"'\", \"''\")))\n data = pd.DataFrame(cur.fetchall())\n #GADM2\n query_template = \"SELECT uid, name_2 as name, name_1 || ', ' || name_0 as stateprovince, 'gadm2' as source FROM gadm2 WHERE name_0 = '{country}' UNION SELECT uid, varname_2 as name, name_1 || ', ' || name_0 as stateprovince, 'gadm2' as source FROM gadm2 WHERE name_0 = '{country}' AND varname_2 IS NOT NULL\"\n cur.execute(query_template.format(country = country.replace(\"'\", \"''\")))\n data1 = pd.DataFrame(cur.fetchall())\n data = pd.concat([data, data1], ignore_index=True)\n #GADM3\n query_template = \"SELECT uid, name_3 as name, name_2 || ', ' || name_1 || ', ' || name_0 as stateprovince, 'gadm3' as source FROM gadm3 WHERE name_0 = '{country}' UNION SELECT uid, varname_3 as name, name_2 || ', ' || name_1 || ', ' || name_0 as stateprovince, 'gadm3' as source FROM gadm3 WHERE name_0 = '{country}' AND varname_3 IS NOT NULL\"\n cur.execute(query_template.format(country = country.replace(\"'\", \"''\")))\n data1 = pd.DataFrame(cur.fetchall())\n data = pd.concat([data, data1], ignore_index=True)\n #GADM4\n query_template = \"SELECT uid, name_4 as name, name_3 || ', ' || name_2 || ', ' || name_1 || ', ' || name_0 as stateprovince, 'gadm4' as source FROM gadm4 WHERE name_0 = '{country}' UNION SELECT uid, varname_4 as name, name_3 || ', ' || name_2 || ', ' || name_1 || ', ' || name_0 as stateprovince, 'gadm4' as source FROM gadm4 WHERE name_0 = '{country}' AND varname_4 IS NOT NULL\"\n cur.execute(query_template.format(country = country.replace(\"'\", \"''\")))\n data1 = pd.DataFrame(cur.fetchall())\n data = pd.concat([data, data1], ignore_index=True)\n #GADM5\n query_template = \"SELECT uid, name_5 as name, name_4 || ', ' || name_3 || ', ' || name_2 || ', ' || name_1 || ', ' || name_0 as stateprovince, 'gadm5' as source FROM gadm5 WHERE name_0 = '{country}'\"\n cur.execute(query_template.format(country = country.replace(\"'\", \"''\")))\n data1 = pd.DataFrame(cur.fetchall())\n candidates = pd.concat([data, data1], ignore_index=True)\n logger1.info(\"No. 
of GADM candidates: {}\".format(len(candidates)))\n if len(candidates) > 0:\n #Iterate each record\n for index, record in records.iterrows():\n logger1.info(\"record gbifid: {}\".format(record['gbifid']))\n logger1.info(\"locality: {}, {}, {}\".format(record['locality'], record['stateprovince'], record['countrycode']))\n data = search_fuzzy(record['locality'], record['stateprovince'], candidates, method = 'set', threshold = 80)\n logger1.info(\"No. of possible matches: {}\".format(len(data)))\n if len(data) > 0:\n for index, row in data.iterrows():\n cur.execute(\"\"\"INSERT INTO gbif_si_matches (gbifid, source, species, match, score, located_at, timestamp) VALUES \n (%(gbifid)s, %(source)s, %(species)s, %(match)s, %(score)s, %(stateprovince)s, NOW())\"\"\", {'gbifid': record['gbifid'], 'source': row['source'], 'species': sciname['species'], 'match': str(row['uid']), 'score': row['score'], 'stateprovince': row['stateprovince']})\n logger1.debug(cur.query)\n ######################\n #Geonames\n # if record['countrycode'] != None:\n # query_template = \"\"\"\n # SELECT uid, name, gadm2 as stateprovince, 'geonames' as source FROM geonames WHERE country_code = '{countrycode}'\n # UNION\n # SELECT uid, unnest(string_to_array(alternatenames, ',')) as name, gadm2 as stateprovince, 'geonames' as source FROM geonames WHERE country_code = '{countrycode}'\n # \"\"\"\n # cur.execute(query_template.format(countrycode = record['countrycode']))\n # logger1.debug(cur.query)\n # candidates = pd.DataFrame(cur.fetchall())\n # logger1.info(\"No. of candidates: {}\".format(len(candidates)))\n # if len(candidates) > 0:\n # #Iterate each record\n # for index, record in records.iterrows():\n # logger1.info(\"locality: {}, {}, {}\".format(record['locality'], record['stateprovince'], record['countrycode']))\n # data = search_fuzzy(record['locality'], record['stateprovince'], candidates, method = 'set', threshold = 80)\n # for index, row in data.iterrows():\n # cur.execute(\"\"\"INSERT INTO gbif_si_matches (gbifid, source, species, match, score, located_at, timestamp) VALUES \n # (%(gbifid)s, %(source)s, %(species)s, %(match)s, %(score)s, %(stateprovince)s, NOW())\"\"\", {'gbifid': record['gbifid'], 'source': row['source'], 'species': sciname['species'], 'match': str(row['uid']), 'score': row['score'], 'stateprovince': row['stateprovince']})\n # logger1.debug(cur.query)\n ######################\n #GNIS\n if record['countrycode'] == 'US':\n logger1.info(\"GNIS: {}, US\".format(record['stateprovince']))\n query_template = \"SELECT uid, feature_name as name, gadm2 as stateprovince, 'gnis' as source FROM gnis WHERE state_alpha ILIKE '%{stateprovince}%'\"\n cur.execute(query_template.format(stateprovince = record['stateprovince']))\n logger1.debug(cur.query)\n candidates = pd.DataFrame(cur.fetchall())\n logger1.info(\"No. of GNIS candidates: {}\".format(len(candidates)))\n if len(candidates) > 0:\n #Iterate each record\n for index, record in records.iterrows():\n logger1.info(\"record gbifid: {}\".format(record['gbifid']))\n logger1.info(\"locality: {}, {}, {}\".format(record['locality'], record['stateprovince'], record['countrycode']))\n data = search_fuzzy(record['locality'], record['stateprovince'], candidates, method = 'set', threshold = 80)\n logger1.info(\"No. 
of possible matches: {}\".format(len(data)))\n if len(data) > 0:\n for index, row in data.iterrows():\n cur.execute(\"\"\"INSERT INTO gbif_si_matches (gbifid, source, species, match, score, located_at, timestamp) VALUES \n (%(gbifid)s, %(source)s, %(species)s, %(match)s, %(score)s, %(stateprovince)s, NOW())\"\"\", {'gbifid': record['gbifid'], 'source': row['source'], 'species': sciname['species'], 'match': str(row['uid']), 'score': row['score'], 'stateprovince': row['stateprovince']})\n logger1.debug(cur.query)\n #############\n #Lakes\n if pycountry.countries.get(alpha_2 = record['countrycode']) != None:\n country = pycountry.countries.get(alpha_2 = record['countrycode']).name\n logger1.info(\"Lakes: {}\".format(country.replace(\"'\", \"''\")))\n query_template = \"SELECT uid, lake_name as name, gadm2 as stateprovince, 'global_lakes' as source FROM global_lakes WHERE country ILIKE '%{country}%'\"\n cur.execute(query_template.format(country = country.replace(\"'\", \"''\")))\n logger1.debug(cur.query)\n else:\n query_template = \"SELECT uid, lake_name as name, gadm2 as stateprovince, 'global_lakes' as source FROM global_lakes\"\n cur.execute(query_template)\n candidates = pd.DataFrame(cur.fetchall())\n logger1.info(\"No. of global_lakes candidates: {}\".format(len(candidates)))\n if len(candidates) > 0:\n #Iterate each record\n for index, record in records.iterrows():\n logger1.info(\"record gbifid: {}\".format(record['gbifid']))\n logger1.info(\"locality: {}, {}, {}\".format(record['locality'], record['stateprovince'], record['countrycode']))\n data = search_fuzzy(record['locality'], record['stateprovince'], candidates, method = 'set', threshold = 80)\n logger1.info(\"No. of possible matches: {}\".format(len(data)))\n if len(data) > 0:\n for index, row in data.iterrows():\n cur.execute(\"\"\"INSERT INTO gbif_si_matches (gbifid, source, species, match, score, located_at, timestamp) VALUES \n (%(gbifid)s, %(source)s, %(species)s, %(match)s, %(score)s, %(stateprovince)s, NOW())\"\"\", {'gbifid': record['gbifid'], 'source': row['source'], 'species': sciname['species'], 'match': str(row['uid']), 'score': row['score'], 'stateprovince': row['stateprovince']})\n logger1.debug(cur.query)\n #Save summary of results\n cur.execute(\"SELECT count(*) as no_records FROM gbif_si_matches WHERE species = %s\", (sciname['species'],))\n logger1.debug(cur.query)\n no_records = cur.fetchone()\n if no_records['no_records'] > 0:\n cur.execute(\"\"\"INSERT INTO gbif_si_summary (species, kingdom, phylum, class, _order, family, genus, no_records) \n (SELECT %(species)s, %(kingdom)s, %(phylum)s, %(class)s, %(_order)s, %(family)s, %(genus)s, count(*) FROM gbif_si_matches where species = %(species)s);\"\"\", {'species': sciname['species'], 'kingdom': sciname['kingdom'], 'phylum': sciname['phylum'], 'class': sciname['class'], '_order': sciname['_order'], 'family': sciname['family'], 'genus': sciname['genus']})\n logger1.debug(cur.query)\n cur.execute(\"DELETE FROM gbif_si_summary WHERE no_records = 0\")\n \n\n\n\nsys.exit(0)" ]
[ [ "pandas.concat" ] ]
OliviaNabbosa89/Disaster_Responses
[ "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "61cb9281d7dd22aee282b517e2fbf500f0ff9935", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570", "1e66d77c303cec685dfc2ca94f4fca4cc9400570" ]
[ "venv/Lib/site-packages/pandas/tests/series/test_analytics.py", "venv/Lib/site-packages/sklearn/decomposition/_nmf.py", "venv/Lib/site-packages/sklearn/utils/tests/test_testing.py", "venv/Lib/site-packages/pandas/tests/base/test_factorize.py", "venv/Lib/site-packages/sklearn/utils/metaestimators.py", "venv/Lib/site-packages/pandas/tests/io/formats/test_eng_formatting.py", "venv/Lib/site-packages/pandas/tests/series/apply/test_series_apply.py", "venv/Lib/site-packages/pandas/tests/indexes/timedeltas/test_setops.py", "venv/Lib/site-packages/pandas/tests/tslibs/test_parsing.py", "venv/Lib/site-packages/pandas/tests/scalar/interval/test_ops.py", "venv/Lib/site-packages/sklearn/model_selection/_validation.py", "venv/Lib/site-packages/pandas/tests/series/methods/test_fillna.py", "venv/Lib/site-packages/pandas/core/arrays/sparse/dtype.py", "venv/Lib/site-packages/pandas/tests/series/methods/test_searchsorted.py", "venv/Lib/site-packages/pandas/tests/indexes/ranges/test_range.py", "venv/Lib/site-packages/sklearn/ensemble/tests/test_iforest.py", "venv/Lib/site-packages/pandas/tests/frame/methods/test_round.py", "venv/Lib/site-packages/sklearn/feature_selection/_sequential.py", "venv/Lib/site-packages/pandas/core/arrays/string_.py", "venv/Lib/site-packages/pandas/tests/series/indexing/test_multiindex.py", "venv/Lib/site-packages/sklearn/metrics/pairwise.py", "venv/Lib/site-packages/pandas/tests/indexing/test_datetime.py", "venv/Lib/site-packages/pandas/tests/base/test_constructors.py", "venv/Lib/site-packages/pandas/tests/base/test_conversion.py", "venv/Lib/site-packages/sklearn/tests/test_config.py", "venv/Lib/site-packages/pandas/tests/io/excel/test_openpyxl.py", "venv/Lib/site-packages/pandas/core/arrays/sparse/accessor.py", "venv/Lib/site-packages/pandas/core/ops/common.py", "venv/Lib/site-packages/pandas/tests/plotting/test_series.py", "venv/Lib/site-packages/sklearn/decomposition/tests/test_dict_learning.py", "venv/Lib/site-packages/sklearn/datasets/_olivetti_faces.py", "venv/Lib/site-packages/pandas/tests/indexing/test_indexing.py", "venv/Lib/site-packages/pandas/core/tools/timedeltas.py", "venv/Lib/site-packages/sklearn/svm/tests/test_bounds.py", "venv/Lib/site-packages/sklearn/utils/_mask.py" ]
[ "import operator\r\n\r\nimport numpy as np\r\nimport pytest\r\n\r\nimport pandas.util._test_decorators as td\r\n\r\nimport pandas as pd\r\nfrom pandas import DataFrame, Series\r\nimport pandas._testing as tm\r\n\r\n\r\nclass TestSeriesAnalytics:\r\n def test_prod_numpy16_bug(self):\r\n s = Series([1.0, 1.0, 1.0], index=range(3))\r\n result = s.prod()\r\n\r\n assert not isinstance(result, Series)\r\n\r\n def test_matmul(self):\r\n # matmul test is for GH #10259\r\n a = Series(np.random.randn(4), index=[\"p\", \"q\", \"r\", \"s\"])\r\n b = DataFrame(\r\n np.random.randn(3, 4), index=[\"1\", \"2\", \"3\"], columns=[\"p\", \"q\", \"r\", \"s\"]\r\n ).T\r\n\r\n # Series @ DataFrame -> Series\r\n result = operator.matmul(a, b)\r\n expected = Series(np.dot(a.values, b.values), index=[\"1\", \"2\", \"3\"])\r\n tm.assert_series_equal(result, expected)\r\n\r\n # DataFrame @ Series -> Series\r\n result = operator.matmul(b.T, a)\r\n expected = Series(np.dot(b.T.values, a.T.values), index=[\"1\", \"2\", \"3\"])\r\n tm.assert_series_equal(result, expected)\r\n\r\n # Series @ Series -> scalar\r\n result = operator.matmul(a, a)\r\n expected = np.dot(a.values, a.values)\r\n tm.assert_almost_equal(result, expected)\r\n\r\n # GH 21530\r\n # vector (1D np.array) @ Series (__rmatmul__)\r\n result = operator.matmul(a.values, a)\r\n expected = np.dot(a.values, a.values)\r\n tm.assert_almost_equal(result, expected)\r\n\r\n # GH 21530\r\n # vector (1D list) @ Series (__rmatmul__)\r\n result = operator.matmul(a.values.tolist(), a)\r\n expected = np.dot(a.values, a.values)\r\n tm.assert_almost_equal(result, expected)\r\n\r\n # GH 21530\r\n # matrix (2D np.array) @ Series (__rmatmul__)\r\n result = operator.matmul(b.T.values, a)\r\n expected = np.dot(b.T.values, a.values)\r\n tm.assert_almost_equal(result, expected)\r\n\r\n # GH 21530\r\n # matrix (2D nested lists) @ Series (__rmatmul__)\r\n result = operator.matmul(b.T.values.tolist(), a)\r\n expected = np.dot(b.T.values, a.values)\r\n tm.assert_almost_equal(result, expected)\r\n\r\n # mixed dtype DataFrame @ Series\r\n a[\"p\"] = int(a.p)\r\n result = operator.matmul(b.T, a)\r\n expected = Series(np.dot(b.T.values, a.T.values), index=[\"1\", \"2\", \"3\"])\r\n tm.assert_series_equal(result, expected)\r\n\r\n # different dtypes DataFrame @ Series\r\n a = a.astype(int)\r\n result = operator.matmul(b.T, a)\r\n expected = Series(np.dot(b.T.values, a.T.values), index=[\"1\", \"2\", \"3\"])\r\n tm.assert_series_equal(result, expected)\r\n\r\n msg = r\"Dot product shape mismatch, \\(4,\\) vs \\(3,\\)\"\r\n # exception raised is of type Exception\r\n with pytest.raises(Exception, match=msg):\r\n a.dot(a.values[:3])\r\n msg = \"matrices are not aligned\"\r\n with pytest.raises(ValueError, match=msg):\r\n a.dot(b.T)\r\n\r\n def test_ptp(self):\r\n # GH21614\r\n N = 1000\r\n arr = np.random.randn(N)\r\n ser = Series(arr)\r\n assert np.ptp(ser) == np.ptp(arr)\r\n\r\n def test_repeat(self):\r\n s = Series(np.random.randn(3), index=[\"a\", \"b\", \"c\"])\r\n\r\n reps = s.repeat(5)\r\n exp = Series(s.values.repeat(5), index=s.index.values.repeat(5))\r\n tm.assert_series_equal(reps, exp)\r\n\r\n to_rep = [2, 3, 4]\r\n reps = s.repeat(to_rep)\r\n exp = Series(s.values.repeat(to_rep), index=s.index.values.repeat(to_rep))\r\n tm.assert_series_equal(reps, exp)\r\n\r\n def test_numpy_repeat(self):\r\n s = Series(np.arange(3), name=\"x\")\r\n expected = Series(s.values.repeat(2), name=\"x\", index=s.index.values.repeat(2))\r\n tm.assert_series_equal(np.repeat(s, 2), expected)\r\n\r\n 
msg = \"the 'axis' parameter is not supported\"\r\n with pytest.raises(ValueError, match=msg):\r\n np.repeat(s, 2, axis=0)\r\n\r\n def test_is_monotonic(self):\r\n\r\n s = Series(np.random.randint(0, 10, size=1000))\r\n assert not s.is_monotonic\r\n s = Series(np.arange(1000))\r\n assert s.is_monotonic is True\r\n assert s.is_monotonic_increasing is True\r\n s = Series(np.arange(1000, 0, -1))\r\n assert s.is_monotonic_decreasing is True\r\n\r\n s = Series(pd.date_range(\"20130101\", periods=10))\r\n assert s.is_monotonic is True\r\n assert s.is_monotonic_increasing is True\r\n s = Series(list(reversed(s.tolist())))\r\n assert s.is_monotonic is False\r\n assert s.is_monotonic_decreasing is True\r\n\r\n @pytest.mark.parametrize(\"func\", [np.any, np.all])\r\n @pytest.mark.parametrize(\"kwargs\", [dict(keepdims=True), dict(out=object())])\r\n @td.skip_if_np_lt(\"1.15\")\r\n def test_validate_any_all_out_keepdims_raises(self, kwargs, func):\r\n s = pd.Series([1, 2])\r\n param = list(kwargs)[0]\r\n name = func.__name__\r\n\r\n msg = (\r\n f\"the '{param}' parameter is not \"\r\n \"supported in the pandas \"\r\n fr\"implementation of {name}\\(\\)\"\r\n )\r\n with pytest.raises(ValueError, match=msg):\r\n func(s, **kwargs)\r\n\r\n @td.skip_if_np_lt(\"1.15\")\r\n def test_validate_sum_initial(self):\r\n s = pd.Series([1, 2])\r\n msg = (\r\n r\"the 'initial' parameter is not \"\r\n r\"supported in the pandas \"\r\n r\"implementation of sum\\(\\)\"\r\n )\r\n with pytest.raises(ValueError, match=msg):\r\n np.sum(s, initial=10)\r\n\r\n def test_validate_median_initial(self):\r\n s = pd.Series([1, 2])\r\n msg = (\r\n r\"the 'overwrite_input' parameter is not \"\r\n r\"supported in the pandas \"\r\n r\"implementation of median\\(\\)\"\r\n )\r\n with pytest.raises(ValueError, match=msg):\r\n # It seems like np.median doesn't dispatch, so we use the\r\n # method instead of the ufunc.\r\n s.median(overwrite_input=True)\r\n\r\n @td.skip_if_np_lt(\"1.15\")\r\n def test_validate_stat_keepdims(self):\r\n s = pd.Series([1, 2])\r\n msg = (\r\n r\"the 'keepdims' parameter is not \"\r\n r\"supported in the pandas \"\r\n r\"implementation of sum\\(\\)\"\r\n )\r\n with pytest.raises(ValueError, match=msg):\r\n np.sum(s, keepdims=True)\r\n\r\n def test_td64_summation_overflow(self):\r\n # GH 9442\r\n s = pd.Series(pd.date_range(\"20130101\", periods=100000, freq=\"H\"))\r\n s[0] += pd.Timedelta(\"1s 1ms\")\r\n\r\n # mean\r\n result = (s - s.min()).mean()\r\n expected = pd.Timedelta((pd.TimedeltaIndex((s - s.min())).asi8 / len(s)).sum())\r\n\r\n # the computation is converted to float so\r\n # might be some loss of precision\r\n assert np.allclose(result.value / 1000, expected.value / 1000)\r\n\r\n # sum\r\n msg = \"overflow in timedelta operation\"\r\n with pytest.raises(ValueError, match=msg):\r\n (s - s.min()).sum()\r\n\r\n s1 = s[0:10000]\r\n with pytest.raises(ValueError, match=msg):\r\n (s1 - s1.min()).sum()\r\n s2 = s[0:1000]\r\n (s2 - s2.min()).sum()\r\n", "\"\"\" Non-negative matrix factorization.\r\n\"\"\"\r\n# Author: Vlad Niculae\r\n# Lars Buitinck\r\n# Mathieu Blondel <[email protected]>\r\n# Tom Dupre la Tour\r\n# License: BSD 3 clause\r\n\r\nimport numbers\r\nimport numpy as np\r\nimport scipy.sparse as sp\r\nimport time\r\nimport warnings\r\nfrom math import sqrt\r\n\r\nfrom ._cdnmf_fast import _update_cdnmf_fast\r\nfrom .._config import config_context\r\nfrom ..base import BaseEstimator, TransformerMixin\r\nfrom ..exceptions import ConvergenceWarning\r\nfrom ..utils import check_random_state, 
check_array\r\nfrom ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm\r\nfrom ..utils.validation import check_is_fitted, check_non_negative\r\nfrom ..utils.validation import _deprecate_positional_args\r\n\r\nEPSILON = np.finfo(np.float32).eps\r\n\r\n\r\ndef norm(x):\r\n \"\"\"Dot product-based Euclidean norm implementation.\r\n\r\n See: http://fseoane.net/blog/2011/computing-the-vector-norm/\r\n\r\n Parameters\r\n ----------\r\n x : array-like\r\n Vector for which to compute the norm.\r\n \"\"\"\r\n return sqrt(squared_norm(x))\r\n\r\n\r\ndef trace_dot(X, Y):\r\n \"\"\"Trace of np.dot(X, Y.T).\r\n\r\n Parameters\r\n ----------\r\n X : array-like\r\n First matrix.\r\n Y : array-like\r\n Second matrix.\r\n \"\"\"\r\n return np.dot(X.ravel(), Y.ravel())\r\n\r\n\r\ndef _check_init(A, shape, whom):\r\n A = check_array(A)\r\n if np.shape(A) != shape:\r\n raise ValueError('Array with wrong shape passed to %s. Expected %s, '\r\n 'but got %s ' % (whom, shape, np.shape(A)))\r\n check_non_negative(A, whom)\r\n if np.max(A) == 0:\r\n raise ValueError('Array passed to %s is full of zeros.' % whom)\r\n\r\n\r\ndef _beta_divergence(X, W, H, beta, square_root=False):\r\n \"\"\"Compute the beta-divergence of X and dot(W, H).\r\n\r\n Parameters\r\n ----------\r\n X : float or array-like of shape (n_samples, n_features)\r\n\r\n W : float or array-like of shape (n_samples, n_components)\r\n\r\n H : float or array-like of shape (n_components, n_features)\r\n\r\n beta : float or {'frobenius', 'kullback-leibler', 'itakura-saito'}\r\n Parameter of the beta-divergence.\r\n If beta == 2, this is half the Frobenius *squared* norm.\r\n If beta == 1, this is the generalized Kullback-Leibler divergence.\r\n If beta == 0, this is the Itakura-Saito divergence.\r\n Else, this is the general beta-divergence.\r\n\r\n square_root : bool, default=False\r\n If True, return np.sqrt(2 * res)\r\n For beta == 2, it corresponds to the Frobenius norm.\r\n\r\n Returns\r\n -------\r\n res : float\r\n Beta divergence of X and np.dot(X, H).\r\n \"\"\"\r\n beta = _beta_loss_to_float(beta)\r\n\r\n # The method can be called with scalars\r\n if not sp.issparse(X):\r\n X = np.atleast_2d(X)\r\n W = np.atleast_2d(W)\r\n H = np.atleast_2d(H)\r\n\r\n # Frobenius norm\r\n if beta == 2:\r\n # Avoid the creation of the dense np.dot(W, H) if X is sparse.\r\n if sp.issparse(X):\r\n norm_X = np.dot(X.data, X.data)\r\n norm_WH = trace_dot(np.linalg.multi_dot([W.T, W, H]), H)\r\n cross_prod = trace_dot((X * H.T), W)\r\n res = (norm_X + norm_WH - 2. 
* cross_prod) / 2.\r\n else:\r\n res = squared_norm(X - np.dot(W, H)) / 2.\r\n\r\n if square_root:\r\n return np.sqrt(res * 2)\r\n else:\r\n return res\r\n\r\n if sp.issparse(X):\r\n # compute np.dot(W, H) only where X is nonzero\r\n WH_data = _special_sparse_dot(W, H, X).data\r\n X_data = X.data\r\n else:\r\n WH = np.dot(W, H)\r\n WH_data = WH.ravel()\r\n X_data = X.ravel()\r\n\r\n # do not affect the zeros: here 0 ** (-1) = 0 and not infinity\r\n indices = X_data > EPSILON\r\n WH_data = WH_data[indices]\r\n X_data = X_data[indices]\r\n\r\n # used to avoid division by zero\r\n WH_data[WH_data == 0] = EPSILON\r\n\r\n # generalized Kullback-Leibler divergence\r\n if beta == 1:\r\n # fast and memory efficient computation of np.sum(np.dot(W, H))\r\n sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1))\r\n # computes np.sum(X * log(X / WH)) only where X is nonzero\r\n div = X_data / WH_data\r\n res = np.dot(X_data, np.log(div))\r\n # add full np.sum(np.dot(W, H)) - np.sum(X)\r\n res += sum_WH - X_data.sum()\r\n\r\n # Itakura-Saito divergence\r\n elif beta == 0:\r\n div = X_data / WH_data\r\n res = np.sum(div) - np.product(X.shape) - np.sum(np.log(div))\r\n\r\n # beta-divergence, beta not in (0, 1, 2)\r\n else:\r\n if sp.issparse(X):\r\n # slow loop, but memory efficient computation of :\r\n # np.sum(np.dot(W, H) ** beta)\r\n sum_WH_beta = 0\r\n for i in range(X.shape[1]):\r\n sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** beta)\r\n\r\n else:\r\n sum_WH_beta = np.sum(WH ** beta)\r\n\r\n sum_X_WH = np.dot(X_data, WH_data ** (beta - 1))\r\n res = (X_data ** beta).sum() - beta * sum_X_WH\r\n res += sum_WH_beta * (beta - 1)\r\n res /= beta * (beta - 1)\r\n\r\n if square_root:\r\n return np.sqrt(2 * res)\r\n else:\r\n return res\r\n\r\n\r\ndef _special_sparse_dot(W, H, X):\r\n \"\"\"Computes np.dot(W, H), only where X is non zero.\"\"\"\r\n if sp.issparse(X):\r\n ii, jj = X.nonzero()\r\n n_vals = ii.shape[0]\r\n dot_vals = np.empty(n_vals)\r\n n_components = W.shape[1]\r\n\r\n batch_size = max(n_components, n_vals // n_components)\r\n for start in range(0, n_vals, batch_size):\r\n batch = slice(start, start + batch_size)\r\n dot_vals[batch] = np.multiply(W[ii[batch], :],\r\n H.T[jj[batch], :]).sum(axis=1)\r\n\r\n WH = sp.coo_matrix((dot_vals, (ii, jj)), shape=X.shape)\r\n return WH.tocsr()\r\n else:\r\n return np.dot(W, H)\r\n\r\n\r\ndef _compute_regularization(alpha, l1_ratio, regularization):\r\n \"\"\"Compute L1 and L2 regularization coefficients for W and H.\"\"\"\r\n alpha_H = 0.\r\n alpha_W = 0.\r\n if regularization in ('both', 'components'):\r\n alpha_H = float(alpha)\r\n if regularization in ('both', 'transformation'):\r\n alpha_W = float(alpha)\r\n\r\n l1_reg_W = alpha_W * l1_ratio\r\n l1_reg_H = alpha_H * l1_ratio\r\n l2_reg_W = alpha_W * (1. - l1_ratio)\r\n l2_reg_H = alpha_H * (1. 
- l1_ratio)\r\n return l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H\r\n\r\n\r\ndef _check_string_param(solver, regularization, beta_loss, init):\r\n allowed_solver = ('cd', 'mu')\r\n if solver not in allowed_solver:\r\n raise ValueError(\r\n 'Invalid solver parameter: got %r instead of one of %r' %\r\n (solver, allowed_solver))\r\n\r\n allowed_regularization = ('both', 'components', 'transformation', None)\r\n if regularization not in allowed_regularization:\r\n raise ValueError(\r\n 'Invalid regularization parameter: got %r instead of one of %r' %\r\n (regularization, allowed_regularization))\r\n\r\n # 'mu' is the only solver that handles other beta losses than 'frobenius'\r\n if solver != 'mu' and beta_loss not in (2, 'frobenius'):\r\n raise ValueError(\r\n 'Invalid beta_loss parameter: solver %r does not handle beta_loss'\r\n ' = %r' % (solver, beta_loss))\r\n\r\n if solver == 'mu' and init == 'nndsvd':\r\n warnings.warn(\"The multiplicative update ('mu') solver cannot update \"\r\n \"zeros present in the initialization, and so leads to \"\r\n \"poorer results when used jointly with init='nndsvd'. \"\r\n \"You may try init='nndsvda' or init='nndsvdar' instead.\",\r\n UserWarning)\r\n\r\n beta_loss = _beta_loss_to_float(beta_loss)\r\n return beta_loss\r\n\r\n\r\ndef _beta_loss_to_float(beta_loss):\r\n \"\"\"Convert string beta_loss to float.\"\"\"\r\n allowed_beta_loss = {'frobenius': 2,\r\n 'kullback-leibler': 1,\r\n 'itakura-saito': 0}\r\n if isinstance(beta_loss, str) and beta_loss in allowed_beta_loss:\r\n beta_loss = allowed_beta_loss[beta_loss]\r\n\r\n if not isinstance(beta_loss, numbers.Number):\r\n raise ValueError('Invalid beta_loss parameter: got %r instead '\r\n 'of one of %r, or a float.' %\r\n (beta_loss, allowed_beta_loss.keys()))\r\n return beta_loss\r\n\r\n\r\ndef _initialize_nmf(X, n_components, init='warn', eps=1e-6,\r\n random_state=None):\r\n \"\"\"Algorithms for NMF initialization.\r\n\r\n Computes an initial guess for the non-negative\r\n rank k matrix approximation for X: X = WH.\r\n\r\n Parameters\r\n ----------\r\n X : array-like of shape (n_samples, n_features)\r\n The data matrix to be decomposed.\r\n\r\n n_components : int\r\n The number of components desired in the approximation.\r\n\r\n init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar'}, default=None\r\n Method used to initialize the procedure.\r\n Default: None.\r\n Valid options:\r\n\r\n - None: 'nndsvd' if n_components <= min(n_samples, n_features),\r\n otherwise 'random'.\r\n\r\n - 'random': non-negative random matrices, scaled with:\r\n sqrt(X.mean() / n_components)\r\n\r\n - 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)\r\n initialization (better for sparseness)\r\n\r\n - 'nndsvda': NNDSVD with zeros filled with the average of X\r\n (better when sparsity is not desired)\r\n\r\n - 'nndsvdar': NNDSVD with zeros filled with small random values\r\n (generally faster, less accurate alternative to NNDSVDa\r\n for when sparsity is not desired)\r\n\r\n - 'custom': use custom matrices W and H\r\n\r\n eps : float, default=1e-6\r\n Truncate all values less then this in output to zero.\r\n\r\n random_state : int, RandomState instance or None, default=None\r\n Used when ``init`` == 'nndsvdar' or 'random'. 
Pass an int for\r\n reproducible results across multiple function calls.\r\n See :term:`Glossary <random_state>`.\r\n\r\n Returns\r\n -------\r\n W : array-like of shape (n_samples, n_components)\r\n Initial guesses for solving X ~= WH.\r\n\r\n H : array-like of shape (n_components, n_features)\r\n Initial guesses for solving X ~= WH.\r\n\r\n References\r\n ----------\r\n C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for\r\n nonnegative matrix factorization - Pattern Recognition, 2008\r\n http://tinyurl.com/nndsvd\r\n \"\"\"\r\n if init == 'warn':\r\n warnings.warn((\"The 'init' value, when 'init=None' and \"\r\n \"n_components is less than n_samples and \"\r\n \"n_features, will be changed from 'nndsvd' to \"\r\n \"'nndsvda' in 1.1 (renaming of 0.26).\"), FutureWarning)\r\n init = None\r\n\r\n check_non_negative(X, \"NMF initialization\")\r\n n_samples, n_features = X.shape\r\n\r\n if (init is not None and init != 'random'\r\n and n_components > min(n_samples, n_features)):\r\n raise ValueError(\"init = '{}' can only be used when \"\r\n \"n_components <= min(n_samples, n_features)\"\r\n .format(init))\r\n\r\n if init is None:\r\n if n_components <= min(n_samples, n_features):\r\n init = 'nndsvd'\r\n else:\r\n init = 'random'\r\n\r\n # Random initialization\r\n if init == 'random':\r\n avg = np.sqrt(X.mean() / n_components)\r\n rng = check_random_state(random_state)\r\n H = avg * rng.randn(n_components, n_features).astype(X.dtype,\r\n copy=False)\r\n W = avg * rng.randn(n_samples, n_components).astype(X.dtype,\r\n copy=False)\r\n np.abs(H, out=H)\r\n np.abs(W, out=W)\r\n return W, H\r\n\r\n # NNDSVD initialization\r\n U, S, V = randomized_svd(X, n_components, random_state=random_state)\r\n W = np.zeros_like(U)\r\n H = np.zeros_like(V)\r\n\r\n # The leading singular triplet is non-negative\r\n # so it can be used as is for initialization.\r\n W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])\r\n H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])\r\n\r\n for j in range(1, n_components):\r\n x, y = U[:, j], V[j, :]\r\n\r\n # extract positive and negative parts of column vectors\r\n x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)\r\n x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))\r\n\r\n # and their norms\r\n x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)\r\n x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)\r\n\r\n m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm\r\n\r\n # choose update\r\n if m_p > m_n:\r\n u = x_p / x_p_nrm\r\n v = y_p / y_p_nrm\r\n sigma = m_p\r\n else:\r\n u = x_n / x_n_nrm\r\n v = y_n / y_n_nrm\r\n sigma = m_n\r\n\r\n lbd = np.sqrt(S[j] * sigma)\r\n W[:, j] = lbd * u\r\n H[j, :] = lbd * v\r\n\r\n W[W < eps] = 0\r\n H[H < eps] = 0\r\n\r\n if init == \"nndsvd\":\r\n pass\r\n elif init == \"nndsvda\":\r\n avg = X.mean()\r\n W[W == 0] = avg\r\n H[H == 0] = avg\r\n elif init == \"nndsvdar\":\r\n rng = check_random_state(random_state)\r\n avg = X.mean()\r\n W[W == 0] = abs(avg * rng.randn(len(W[W == 0])) / 100)\r\n H[H == 0] = abs(avg * rng.randn(len(H[H == 0])) / 100)\r\n else:\r\n raise ValueError(\r\n 'Invalid init parameter: got %r instead of one of %r' %\r\n (init, (None, 'random', 'nndsvd', 'nndsvda', 'nndsvdar')))\r\n\r\n return W, H\r\n\r\n\r\ndef _update_coordinate_descent(X, W, Ht, l1_reg, l2_reg, shuffle,\r\n random_state):\r\n \"\"\"Helper function for _fit_coordinate_descent.\r\n\r\n Update W to minimize the objective function, iterating once over all\r\n coordinates. 
By symmetry, to update H, one can call\r\n _update_coordinate_descent(X.T, Ht, W, ...).\r\n\r\n \"\"\"\r\n n_components = Ht.shape[1]\r\n\r\n HHt = np.dot(Ht.T, Ht)\r\n XHt = safe_sparse_dot(X, Ht)\r\n\r\n # L2 regularization corresponds to increase of the diagonal of HHt\r\n if l2_reg != 0.:\r\n # adds l2_reg only on the diagonal\r\n HHt.flat[::n_components + 1] += l2_reg\r\n # L1 regularization corresponds to decrease of each element of XHt\r\n if l1_reg != 0.:\r\n XHt -= l1_reg\r\n\r\n if shuffle:\r\n permutation = random_state.permutation(n_components)\r\n else:\r\n permutation = np.arange(n_components)\r\n # The following seems to be required on 64-bit Windows w/ Python 3.5.\r\n permutation = np.asarray(permutation, dtype=np.intp)\r\n return _update_cdnmf_fast(W, HHt, XHt, permutation)\r\n\r\n\r\ndef _fit_coordinate_descent(X, W, H, tol=1e-4, max_iter=200, l1_reg_W=0,\r\n l1_reg_H=0, l2_reg_W=0, l2_reg_H=0, update_H=True,\r\n verbose=0, shuffle=False, random_state=None):\r\n \"\"\"Compute Non-negative Matrix Factorization (NMF) with Coordinate Descent\r\n\r\n The objective function is minimized with an alternating minimization of W\r\n and H. Each minimization is done with a cyclic (up to a permutation of the\r\n features) Coordinate Descent.\r\n\r\n Parameters\r\n ----------\r\n X : array-like of shape (n_samples, n_features)\r\n Constant matrix.\r\n\r\n W : array-like of shape (n_samples, n_components)\r\n Initial guess for the solution.\r\n\r\n H : array-like of shape (n_components, n_features)\r\n Initial guess for the solution.\r\n\r\n tol : float, default=1e-4\r\n Tolerance of the stopping condition.\r\n\r\n max_iter : int, default=200\r\n Maximum number of iterations before timing out.\r\n\r\n l1_reg_W : float, default=0.\r\n L1 regularization parameter for W.\r\n\r\n l1_reg_H : float, default=0.\r\n L1 regularization parameter for H.\r\n\r\n l2_reg_W : float, default=0.\r\n L2 regularization parameter for W.\r\n\r\n l2_reg_H : float, default=0.\r\n L2 regularization parameter for H.\r\n\r\n update_H : bool, default=True\r\n Set to True, both W and H will be estimated from initial guesses.\r\n Set to False, only W will be estimated.\r\n\r\n verbose : int, default=0\r\n The verbosity level.\r\n\r\n shuffle : bool, default=False\r\n If true, randomize the order of coordinates in the CD solver.\r\n\r\n random_state : int, RandomState instance or None, default=None\r\n Used to randomize the coordinates in the CD solver, when\r\n ``shuffle`` is set to ``True``. Pass an int for reproducible\r\n results across multiple function calls.\r\n See :term:`Glossary <random_state>`.\r\n\r\n Returns\r\n -------\r\n W : ndarray of shape (n_samples, n_components)\r\n Solution to the non-negative least squares problem.\r\n\r\n H : ndarray of shape (n_components, n_features)\r\n Solution to the non-negative least squares problem.\r\n\r\n n_iter : int\r\n The number of iterations done by the algorithm.\r\n\r\n References\r\n ----------\r\n Cichocki, Andrzej, and Phan, Anh-Huy. 
\"Fast local algorithms for\r\n large scale nonnegative matrix and tensor factorizations.\"\r\n IEICE transactions on fundamentals of electronics, communications and\r\n computer sciences 92.3: 708-721, 2009.\r\n \"\"\"\r\n # so W and Ht are both in C order in memory\r\n Ht = check_array(H.T, order='C')\r\n X = check_array(X, accept_sparse='csr')\r\n\r\n rng = check_random_state(random_state)\r\n\r\n for n_iter in range(1, max_iter + 1):\r\n violation = 0.\r\n\r\n # Update W\r\n violation += _update_coordinate_descent(X, W, Ht, l1_reg_W,\r\n l2_reg_W, shuffle, rng)\r\n # Update H\r\n if update_H:\r\n violation += _update_coordinate_descent(X.T, Ht, W, l1_reg_H,\r\n l2_reg_H, shuffle, rng)\r\n\r\n if n_iter == 1:\r\n violation_init = violation\r\n\r\n if violation_init == 0:\r\n break\r\n\r\n if verbose:\r\n print(\"violation:\", violation / violation_init)\r\n\r\n if violation / violation_init <= tol:\r\n if verbose:\r\n print(\"Converged at iteration\", n_iter + 1)\r\n break\r\n\r\n return W, Ht.T, n_iter\r\n\r\n\r\ndef _multiplicative_update_w(X, W, H, beta_loss, l1_reg_W, l2_reg_W, gamma,\r\n H_sum=None, HHt=None, XHt=None, update_H=True):\r\n \"\"\"Update W in Multiplicative Update NMF.\"\"\"\r\n if beta_loss == 2:\r\n # Numerator\r\n if XHt is None:\r\n XHt = safe_sparse_dot(X, H.T)\r\n if update_H:\r\n # avoid a copy of XHt, which will be re-computed (update_H=True)\r\n numerator = XHt\r\n else:\r\n # preserve the XHt, which is not re-computed (update_H=False)\r\n numerator = XHt.copy()\r\n\r\n # Denominator\r\n if HHt is None:\r\n HHt = np.dot(H, H.T)\r\n denominator = np.dot(W, HHt)\r\n\r\n else:\r\n # Numerator\r\n # if X is sparse, compute WH only where X is non zero\r\n WH_safe_X = _special_sparse_dot(W, H, X)\r\n if sp.issparse(X):\r\n WH_safe_X_data = WH_safe_X.data\r\n X_data = X.data\r\n else:\r\n WH_safe_X_data = WH_safe_X\r\n X_data = X\r\n # copy used in the Denominator\r\n WH = WH_safe_X.copy()\r\n if beta_loss - 1. < 0:\r\n WH[WH == 0] = EPSILON\r\n\r\n # to avoid taking a negative power of zero\r\n if beta_loss - 2. 
< 0:\r\n WH_safe_X_data[WH_safe_X_data == 0] = EPSILON\r\n\r\n if beta_loss == 1:\r\n np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data)\r\n elif beta_loss == 0:\r\n # speeds up computation time\r\n # refer to /numpy/numpy/issues/9363\r\n WH_safe_X_data **= -1\r\n WH_safe_X_data **= 2\r\n # element-wise multiplication\r\n WH_safe_X_data *= X_data\r\n else:\r\n WH_safe_X_data **= beta_loss - 2\r\n # element-wise multiplication\r\n WH_safe_X_data *= X_data\r\n\r\n # here numerator = dot(X * (dot(W, H) ** (beta_loss - 2)), H.T)\r\n numerator = safe_sparse_dot(WH_safe_X, H.T)\r\n\r\n # Denominator\r\n if beta_loss == 1:\r\n if H_sum is None:\r\n H_sum = np.sum(H, axis=1) # shape(n_components, )\r\n denominator = H_sum[np.newaxis, :]\r\n\r\n else:\r\n # computation of WHHt = dot(dot(W, H) ** beta_loss - 1, H.T)\r\n if sp.issparse(X):\r\n # memory efficient computation\r\n # (compute row by row, avoiding the dense matrix WH)\r\n WHHt = np.empty(W.shape)\r\n for i in range(X.shape[0]):\r\n WHi = np.dot(W[i, :], H)\r\n if beta_loss - 1 < 0:\r\n WHi[WHi == 0] = EPSILON\r\n WHi **= beta_loss - 1\r\n WHHt[i, :] = np.dot(WHi, H.T)\r\n else:\r\n WH **= beta_loss - 1\r\n WHHt = np.dot(WH, H.T)\r\n denominator = WHHt\r\n\r\n # Add L1 and L2 regularization\r\n if l1_reg_W > 0:\r\n denominator += l1_reg_W\r\n if l2_reg_W > 0:\r\n denominator = denominator + l2_reg_W * W\r\n denominator[denominator == 0] = EPSILON\r\n\r\n numerator /= denominator\r\n delta_W = numerator\r\n\r\n # gamma is in ]0, 1]\r\n if gamma != 1:\r\n delta_W **= gamma\r\n\r\n return delta_W, H_sum, HHt, XHt\r\n\r\n\r\ndef _multiplicative_update_h(X, W, H, beta_loss, l1_reg_H, l2_reg_H, gamma):\r\n \"\"\"Update H in Multiplicative Update NMF.\"\"\"\r\n if beta_loss == 2:\r\n numerator = safe_sparse_dot(W.T, X)\r\n denominator = np.linalg.multi_dot([W.T, W, H])\r\n\r\n else:\r\n # Numerator\r\n WH_safe_X = _special_sparse_dot(W, H, X)\r\n if sp.issparse(X):\r\n WH_safe_X_data = WH_safe_X.data\r\n X_data = X.data\r\n else:\r\n WH_safe_X_data = WH_safe_X\r\n X_data = X\r\n # copy used in the Denominator\r\n WH = WH_safe_X.copy()\r\n if beta_loss - 1. < 0:\r\n WH[WH == 0] = EPSILON\r\n\r\n # to avoid division by zero\r\n if beta_loss - 2. 
< 0:\r\n WH_safe_X_data[WH_safe_X_data == 0] = EPSILON\r\n\r\n if beta_loss == 1:\r\n np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data)\r\n elif beta_loss == 0:\r\n # speeds up computation time\r\n # refer to /numpy/numpy/issues/9363\r\n WH_safe_X_data **= -1\r\n WH_safe_X_data **= 2\r\n # element-wise multiplication\r\n WH_safe_X_data *= X_data\r\n else:\r\n WH_safe_X_data **= beta_loss - 2\r\n # element-wise multiplication\r\n WH_safe_X_data *= X_data\r\n\r\n # here numerator = dot(W.T, (dot(W, H) ** (beta_loss - 2)) * X)\r\n numerator = safe_sparse_dot(W.T, WH_safe_X)\r\n\r\n # Denominator\r\n if beta_loss == 1:\r\n W_sum = np.sum(W, axis=0) # shape(n_components, )\r\n W_sum[W_sum == 0] = 1.\r\n denominator = W_sum[:, np.newaxis]\r\n\r\n # beta_loss not in (1, 2)\r\n else:\r\n # computation of WtWH = dot(W.T, dot(W, H) ** beta_loss - 1)\r\n if sp.issparse(X):\r\n # memory efficient computation\r\n # (compute column by column, avoiding the dense matrix WH)\r\n WtWH = np.empty(H.shape)\r\n for i in range(X.shape[1]):\r\n WHi = np.dot(W, H[:, i])\r\n if beta_loss - 1 < 0:\r\n WHi[WHi == 0] = EPSILON\r\n WHi **= beta_loss - 1\r\n WtWH[:, i] = np.dot(W.T, WHi)\r\n else:\r\n WH **= beta_loss - 1\r\n WtWH = np.dot(W.T, WH)\r\n denominator = WtWH\r\n\r\n # Add L1 and L2 regularization\r\n if l1_reg_H > 0:\r\n denominator += l1_reg_H\r\n if l2_reg_H > 0:\r\n denominator = denominator + l2_reg_H * H\r\n denominator[denominator == 0] = EPSILON\r\n\r\n numerator /= denominator\r\n delta_H = numerator\r\n\r\n # gamma is in ]0, 1]\r\n if gamma != 1:\r\n delta_H **= gamma\r\n\r\n return delta_H\r\n\r\n\r\ndef _fit_multiplicative_update(X, W, H, beta_loss='frobenius',\r\n max_iter=200, tol=1e-4,\r\n l1_reg_W=0, l1_reg_H=0, l2_reg_W=0, l2_reg_H=0,\r\n update_H=True, verbose=0):\r\n \"\"\"Compute Non-negative Matrix Factorization with Multiplicative Update.\r\n\r\n The objective function is _beta_divergence(X, WH) and is minimized with an\r\n alternating minimization of W and H. Each minimization is done with a\r\n Multiplicative Update.\r\n\r\n Parameters\r\n ----------\r\n X : array-like of shape (n_samples, n_features)\r\n Constant input matrix.\r\n\r\n W : array-like of shape (n_samples, n_components)\r\n Initial guess for the solution.\r\n\r\n H : array-like of shape (n_components, n_features)\r\n Initial guess for the solution.\r\n\r\n beta_loss : float or {'frobenius', 'kullback-leibler', \\\r\n 'itakura-saito'}, default='frobenius'\r\n String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}.\r\n Beta divergence to be minimized, measuring the distance between X\r\n and the dot product WH. Note that values different from 'frobenius'\r\n (or 2) and 'kullback-leibler' (or 1) lead to significantly slower\r\n fits. 
Note that for beta_loss <= 0 (or 'itakura-saito'), the input\r\n matrix X cannot contain zeros.\r\n\r\n max_iter : int, default=200\r\n Number of iterations.\r\n\r\n tol : float, default=1e-4\r\n Tolerance of the stopping condition.\r\n\r\n l1_reg_W : float, default=0.\r\n L1 regularization parameter for W.\r\n\r\n l1_reg_H : float, default=0.\r\n L1 regularization parameter for H.\r\n\r\n l2_reg_W : float, default=0.\r\n L2 regularization parameter for W.\r\n\r\n l2_reg_H : float, default=0.\r\n L2 regularization parameter for H.\r\n\r\n update_H : bool, default=True\r\n Set to True, both W and H will be estimated from initial guesses.\r\n Set to False, only W will be estimated.\r\n\r\n verbose : int, default=0\r\n The verbosity level.\r\n\r\n Returns\r\n -------\r\n W : ndarray of shape (n_samples, n_components)\r\n Solution to the non-negative least squares problem.\r\n\r\n H : ndarray of shape (n_components, n_features)\r\n Solution to the non-negative least squares problem.\r\n\r\n n_iter : int\r\n The number of iterations done by the algorithm.\r\n\r\n References\r\n ----------\r\n Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix\r\n factorization with the beta-divergence. Neural Computation, 23(9).\r\n \"\"\"\r\n start_time = time.time()\r\n\r\n beta_loss = _beta_loss_to_float(beta_loss)\r\n\r\n # gamma for Maximization-Minimization (MM) algorithm [Fevotte 2011]\r\n if beta_loss < 1:\r\n gamma = 1. / (2. - beta_loss)\r\n elif beta_loss > 2:\r\n gamma = 1. / (beta_loss - 1.)\r\n else:\r\n gamma = 1.\r\n\r\n # used for the convergence criterion\r\n error_at_init = _beta_divergence(X, W, H, beta_loss, square_root=True)\r\n previous_error = error_at_init\r\n\r\n H_sum, HHt, XHt = None, None, None\r\n for n_iter in range(1, max_iter + 1):\r\n # update W\r\n # H_sum, HHt and XHt are saved and reused if not update_H\r\n delta_W, H_sum, HHt, XHt = _multiplicative_update_w(\r\n X, W, H, beta_loss, l1_reg_W, l2_reg_W, gamma,\r\n H_sum, HHt, XHt, update_H)\r\n W *= delta_W\r\n\r\n # necessary for stability with beta_loss < 1\r\n if beta_loss < 1:\r\n W[W < np.finfo(np.float64).eps] = 0.\r\n\r\n # update H\r\n if update_H:\r\n delta_H = _multiplicative_update_h(X, W, H, beta_loss, l1_reg_H,\r\n l2_reg_H, gamma)\r\n H *= delta_H\r\n\r\n # These values will be recomputed since H changed\r\n H_sum, HHt, XHt = None, None, None\r\n\r\n # necessary for stability with beta_loss < 1\r\n if beta_loss <= 1:\r\n H[H < np.finfo(np.float64).eps] = 0.\r\n\r\n # test convergence criterion every 10 iterations\r\n if tol > 0 and n_iter % 10 == 0:\r\n error = _beta_divergence(X, W, H, beta_loss, square_root=True)\r\n\r\n if verbose:\r\n iter_time = time.time()\r\n print(\"Epoch %02d reached after %.3f seconds, error: %f\" %\r\n (n_iter, iter_time - start_time, error))\r\n\r\n if (previous_error - error) / error_at_init < tol:\r\n break\r\n previous_error = error\r\n\r\n # do not print if we have already printed in the convergence test\r\n if verbose and (tol == 0 or n_iter % 10 != 0):\r\n end_time = time.time()\r\n print(\"Epoch %02d reached after %.3f seconds.\" %\r\n (n_iter, end_time - start_time))\r\n\r\n return W, H, n_iter\r\n\r\n\r\n@_deprecate_positional_args\r\ndef non_negative_factorization(X, W=None, H=None, n_components=None, *,\r\n init='warn', update_H=True, solver='cd',\r\n beta_loss='frobenius', tol=1e-4,\r\n max_iter=200, alpha=0., l1_ratio=0.,\r\n regularization=None, random_state=None,\r\n verbose=0, shuffle=False):\r\n \"\"\"Compute Non-negative Matrix Factorization 
(NMF).\r\n\r\n Find two non-negative matrices (W, H) whose product approximates the non-\r\n negative matrix X. This factorization can be used for example for\r\n dimensionality reduction, source separation or topic extraction.\r\n\r\n The objective function is:\r\n\r\n .. math::\r\n\r\n 0.5 * ||X - WH||_{Fro}^2 + alpha * l1_{ratio} * ||vec(W)||_1\r\n\r\n + alpha * l1_{ratio} * ||vec(H)||_1\r\n\r\n + 0.5 * alpha * (1 - l1_{ratio}) * ||W||_{Fro}^2\r\n\r\n + 0.5 * alpha * (1 - l1_{ratio}) * ||H||_{Fro}^2\r\n\r\n Where:\r\n\r\n :math:`||A||_{Fro}^2 = \\\\sum_{i,j} A_{ij}^2` (Frobenius norm)\r\n\r\n :math:`||vec(A)||_1 = \\\\sum_{i,j} abs(A_{ij})` (Elementwise L1 norm)\r\n\r\n For multiplicative-update ('mu') solver, the Frobenius norm\r\n :math:`(0.5 * ||X - WH||_{Fro}^2)` can be changed into another\r\n beta-divergence loss, by changing the beta_loss parameter.\r\n\r\n The objective function is minimized with an alternating minimization of W\r\n and H. If H is given and update_H=False, it solves for W only.\r\n\r\n Parameters\r\n ----------\r\n X : array-like of shape (n_samples, n_features)\r\n Constant matrix.\r\n\r\n W : array-like of shape (n_samples, n_components), default=None\r\n If init='custom', it is used as initial guess for the solution.\r\n\r\n H : array-like of shape (n_components, n_features), default=None\r\n If init='custom', it is used as initial guess for the solution.\r\n If update_H=False, it is used as a constant, to solve for W only.\r\n\r\n n_components : int, default=None\r\n Number of components, if n_components is not set all features\r\n are kept.\r\n\r\n init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'}, default=None\r\n Method used to initialize the procedure.\r\n\r\n Valid options:\r\n\r\n - None: 'nndsvd' if n_components < n_features, otherwise 'random'.\r\n\r\n - 'random': non-negative random matrices, scaled with:\r\n sqrt(X.mean() / n_components)\r\n\r\n - 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)\r\n initialization (better for sparseness)\r\n\r\n - 'nndsvda': NNDSVD with zeros filled with the average of X\r\n (better when sparsity is not desired)\r\n\r\n - 'nndsvdar': NNDSVD with zeros filled with small random values\r\n (generally faster, less accurate alternative to NNDSVDa\r\n for when sparsity is not desired)\r\n\r\n - 'custom': use custom matrices W and H if `update_H=True`. If\r\n `update_H=False`, then only custom matrix H is used.\r\n\r\n .. versionchanged:: 0.23\r\n The default value of `init` changed from 'random' to None in 0.23.\r\n\r\n update_H : bool, default=True\r\n Set to True, both W and H will be estimated from initial guesses.\r\n Set to False, only W will be estimated.\r\n\r\n solver : {'cd', 'mu'}, default='cd'\r\n Numerical solver to use:\r\n\r\n - 'cd' is a Coordinate Descent solver that uses Fast Hierarchical\r\n Alternating Least Squares (Fast HALS).\r\n\r\n - 'mu' is a Multiplicative Update solver.\r\n\r\n .. versionadded:: 0.17\r\n Coordinate Descent solver.\r\n\r\n .. versionadded:: 0.19\r\n Multiplicative Update solver.\r\n\r\n beta_loss : float or {'frobenius', 'kullback-leibler', \\\r\n 'itakura-saito'}, default='frobenius'\r\n Beta divergence to be minimized, measuring the distance between X\r\n and the dot product WH. Note that values different from 'frobenius'\r\n (or 2) and 'kullback-leibler' (or 1) lead to significantly slower\r\n fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input\r\n matrix X cannot contain zeros. Used only in 'mu' solver.\r\n\r\n .. 
versionadded:: 0.19\r\n\r\n tol : float, default=1e-4\r\n Tolerance of the stopping condition.\r\n\r\n max_iter : int, default=200\r\n Maximum number of iterations before timing out.\r\n\r\n alpha : float, default=0.\r\n Constant that multiplies the regularization terms.\r\n\r\n l1_ratio : float, default=0.\r\n The regularization mixing parameter, with 0 <= l1_ratio <= 1.\r\n For l1_ratio = 0 the penalty is an elementwise L2 penalty\r\n (aka Frobenius Norm).\r\n For l1_ratio = 1 it is an elementwise L1 penalty.\r\n For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.\r\n\r\n regularization : {'both', 'components', 'transformation'}, default=None\r\n Select whether the regularization affects the components (H), the\r\n transformation (W), both or none of them.\r\n\r\n random_state : int, RandomState instance or None, default=None\r\n Used for NMF initialisation (when ``init`` == 'nndsvdar' or\r\n 'random'), and in Coordinate Descent. Pass an int for reproducible\r\n results across multiple function calls.\r\n See :term:`Glossary <random_state>`.\r\n\r\n verbose : int, default=0\r\n The verbosity level.\r\n\r\n shuffle : bool, default=False\r\n If true, randomize the order of coordinates in the CD solver.\r\n\r\n Returns\r\n -------\r\n W : ndarray of shape (n_samples, n_components)\r\n Solution to the non-negative least squares problem.\r\n\r\n H : ndarray of shape (n_components, n_features)\r\n Solution to the non-negative least squares problem.\r\n\r\n n_iter : int\r\n Actual number of iterations.\r\n\r\n Examples\r\n --------\r\n >>> import numpy as np\r\n >>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])\r\n >>> from sklearn.decomposition import non_negative_factorization\r\n >>> W, H, n_iter = non_negative_factorization(X, n_components=2,\r\n ... init='random', random_state=0)\r\n\r\n References\r\n ----------\r\n Cichocki, Andrzej, and P. H. A. N. Anh-Huy. \"Fast local algorithms for\r\n large scale nonnegative matrix and tensor factorizations.\"\r\n IEICE transactions on fundamentals of electronics, communications and\r\n computer sciences 92.3: 708-721, 2009.\r\n\r\n Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix\r\n factorization with the beta-divergence. Neural Computation, 23(9).\r\n \"\"\"\r\n X = check_array(X, accept_sparse=('csr', 'csc'),\r\n dtype=[np.float64, np.float32])\r\n check_non_negative(X, \"NMF (input X)\")\r\n beta_loss = _check_string_param(solver, regularization, beta_loss, init)\r\n\r\n if X.min() == 0 and beta_loss <= 0:\r\n raise ValueError(\"When beta_loss <= 0 and X contains zeros, \"\r\n \"the solver may diverge. 
Please add small values to \"\r\n \"X, or use a positive beta_loss.\")\r\n\r\n n_samples, n_features = X.shape\r\n if n_components is None:\r\n n_components = n_features\r\n\r\n if not isinstance(n_components, numbers.Integral) or n_components <= 0:\r\n raise ValueError(\"Number of components must be a positive integer;\"\r\n \" got (n_components=%r)\" % n_components)\r\n if not isinstance(max_iter, numbers.Integral) or max_iter < 0:\r\n raise ValueError(\"Maximum number of iterations must be a positive \"\r\n \"integer; got (max_iter=%r)\" % max_iter)\r\n if not isinstance(tol, numbers.Number) or tol < 0:\r\n raise ValueError(\"Tolerance for stopping criteria must be \"\r\n \"positive; got (tol=%r)\" % tol)\r\n\r\n # check W and H, or initialize them\r\n if init == 'custom' and update_H:\r\n _check_init(H, (n_components, n_features), \"NMF (input H)\")\r\n _check_init(W, (n_samples, n_components), \"NMF (input W)\")\r\n if H.dtype != X.dtype or W.dtype != X.dtype:\r\n raise TypeError(\"H and W should have the same dtype as X. Got \"\r\n \"H.dtype = {} and W.dtype = {}.\"\r\n .format(H.dtype, W.dtype))\r\n elif not update_H:\r\n _check_init(H, (n_components, n_features), \"NMF (input H)\")\r\n if H.dtype != X.dtype:\r\n raise TypeError(\"H should have the same dtype as X. Got H.dtype = \"\r\n \"{}.\".format(H.dtype))\r\n # 'mu' solver should not be initialized by zeros\r\n if solver == 'mu':\r\n avg = np.sqrt(X.mean() / n_components)\r\n W = np.full((n_samples, n_components), avg, dtype=X.dtype)\r\n else:\r\n W = np.zeros((n_samples, n_components), dtype=X.dtype)\r\n else:\r\n W, H = _initialize_nmf(X, n_components, init=init,\r\n random_state=random_state)\r\n\r\n l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H = _compute_regularization(\r\n alpha, l1_ratio, regularization)\r\n\r\n if solver == 'cd':\r\n W, H, n_iter = _fit_coordinate_descent(X, W, H, tol, max_iter,\r\n l1_reg_W, l1_reg_H,\r\n l2_reg_W, l2_reg_H,\r\n update_H=update_H,\r\n verbose=verbose,\r\n shuffle=shuffle,\r\n random_state=random_state)\r\n elif solver == 'mu':\r\n W, H, n_iter = _fit_multiplicative_update(X, W, H, beta_loss, max_iter,\r\n tol, l1_reg_W, l1_reg_H,\r\n l2_reg_W, l2_reg_H, update_H,\r\n verbose)\r\n\r\n else:\r\n raise ValueError(\"Invalid solver parameter '%s'.\" % solver)\r\n\r\n if n_iter == max_iter and tol > 0:\r\n warnings.warn(\"Maximum number of iterations %d reached. Increase it to\"\r\n \" improve convergence.\" % max_iter, ConvergenceWarning)\r\n\r\n return W, H, n_iter\r\n\r\n\r\nclass NMF(TransformerMixin, BaseEstimator):\r\n \"\"\"Non-Negative Matrix Factorization (NMF).\r\n\r\n Find two non-negative matrices (W, H) whose product approximates the non-\r\n negative matrix X. This factorization can be used for example for\r\n dimensionality reduction, source separation or topic extraction.\r\n\r\n The objective function is:\r\n\r\n .. 
math::\r\n\r\n 0.5 * ||X - WH||_{Fro}^2 + alpha * l1_{ratio} * ||vec(W)||_1\r\n\r\n + alpha * l1_{ratio} * ||vec(H)||_1\r\n\r\n + 0.5 * alpha * (1 - l1_{ratio}) * ||W||_{Fro}^2\r\n\r\n + 0.5 * alpha * (1 - l1_{ratio}) * ||H||_{Fro}^2\r\n\r\n Where:\r\n\r\n :math:`||A||_{Fro}^2 = \\\\sum_{i,j} A_{ij}^2` (Frobenius norm)\r\n\r\n :math:`||vec(A)||_1 = \\\\sum_{i,j} abs(A_{ij})` (Elementwise L1 norm)\r\n\r\n For multiplicative-update ('mu') solver, the Frobenius norm\r\n (:math:`0.5 * ||X - WH||_{Fro}^2`) can be changed into another\r\n beta-divergence loss, by changing the beta_loss parameter.\r\n\r\n The objective function is minimized with an alternating minimization of W\r\n and H.\r\n\r\n Read more in the :ref:`User Guide <NMF>`.\r\n\r\n Parameters\r\n ----------\r\n n_components : int, default=None\r\n Number of components, if n_components is not set all features\r\n are kept.\r\n\r\n init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'}, default=None\r\n Method used to initialize the procedure.\r\n Default: None.\r\n Valid options:\r\n\r\n - `None`: 'nndsvd' if n_components <= min(n_samples, n_features),\r\n otherwise random.\r\n\r\n - `'random'`: non-negative random matrices, scaled with:\r\n sqrt(X.mean() / n_components)\r\n\r\n - `'nndsvd'`: Nonnegative Double Singular Value Decomposition (NNDSVD)\r\n initialization (better for sparseness)\r\n\r\n - `'nndsvda'`: NNDSVD with zeros filled with the average of X\r\n (better when sparsity is not desired)\r\n\r\n - `'nndsvdar'` NNDSVD with zeros filled with small random values\r\n (generally faster, less accurate alternative to NNDSVDa\r\n for when sparsity is not desired)\r\n\r\n - `'custom'`: use custom matrices W and H\r\n\r\n solver : {'cd', 'mu'}, default='cd'\r\n Numerical solver to use:\r\n 'cd' is a Coordinate Descent solver.\r\n 'mu' is a Multiplicative Update solver.\r\n\r\n .. versionadded:: 0.17\r\n Coordinate Descent solver.\r\n\r\n .. versionadded:: 0.19\r\n Multiplicative Update solver.\r\n\r\n beta_loss : float or {'frobenius', 'kullback-leibler', \\\r\n 'itakura-saito'}, default='frobenius'\r\n Beta divergence to be minimized, measuring the distance between X\r\n and the dot product WH. Note that values different from 'frobenius'\r\n (or 2) and 'kullback-leibler' (or 1) lead to significantly slower\r\n fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input\r\n matrix X cannot contain zeros. Used only in 'mu' solver.\r\n\r\n .. versionadded:: 0.19\r\n\r\n tol : float, default=1e-4\r\n Tolerance of the stopping condition.\r\n\r\n max_iter : int, default=200\r\n Maximum number of iterations before timing out.\r\n\r\n random_state : int, RandomState instance or None, default=None\r\n Used for initialisation (when ``init`` == 'nndsvdar' or\r\n 'random'), and in Coordinate Descent. Pass an int for reproducible\r\n results across multiple function calls.\r\n See :term:`Glossary <random_state>`.\r\n\r\n alpha : float, default=0.\r\n Constant that multiplies the regularization terms. Set it to zero to\r\n have no regularization.\r\n\r\n .. versionadded:: 0.17\r\n *alpha* used in the Coordinate Descent solver.\r\n\r\n l1_ratio : float, default=0.\r\n The regularization mixing parameter, with 0 <= l1_ratio <= 1.\r\n For l1_ratio = 0 the penalty is an elementwise L2 penalty\r\n (aka Frobenius Norm).\r\n For l1_ratio = 1 it is an elementwise L1 penalty.\r\n For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.\r\n\r\n .. 
versionadded:: 0.17\r\n Regularization parameter *l1_ratio* used in the Coordinate Descent\r\n solver.\r\n\r\n verbose : int, default=0\r\n Whether to be verbose.\r\n\r\n shuffle : bool, default=False\r\n If true, randomize the order of coordinates in the CD solver.\r\n\r\n .. versionadded:: 0.17\r\n *shuffle* parameter used in the Coordinate Descent solver.\r\n\r\n regularization : {'both', 'components', 'transformation', None}, \\\r\n default='both'\r\n Select whether the regularization affects the components (H), the\r\n transformation (W), both or none of them.\r\n\r\n .. versionadded:: 0.24\r\n\r\n Attributes\r\n ----------\r\n components_ : ndarray of shape (n_components, n_features)\r\n Factorization matrix, sometimes called 'dictionary'.\r\n\r\n n_components_ : int\r\n The number of components. It is same as the `n_components` parameter\r\n if it was given. Otherwise, it will be same as the number of\r\n features.\r\n\r\n reconstruction_err_ : float\r\n Frobenius norm of the matrix difference, or beta-divergence, between\r\n the training data ``X`` and the reconstructed data ``WH`` from\r\n the fitted model.\r\n\r\n n_iter_ : int\r\n Actual number of iterations.\r\n\r\n Examples\r\n --------\r\n >>> import numpy as np\r\n >>> X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])\r\n >>> from sklearn.decomposition import NMF\r\n >>> model = NMF(n_components=2, init='random', random_state=0)\r\n >>> W = model.fit_transform(X)\r\n >>> H = model.components_\r\n\r\n References\r\n ----------\r\n Cichocki, Andrzej, and P. H. A. N. Anh-Huy. \"Fast local algorithms for\r\n large scale nonnegative matrix and tensor factorizations.\"\r\n IEICE transactions on fundamentals of electronics, communications and\r\n computer sciences 92.3: 708-721, 2009.\r\n\r\n Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix\r\n factorization with the beta-divergence. 
Neural Computation, 23(9).\r\n \"\"\"\r\n @_deprecate_positional_args\r\n def __init__(self, n_components=None, *, init='warn', solver='cd',\r\n beta_loss='frobenius', tol=1e-4, max_iter=200,\r\n random_state=None, alpha=0., l1_ratio=0., verbose=0,\r\n shuffle=False, regularization='both'):\r\n self.n_components = n_components\r\n self.init = init\r\n self.solver = solver\r\n self.beta_loss = beta_loss\r\n self.tol = tol\r\n self.max_iter = max_iter\r\n self.random_state = random_state\r\n self.alpha = alpha\r\n self.l1_ratio = l1_ratio\r\n self.verbose = verbose\r\n self.shuffle = shuffle\r\n self.regularization = regularization\r\n\r\n def _more_tags(self):\r\n return {'requires_positive_X': True}\r\n\r\n def fit_transform(self, X, y=None, W=None, H=None):\r\n \"\"\"Learn a NMF model for the data X and returns the transformed data.\r\n\r\n This is more efficient than calling fit followed by transform.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\r\n Data matrix to be decomposed\r\n\r\n y : Ignored\r\n\r\n W : array-like of shape (n_samples, n_components)\r\n If init='custom', it is used as initial guess for the solution.\r\n\r\n H : array-like of shape (n_components, n_features)\r\n If init='custom', it is used as initial guess for the solution.\r\n\r\n Returns\r\n -------\r\n W : ndarray of shape (n_samples, n_components)\r\n Transformed data.\r\n \"\"\"\r\n X = self._validate_data(X, accept_sparse=('csr', 'csc'),\r\n dtype=[np.float64, np.float32])\r\n\r\n with config_context(assume_finite=True):\r\n W, H, n_iter_ = non_negative_factorization(\r\n X=X, W=W, H=H, n_components=self.n_components, init=self.init,\r\n update_H=True, solver=self.solver, beta_loss=self.beta_loss,\r\n tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,\r\n l1_ratio=self.l1_ratio, regularization=self.regularization,\r\n random_state=self.random_state, verbose=self.verbose,\r\n shuffle=self.shuffle)\r\n\r\n self.reconstruction_err_ = _beta_divergence(X, W, H, self.beta_loss,\r\n square_root=True)\r\n\r\n self.n_components_ = H.shape[0]\r\n self.components_ = H\r\n self.n_iter_ = n_iter_\r\n\r\n return W\r\n\r\n def fit(self, X, y=None, **params):\r\n \"\"\"Learn a NMF model for the data X.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\r\n Data matrix to be decomposed\r\n\r\n y : Ignored\r\n\r\n Returns\r\n -------\r\n self\r\n \"\"\"\r\n self.fit_transform(X, **params)\r\n return self\r\n\r\n def transform(self, X):\r\n \"\"\"Transform the data X according to the fitted NMF model.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\r\n Data matrix to be transformed by the model.\r\n\r\n Returns\r\n -------\r\n W : ndarray of shape (n_samples, n_components)\r\n Transformed data.\r\n \"\"\"\r\n check_is_fitted(self)\r\n X = self._validate_data(X, accept_sparse=('csr', 'csc'),\r\n dtype=[np.float64, np.float32],\r\n reset=False)\r\n\r\n with config_context(assume_finite=True):\r\n W, _, n_iter_ = non_negative_factorization(\r\n X=X, W=None, H=self.components_,\r\n n_components=self.n_components_,\r\n init=self.init, update_H=False, solver=self.solver,\r\n beta_loss=self.beta_loss, tol=self.tol, max_iter=self.max_iter,\r\n alpha=self.alpha, l1_ratio=self.l1_ratio,\r\n regularization=self.regularization,\r\n random_state=self.random_state,\r\n verbose=self.verbose, shuffle=self.shuffle)\r\n\r\n return W\r\n\r\n def inverse_transform(self, W):\r\n 
\"\"\"Transform data back to its original space.\r\n\r\n Parameters\r\n ----------\r\n W : {ndarray, sparse matrix} of shape (n_samples, n_components)\r\n Transformed data matrix.\r\n\r\n Returns\r\n -------\r\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\r\n Data matrix of original shape.\r\n\r\n .. versionadded:: 0.18\r\n \"\"\"\r\n check_is_fitted(self)\r\n return np.dot(W, self.components_)\r\n", "import warnings\r\nimport unittest\r\nimport sys\r\nimport os\r\nimport atexit\r\n\r\nimport numpy as np\r\n\r\nfrom scipy import sparse\r\n\r\nimport pytest\r\n\r\nfrom sklearn.utils.deprecation import deprecated\r\nfrom sklearn.utils.metaestimators import if_delegate_has_method\r\nfrom sklearn.utils._testing import (\r\n assert_raises,\r\n assert_warns,\r\n assert_no_warnings,\r\n set_random_state,\r\n assert_raise_message,\r\n ignore_warnings,\r\n check_docstring_parameters,\r\n assert_allclose_dense_sparse,\r\n assert_raises_regex,\r\n TempMemmap,\r\n create_memmap_backed_data,\r\n _delete_folder,\r\n _convert_container,\r\n raises,\r\n)\r\n\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\r\n\r\n\r\ndef test_set_random_state():\r\n lda = LinearDiscriminantAnalysis()\r\n tree = DecisionTreeClassifier()\r\n # Linear Discriminant Analysis doesn't have random state: smoke test\r\n set_random_state(lda, 3)\r\n set_random_state(tree, 3)\r\n assert tree.random_state == 3\r\n\r\n\r\ndef test_assert_allclose_dense_sparse():\r\n x = np.arange(9).reshape(3, 3)\r\n msg = \"Not equal to tolerance \"\r\n y = sparse.csc_matrix(x)\r\n for X in [x, y]:\r\n # basic compare\r\n with pytest.raises(AssertionError, match=msg):\r\n assert_allclose_dense_sparse(X, X*2)\r\n assert_allclose_dense_sparse(X, X)\r\n\r\n with pytest.raises(ValueError, match=\"Can only compare two sparse\"):\r\n assert_allclose_dense_sparse(x, y)\r\n\r\n A = sparse.diags(np.ones(5), offsets=0).tocsr()\r\n B = sparse.csr_matrix(np.ones((1, 5)))\r\n with pytest.raises(AssertionError, match=\"Arrays are not equal\"):\r\n assert_allclose_dense_sparse(B, A)\r\n\r\n\r\ndef test_assert_raises_msg():\r\n with assert_raises_regex(AssertionError, 'Hello world'):\r\n with assert_raises(ValueError, msg='Hello world'):\r\n pass\r\n\r\n\r\ndef test_assert_raise_message():\r\n def _raise_ValueError(message):\r\n raise ValueError(message)\r\n\r\n def _no_raise():\r\n pass\r\n\r\n assert_raise_message(ValueError, \"test\",\r\n _raise_ValueError, \"test\")\r\n\r\n assert_raises(AssertionError,\r\n assert_raise_message, ValueError, \"something else\",\r\n _raise_ValueError, \"test\")\r\n\r\n assert_raises(ValueError,\r\n assert_raise_message, TypeError, \"something else\",\r\n _raise_ValueError, \"test\")\r\n\r\n assert_raises(AssertionError,\r\n assert_raise_message, ValueError, \"test\",\r\n _no_raise)\r\n\r\n # multiple exceptions in a tuple\r\n assert_raises(AssertionError,\r\n assert_raise_message, (ValueError, AttributeError),\r\n \"test\", _no_raise)\r\n\r\n\r\ndef test_ignore_warning():\r\n # This check that ignore_warning decorateur and context manager are working\r\n # as expected\r\n def _warning_function():\r\n warnings.warn(\"deprecation warning\", DeprecationWarning)\r\n\r\n def _multiple_warning_function():\r\n warnings.warn(\"deprecation warning\", DeprecationWarning)\r\n warnings.warn(\"deprecation warning\")\r\n\r\n # Check the function directly\r\n assert_no_warnings(ignore_warnings(_warning_function))\r\n 
assert_no_warnings(ignore_warnings(_warning_function,\r\n category=DeprecationWarning))\r\n assert_warns(DeprecationWarning, ignore_warnings(_warning_function,\r\n category=UserWarning))\r\n assert_warns(UserWarning,\r\n ignore_warnings(_multiple_warning_function,\r\n category=FutureWarning))\r\n assert_warns(DeprecationWarning,\r\n ignore_warnings(_multiple_warning_function,\r\n category=UserWarning))\r\n assert_no_warnings(ignore_warnings(_warning_function,\r\n category=(DeprecationWarning,\r\n UserWarning)))\r\n\r\n # Check the decorator\r\n @ignore_warnings\r\n def decorator_no_warning():\r\n _warning_function()\r\n _multiple_warning_function()\r\n\r\n @ignore_warnings(category=(DeprecationWarning, UserWarning))\r\n def decorator_no_warning_multiple():\r\n _multiple_warning_function()\r\n\r\n @ignore_warnings(category=DeprecationWarning)\r\n def decorator_no_deprecation_warning():\r\n _warning_function()\r\n\r\n @ignore_warnings(category=UserWarning)\r\n def decorator_no_user_warning():\r\n _warning_function()\r\n\r\n @ignore_warnings(category=DeprecationWarning)\r\n def decorator_no_deprecation_multiple_warning():\r\n _multiple_warning_function()\r\n\r\n @ignore_warnings(category=UserWarning)\r\n def decorator_no_user_multiple_warning():\r\n _multiple_warning_function()\r\n\r\n assert_no_warnings(decorator_no_warning)\r\n assert_no_warnings(decorator_no_warning_multiple)\r\n assert_no_warnings(decorator_no_deprecation_warning)\r\n assert_warns(DeprecationWarning, decorator_no_user_warning)\r\n assert_warns(UserWarning, decorator_no_deprecation_multiple_warning)\r\n assert_warns(DeprecationWarning, decorator_no_user_multiple_warning)\r\n\r\n # Check the context manager\r\n def context_manager_no_warning():\r\n with ignore_warnings():\r\n _warning_function()\r\n\r\n def context_manager_no_warning_multiple():\r\n with ignore_warnings(category=(DeprecationWarning, UserWarning)):\r\n _multiple_warning_function()\r\n\r\n def context_manager_no_deprecation_warning():\r\n with ignore_warnings(category=DeprecationWarning):\r\n _warning_function()\r\n\r\n def context_manager_no_user_warning():\r\n with ignore_warnings(category=UserWarning):\r\n _warning_function()\r\n\r\n def context_manager_no_deprecation_multiple_warning():\r\n with ignore_warnings(category=DeprecationWarning):\r\n _multiple_warning_function()\r\n\r\n def context_manager_no_user_multiple_warning():\r\n with ignore_warnings(category=UserWarning):\r\n _multiple_warning_function()\r\n\r\n assert_no_warnings(context_manager_no_warning)\r\n assert_no_warnings(context_manager_no_warning_multiple)\r\n assert_no_warnings(context_manager_no_deprecation_warning)\r\n assert_warns(DeprecationWarning, context_manager_no_user_warning)\r\n assert_warns(UserWarning, context_manager_no_deprecation_multiple_warning)\r\n assert_warns(DeprecationWarning, context_manager_no_user_multiple_warning)\r\n\r\n # Check that passing warning class as first positional argument\r\n warning_class = UserWarning\r\n match = \"'obj' should be a callable.+you should use 'category=UserWarning'\"\r\n\r\n with pytest.raises(ValueError, match=match):\r\n silence_warnings_func = ignore_warnings(warning_class)(\r\n _warning_function)\r\n silence_warnings_func()\r\n\r\n with pytest.raises(ValueError, match=match):\r\n @ignore_warnings(warning_class)\r\n def test():\r\n pass\r\n\r\n\r\nclass TestWarns(unittest.TestCase):\r\n def test_warn(self):\r\n def f():\r\n warnings.warn(\"yo\")\r\n return 3\r\n\r\n with warnings.catch_warnings():\r\n 
warnings.simplefilter(\"ignore\", UserWarning)\r\n filters_orig = warnings.filters[:]\r\n assert assert_warns(UserWarning, f) == 3\r\n # test that assert_warns doesn't have side effects on warnings\r\n # filters\r\n assert warnings.filters == filters_orig\r\n with pytest.raises(AssertionError):\r\n assert_no_warnings(f)\r\n assert assert_no_warnings(lambda x: x, 1) == 1\r\n\r\n def test_warn_wrong_warning(self):\r\n def f():\r\n warnings.warn(\"yo\", FutureWarning)\r\n\r\n failed = False\r\n filters = sys.modules['warnings'].filters[:]\r\n try:\r\n try:\r\n # Should raise an AssertionError\r\n\r\n # assert_warns has a special handling of \"FutureWarning\" that\r\n # pytest.warns does not have\r\n assert_warns(UserWarning, f)\r\n failed = True\r\n except AssertionError:\r\n pass\r\n finally:\r\n sys.modules['warnings'].filters = filters\r\n\r\n if failed:\r\n raise AssertionError(\"wrong warning caught by assert_warn\")\r\n\r\n\r\n# Tests for docstrings:\r\n\r\ndef f_ok(a, b):\r\n \"\"\"Function f\r\n\r\n Parameters\r\n ----------\r\n a : int\r\n Parameter a\r\n b : float\r\n Parameter b\r\n\r\n Returns\r\n -------\r\n c : list\r\n Parameter c\r\n \"\"\"\r\n c = a + b\r\n return c\r\n\r\n\r\ndef f_bad_sections(a, b):\r\n \"\"\"Function f\r\n\r\n Parameters\r\n ----------\r\n a : int\r\n Parameter a\r\n b : float\r\n Parameter b\r\n\r\n Results\r\n -------\r\n c : list\r\n Parameter c\r\n \"\"\"\r\n c = a + b\r\n return c\r\n\r\n\r\ndef f_bad_order(b, a):\r\n \"\"\"Function f\r\n\r\n Parameters\r\n ----------\r\n a : int\r\n Parameter a\r\n b : float\r\n Parameter b\r\n\r\n Returns\r\n -------\r\n c : list\r\n Parameter c\r\n \"\"\"\r\n c = a + b\r\n return c\r\n\r\n\r\ndef f_too_many_param_docstring(a, b):\r\n \"\"\"Function f\r\n\r\n Parameters\r\n ----------\r\n a : int\r\n Parameter a\r\n b : int\r\n Parameter b\r\n c : int\r\n Parameter c\r\n\r\n Returns\r\n -------\r\n d : list\r\n Parameter c\r\n \"\"\"\r\n d = a + b\r\n return d\r\n\r\n\r\ndef f_missing(a, b):\r\n \"\"\"Function f\r\n\r\n Parameters\r\n ----------\r\n a : int\r\n Parameter a\r\n\r\n Returns\r\n -------\r\n c : list\r\n Parameter c\r\n \"\"\"\r\n c = a + b\r\n return c\r\n\r\n\r\ndef f_check_param_definition(a, b, c, d, e):\r\n \"\"\"Function f\r\n\r\n Parameters\r\n ----------\r\n a: int\r\n Parameter a\r\n b:\r\n Parameter b\r\n c :\r\n Parameter c\r\n d:int\r\n Parameter d\r\n e\r\n No typespec is allowed without colon\r\n \"\"\"\r\n return a + b + c + d\r\n\r\n\r\nclass Klass:\r\n def f_missing(self, X, y):\r\n pass\r\n\r\n def f_bad_sections(self, X, y):\r\n \"\"\"Function f\r\n\r\n Parameter\r\n ----------\r\n a : int\r\n Parameter a\r\n b : float\r\n Parameter b\r\n\r\n Results\r\n -------\r\n c : list\r\n Parameter c\r\n \"\"\"\r\n pass\r\n\r\n\r\nclass MockEst:\r\n def __init__(self):\r\n \"\"\"MockEstimator\"\"\"\r\n def fit(self, X, y):\r\n return X\r\n\r\n def predict(self, X):\r\n return X\r\n\r\n def predict_proba(self, X):\r\n return X\r\n\r\n def score(self, X):\r\n return 1.\r\n\r\n\r\nclass MockMetaEstimator:\r\n def __init__(self, delegate):\r\n \"\"\"MetaEstimator to check if doctest on delegated methods work.\r\n\r\n Parameters\r\n ---------\r\n delegate : estimator\r\n Delegated estimator.\r\n \"\"\"\r\n self.delegate = delegate\r\n\r\n @if_delegate_has_method(delegate=('delegate'))\r\n def predict(self, X):\r\n \"\"\"This is available only if delegate has predict.\r\n\r\n Parameters\r\n ----------\r\n y : ndarray\r\n Parameter y\r\n \"\"\"\r\n return self.delegate.predict(X)\r\n\r\n 
@if_delegate_has_method(delegate=('delegate'))\r\n @deprecated(\"Testing a deprecated delegated method\")\r\n def score(self, X):\r\n \"\"\"This is available only if delegate has score.\r\n\r\n Parameters\r\n ---------\r\n y : ndarray\r\n Parameter y\r\n \"\"\"\r\n\r\n @if_delegate_has_method(delegate=('delegate'))\r\n def predict_proba(self, X):\r\n \"\"\"This is available only if delegate has predict_proba.\r\n\r\n Parameters\r\n ---------\r\n X : ndarray\r\n Parameter X\r\n \"\"\"\r\n return X\r\n\r\n @deprecated('Testing deprecated function with wrong params')\r\n def fit(self, X, y):\r\n \"\"\"Incorrect docstring but should not be tested\"\"\"\r\n\r\n\r\ndef test_check_docstring_parameters():\r\n pytest.importorskip('numpydoc',\r\n reason=\"numpydoc is required to test the docstrings\")\r\n\r\n incorrect = check_docstring_parameters(f_ok)\r\n assert incorrect == []\r\n incorrect = check_docstring_parameters(f_ok, ignore=['b'])\r\n assert incorrect == []\r\n incorrect = check_docstring_parameters(f_missing, ignore=['b'])\r\n assert incorrect == []\r\n with pytest.raises(RuntimeError, match=\"Unknown section Results\"):\r\n check_docstring_parameters(f_bad_sections)\r\n with pytest.raises(RuntimeError, match=\"Unknown section Parameter\"):\r\n check_docstring_parameters(Klass.f_bad_sections)\r\n\r\n incorrect = check_docstring_parameters(f_check_param_definition)\r\n assert (\r\n incorrect == [\r\n \"sklearn.utils.tests.test_testing.f_check_param_definition There \"\r\n \"was no space between the param name and colon ('a: int')\",\r\n\r\n \"sklearn.utils.tests.test_testing.f_check_param_definition There \"\r\n \"was no space between the param name and colon ('b:')\",\r\n\r\n \"sklearn.utils.tests.test_testing.f_check_param_definition \"\r\n \"Parameter 'c :' has an empty type spec. Remove the colon\",\r\n\r\n \"sklearn.utils.tests.test_testing.f_check_param_definition There \"\r\n \"was no space between the param name and colon ('d:int')\",\r\n ])\r\n\r\n messages = [\r\n [\"In function: sklearn.utils.tests.test_testing.f_bad_order\",\r\n \"There's a parameter name mismatch in function docstring w.r.t.\"\r\n \" function signature, at index 0 diff: 'b' != 'a'\",\r\n \"Full diff:\",\r\n \"- ['b', 'a']\",\r\n \"+ ['a', 'b']\"],\r\n\r\n [\"In function: \" +\r\n \"sklearn.utils.tests.test_testing.f_too_many_param_docstring\",\r\n \"Parameters in function docstring have more items w.r.t. function\"\r\n \" signature, first extra item: c\",\r\n \"Full diff:\",\r\n \"- ['a', 'b']\",\r\n \"+ ['a', 'b', 'c']\",\r\n \"? +++++\"],\r\n\r\n [\"In function: sklearn.utils.tests.test_testing.f_missing\",\r\n \"Parameters in function docstring have less items w.r.t. function\"\r\n \" signature, first missing item: b\",\r\n \"Full diff:\",\r\n \"- ['a', 'b']\",\r\n \"+ ['a']\"],\r\n\r\n [\"In function: sklearn.utils.tests.test_testing.Klass.f_missing\",\r\n \"Parameters in function docstring have less items w.r.t. function\"\r\n \" signature, first missing item: X\",\r\n \"Full diff:\",\r\n \"- ['X', 'y']\",\r\n \"+ []\"],\r\n\r\n [\"In function: \" +\r\n \"sklearn.utils.tests.test_testing.MockMetaEstimator.predict\",\r\n \"There's a parameter name mismatch in function docstring w.r.t.\"\r\n \" function signature, at index 0 diff: 'X' != 'y'\",\r\n \"Full diff:\",\r\n \"- ['X']\",\r\n \"? ^\",\r\n \"+ ['y']\",\r\n \"? ^\"],\r\n\r\n [\"In function: \" +\r\n \"sklearn.utils.tests.test_testing.MockMetaEstimator.\"\r\n + \"predict_proba\",\r\n \"Parameters in function docstring have less items w.r.t. 
function\"\r\n \" signature, first missing item: X\",\r\n \"Full diff:\",\r\n \"- ['X']\",\r\n \"+ []\"],\r\n\r\n [\"In function: \" +\r\n \"sklearn.utils.tests.test_testing.MockMetaEstimator.score\",\r\n \"Parameters in function docstring have less items w.r.t. function\"\r\n \" signature, first missing item: X\",\r\n \"Full diff:\",\r\n \"- ['X']\",\r\n \"+ []\"],\r\n\r\n [\"In function: \" +\r\n \"sklearn.utils.tests.test_testing.MockMetaEstimator.fit\",\r\n \"Parameters in function docstring have less items w.r.t. function\"\r\n \" signature, first missing item: X\",\r\n \"Full diff:\",\r\n \"- ['X', 'y']\",\r\n \"+ []\"],\r\n\r\n ]\r\n\r\n mock_meta = MockMetaEstimator(delegate=MockEst())\r\n\r\n for msg, f in zip(messages,\r\n [f_bad_order,\r\n f_too_many_param_docstring,\r\n f_missing,\r\n Klass.f_missing,\r\n mock_meta.predict,\r\n mock_meta.predict_proba,\r\n mock_meta.score,\r\n mock_meta.fit]):\r\n incorrect = check_docstring_parameters(f)\r\n assert msg == incorrect, ('\\n\"%s\"\\n not in \\n\"%s\"' % (msg, incorrect))\r\n\r\n\r\nclass RegistrationCounter:\r\n def __init__(self):\r\n self.nb_calls = 0\r\n\r\n def __call__(self, to_register_func):\r\n self.nb_calls += 1\r\n assert to_register_func.func is _delete_folder\r\n\r\n\r\ndef check_memmap(input_array, mmap_data, mmap_mode='r'):\r\n assert isinstance(mmap_data, np.memmap)\r\n writeable = mmap_mode != 'r'\r\n assert mmap_data.flags.writeable is writeable\r\n np.testing.assert_array_equal(input_array, mmap_data)\r\n\r\n\r\ndef test_tempmemmap(monkeypatch):\r\n registration_counter = RegistrationCounter()\r\n monkeypatch.setattr(atexit, 'register', registration_counter)\r\n\r\n input_array = np.ones(3)\r\n with TempMemmap(input_array) as data:\r\n check_memmap(input_array, data)\r\n temp_folder = os.path.dirname(data.filename)\r\n if os.name != 'nt':\r\n assert not os.path.exists(temp_folder)\r\n assert registration_counter.nb_calls == 1\r\n\r\n mmap_mode = 'r+'\r\n with TempMemmap(input_array, mmap_mode=mmap_mode) as data:\r\n check_memmap(input_array, data, mmap_mode=mmap_mode)\r\n temp_folder = os.path.dirname(data.filename)\r\n if os.name != 'nt':\r\n assert not os.path.exists(temp_folder)\r\n assert registration_counter.nb_calls == 2\r\n\r\n\r\ndef test_create_memmap_backed_data(monkeypatch):\r\n registration_counter = RegistrationCounter()\r\n monkeypatch.setattr(atexit, 'register', registration_counter)\r\n\r\n input_array = np.ones(3)\r\n data = create_memmap_backed_data(input_array)\r\n check_memmap(input_array, data)\r\n assert registration_counter.nb_calls == 1\r\n\r\n data, folder = create_memmap_backed_data(input_array,\r\n return_folder=True)\r\n check_memmap(input_array, data)\r\n assert folder == os.path.dirname(data.filename)\r\n assert registration_counter.nb_calls == 2\r\n\r\n mmap_mode = 'r+'\r\n data = create_memmap_backed_data(input_array, mmap_mode=mmap_mode)\r\n check_memmap(input_array, data, mmap_mode)\r\n assert registration_counter.nb_calls == 3\r\n\r\n input_list = [input_array, input_array + 1, input_array + 2]\r\n mmap_data_list = create_memmap_backed_data(input_list)\r\n for input_array, data in zip(input_list, mmap_data_list):\r\n check_memmap(input_array, data)\r\n assert registration_counter.nb_calls == 4\r\n\r\n\r\[email protected](\r\n \"constructor_name, container_type\",\r\n [('list', list),\r\n ('tuple', tuple),\r\n ('array', np.ndarray),\r\n ('sparse', sparse.csr_matrix),\r\n ('dataframe', pytest.importorskip('pandas').DataFrame),\r\n ('series', 
pytest.importorskip('pandas').Series),\r\n ('index', pytest.importorskip('pandas').Index),\r\n ('slice', slice)]\r\n)\r\ndef test_convert_container(constructor_name, container_type):\r\n container = [0, 1]\r\n assert isinstance(_convert_container(container, constructor_name),\r\n container_type)\r\n\r\n\r\ndef test_raises():\r\n # Tests for the raises context manager\r\n\r\n # Proper type, no match\r\n with raises(TypeError):\r\n raise TypeError()\r\n\r\n # Proper type, proper match\r\n with raises(TypeError, match=\"how are you\") as cm:\r\n raise TypeError(\"hello how are you\")\r\n assert cm.raised_and_matched\r\n\r\n # Proper type, proper match with multiple patterns\r\n with raises(TypeError, match=[\"not this one\", \"how are you\"]) as cm:\r\n raise TypeError(\"hello how are you\")\r\n assert cm.raised_and_matched\r\n\r\n # bad type, no match\r\n with pytest.raises(ValueError, match=\"this will be raised\"):\r\n with raises(TypeError) as cm:\r\n raise ValueError(\"this will be raised\")\r\n assert not cm.raised_and_matched\r\n\r\n # Bad type, no match, with a err_msg\r\n with pytest.raises(AssertionError, match=\"the failure message\"):\r\n with raises(TypeError, err_msg=\"the failure message\") as cm:\r\n raise ValueError()\r\n assert not cm.raised_and_matched\r\n\r\n # bad type, with match (is ignored anyway)\r\n with pytest.raises(ValueError, match=\"this will be raised\"):\r\n with raises(TypeError, match=\"this is ignored\") as cm:\r\n raise ValueError(\"this will be raised\")\r\n assert not cm.raised_and_matched\r\n\r\n # proper type but bad match\r\n with pytest.raises(\r\n AssertionError, match=\"should contain one of the following patterns\"\r\n ):\r\n with raises(TypeError, match=\"hello\") as cm:\r\n raise TypeError(\"Bad message\")\r\n assert not cm.raised_and_matched\r\n\r\n # proper type but bad match, with err_msg\r\n with pytest.raises(AssertionError, match=\"the failure message\"):\r\n with raises(\r\n TypeError, match=\"hello\", err_msg=\"the failure message\"\r\n ) as cm:\r\n raise TypeError(\"Bad message\")\r\n assert not cm.raised_and_matched\r\n\r\n # no raise with default may_pass=False\r\n with pytest.raises(AssertionError, match=\"Did not raise\"):\r\n with raises(TypeError) as cm:\r\n pass\r\n assert not cm.raised_and_matched\r\n\r\n # no raise with may_pass=True\r\n with raises(TypeError, match=\"hello\", may_pass=True) as cm:\r\n pass # still OK\r\n assert not cm.raised_and_matched\r\n\r\n # Multiple exception types:\r\n with raises((TypeError, ValueError)):\r\n raise TypeError()\r\n with raises((TypeError, ValueError)):\r\n raise ValueError()\r\n with pytest.raises(AssertionError):\r\n with raises((TypeError, ValueError)):\r\n pass\r\n", "import numpy as np\r\nimport pytest\r\n\r\nimport pandas as pd\r\nimport pandas._testing as tm\r\n\r\n\r\[email protected](\"sort\", [True, False])\r\ndef test_factorize(index_or_series_obj, sort):\r\n obj = index_or_series_obj\r\n result_codes, result_uniques = obj.factorize(sort=sort)\r\n\r\n constructor = pd.Index\r\n if isinstance(obj, pd.MultiIndex):\r\n constructor = pd.MultiIndex.from_tuples\r\n expected_uniques = constructor(obj.unique())\r\n\r\n if sort:\r\n expected_uniques = expected_uniques.sort_values()\r\n\r\n # construct an integer ndarray so that\r\n # `expected_uniques.take(expected_codes)` is equal to `obj`\r\n expected_uniques_list = list(expected_uniques)\r\n expected_codes = [expected_uniques_list.index(val) for val in obj]\r\n expected_codes = np.asarray(expected_codes, dtype=np.intp)\r\n\r\n 
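# --- Illustrative sketch, standalone; not part of the test above. ---
# factorize() returns integer codes plus the unique values so that
# uniques.take(codes) reconstructs the original data, which is exactly the
# invariant the expected_codes construction above encodes.
import pandas as pd

ser = pd.Series(["b", "a", "b", "c"])
codes, uniques = ser.factorize()
# codes   -> array([0, 1, 0, 2]); first-seen order when sort=False (default)
# uniques -> Index(['b', 'a', 'c'], dtype='object')
assert list(uniques.take(codes)) == list(ser)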
tm.assert_numpy_array_equal(result_codes, expected_codes)\r\n tm.assert_index_equal(result_uniques, expected_uniques)\r\n\r\n\r\ndef test_series_factorize_na_sentinel_none():\r\n # GH35667\r\n values = np.array([1, 2, 1, np.nan])\r\n ser = pd.Series(values)\r\n codes, uniques = ser.factorize(na_sentinel=None)\r\n\r\n expected_codes = np.array([0, 1, 0, 2], dtype=np.intp)\r\n expected_uniques = pd.Index([1.0, 2.0, np.nan])\r\n\r\n tm.assert_numpy_array_equal(codes, expected_codes)\r\n tm.assert_index_equal(uniques, expected_uniques)\r\n", "\"\"\"Utilities for meta-estimators\"\"\"\r\n# Author: Joel Nothman\r\n# Andreas Mueller\r\n# License: BSD\r\nfrom typing import List, Any\r\n\r\nfrom abc import ABCMeta, abstractmethod\r\nfrom operator import attrgetter\r\nfrom functools import update_wrapper\r\nimport numpy as np\r\n\r\nfrom ..utils import _safe_indexing\r\nfrom ..base import BaseEstimator\r\nfrom ..base import _is_pairwise\r\n\r\n__all__ = ['if_delegate_has_method']\r\n\r\n\r\nclass _BaseComposition(BaseEstimator, metaclass=ABCMeta):\r\n \"\"\"Handles parameter management for classifiers composed of named estimators.\r\n \"\"\"\r\n steps: List[Any]\r\n\r\n @abstractmethod\r\n def __init__(self):\r\n pass\r\n\r\n def _get_params(self, attr, deep=True):\r\n out = super().get_params(deep=deep)\r\n if not deep:\r\n return out\r\n estimators = getattr(self, attr)\r\n out.update(estimators)\r\n for name, estimator in estimators:\r\n if hasattr(estimator, 'get_params'):\r\n for key, value in estimator.get_params(deep=True).items():\r\n out['%s__%s' % (name, key)] = value\r\n return out\r\n\r\n def _set_params(self, attr, **params):\r\n # Ensure strict ordering of parameter setting:\r\n # 1. All steps\r\n if attr in params:\r\n setattr(self, attr, params.pop(attr))\r\n # 2. Step replacement\r\n items = getattr(self, attr)\r\n names = []\r\n if items:\r\n names, _ = zip(*items)\r\n for name in list(params.keys()):\r\n if '__' not in name and name in names:\r\n self._replace_estimator(attr, name, params.pop(name))\r\n # 3. Step parameters and other initialisation arguments\r\n super().set_params(**params)\r\n return self\r\n\r\n def _replace_estimator(self, attr, name, new_val):\r\n # assumes `name` is a valid estimator name\r\n new_estimators = list(getattr(self, attr))\r\n for i, (estimator_name, _) in enumerate(new_estimators):\r\n if estimator_name == name:\r\n new_estimators[i] = (name, new_val)\r\n break\r\n setattr(self, attr, new_estimators)\r\n\r\n def _validate_names(self, names):\r\n if len(set(names)) != len(names):\r\n raise ValueError('Names provided are not unique: '\r\n '{0!r}'.format(list(names)))\r\n invalid_names = set(names).intersection(self.get_params(deep=False))\r\n if invalid_names:\r\n raise ValueError('Estimator names conflict with constructor '\r\n 'arguments: {0!r}'.format(sorted(invalid_names)))\r\n invalid_names = [name for name in names if '__' in name]\r\n if invalid_names:\r\n raise ValueError('Estimator names must not contain __: got '\r\n '{0!r}'.format(invalid_names))\r\n\r\n\r\nclass _IffHasAttrDescriptor:\r\n \"\"\"Implements a conditional property using the descriptor protocol.\r\n\r\n Using this class to create a decorator will raise an ``AttributeError``\r\n if none of the delegates (specified in ``delegate_names``) is an attribute\r\n of the base object or the first found delegate does not have an attribute\r\n ``attribute_name``.\r\n\r\n This allows ducktyping of the decorated method based on\r\n ``delegate.attribute_name``. 
Here ``delegate`` is the first item in\r\n ``delegate_names`` for which ``hasattr(object, delegate) is True``.\r\n\r\n See https://docs.python.org/3/howto/descriptor.html for an explanation of\r\n descriptors.\r\n \"\"\"\r\n def __init__(self, fn, delegate_names, attribute_name):\r\n self.fn = fn\r\n self.delegate_names = delegate_names\r\n self.attribute_name = attribute_name\r\n\r\n # update the docstring of the descriptor\r\n update_wrapper(self, fn)\r\n\r\n def __get__(self, obj, type=None):\r\n # raise an AttributeError if the attribute is not present on the object\r\n if obj is not None:\r\n # delegate only on instances, not the classes.\r\n # this is to allow access to the docstrings.\r\n for delegate_name in self.delegate_names:\r\n try:\r\n delegate = attrgetter(delegate_name)(obj)\r\n except AttributeError:\r\n continue\r\n else:\r\n getattr(delegate, self.attribute_name)\r\n break\r\n else:\r\n attrgetter(self.delegate_names[-1])(obj)\r\n\r\n # lambda, but not partial, allows help() to work with update_wrapper\r\n out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)\r\n # update the docstring of the returned function\r\n update_wrapper(out, self.fn)\r\n return out\r\n\r\n\r\ndef if_delegate_has_method(delegate):\r\n \"\"\"Create a decorator for methods that are delegated to a sub-estimator\r\n\r\n This enables ducktyping by hasattr returning True according to the\r\n sub-estimator.\r\n\r\n Parameters\r\n ----------\r\n delegate : string, list of strings or tuple of strings\r\n Name of the sub-estimator that can be accessed as an attribute of the\r\n base object. If a list or a tuple of names are provided, the first\r\n sub-estimator that is an attribute of the base object will be used.\r\n\r\n \"\"\"\r\n if isinstance(delegate, list):\r\n delegate = tuple(delegate)\r\n if not isinstance(delegate, tuple):\r\n delegate = (delegate,)\r\n\r\n return lambda fn: _IffHasAttrDescriptor(fn, delegate,\r\n attribute_name=fn.__name__)\r\n\r\n\r\ndef _safe_split(estimator, X, y, indices, train_indices=None):\r\n \"\"\"Create subset of dataset and properly handle kernels.\r\n\r\n Slice X, y according to indices for cross-validation, but take care of\r\n precomputed kernel-matrices or pairwise affinities / distances.\r\n\r\n If ``estimator._pairwise is True``, X needs to be square and\r\n we slice rows and columns. If ``train_indices`` is not None,\r\n we slice rows using ``indices`` (assumed the test set) and columns\r\n using ``train_indices``, indicating the training set.\r\n\r\n .. deprecated:: 0.24\r\n\r\n The _pairwise attribute is deprecated in 0.24. From 1.1\r\n (renaming of 0.26) and onward, this function will check for the\r\n pairwise estimator tag.\r\n\r\n Labels y will always be indexed only along the first axis.\r\n\r\n Parameters\r\n ----------\r\n estimator : object\r\n Estimator to determine whether we should slice only rows or rows and\r\n columns.\r\n\r\n X : array-like, sparse matrix or iterable\r\n Data to be indexed. 
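# --- Illustrative sketch, standalone; assumes the decorator defined above is
# importable as sklearn.utils.metaestimators.if_delegate_has_method. ---
# The descriptor exposes the decorated method only when the named delegate is
# present on the instance and itself has the attribute, so a plain hasattr()
# check gives the ducktyping behaviour described in the docstring:
from sklearn.utils.metaestimators import if_delegate_has_method


class _Wrapper:
    def __init__(self, delegate):
        self.delegate = delegate

    @if_delegate_has_method(delegate='delegate')
    def predict(self, X):
        # forwarded to the wrapped estimator
        return self.delegate.predict(X)


class _WithPredict:
    def predict(self, X):
        return X


class _WithoutPredict:
    pass


assert hasattr(_Wrapper(_WithPredict()), 'predict')
assert not hasattr(_Wrapper(_WithoutPredict()), 'predict')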
If ``estimator._pairwise is True``,\r\n this needs to be a square array-like or sparse matrix.\r\n\r\n y : array-like, sparse matrix or iterable\r\n Targets to be indexed.\r\n\r\n indices : array of int\r\n Rows to select from X and y.\r\n If ``estimator._pairwise is True`` and ``train_indices is None``\r\n then ``indices`` will also be used to slice columns.\r\n\r\n train_indices : array of int or None, default=None\r\n If ``estimator._pairwise is True`` and ``train_indices is not None``,\r\n then ``train_indices`` will be use to slice the columns of X.\r\n\r\n Returns\r\n -------\r\n X_subset : array-like, sparse matrix or list\r\n Indexed data.\r\n\r\n y_subset : array-like, sparse matrix or list\r\n Indexed targets.\r\n\r\n \"\"\"\r\n if _is_pairwise(estimator):\r\n if not hasattr(X, \"shape\"):\r\n raise ValueError(\"Precomputed kernels or affinity matrices have \"\r\n \"to be passed as arrays or sparse matrices.\")\r\n # X is a precomputed square kernel matrix\r\n if X.shape[0] != X.shape[1]:\r\n raise ValueError(\"X should be a square kernel matrix\")\r\n if train_indices is None:\r\n X_subset = X[np.ix_(indices, indices)]\r\n else:\r\n X_subset = X[np.ix_(indices, train_indices)]\r\n else:\r\n X_subset = _safe_indexing(X, indices)\r\n\r\n if y is not None:\r\n y_subset = _safe_indexing(y, indices)\r\n else:\r\n y_subset = None\r\n\r\n return X_subset, y_subset\r\n", "import numpy as np\r\n\r\nimport pandas as pd\r\nfrom pandas import DataFrame\r\nimport pandas._testing as tm\r\n\r\nimport pandas.io.formats.format as fmt\r\n\r\n\r\nclass TestEngFormatter:\r\n def test_eng_float_formatter(self):\r\n df = DataFrame({\"A\": [1.41, 141.0, 14100, 1410000.0]})\r\n\r\n fmt.set_eng_float_format()\r\n result = df.to_string()\r\n expected = (\r\n \" A\\n\"\r\n \"0 1.410E+00\\n\"\r\n \"1 141.000E+00\\n\"\r\n \"2 14.100E+03\\n\"\r\n \"3 1.410E+06\"\r\n )\r\n assert result == expected\r\n\r\n fmt.set_eng_float_format(use_eng_prefix=True)\r\n result = df.to_string()\r\n expected = \" A\\n0 1.410\\n1 141.000\\n2 14.100k\\n3 1.410M\"\r\n assert result == expected\r\n\r\n fmt.set_eng_float_format(accuracy=0)\r\n result = df.to_string()\r\n expected = \" A\\n0 1E+00\\n1 141E+00\\n2 14E+03\\n3 1E+06\"\r\n assert result == expected\r\n\r\n tm.reset_display_options()\r\n\r\n def compare(self, formatter, input, output):\r\n formatted_input = formatter(input)\r\n assert formatted_input == output\r\n\r\n def compare_all(self, formatter, in_out):\r\n \"\"\"\r\n Parameters:\r\n -----------\r\n formatter: EngFormatter under test\r\n in_out: list of tuples. Each tuple = (number, expected_formatting)\r\n\r\n It is tested if 'formatter(number) == expected_formatting'.\r\n *number* should be >= 0 because formatter(-number) == fmt is also\r\n tested. 
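# --- Illustrative sketch, standalone; not part of the modules above. ---
# For a precomputed (pairwise) kernel, _safe_split slices rows *and* columns:
# K[np.ix_(test_idx, train_idx)] keeps the entries relating test samples to
# training samples, and np.ix_ is what builds that open mesh of indices.
import numpy as np

K = np.arange(25).reshape(5, 5)           # stand-in for a 5x5 kernel matrix
test_idx = np.array([0, 3])
train_idx = np.array([1, 2, 4])
K_sub = K[np.ix_(test_idx, train_idx)]    # shape (2, 3)
assert K_sub.shape == (2, 3)
assert K_sub[0, 0] == K[0, 1] and K_sub[1, 2] == K[3, 4]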
*fmt* is derived from *expected_formatting*\r\n \"\"\"\r\n for input, output in in_out:\r\n self.compare(formatter, input, output)\r\n self.compare(formatter, -input, \"-\" + output[1:])\r\n\r\n def test_exponents_with_eng_prefix(self):\r\n formatter = fmt.EngFormatter(accuracy=3, use_eng_prefix=True)\r\n f = np.sqrt(2)\r\n in_out = [\r\n (f * 10 ** -24, \" 1.414y\"),\r\n (f * 10 ** -23, \" 14.142y\"),\r\n (f * 10 ** -22, \" 141.421y\"),\r\n (f * 10 ** -21, \" 1.414z\"),\r\n (f * 10 ** -20, \" 14.142z\"),\r\n (f * 10 ** -19, \" 141.421z\"),\r\n (f * 10 ** -18, \" 1.414a\"),\r\n (f * 10 ** -17, \" 14.142a\"),\r\n (f * 10 ** -16, \" 141.421a\"),\r\n (f * 10 ** -15, \" 1.414f\"),\r\n (f * 10 ** -14, \" 14.142f\"),\r\n (f * 10 ** -13, \" 141.421f\"),\r\n (f * 10 ** -12, \" 1.414p\"),\r\n (f * 10 ** -11, \" 14.142p\"),\r\n (f * 10 ** -10, \" 141.421p\"),\r\n (f * 10 ** -9, \" 1.414n\"),\r\n (f * 10 ** -8, \" 14.142n\"),\r\n (f * 10 ** -7, \" 141.421n\"),\r\n (f * 10 ** -6, \" 1.414u\"),\r\n (f * 10 ** -5, \" 14.142u\"),\r\n (f * 10 ** -4, \" 141.421u\"),\r\n (f * 10 ** -3, \" 1.414m\"),\r\n (f * 10 ** -2, \" 14.142m\"),\r\n (f * 10 ** -1, \" 141.421m\"),\r\n (f * 10 ** 0, \" 1.414\"),\r\n (f * 10 ** 1, \" 14.142\"),\r\n (f * 10 ** 2, \" 141.421\"),\r\n (f * 10 ** 3, \" 1.414k\"),\r\n (f * 10 ** 4, \" 14.142k\"),\r\n (f * 10 ** 5, \" 141.421k\"),\r\n (f * 10 ** 6, \" 1.414M\"),\r\n (f * 10 ** 7, \" 14.142M\"),\r\n (f * 10 ** 8, \" 141.421M\"),\r\n (f * 10 ** 9, \" 1.414G\"),\r\n (f * 10 ** 10, \" 14.142G\"),\r\n (f * 10 ** 11, \" 141.421G\"),\r\n (f * 10 ** 12, \" 1.414T\"),\r\n (f * 10 ** 13, \" 14.142T\"),\r\n (f * 10 ** 14, \" 141.421T\"),\r\n (f * 10 ** 15, \" 1.414P\"),\r\n (f * 10 ** 16, \" 14.142P\"),\r\n (f * 10 ** 17, \" 141.421P\"),\r\n (f * 10 ** 18, \" 1.414E\"),\r\n (f * 10 ** 19, \" 14.142E\"),\r\n (f * 10 ** 20, \" 141.421E\"),\r\n (f * 10 ** 21, \" 1.414Z\"),\r\n (f * 10 ** 22, \" 14.142Z\"),\r\n (f * 10 ** 23, \" 141.421Z\"),\r\n (f * 10 ** 24, \" 1.414Y\"),\r\n (f * 10 ** 25, \" 14.142Y\"),\r\n (f * 10 ** 26, \" 141.421Y\"),\r\n ]\r\n self.compare_all(formatter, in_out)\r\n\r\n def test_exponents_without_eng_prefix(self):\r\n formatter = fmt.EngFormatter(accuracy=4, use_eng_prefix=False)\r\n f = np.pi\r\n in_out = [\r\n (f * 10 ** -24, \" 3.1416E-24\"),\r\n (f * 10 ** -23, \" 31.4159E-24\"),\r\n (f * 10 ** -22, \" 314.1593E-24\"),\r\n (f * 10 ** -21, \" 3.1416E-21\"),\r\n (f * 10 ** -20, \" 31.4159E-21\"),\r\n (f * 10 ** -19, \" 314.1593E-21\"),\r\n (f * 10 ** -18, \" 3.1416E-18\"),\r\n (f * 10 ** -17, \" 31.4159E-18\"),\r\n (f * 10 ** -16, \" 314.1593E-18\"),\r\n (f * 10 ** -15, \" 3.1416E-15\"),\r\n (f * 10 ** -14, \" 31.4159E-15\"),\r\n (f * 10 ** -13, \" 314.1593E-15\"),\r\n (f * 10 ** -12, \" 3.1416E-12\"),\r\n (f * 10 ** -11, \" 31.4159E-12\"),\r\n (f * 10 ** -10, \" 314.1593E-12\"),\r\n (f * 10 ** -9, \" 3.1416E-09\"),\r\n (f * 10 ** -8, \" 31.4159E-09\"),\r\n (f * 10 ** -7, \" 314.1593E-09\"),\r\n (f * 10 ** -6, \" 3.1416E-06\"),\r\n (f * 10 ** -5, \" 31.4159E-06\"),\r\n (f * 10 ** -4, \" 314.1593E-06\"),\r\n (f * 10 ** -3, \" 3.1416E-03\"),\r\n (f * 10 ** -2, \" 31.4159E-03\"),\r\n (f * 10 ** -1, \" 314.1593E-03\"),\r\n (f * 10 ** 0, \" 3.1416E+00\"),\r\n (f * 10 ** 1, \" 31.4159E+00\"),\r\n (f * 10 ** 2, \" 314.1593E+00\"),\r\n (f * 10 ** 3, \" 3.1416E+03\"),\r\n (f * 10 ** 4, \" 31.4159E+03\"),\r\n (f * 10 ** 5, \" 314.1593E+03\"),\r\n (f * 10 ** 6, \" 3.1416E+06\"),\r\n (f * 10 ** 7, \" 31.4159E+06\"),\r\n (f * 10 ** 8, \" 314.1593E+06\"),\r\n (f * 10 ** 9, \" 
3.1416E+09\"),\r\n (f * 10 ** 10, \" 31.4159E+09\"),\r\n (f * 10 ** 11, \" 314.1593E+09\"),\r\n (f * 10 ** 12, \" 3.1416E+12\"),\r\n (f * 10 ** 13, \" 31.4159E+12\"),\r\n (f * 10 ** 14, \" 314.1593E+12\"),\r\n (f * 10 ** 15, \" 3.1416E+15\"),\r\n (f * 10 ** 16, \" 31.4159E+15\"),\r\n (f * 10 ** 17, \" 314.1593E+15\"),\r\n (f * 10 ** 18, \" 3.1416E+18\"),\r\n (f * 10 ** 19, \" 31.4159E+18\"),\r\n (f * 10 ** 20, \" 314.1593E+18\"),\r\n (f * 10 ** 21, \" 3.1416E+21\"),\r\n (f * 10 ** 22, \" 31.4159E+21\"),\r\n (f * 10 ** 23, \" 314.1593E+21\"),\r\n (f * 10 ** 24, \" 3.1416E+24\"),\r\n (f * 10 ** 25, \" 31.4159E+24\"),\r\n (f * 10 ** 26, \" 314.1593E+24\"),\r\n ]\r\n self.compare_all(formatter, in_out)\r\n\r\n def test_rounding(self):\r\n formatter = fmt.EngFormatter(accuracy=3, use_eng_prefix=True)\r\n in_out = [\r\n (5.55555, \" 5.556\"),\r\n (55.5555, \" 55.556\"),\r\n (555.555, \" 555.555\"),\r\n (5555.55, \" 5.556k\"),\r\n (55555.5, \" 55.556k\"),\r\n (555555, \" 555.555k\"),\r\n ]\r\n self.compare_all(formatter, in_out)\r\n\r\n formatter = fmt.EngFormatter(accuracy=1, use_eng_prefix=True)\r\n in_out = [\r\n (5.55555, \" 5.6\"),\r\n (55.5555, \" 55.6\"),\r\n (555.555, \" 555.6\"),\r\n (5555.55, \" 5.6k\"),\r\n (55555.5, \" 55.6k\"),\r\n (555555, \" 555.6k\"),\r\n ]\r\n self.compare_all(formatter, in_out)\r\n\r\n formatter = fmt.EngFormatter(accuracy=0, use_eng_prefix=True)\r\n in_out = [\r\n (5.55555, \" 6\"),\r\n (55.5555, \" 56\"),\r\n (555.555, \" 556\"),\r\n (5555.55, \" 6k\"),\r\n (55555.5, \" 56k\"),\r\n (555555, \" 556k\"),\r\n ]\r\n self.compare_all(formatter, in_out)\r\n\r\n formatter = fmt.EngFormatter(accuracy=3, use_eng_prefix=True)\r\n result = formatter(0)\r\n assert result == \" 0.000\"\r\n\r\n def test_nan(self):\r\n # Issue #11981\r\n\r\n formatter = fmt.EngFormatter(accuracy=1, use_eng_prefix=True)\r\n result = formatter(np.nan)\r\n assert result == \"NaN\"\r\n\r\n df = pd.DataFrame(\r\n {\r\n \"a\": [1.5, 10.3, 20.5],\r\n \"b\": [50.3, 60.67, 70.12],\r\n \"c\": [100.2, 101.33, 120.33],\r\n }\r\n )\r\n pt = df.pivot_table(values=\"a\", index=\"b\", columns=\"c\")\r\n fmt.set_eng_float_format(accuracy=1)\r\n result = pt.to_string()\r\n assert \"NaN\" in result\r\n tm.reset_display_options()\r\n\r\n def test_inf(self):\r\n # Issue #11981\r\n\r\n formatter = fmt.EngFormatter(accuracy=1, use_eng_prefix=True)\r\n result = formatter(np.inf)\r\n assert result == \"inf\"\r\n", "from collections import Counter, defaultdict\r\nfrom itertools import chain\r\n\r\nimport numpy as np\r\nimport pytest\r\n\r\nimport pandas as pd\r\nfrom pandas import DataFrame, Index, MultiIndex, Series, isna\r\nimport pandas._testing as tm\r\nfrom pandas.core.base import SpecificationError\r\n\r\n\r\nclass TestSeriesApply:\r\n def test_apply(self, datetime_series):\r\n with np.errstate(all=\"ignore\"):\r\n tm.assert_series_equal(\r\n datetime_series.apply(np.sqrt), np.sqrt(datetime_series)\r\n )\r\n\r\n # element-wise apply\r\n import math\r\n\r\n tm.assert_series_equal(\r\n datetime_series.apply(math.exp), np.exp(datetime_series)\r\n )\r\n\r\n # empty series\r\n s = Series(dtype=object, name=\"foo\", index=pd.Index([], name=\"bar\"))\r\n rs = s.apply(lambda x: x)\r\n tm.assert_series_equal(s, rs)\r\n\r\n # check all metadata (GH 9322)\r\n assert s is not rs\r\n assert s.index is rs.index\r\n assert s.dtype == rs.dtype\r\n assert s.name == rs.name\r\n\r\n # index but no data\r\n s = Series(index=[1, 2, 3], dtype=np.float64)\r\n rs = s.apply(lambda x: x)\r\n tm.assert_series_equal(s, rs)\r\n\r\n def 
test_apply_same_length_inference_bug(self):\r\n s = Series([1, 2])\r\n\r\n def f(x):\r\n return (x, x + 1)\r\n\r\n result = s.apply(f)\r\n expected = s.map(f)\r\n tm.assert_series_equal(result, expected)\r\n\r\n s = Series([1, 2, 3])\r\n result = s.apply(f)\r\n expected = s.map(f)\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_apply_dont_convert_dtype(self):\r\n s = Series(np.random.randn(10))\r\n\r\n def f(x):\r\n return x if x > 0 else np.nan\r\n\r\n result = s.apply(f, convert_dtype=False)\r\n assert result.dtype == object\r\n\r\n def test_with_string_args(self, datetime_series):\r\n\r\n for arg in [\"sum\", \"mean\", \"min\", \"max\", \"std\"]:\r\n result = datetime_series.apply(arg)\r\n expected = getattr(datetime_series, arg)()\r\n assert result == expected\r\n\r\n def test_apply_args(self):\r\n s = Series([\"foo,bar\"])\r\n\r\n result = s.apply(str.split, args=(\",\",))\r\n assert result[0] == [\"foo\", \"bar\"]\r\n assert isinstance(result[0], list)\r\n\r\n def test_series_map_box_timestamps(self):\r\n # GH#2689, GH#2627\r\n ser = Series(pd.date_range(\"1/1/2000\", periods=10))\r\n\r\n def func(x):\r\n return (x.hour, x.day, x.month)\r\n\r\n # it works!\r\n ser.map(func)\r\n ser.apply(func)\r\n\r\n def test_apply_box(self):\r\n # ufunc will not be boxed. Same test cases as the test_map_box\r\n vals = [pd.Timestamp(\"2011-01-01\"), pd.Timestamp(\"2011-01-02\")]\r\n s = pd.Series(vals)\r\n assert s.dtype == \"datetime64[ns]\"\r\n # boxed value must be Timestamp instance\r\n res = s.apply(lambda x: f\"{type(x).__name__}_{x.day}_{x.tz}\")\r\n exp = pd.Series([\"Timestamp_1_None\", \"Timestamp_2_None\"])\r\n tm.assert_series_equal(res, exp)\r\n\r\n vals = [\r\n pd.Timestamp(\"2011-01-01\", tz=\"US/Eastern\"),\r\n pd.Timestamp(\"2011-01-02\", tz=\"US/Eastern\"),\r\n ]\r\n s = pd.Series(vals)\r\n assert s.dtype == \"datetime64[ns, US/Eastern]\"\r\n res = s.apply(lambda x: f\"{type(x).__name__}_{x.day}_{x.tz}\")\r\n exp = pd.Series([\"Timestamp_1_US/Eastern\", \"Timestamp_2_US/Eastern\"])\r\n tm.assert_series_equal(res, exp)\r\n\r\n # timedelta\r\n vals = [pd.Timedelta(\"1 days\"), pd.Timedelta(\"2 days\")]\r\n s = pd.Series(vals)\r\n assert s.dtype == \"timedelta64[ns]\"\r\n res = s.apply(lambda x: f\"{type(x).__name__}_{x.days}\")\r\n exp = pd.Series([\"Timedelta_1\", \"Timedelta_2\"])\r\n tm.assert_series_equal(res, exp)\r\n\r\n # period\r\n vals = [pd.Period(\"2011-01-01\", freq=\"M\"), pd.Period(\"2011-01-02\", freq=\"M\")]\r\n s = pd.Series(vals)\r\n assert s.dtype == \"Period[M]\"\r\n res = s.apply(lambda x: f\"{type(x).__name__}_{x.freqstr}\")\r\n exp = pd.Series([\"Period_M\", \"Period_M\"])\r\n tm.assert_series_equal(res, exp)\r\n\r\n def test_apply_datetimetz(self):\r\n values = pd.date_range(\"2011-01-01\", \"2011-01-02\", freq=\"H\").tz_localize(\r\n \"Asia/Tokyo\"\r\n )\r\n s = pd.Series(values, name=\"XX\")\r\n\r\n result = s.apply(lambda x: x + pd.offsets.Day())\r\n exp_values = pd.date_range(\"2011-01-02\", \"2011-01-03\", freq=\"H\").tz_localize(\r\n \"Asia/Tokyo\"\r\n )\r\n exp = pd.Series(exp_values, name=\"XX\")\r\n tm.assert_series_equal(result, exp)\r\n\r\n # change dtype\r\n # GH 14506 : Returned dtype changed from int32 to int64\r\n result = s.apply(lambda x: x.hour)\r\n exp = pd.Series(list(range(24)) + [0], name=\"XX\", dtype=np.int64)\r\n tm.assert_series_equal(result, exp)\r\n\r\n # not vectorized\r\n def f(x):\r\n if not isinstance(x, pd.Timestamp):\r\n raise ValueError\r\n return str(x.tz)\r\n\r\n result = s.map(f)\r\n exp = 
pd.Series([\"Asia/Tokyo\"] * 25, name=\"XX\")\r\n tm.assert_series_equal(result, exp)\r\n\r\n def test_apply_dict_depr(self):\r\n\r\n tsdf = pd.DataFrame(\r\n np.random.randn(10, 3),\r\n columns=[\"A\", \"B\", \"C\"],\r\n index=pd.date_range(\"1/1/2000\", periods=10),\r\n )\r\n msg = \"nested renamer is not supported\"\r\n with pytest.raises(SpecificationError, match=msg):\r\n tsdf.A.agg({\"foo\": [\"sum\", \"mean\"]})\r\n\r\n def test_apply_categorical(self):\r\n values = pd.Categorical(list(\"ABBABCD\"), categories=list(\"DCBA\"), ordered=True)\r\n ser = pd.Series(values, name=\"XX\", index=list(\"abcdefg\"))\r\n result = ser.apply(lambda x: x.lower())\r\n\r\n # should be categorical dtype when the number of categories are\r\n # the same\r\n values = pd.Categorical(list(\"abbabcd\"), categories=list(\"dcba\"), ordered=True)\r\n exp = pd.Series(values, name=\"XX\", index=list(\"abcdefg\"))\r\n tm.assert_series_equal(result, exp)\r\n tm.assert_categorical_equal(result.values, exp.values)\r\n\r\n result = ser.apply(lambda x: \"A\")\r\n exp = pd.Series([\"A\"] * 7, name=\"XX\", index=list(\"abcdefg\"))\r\n tm.assert_series_equal(result, exp)\r\n assert result.dtype == object\r\n\r\n @pytest.mark.parametrize(\"series\", [[\"1-1\", \"1-1\", np.NaN], [\"1-1\", \"1-2\", np.NaN]])\r\n def test_apply_categorical_with_nan_values(self, series):\r\n # GH 20714 bug fixed in: GH 24275\r\n s = pd.Series(series, dtype=\"category\")\r\n result = s.apply(lambda x: x.split(\"-\")[0])\r\n result = result.astype(object)\r\n expected = pd.Series([\"1\", \"1\", np.NaN], dtype=\"category\")\r\n expected = expected.astype(object)\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_apply_empty_integer_series_with_datetime_index(self):\r\n # GH 21245\r\n s = pd.Series([], index=pd.date_range(start=\"2018-01-01\", periods=0), dtype=int)\r\n result = s.apply(lambda x: x)\r\n tm.assert_series_equal(result, s)\r\n\r\n\r\nclass TestSeriesAggregate:\r\n def test_transform(self, string_series):\r\n # transforming functions\r\n\r\n with np.errstate(all=\"ignore\"):\r\n\r\n f_sqrt = np.sqrt(string_series)\r\n f_abs = np.abs(string_series)\r\n\r\n # ufunc\r\n result = string_series.transform(np.sqrt)\r\n expected = f_sqrt.copy()\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = string_series.apply(np.sqrt)\r\n tm.assert_series_equal(result, expected)\r\n\r\n # list-like\r\n result = string_series.transform([np.sqrt])\r\n expected = f_sqrt.to_frame().copy()\r\n expected.columns = [\"sqrt\"]\r\n tm.assert_frame_equal(result, expected)\r\n\r\n result = string_series.transform([np.sqrt])\r\n tm.assert_frame_equal(result, expected)\r\n\r\n result = string_series.transform([\"sqrt\"])\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # multiple items in list\r\n # these are in the order as if we are applying both functions per\r\n # series and then concatting\r\n expected = pd.concat([f_sqrt, f_abs], axis=1)\r\n expected.columns = [\"sqrt\", \"absolute\"]\r\n result = string_series.apply([np.sqrt, np.abs])\r\n tm.assert_frame_equal(result, expected)\r\n\r\n result = string_series.transform([\"sqrt\", \"abs\"])\r\n expected.columns = [\"sqrt\", \"abs\"]\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # dict, provide renaming\r\n expected = pd.concat([f_sqrt, f_abs], axis=1)\r\n expected.columns = [\"foo\", \"bar\"]\r\n expected = expected.unstack().rename(\"series\")\r\n\r\n result = string_series.apply({\"foo\": np.sqrt, \"bar\": np.abs})\r\n tm.assert_series_equal(result.reindex_like(expected), 
expected)\r\n\r\n def test_transform_and_agg_error(self, string_series):\r\n # we are trying to transform with an aggregator\r\n msg = \"transforms cannot produce aggregated results\"\r\n with pytest.raises(ValueError, match=msg):\r\n string_series.transform([\"min\", \"max\"])\r\n\r\n msg = \"cannot combine transform and aggregation\"\r\n with pytest.raises(ValueError, match=msg):\r\n with np.errstate(all=\"ignore\"):\r\n string_series.agg([\"sqrt\", \"max\"])\r\n\r\n with pytest.raises(ValueError, match=msg):\r\n with np.errstate(all=\"ignore\"):\r\n string_series.transform([\"sqrt\", \"max\"])\r\n\r\n msg = \"cannot perform both aggregation and transformation\"\r\n with pytest.raises(ValueError, match=msg):\r\n with np.errstate(all=\"ignore\"):\r\n string_series.agg({\"foo\": np.sqrt, \"bar\": \"sum\"})\r\n\r\n def test_demo(self):\r\n # demonstration tests\r\n s = Series(range(6), dtype=\"int64\", name=\"series\")\r\n\r\n result = s.agg([\"min\", \"max\"])\r\n expected = Series([0, 5], index=[\"min\", \"max\"], name=\"series\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = s.agg({\"foo\": \"min\"})\r\n expected = Series([0], index=[\"foo\"], name=\"series\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n # nested renaming\r\n msg = \"nested renamer is not supported\"\r\n with pytest.raises(SpecificationError, match=msg):\r\n s.agg({\"foo\": [\"min\", \"max\"]})\r\n\r\n def test_multiple_aggregators_with_dict_api(self):\r\n\r\n s = Series(range(6), dtype=\"int64\", name=\"series\")\r\n # nested renaming\r\n msg = \"nested renamer is not supported\"\r\n with pytest.raises(SpecificationError, match=msg):\r\n s.agg({\"foo\": [\"min\", \"max\"], \"bar\": [\"sum\", \"mean\"]})\r\n\r\n def test_agg_apply_evaluate_lambdas_the_same(self, string_series):\r\n # test that we are evaluating row-by-row first\r\n # before vectorized evaluation\r\n result = string_series.apply(lambda x: str(x))\r\n expected = string_series.agg(lambda x: str(x))\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = string_series.apply(str)\r\n expected = string_series.agg(str)\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_with_nested_series(self, datetime_series):\r\n # GH 2316\r\n # .agg with a reducer and a transform, what to do\r\n result = datetime_series.apply(\r\n lambda x: Series([x, x ** 2], index=[\"x\", \"x^2\"])\r\n )\r\n expected = DataFrame({\"x\": datetime_series, \"x^2\": datetime_series ** 2})\r\n tm.assert_frame_equal(result, expected)\r\n\r\n result = datetime_series.agg(lambda x: Series([x, x ** 2], index=[\"x\", \"x^2\"]))\r\n tm.assert_frame_equal(result, expected)\r\n\r\n def test_replicate_describe(self, string_series):\r\n # this also tests a result set that is all scalars\r\n expected = string_series.describe()\r\n result = string_series.apply(\r\n {\r\n \"count\": \"count\",\r\n \"mean\": \"mean\",\r\n \"std\": \"std\",\r\n \"min\": \"min\",\r\n \"25%\": lambda x: x.quantile(0.25),\r\n \"50%\": \"median\",\r\n \"75%\": lambda x: x.quantile(0.75),\r\n \"max\": \"max\",\r\n }\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_reduce(self, string_series):\r\n # reductions with named functions\r\n result = string_series.agg([\"sum\", \"mean\"])\r\n expected = Series(\r\n [string_series.sum(), string_series.mean()],\r\n [\"sum\", \"mean\"],\r\n name=string_series.name,\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_non_callable_aggregates(self):\r\n # test agg using non-callable series attributes\r\n s = Series([1, 
2, None])\r\n\r\n # Calling agg w/ just a string arg same as calling s.arg\r\n result = s.agg(\"size\")\r\n expected = s.size\r\n assert result == expected\r\n\r\n # test when mixed w/ callable reducers\r\n result = s.agg([\"size\", \"count\", \"mean\"])\r\n expected = Series({\"size\": 3.0, \"count\": 2.0, \"mean\": 1.5})\r\n tm.assert_series_equal(result[expected.index], expected)\r\n\r\n @pytest.mark.parametrize(\r\n \"series, func, expected\",\r\n chain(\r\n tm.get_cython_table_params(\r\n Series(dtype=np.float64),\r\n [\r\n (\"sum\", 0),\r\n (\"max\", np.nan),\r\n (\"min\", np.nan),\r\n (\"all\", True),\r\n (\"any\", False),\r\n (\"mean\", np.nan),\r\n (\"prod\", 1),\r\n (\"std\", np.nan),\r\n (\"var\", np.nan),\r\n (\"median\", np.nan),\r\n ],\r\n ),\r\n tm.get_cython_table_params(\r\n Series([np.nan, 1, 2, 3]),\r\n [\r\n (\"sum\", 6),\r\n (\"max\", 3),\r\n (\"min\", 1),\r\n (\"all\", True),\r\n (\"any\", True),\r\n (\"mean\", 2),\r\n (\"prod\", 6),\r\n (\"std\", 1),\r\n (\"var\", 1),\r\n (\"median\", 2),\r\n ],\r\n ),\r\n tm.get_cython_table_params(\r\n Series(\"a b c\".split()),\r\n [\r\n (\"sum\", \"abc\"),\r\n (\"max\", \"c\"),\r\n (\"min\", \"a\"),\r\n (\"all\", \"c\"), # see GH12863\r\n (\"any\", \"a\"),\r\n ],\r\n ),\r\n ),\r\n )\r\n def test_agg_cython_table(self, series, func, expected):\r\n # GH21224\r\n # test reducing functions in\r\n # pandas.core.base.SelectionMixin._cython_table\r\n result = series.agg(func)\r\n if tm.is_number(expected):\r\n assert np.isclose(result, expected, equal_nan=True)\r\n else:\r\n assert result == expected\r\n\r\n @pytest.mark.parametrize(\r\n \"series, func, expected\",\r\n chain(\r\n tm.get_cython_table_params(\r\n Series(dtype=np.float64),\r\n [\r\n (\"cumprod\", Series([], Index([]), dtype=np.float64)),\r\n (\"cumsum\", Series([], Index([]), dtype=np.float64)),\r\n ],\r\n ),\r\n tm.get_cython_table_params(\r\n Series([np.nan, 1, 2, 3]),\r\n [\r\n (\"cumprod\", Series([np.nan, 1, 2, 6])),\r\n (\"cumsum\", Series([np.nan, 1, 3, 6])),\r\n ],\r\n ),\r\n tm.get_cython_table_params(\r\n Series(\"a b c\".split()), [(\"cumsum\", Series([\"a\", \"ab\", \"abc\"]))]\r\n ),\r\n ),\r\n )\r\n def test_agg_cython_table_transform(self, series, func, expected):\r\n # GH21224\r\n # test transforming functions in\r\n # pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum)\r\n result = series.agg(func)\r\n tm.assert_series_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\r\n \"series, func, expected\",\r\n chain(\r\n tm.get_cython_table_params(\r\n Series(\"a b c\".split()),\r\n [\r\n (\"mean\", TypeError), # mean raises TypeError\r\n (\"prod\", TypeError),\r\n (\"std\", TypeError),\r\n (\"var\", TypeError),\r\n (\"median\", TypeError),\r\n (\"cumprod\", TypeError),\r\n ],\r\n )\r\n ),\r\n )\r\n def test_agg_cython_table_raises(self, series, func, expected):\r\n # GH21224\r\n with pytest.raises(expected):\r\n # e.g. 
Series('a b'.split()).cumprod() will raise\r\n series.agg(func)\r\n\r\n def test_transform_none_to_type(self):\r\n # GH34377\r\n df = pd.DataFrame({\"a\": [None]})\r\n\r\n msg = \"DataFrame constructor called with incompatible data and dtype\"\r\n with pytest.raises(TypeError, match=msg):\r\n df.transform({\"a\": int})\r\n\r\n def test_series_apply_no_suffix_index(self):\r\n # GH36189\r\n s = pd.Series([4] * 3)\r\n result = s.apply([\"sum\", lambda x: x.sum(), lambda x: x.sum()])\r\n expected = pd.Series([12, 12, 12], index=[\"sum\", \"<lambda>\", \"<lambda>\"])\r\n\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\nclass TestSeriesMap:\r\n def test_map(self, datetime_series):\r\n index, data = tm.getMixedTypeDict()\r\n\r\n source = Series(data[\"B\"], index=data[\"C\"])\r\n target = Series(data[\"C\"][:4], index=data[\"D\"][:4])\r\n\r\n merged = target.map(source)\r\n\r\n for k, v in merged.items():\r\n assert v == source[target[k]]\r\n\r\n # input could be a dict\r\n merged = target.map(source.to_dict())\r\n\r\n for k, v in merged.items():\r\n assert v == source[target[k]]\r\n\r\n # function\r\n result = datetime_series.map(lambda x: x * 2)\r\n tm.assert_series_equal(result, datetime_series * 2)\r\n\r\n # GH 10324\r\n a = Series([1, 2, 3, 4])\r\n b = Series([\"even\", \"odd\", \"even\", \"odd\"], dtype=\"category\")\r\n c = Series([\"even\", \"odd\", \"even\", \"odd\"])\r\n\r\n exp = Series([\"odd\", \"even\", \"odd\", np.nan], dtype=\"category\")\r\n tm.assert_series_equal(a.map(b), exp)\r\n exp = Series([\"odd\", \"even\", \"odd\", np.nan])\r\n tm.assert_series_equal(a.map(c), exp)\r\n\r\n a = Series([\"a\", \"b\", \"c\", \"d\"])\r\n b = Series([1, 2, 3, 4], index=pd.CategoricalIndex([\"b\", \"c\", \"d\", \"e\"]))\r\n c = Series([1, 2, 3, 4], index=Index([\"b\", \"c\", \"d\", \"e\"]))\r\n\r\n exp = Series([np.nan, 1, 2, 3])\r\n tm.assert_series_equal(a.map(b), exp)\r\n exp = Series([np.nan, 1, 2, 3])\r\n tm.assert_series_equal(a.map(c), exp)\r\n\r\n a = Series([\"a\", \"b\", \"c\", \"d\"])\r\n b = Series(\r\n [\"B\", \"C\", \"D\", \"E\"],\r\n dtype=\"category\",\r\n index=pd.CategoricalIndex([\"b\", \"c\", \"d\", \"e\"]),\r\n )\r\n c = Series([\"B\", \"C\", \"D\", \"E\"], index=Index([\"b\", \"c\", \"d\", \"e\"]))\r\n\r\n exp = Series(\r\n pd.Categorical([np.nan, \"B\", \"C\", \"D\"], categories=[\"B\", \"C\", \"D\", \"E\"])\r\n )\r\n tm.assert_series_equal(a.map(b), exp)\r\n exp = Series([np.nan, \"B\", \"C\", \"D\"])\r\n tm.assert_series_equal(a.map(c), exp)\r\n\r\n def test_map_empty(self, index):\r\n if isinstance(index, MultiIndex):\r\n pytest.skip(\"Initializing a Series from a MultiIndex is not supported\")\r\n\r\n s = Series(index)\r\n result = s.map({})\r\n\r\n expected = pd.Series(np.nan, index=s.index)\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_map_compat(self):\r\n # related GH 8024\r\n s = Series([True, True, False], index=[1, 2, 3])\r\n result = s.map({True: \"foo\", False: \"bar\"})\r\n expected = Series([\"foo\", \"foo\", \"bar\"], index=[1, 2, 3])\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_map_int(self):\r\n left = Series({\"a\": 1.0, \"b\": 2.0, \"c\": 3.0, \"d\": 4})\r\n right = Series({1: 11, 2: 22, 3: 33})\r\n\r\n assert left.dtype == np.float_\r\n assert issubclass(right.dtype.type, np.integer)\r\n\r\n merged = left.map(right)\r\n assert merged.dtype == np.float_\r\n assert isna(merged[\"d\"])\r\n assert not isna(merged[\"c\"])\r\n\r\n def test_map_type_inference(self):\r\n s = Series(range(3))\r\n s2 = s.map(lambda x: 
np.where(x == 0, 0, 1))\r\n assert issubclass(s2.dtype.type, np.integer)\r\n\r\n def test_map_decimal(self, string_series):\r\n from decimal import Decimal\r\n\r\n result = string_series.map(lambda x: Decimal(str(x)))\r\n assert result.dtype == np.object_\r\n assert isinstance(result[0], Decimal)\r\n\r\n def test_map_na_exclusion(self):\r\n s = Series([1.5, np.nan, 3, np.nan, 5])\r\n\r\n result = s.map(lambda x: x * 2, na_action=\"ignore\")\r\n exp = s * 2\r\n tm.assert_series_equal(result, exp)\r\n\r\n def test_map_dict_with_tuple_keys(self):\r\n \"\"\"\r\n Due to new MultiIndex-ing behaviour in v0.14.0,\r\n dicts with tuple keys passed to map were being\r\n converted to a multi-index, preventing tuple values\r\n from being mapped properly.\r\n \"\"\"\r\n # GH 18496\r\n df = pd.DataFrame({\"a\": [(1,), (2,), (3, 4), (5, 6)]})\r\n label_mappings = {(1,): \"A\", (2,): \"B\", (3, 4): \"A\", (5, 6): \"B\"}\r\n\r\n df[\"labels\"] = df[\"a\"].map(label_mappings)\r\n df[\"expected_labels\"] = pd.Series([\"A\", \"B\", \"A\", \"B\"], index=df.index)\r\n # All labels should be filled now\r\n tm.assert_series_equal(df[\"labels\"], df[\"expected_labels\"], check_names=False)\r\n\r\n def test_map_counter(self):\r\n s = Series([\"a\", \"b\", \"c\"], index=[1, 2, 3])\r\n counter = Counter()\r\n counter[\"b\"] = 5\r\n counter[\"c\"] += 1\r\n result = s.map(counter)\r\n expected = Series([0, 5, 1], index=[1, 2, 3])\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_map_defaultdict(self):\r\n s = Series([1, 2, 3], index=[\"a\", \"b\", \"c\"])\r\n default_dict = defaultdict(lambda: \"blank\")\r\n default_dict[1] = \"stuff\"\r\n result = s.map(default_dict)\r\n expected = Series([\"stuff\", \"blank\", \"blank\"], index=[\"a\", \"b\", \"c\"])\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_map_dict_na_key(self):\r\n # https://github.com/pandas-dev/pandas/issues/17648\r\n # Checks that np.nan key is appropriately mapped\r\n s = Series([1, 2, np.nan])\r\n expected = Series([\"a\", \"b\", \"c\"])\r\n result = s.map({1: \"a\", 2: \"b\", np.nan: \"c\"})\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_map_dict_subclass_with_missing(self):\r\n \"\"\"\r\n Test Series.map with a dictionary subclass that defines __missing__,\r\n i.e. 
sets a default value (GH #15999).\r\n \"\"\"\r\n\r\n class DictWithMissing(dict):\r\n def __missing__(self, key):\r\n return \"missing\"\r\n\r\n s = Series([1, 2, 3])\r\n dictionary = DictWithMissing({3: \"three\"})\r\n result = s.map(dictionary)\r\n expected = Series([\"missing\", \"missing\", \"three\"])\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_map_dict_subclass_without_missing(self):\r\n class DictWithoutMissing(dict):\r\n pass\r\n\r\n s = Series([1, 2, 3])\r\n dictionary = DictWithoutMissing({3: \"three\"})\r\n result = s.map(dictionary)\r\n expected = Series([np.nan, np.nan, \"three\"])\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_map_abc_mapping(self, non_dict_mapping_subclass):\r\n # https://github.com/pandas-dev/pandas/issues/29733\r\n # Check collections.abc.Mapping support as mapper for Series.map\r\n s = Series([1, 2, 3])\r\n not_a_dictionary = non_dict_mapping_subclass({3: \"three\"})\r\n result = s.map(not_a_dictionary)\r\n expected = Series([np.nan, np.nan, \"three\"])\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_map_abc_mapping_with_missing(self, non_dict_mapping_subclass):\r\n # https://github.com/pandas-dev/pandas/issues/29733\r\n # Check collections.abc.Mapping support as mapper for Series.map\r\n class NonDictMappingWithMissing(non_dict_mapping_subclass):\r\n def __missing__(self, key):\r\n return \"missing\"\r\n\r\n s = Series([1, 2, 3])\r\n not_a_dictionary = NonDictMappingWithMissing({3: \"three\"})\r\n result = s.map(not_a_dictionary)\r\n # __missing__ is a dict concept, not a Mapping concept,\r\n # so it should not change the result!\r\n expected = Series([np.nan, np.nan, \"three\"])\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_map_box(self):\r\n vals = [pd.Timestamp(\"2011-01-01\"), pd.Timestamp(\"2011-01-02\")]\r\n s = pd.Series(vals)\r\n assert s.dtype == \"datetime64[ns]\"\r\n # boxed value must be Timestamp instance\r\n res = s.apply(lambda x: f\"{type(x).__name__}_{x.day}_{x.tz}\")\r\n exp = pd.Series([\"Timestamp_1_None\", \"Timestamp_2_None\"])\r\n tm.assert_series_equal(res, exp)\r\n\r\n vals = [\r\n pd.Timestamp(\"2011-01-01\", tz=\"US/Eastern\"),\r\n pd.Timestamp(\"2011-01-02\", tz=\"US/Eastern\"),\r\n ]\r\n s = pd.Series(vals)\r\n assert s.dtype == \"datetime64[ns, US/Eastern]\"\r\n res = s.apply(lambda x: f\"{type(x).__name__}_{x.day}_{x.tz}\")\r\n exp = pd.Series([\"Timestamp_1_US/Eastern\", \"Timestamp_2_US/Eastern\"])\r\n tm.assert_series_equal(res, exp)\r\n\r\n # timedelta\r\n vals = [pd.Timedelta(\"1 days\"), pd.Timedelta(\"2 days\")]\r\n s = pd.Series(vals)\r\n assert s.dtype == \"timedelta64[ns]\"\r\n res = s.apply(lambda x: f\"{type(x).__name__}_{x.days}\")\r\n exp = pd.Series([\"Timedelta_1\", \"Timedelta_2\"])\r\n tm.assert_series_equal(res, exp)\r\n\r\n # period\r\n vals = [pd.Period(\"2011-01-01\", freq=\"M\"), pd.Period(\"2011-01-02\", freq=\"M\")]\r\n s = pd.Series(vals)\r\n assert s.dtype == \"Period[M]\"\r\n res = s.apply(lambda x: f\"{type(x).__name__}_{x.freqstr}\")\r\n exp = pd.Series([\"Period_M\", \"Period_M\"])\r\n tm.assert_series_equal(res, exp)\r\n\r\n def test_map_categorical(self):\r\n values = pd.Categorical(list(\"ABBABCD\"), categories=list(\"DCBA\"), ordered=True)\r\n s = pd.Series(values, name=\"XX\", index=list(\"abcdefg\"))\r\n\r\n result = s.map(lambda x: x.lower())\r\n exp_values = pd.Categorical(\r\n list(\"abbabcd\"), categories=list(\"dcba\"), ordered=True\r\n )\r\n exp = pd.Series(exp_values, name=\"XX\", index=list(\"abcdefg\"))\r\n 
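# --- Illustrative sketch, standalone; mirrors the __missing__ tests above. ---
# A dict subclass that defines __missing__ supplies a default for keys that
# Series.map cannot find, while a plain dict (or a bare Mapping) maps the
# missing keys to NaN instead (the behavioural difference exercised above).
import pandas as pd


class _DefaultingDict(dict):
    def __missing__(self, key):
        return "missing"


s = pd.Series([1, 2, 3])
with_default = s.map(_DefaultingDict({3: "three"}))
plain = s.map({3: "three"})
assert list(with_default) == ["missing", "missing", "three"]
assert plain.isna().tolist() == [True, True, False] and plain[2] == "three"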
tm.assert_series_equal(result, exp)\r\n tm.assert_categorical_equal(result.values, exp_values)\r\n\r\n result = s.map(lambda x: \"A\")\r\n exp = pd.Series([\"A\"] * 7, name=\"XX\", index=list(\"abcdefg\"))\r\n tm.assert_series_equal(result, exp)\r\n assert result.dtype == object\r\n\r\n with pytest.raises(NotImplementedError):\r\n s.map(lambda x: x, na_action=\"ignore\")\r\n\r\n def test_map_datetimetz(self):\r\n values = pd.date_range(\"2011-01-01\", \"2011-01-02\", freq=\"H\").tz_localize(\r\n \"Asia/Tokyo\"\r\n )\r\n s = pd.Series(values, name=\"XX\")\r\n\r\n # keep tz\r\n result = s.map(lambda x: x + pd.offsets.Day())\r\n exp_values = pd.date_range(\"2011-01-02\", \"2011-01-03\", freq=\"H\").tz_localize(\r\n \"Asia/Tokyo\"\r\n )\r\n exp = pd.Series(exp_values, name=\"XX\")\r\n tm.assert_series_equal(result, exp)\r\n\r\n # change dtype\r\n # GH 14506 : Returned dtype changed from int32 to int64\r\n result = s.map(lambda x: x.hour)\r\n exp = pd.Series(list(range(24)) + [0], name=\"XX\", dtype=np.int64)\r\n tm.assert_series_equal(result, exp)\r\n\r\n with pytest.raises(NotImplementedError):\r\n s.map(lambda x: x, na_action=\"ignore\")\r\n\r\n # not vectorized\r\n def f(x):\r\n if not isinstance(x, pd.Timestamp):\r\n raise ValueError\r\n return str(x.tz)\r\n\r\n result = s.map(f)\r\n exp = pd.Series([\"Asia/Tokyo\"] * 25, name=\"XX\")\r\n tm.assert_series_equal(result, exp)\r\n\r\n @pytest.mark.parametrize(\r\n \"vals,mapping,exp\",\r\n [\r\n (list(\"abc\"), {np.nan: \"not NaN\"}, [np.nan] * 3 + [\"not NaN\"]),\r\n (list(\"abc\"), {\"a\": \"a letter\"}, [\"a letter\"] + [np.nan] * 3),\r\n (list(range(3)), {0: 42}, [42] + [np.nan] * 3),\r\n ],\r\n )\r\n def test_map_missing_mixed(self, vals, mapping, exp):\r\n # GH20495\r\n s = pd.Series(vals + [np.nan])\r\n result = s.map(mapping)\r\n\r\n tm.assert_series_equal(result, pd.Series(exp))\r\n\r\n @pytest.mark.parametrize(\r\n \"dti,exp\",\r\n [\r\n (\r\n Series([1, 2], index=pd.DatetimeIndex([0, 31536000000])),\r\n DataFrame(np.repeat([[1, 2]], 2, axis=0), dtype=\"int64\"),\r\n ),\r\n (\r\n tm.makeTimeSeries(nper=30),\r\n DataFrame(np.repeat([[1, 2]], 30, axis=0), dtype=\"int64\"),\r\n ),\r\n ],\r\n )\r\n def test_apply_series_on_date_time_index_aware_series(self, dti, exp):\r\n # GH 25959\r\n # Calling apply on a localized time series should not cause an error\r\n index = dti.tz_localize(\"UTC\").index\r\n result = pd.Series(index).apply(lambda x: pd.Series([1, 2]))\r\n tm.assert_frame_equal(result, exp)\r\n\r\n def test_apply_scaler_on_date_time_index_aware_series(self):\r\n # GH 25959\r\n # Calling apply on a localized time series should not cause an error\r\n series = tm.makeTimeSeries(nper=30).tz_localize(\"UTC\")\r\n result = pd.Series(series.index).apply(lambda x: 1)\r\n tm.assert_series_equal(result, pd.Series(np.ones(30), dtype=\"int64\"))\r\n\r\n def test_map_float_to_string_precision(self):\r\n # GH 13228\r\n ser = pd.Series(1 / 3)\r\n result = ser.map(lambda val: str(val)).to_dict()\r\n expected = {0: \"0.3333333333333333\"}\r\n assert result == expected\r\n\r\n def test_map_with_invalid_na_action_raises(self):\r\n # https://github.com/pandas-dev/pandas/issues/32815\r\n s = pd.Series([1, 2, 3])\r\n msg = \"na_action must either be 'ignore' or None\"\r\n with pytest.raises(ValueError, match=msg):\r\n s.map(lambda x: x, na_action=\"____\")\r\n\r\n def test_apply_to_timedelta(self):\r\n list_of_valid_strings = [\"00:00:01\", \"00:00:02\"]\r\n a = pd.to_timedelta(list_of_valid_strings)\r\n b = 
Series(list_of_valid_strings).apply(pd.to_timedelta)\r\n # FIXME: dont leave commented-out\r\n # Can't compare until apply on a Series gives the correct dtype\r\n # assert_series_equal(a, b)\r\n\r\n list_of_strings = [\"00:00:01\", np.nan, pd.NaT, pd.NaT]\r\n\r\n a = pd.to_timedelta(list_of_strings) # noqa\r\n b = Series(list_of_strings).apply(pd.to_timedelta) # noqa\r\n # Can't compare until apply on a Series gives the correct dtype\r\n # assert_series_equal(a, b)\r\n", "import numpy as np\r\nimport pytest\r\n\r\nimport pandas as pd\r\nfrom pandas import Int64Index, TimedeltaIndex, timedelta_range\r\nimport pandas._testing as tm\r\n\r\nfrom pandas.tseries.offsets import Hour\r\n\r\n\r\nclass TestTimedeltaIndex:\r\n def test_union(self):\r\n\r\n i1 = timedelta_range(\"1day\", periods=5)\r\n i2 = timedelta_range(\"3day\", periods=5)\r\n result = i1.union(i2)\r\n expected = timedelta_range(\"1day\", periods=7)\r\n tm.assert_index_equal(result, expected)\r\n\r\n i1 = Int64Index(np.arange(0, 20, 2))\r\n i2 = timedelta_range(start=\"1 day\", periods=10, freq=\"D\")\r\n i1.union(i2) # Works\r\n i2.union(i1) # Fails with \"AttributeError: can't set attribute\"\r\n\r\n def test_union_sort_false(self):\r\n tdi = timedelta_range(\"1day\", periods=5)\r\n\r\n left = tdi[3:]\r\n right = tdi[:3]\r\n\r\n # Check that we are testing the desired code path\r\n assert left._can_fast_union(right)\r\n\r\n result = left.union(right)\r\n tm.assert_index_equal(result, tdi)\r\n\r\n result = left.union(right, sort=False)\r\n expected = pd.TimedeltaIndex([\"4 Days\", \"5 Days\", \"1 Days\", \"2 Day\", \"3 Days\"])\r\n tm.assert_index_equal(result, expected)\r\n\r\n def test_union_coverage(self):\r\n\r\n idx = TimedeltaIndex([\"3d\", \"1d\", \"2d\"])\r\n ordered = TimedeltaIndex(idx.sort_values(), freq=\"infer\")\r\n result = ordered.union(idx)\r\n tm.assert_index_equal(result, ordered)\r\n\r\n result = ordered[:0].union(ordered)\r\n tm.assert_index_equal(result, ordered)\r\n assert result.freq == ordered.freq\r\n\r\n def test_union_bug_1730(self):\r\n\r\n rng_a = timedelta_range(\"1 day\", periods=4, freq=\"3H\")\r\n rng_b = timedelta_range(\"1 day\", periods=4, freq=\"4H\")\r\n\r\n result = rng_a.union(rng_b)\r\n exp = TimedeltaIndex(sorted(set(rng_a) | set(rng_b)))\r\n tm.assert_index_equal(result, exp)\r\n\r\n def test_union_bug_1745(self):\r\n\r\n left = TimedeltaIndex([\"1 day 15:19:49.695000\"])\r\n right = TimedeltaIndex(\r\n [\"2 day 13:04:21.322000\", \"1 day 15:27:24.873000\", \"1 day 15:31:05.350000\"]\r\n )\r\n\r\n result = left.union(right)\r\n exp = TimedeltaIndex(sorted(set(left) | set(right)))\r\n tm.assert_index_equal(result, exp)\r\n\r\n def test_union_bug_4564(self):\r\n\r\n left = timedelta_range(\"1 day\", \"30d\")\r\n right = left + pd.offsets.Minute(15)\r\n\r\n result = left.union(right)\r\n exp = TimedeltaIndex(sorted(set(left) | set(right)))\r\n tm.assert_index_equal(result, exp)\r\n\r\n def test_union_freq_infer(self):\r\n # When taking the union of two TimedeltaIndexes, we infer\r\n # a freq even if the arguments don't have freq. 
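# --- Illustrative sketch, standalone; mirrors the basic union test above. ---
# timedelta_range builds a fixed-frequency TimedeltaIndex, and the union of
# two overlapping ranges with the same daily step goes through the fast-union
# path, giving back one contiguous daily range.
import pandas as pd

i1 = pd.timedelta_range("1 day", periods=5)    # 1..5 days, freq='D'
i2 = pd.timedelta_range("3 day", periods=5)    # 3..7 days, freq='D'
result = i1.union(i2)
expected = pd.timedelta_range("1 day", periods=7)
assert result.equals(expected)
assert result.freq == expected.freq            # the daily step is preserved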
This matches\r\n # DatetimeIndex behavior.\r\n tdi = pd.timedelta_range(\"1 Day\", periods=5)\r\n left = tdi[[0, 1, 3, 4]]\r\n right = tdi[[2, 3, 1]]\r\n\r\n assert left.freq is None\r\n assert right.freq is None\r\n\r\n result = left.union(right)\r\n tm.assert_index_equal(result, tdi)\r\n assert result.freq == \"D\"\r\n\r\n def test_intersection_bug_1708(self):\r\n index_1 = timedelta_range(\"1 day\", periods=4, freq=\"h\")\r\n index_2 = index_1 + pd.offsets.Hour(5)\r\n\r\n result = index_1 & index_2\r\n assert len(result) == 0\r\n\r\n index_1 = timedelta_range(\"1 day\", periods=4, freq=\"h\")\r\n index_2 = index_1 + pd.offsets.Hour(1)\r\n\r\n result = index_1 & index_2\r\n expected = timedelta_range(\"1 day 01:00:00\", periods=3, freq=\"h\")\r\n tm.assert_index_equal(result, expected)\r\n assert result.freq == expected.freq\r\n\r\n def test_intersection_equal(self, sort):\r\n # GH 24471 Test intersection outcome given the sort keyword\r\n # for equal indicies intersection should return the original index\r\n first = timedelta_range(\"1 day\", periods=4, freq=\"h\")\r\n second = timedelta_range(\"1 day\", periods=4, freq=\"h\")\r\n intersect = first.intersection(second, sort=sort)\r\n if sort is None:\r\n tm.assert_index_equal(intersect, second.sort_values())\r\n assert tm.equalContents(intersect, second)\r\n\r\n # Corner cases\r\n inter = first.intersection(first, sort=sort)\r\n assert inter is first\r\n\r\n @pytest.mark.parametrize(\"period_1, period_2\", [(0, 4), (4, 0)])\r\n def test_intersection_zero_length(self, period_1, period_2, sort):\r\n # GH 24471 test for non overlap the intersection should be zero length\r\n index_1 = timedelta_range(\"1 day\", periods=period_1, freq=\"h\")\r\n index_2 = timedelta_range(\"1 day\", periods=period_2, freq=\"h\")\r\n expected = timedelta_range(\"1 day\", periods=0, freq=\"h\")\r\n result = index_1.intersection(index_2, sort=sort)\r\n tm.assert_index_equal(result, expected)\r\n\r\n def test_zero_length_input_index(self, sort):\r\n # GH 24966 test for 0-len intersections are copied\r\n index_1 = timedelta_range(\"1 day\", periods=0, freq=\"h\")\r\n index_2 = timedelta_range(\"1 day\", periods=3, freq=\"h\")\r\n result = index_1.intersection(index_2, sort=sort)\r\n assert index_1 is not result\r\n assert index_2 is not result\r\n tm.assert_copy(result, index_1)\r\n\r\n @pytest.mark.parametrize(\r\n \"rng, expected\",\r\n # if target has the same name, it is preserved\r\n [\r\n (\r\n timedelta_range(\"1 day\", periods=5, freq=\"h\", name=\"idx\"),\r\n timedelta_range(\"1 day\", periods=4, freq=\"h\", name=\"idx\"),\r\n ),\r\n # if target name is different, it will be reset\r\n (\r\n timedelta_range(\"1 day\", periods=5, freq=\"h\", name=\"other\"),\r\n timedelta_range(\"1 day\", periods=4, freq=\"h\", name=None),\r\n ),\r\n # if no overlap exists return empty index\r\n (\r\n timedelta_range(\"1 day\", periods=10, freq=\"h\", name=\"idx\")[5:],\r\n TimedeltaIndex([], freq=\"h\", name=\"idx\"),\r\n ),\r\n ],\r\n )\r\n def test_intersection(self, rng, expected, sort):\r\n # GH 4690 (with tz)\r\n base = timedelta_range(\"1 day\", periods=4, freq=\"h\", name=\"idx\")\r\n result = base.intersection(rng, sort=sort)\r\n if sort is None:\r\n expected = expected.sort_values()\r\n tm.assert_index_equal(result, expected)\r\n assert result.name == expected.name\r\n assert result.freq == expected.freq\r\n\r\n @pytest.mark.parametrize(\r\n \"rng, expected\",\r\n # part intersection works\r\n [\r\n (\r\n TimedeltaIndex([\"5 hour\", \"2 hour\", \"4 hour\", \"9 
hour\"], name=\"idx\"),\r\n TimedeltaIndex([\"2 hour\", \"4 hour\"], name=\"idx\"),\r\n ),\r\n # reordered part intersection\r\n (\r\n TimedeltaIndex([\"2 hour\", \"5 hour\", \"5 hour\", \"1 hour\"], name=\"other\"),\r\n TimedeltaIndex([\"1 hour\", \"2 hour\"], name=None),\r\n ),\r\n # reversed index\r\n (\r\n TimedeltaIndex([\"1 hour\", \"2 hour\", \"4 hour\", \"3 hour\"], name=\"idx\")[\r\n ::-1\r\n ],\r\n TimedeltaIndex([\"1 hour\", \"2 hour\", \"4 hour\", \"3 hour\"], name=\"idx\"),\r\n ),\r\n ],\r\n )\r\n def test_intersection_non_monotonic(self, rng, expected, sort):\r\n # 24471 non-monotonic\r\n base = TimedeltaIndex([\"1 hour\", \"2 hour\", \"4 hour\", \"3 hour\"], name=\"idx\")\r\n result = base.intersection(rng, sort=sort)\r\n if sort is None:\r\n expected = expected.sort_values()\r\n tm.assert_index_equal(result, expected)\r\n assert result.name == expected.name\r\n\r\n # if reversed order, frequency is still the same\r\n if all(base == rng[::-1]) and sort is None:\r\n assert isinstance(result.freq, Hour)\r\n else:\r\n assert result.freq is None\r\n\r\n\r\nclass TestTimedeltaIndexDifference:\r\n def test_difference_freq(self, sort):\r\n # GH14323: Difference of TimedeltaIndex should not preserve frequency\r\n\r\n index = timedelta_range(\"0 days\", \"5 days\", freq=\"D\")\r\n\r\n other = timedelta_range(\"1 days\", \"4 days\", freq=\"D\")\r\n expected = TimedeltaIndex([\"0 days\", \"5 days\"], freq=None)\r\n idx_diff = index.difference(other, sort)\r\n tm.assert_index_equal(idx_diff, expected)\r\n tm.assert_attr_equal(\"freq\", idx_diff, expected)\r\n\r\n other = timedelta_range(\"2 days\", \"5 days\", freq=\"D\")\r\n idx_diff = index.difference(other, sort)\r\n expected = TimedeltaIndex([\"0 days\", \"1 days\"], freq=None)\r\n tm.assert_index_equal(idx_diff, expected)\r\n tm.assert_attr_equal(\"freq\", idx_diff, expected)\r\n\r\n def test_difference_sort(self, sort):\r\n\r\n index = pd.TimedeltaIndex(\r\n [\"5 days\", \"3 days\", \"2 days\", \"4 days\", \"1 days\", \"0 days\"]\r\n )\r\n\r\n other = timedelta_range(\"1 days\", \"4 days\", freq=\"D\")\r\n idx_diff = index.difference(other, sort)\r\n\r\n expected = TimedeltaIndex([\"5 days\", \"0 days\"], freq=None)\r\n\r\n if sort is None:\r\n expected = expected.sort_values()\r\n\r\n tm.assert_index_equal(idx_diff, expected)\r\n tm.assert_attr_equal(\"freq\", idx_diff, expected)\r\n\r\n other = timedelta_range(\"2 days\", \"5 days\", freq=\"D\")\r\n idx_diff = index.difference(other, sort)\r\n expected = TimedeltaIndex([\"1 days\", \"0 days\"], freq=None)\r\n\r\n if sort is None:\r\n expected = expected.sort_values()\r\n\r\n tm.assert_index_equal(idx_diff, expected)\r\n tm.assert_attr_equal(\"freq\", idx_diff, expected)\r\n", "\"\"\"\r\nTests for Timestamp parsing, aimed at pandas/_libs/tslibs/parsing.pyx\r\n\"\"\"\r\nfrom datetime import datetime\r\nimport re\r\n\r\nfrom dateutil.parser import parse\r\nimport numpy as np\r\nimport pytest\r\n\r\nfrom pandas._libs.tslibs import parsing\r\nfrom pandas._libs.tslibs.parsing import parse_time_string\r\nimport pandas.util._test_decorators as td\r\n\r\nimport pandas._testing as tm\r\n\r\n\r\ndef test_parse_time_string():\r\n (parsed, reso) = parse_time_string(\"4Q1984\")\r\n (parsed_lower, reso_lower) = parse_time_string(\"4q1984\")\r\n\r\n assert reso == reso_lower\r\n assert parsed == parsed_lower\r\n\r\n\r\ndef test_parse_time_string_invalid_type():\r\n # Raise on invalid input, don't just return it\r\n msg = \"Argument 'arg' has incorrect type (expected str, got tuple)\"\r\n with 
pytest.raises(TypeError, match=re.escape(msg)):\r\n parse_time_string((4, 5))\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"dashed,normal\", [(\"1988-Q2\", \"1988Q2\"), (\"2Q-1988\", \"2Q1988\")]\r\n)\r\ndef test_parse_time_quarter_with_dash(dashed, normal):\r\n # see gh-9688\r\n (parsed_dash, reso_dash) = parse_time_string(dashed)\r\n (parsed, reso) = parse_time_string(normal)\r\n\r\n assert parsed_dash == parsed\r\n assert reso_dash == reso\r\n\r\n\r\n@pytest.mark.parametrize(\"dashed\", [\"-2Q1992\", \"2-Q1992\", \"4-4Q1992\"])\r\ndef test_parse_time_quarter_with_dash_error(dashed):\r\n msg = f\"Unknown datetime string format, unable to parse: {dashed}\"\r\n\r\n with pytest.raises(parsing.DateParseError, match=msg):\r\n parse_time_string(dashed)\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"date_string,expected\",\r\n [\r\n (\"123.1234\", False),\r\n (\"-50000\", False),\r\n (\"999\", False),\r\n (\"m\", False),\r\n (\"T\", False),\r\n (\"Mon Sep 16, 2013\", True),\r\n (\"2012-01-01\", True),\r\n (\"01/01/2012\", True),\r\n (\"01012012\", True),\r\n (\"0101\", True),\r\n (\"1-1\", True),\r\n ],\r\n)\r\ndef test_does_not_convert_mixed_integer(date_string, expected):\r\n assert parsing._does_string_look_like_datetime(date_string) is expected\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"date_str,kwargs,msg\",\r\n [\r\n (\r\n \"2013Q5\",\r\n dict(),\r\n (\r\n \"Incorrect quarterly string is given, \"\r\n \"quarter must be between 1 and 4: 2013Q5\"\r\n ),\r\n ),\r\n # see gh-5418\r\n (\r\n \"2013Q1\",\r\n dict(freq=\"INVLD-L-DEC-SAT\"),\r\n (\r\n \"Unable to retrieve month information \"\r\n \"from given freq: INVLD-L-DEC-SAT\"\r\n ),\r\n ),\r\n ],\r\n)\r\ndef test_parsers_quarterly_with_freq_error(date_str, kwargs, msg):\r\n with pytest.raises(parsing.DateParseError, match=msg):\r\n parsing.parse_time_string(date_str, **kwargs)\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"date_str,freq,expected\",\r\n [\r\n (\"2013Q2\", None, datetime(2013, 4, 1)),\r\n (\"2013Q2\", \"A-APR\", datetime(2012, 8, 1)),\r\n (\"2013-Q2\", \"A-DEC\", datetime(2013, 4, 1)),\r\n ],\r\n)\r\ndef test_parsers_quarterly_with_freq(date_str, freq, expected):\r\n result, _ = parsing.parse_time_string(date_str, freq=freq)\r\n assert result == expected\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"date_str\", [\"2Q 2005\", \"2Q-200A\", \"2Q-200\", \"22Q2005\", \"2Q200.\", \"6Q-20\"]\r\n)\r\ndef test_parsers_quarter_invalid(date_str):\r\n if date_str == \"6Q-20\":\r\n msg = (\r\n \"Incorrect quarterly string is given, quarter \"\r\n f\"must be between 1 and 4: {date_str}\"\r\n )\r\n else:\r\n msg = f\"Unknown datetime string format, unable to parse: {date_str}\"\r\n\r\n with pytest.raises(ValueError, match=msg):\r\n parsing.parse_time_string(date_str)\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"date_str,expected\",\r\n [(\"201101\", datetime(2011, 1, 1, 0, 0)), (\"200005\", datetime(2000, 5, 1, 0, 0))],\r\n)\r\ndef test_parsers_month_freq(date_str, expected):\r\n result, _ = parsing.parse_time_string(date_str, freq=\"M\")\r\n assert result == expected\r\n\r\n\r\n@td.skip_if_not_us_locale\r\n@pytest.mark.parametrize(\r\n \"string,fmt\",\r\n [\r\n (\"20111230\", \"%Y%m%d\"),\r\n (\"2011-12-30\", \"%Y-%m-%d\"),\r\n (\"30-12-2011\", \"%d-%m-%Y\"),\r\n (\"2011-12-30 00:00:00\", \"%Y-%m-%d %H:%M:%S\"),\r\n (\"2011-12-30T00:00:00\", \"%Y-%m-%dT%H:%M:%S\"),\r\n (\"2011-12-30 00:00:00.000000\", \"%Y-%m-%d %H:%M:%S.%f\"),\r\n ],\r\n)\r\ndef test_guess_datetime_format_with_parseable_formats(string, fmt):\r\n result = parsing._guess_datetime_format(string)\r\n assert result == fmt\r\n\r\n\r\n
@pytest.mark.parametrize(\"dayfirst,expected\", [(True, \"%d/%m/%Y\"), (False, \"%m/%d/%Y\")])\r\ndef test_guess_datetime_format_with_dayfirst(dayfirst, expected):\r\n ambiguous_string = \"01/01/2011\"\r\n result = parsing._guess_datetime_format(ambiguous_string, dayfirst=dayfirst)\r\n assert result == expected\r\n\r\n\r\n@td.skip_if_has_locale\r\n@pytest.mark.parametrize(\r\n \"string,fmt\",\r\n [\r\n (\"30/Dec/2011\", \"%d/%b/%Y\"),\r\n (\"30/December/2011\", \"%d/%B/%Y\"),\r\n (\"30/Dec/2011 00:00:00\", \"%d/%b/%Y %H:%M:%S\"),\r\n ],\r\n)\r\ndef test_guess_datetime_format_with_locale_specific_formats(string, fmt):\r\n result = parsing._guess_datetime_format(string)\r\n assert result == fmt\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"invalid_dt\",\r\n [\r\n \"2013\",\r\n \"01/2013\",\r\n \"12:00:00\",\r\n \"1/1/1/1\",\r\n \"this_is_not_a_datetime\",\r\n \"51a\",\r\n 9,\r\n datetime(2011, 1, 1),\r\n ],\r\n)\r\ndef test_guess_datetime_format_invalid_inputs(invalid_dt):\r\n # A datetime string must include a year, month and a day for it to be\r\n # guessable, in addition to being a string that looks like a datetime.\r\n assert parsing._guess_datetime_format(invalid_dt) is None\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"string,fmt\",\r\n [\r\n (\"2011-1-1\", \"%Y-%m-%d\"),\r\n (\"1/1/2011\", \"%m/%d/%Y\"),\r\n (\"30-1-2011\", \"%d-%m-%Y\"),\r\n (\"2011-1-1 0:0:0\", \"%Y-%m-%d %H:%M:%S\"),\r\n (\"2011-1-3T00:00:0\", \"%Y-%m-%dT%H:%M:%S\"),\r\n (\"2011-1-1 00:00:00\", \"%Y-%m-%d %H:%M:%S\"),\r\n ],\r\n)\r\ndef test_guess_datetime_format_no_padding(string, fmt):\r\n # see gh-11142\r\n result = parsing._guess_datetime_format(string)\r\n assert result == fmt\r\n\r\n\r\ndef test_try_parse_dates():\r\n arr = np.array([\"5/1/2000\", \"6/1/2000\", \"7/1/2000\"], dtype=object)\r\n result = parsing.try_parse_dates(arr, dayfirst=True)\r\n\r\n expected = np.array([parse(d, dayfirst=True) for d in arr])\r\n tm.assert_numpy_array_equal(result, expected)\r\n\r\n\r\ndef test_parse_time_string_check_instance_type_raise_exception():\r\n # issue 20684\r\n msg = \"Argument 'arg' has incorrect type (expected str, got tuple)\"\r\n with pytest.raises(TypeError, match=re.escape(msg)):\r\n parse_time_string((1, 2, 3))\r\n\r\n result = parse_time_string(\"2019\")\r\n expected = (datetime(2019, 1, 1), \"year\")\r\n assert result == expected\r\n", "\"\"\"Tests for Interval-Interval operations, such as overlaps, contains, etc.\"\"\"\r\nimport pytest\r\n\r\nfrom pandas import Interval, Timedelta, Timestamp\r\n\r\n\r\n@pytest.fixture(\r\n params=[\r\n (Timedelta(\"0 days\"), Timedelta(\"1 day\")),\r\n (Timestamp(\"2018-01-01\"), Timedelta(\"1 day\")),\r\n (0, 1),\r\n ],\r\n ids=lambda x: type(x[0]).__name__,\r\n)\r\ndef start_shift(request):\r\n \"\"\"\r\n Fixture for generating intervals of types from a start value and a shift\r\n value that can be added to start to generate an endpoint\r\n \"\"\"\r\n return request.param\r\n\r\n\r\nclass TestOverlaps:\r\n def test_overlaps_self(self, start_shift, closed):\r\n start, shift = start_shift\r\n interval = Interval(start, start + shift, closed)\r\n assert interval.overlaps(interval)\r\n\r\n def test_overlaps_nested(self, start_shift, closed, other_closed):\r\n start, shift = start_shift\r\n interval1 = Interval(start, start + 3 * shift, other_closed)\r\n interval2 = Interval(start + shift, start + 2 * shift, closed)\r\n\r\n # nested intervals should always overlap\r\n assert interval1.overlaps(interval2)\r\n\r\n def test_overlaps_disjoint(self, start_shift, closed, other_closed):\r\n start, shift = 
start_shift\r\n interval1 = Interval(start, start + shift, other_closed)\r\n interval2 = Interval(start + 2 * shift, start + 3 * shift, closed)\r\n\r\n # disjoint intervals should never overlap\r\n assert not interval1.overlaps(interval2)\r\n\r\n def test_overlaps_endpoint(self, start_shift, closed, other_closed):\r\n start, shift = start_shift\r\n interval1 = Interval(start, start + shift, other_closed)\r\n interval2 = Interval(start + shift, start + 2 * shift, closed)\r\n\r\n # overlap if shared endpoint is closed for both (overlap at a point)\r\n result = interval1.overlaps(interval2)\r\n expected = interval1.closed_right and interval2.closed_left\r\n assert result == expected\r\n\r\n @pytest.mark.parametrize(\r\n \"other\",\r\n [10, True, \"foo\", Timedelta(\"1 day\"), Timestamp(\"2018-01-01\")],\r\n ids=lambda x: type(x).__name__,\r\n )\r\n def test_overlaps_invalid_type(self, other):\r\n interval = Interval(0, 1)\r\n msg = f\"`other` must be an Interval, got {type(other).__name__}\"\r\n with pytest.raises(TypeError, match=msg):\r\n interval.overlaps(other)\r\n", "\"\"\"\r\nThe :mod:`sklearn.model_selection._validation` module includes classes and\r\nfunctions to validate the model.\r\n\"\"\"\r\n\r\n# Author: Alexandre Gramfort <[email protected]>\r\n# Gael Varoquaux <[email protected]>\r\n# Olivier Grisel <[email protected]>\r\n# Raghav RV <[email protected]>\r\n# License: BSD 3 clause\r\n\r\n\r\nimport warnings\r\nimport numbers\r\nimport time\r\nfrom traceback import format_exc\r\nfrom contextlib import suppress\r\n\r\nimport numpy as np\r\nimport scipy.sparse as sp\r\nfrom joblib import Parallel, logger\r\n\r\nfrom ..base import is_classifier, clone\r\nfrom ..utils import indexable, check_random_state, _safe_indexing\r\nfrom ..utils.validation import _check_fit_params\r\nfrom ..utils.validation import _num_samples\r\nfrom ..utils.validation import _deprecate_positional_args\r\nfrom ..utils.fixes import delayed\r\nfrom ..utils.metaestimators import _safe_split\r\nfrom ..metrics import check_scoring\r\nfrom ..metrics._scorer import _check_multimetric_scoring, _MultimetricScorer\r\nfrom ..exceptions import FitFailedWarning, NotFittedError\r\nfrom ._split import check_cv\r\nfrom ..preprocessing import LabelEncoder\r\n\r\n\r\n__all__ = ['cross_validate', 'cross_val_score', 'cross_val_predict',\r\n 'permutation_test_score', 'learning_curve', 'validation_curve']\r\n\r\n\r\n@_deprecate_positional_args\r\ndef cross_validate(estimator, X, y=None, *, groups=None, scoring=None, cv=None,\r\n n_jobs=None, verbose=0, fit_params=None,\r\n pre_dispatch='2*n_jobs', return_train_score=False,\r\n return_estimator=False, error_score=np.nan):\r\n \"\"\"Evaluate metric(s) by cross-validation and also record fit/score times.\r\n\r\n Read more in the :ref:`User Guide <multimetric_cross_validation>`.\r\n\r\n Parameters\r\n ----------\r\n estimator : estimator object implementing 'fit'\r\n The object to use to fit the data.\r\n\r\n X : array-like of shape (n_samples, n_features)\r\n The data to fit. Can be for example a list, or an array.\r\n\r\n y : array-like of shape (n_samples,) or (n_samples, n_outputs), \\\r\n default=None\r\n The target variable to try to predict in the case of\r\n supervised learning.\r\n\r\n groups : array-like of shape (n_samples,), default=None\r\n Group labels for the samples used while splitting the dataset into\r\n train/test set. 
Only used in conjunction with a \"Group\" :term:`cv`\r\n instance (e.g., :class:`GroupKFold`).\r\n\r\n scoring : str, callable, list/tuple, or dict, default=None\r\n A single str (see :ref:`scoring_parameter`) or a callable\r\n (see :ref:`scoring`) to evaluate the predictions on the test set.\r\n\r\n For evaluating multiple metrics, either give a list of (unique) strings\r\n or a dict with names as keys and callables as values.\r\n\r\n NOTE that when using custom scorers, each scorer should return a single\r\n value. Metric functions returning a list/array of values can be wrapped\r\n into multiple scorers that return one value each.\r\n\r\n See :ref:`multimetric_grid_search` for an example.\r\n\r\n If None, the estimator's score method is used.\r\n\r\n cv : int, cross-validation generator or an iterable, default=None\r\n Determines the cross-validation splitting strategy.\r\n Possible inputs for cv are:\r\n\r\n - None, to use the default 5-fold cross validation,\r\n - int, to specify the number of folds in a `(Stratified)KFold`,\r\n - :term:`CV splitter`,\r\n - An iterable yielding (train, test) splits as arrays of indices.\r\n\r\n For int/None inputs, if the estimator is a classifier and ``y`` is\r\n either binary or multiclass, :class:`StratifiedKFold` is used. In all\r\n other cases, :class:`KFold` is used.\r\n\r\n Refer :ref:`User Guide <cross_validation>` for the various\r\n cross-validation strategies that can be used here.\r\n\r\n .. versionchanged:: 0.22\r\n ``cv`` default value if None changed from 3-fold to 5-fold.\r\n\r\n n_jobs : int, default=None\r\n Number of jobs to run in parallel. Training the estimator and computing\r\n the score are parallelized over the cross-validation splits.\r\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\r\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\r\n for more details.\r\n\r\n verbose : int, default=0\r\n The verbosity level.\r\n\r\n fit_params : dict, default=None\r\n Parameters to pass to the fit method of the estimator.\r\n\r\n pre_dispatch : int or str, default='2*n_jobs'\r\n Controls the number of jobs that get dispatched during parallel\r\n execution. Reducing this number can be useful to avoid an\r\n explosion of memory consumption when more jobs get dispatched\r\n than CPUs can process. This parameter can be:\r\n\r\n - None, in which case all the jobs are immediately\r\n created and spawned. Use this for lightweight and\r\n fast-running jobs, to avoid delays due to on-demand\r\n spawning of the jobs\r\n\r\n - An int, giving the exact number of total jobs that are\r\n spawned\r\n\r\n - A str, giving an expression as a function of n_jobs,\r\n as in '2*n_jobs'\r\n\r\n return_train_score : bool, default=False\r\n Whether to include train scores.\r\n Computing training scores is used to get insights on how different\r\n parameter settings impact the overfitting/underfitting trade-off.\r\n However computing the scores on the training set can be computationally\r\n expensive and is not strictly required to select the parameters that\r\n yield the best generalization performance.\r\n\r\n .. versionadded:: 0.19\r\n\r\n .. versionchanged:: 0.21\r\n Default value was changed from ``True`` to ``False``\r\n\r\n return_estimator : bool, default=False\r\n Whether to return the estimators fitted on each split.\r\n\r\n .. 
versionadded:: 0.20\r\n\r\n error_score : 'raise' or numeric, default=np.nan\r\n Value to assign to the score if an error occurs in estimator fitting.\r\n If set to 'raise', the error is raised.\r\n If a numeric value is given, FitFailedWarning is raised.\r\n\r\n .. versionadded:: 0.20\r\n\r\n Returns\r\n -------\r\n scores : dict of float arrays of shape (n_splits,)\r\n Array of scores of the estimator for each run of the cross validation.\r\n\r\n A dict of arrays containing the score/time arrays for each scorer is\r\n returned. The possible keys for this ``dict`` are:\r\n\r\n ``test_score``\r\n The score array for test scores on each cv split.\r\n Suffix ``_score`` in ``test_score`` changes to a specific\r\n metric like ``test_r2`` or ``test_auc`` if there are\r\n multiple scoring metrics in the scoring parameter.\r\n ``train_score``\r\n The score array for train scores on each cv split.\r\n Suffix ``_score`` in ``train_score`` changes to a specific\r\n metric like ``train_r2`` or ``train_auc`` if there are\r\n multiple scoring metrics in the scoring parameter.\r\n This is available only if ``return_train_score`` parameter\r\n is ``True``.\r\n ``fit_time``\r\n The time for fitting the estimator on the train\r\n set for each cv split.\r\n ``score_time``\r\n The time for scoring the estimator on the test set for each\r\n cv split. (Note time for scoring on the train set is not\r\n included even if ``return_train_score`` is set to ``True``\r\n ``estimator``\r\n The estimator objects for each cv split.\r\n This is available only if ``return_estimator`` parameter\r\n is set to ``True``.\r\n\r\n Examples\r\n --------\r\n >>> from sklearn import datasets, linear_model\r\n >>> from sklearn.model_selection import cross_validate\r\n >>> from sklearn.metrics import make_scorer\r\n >>> from sklearn.metrics import confusion_matrix\r\n >>> from sklearn.svm import LinearSVC\r\n >>> diabetes = datasets.load_diabetes()\r\n >>> X = diabetes.data[:150]\r\n >>> y = diabetes.target[:150]\r\n >>> lasso = linear_model.Lasso()\r\n\r\n Single metric evaluation using ``cross_validate``\r\n\r\n >>> cv_results = cross_validate(lasso, X, y, cv=3)\r\n >>> sorted(cv_results.keys())\r\n ['fit_time', 'score_time', 'test_score']\r\n >>> cv_results['test_score']\r\n array([0.33150734, 0.08022311, 0.03531764])\r\n\r\n Multiple metric evaluation using ``cross_validate``\r\n (please refer the ``scoring`` parameter doc for more information)\r\n\r\n >>> scores = cross_validate(lasso, X, y, cv=3,\r\n ... scoring=('r2', 'neg_mean_squared_error'),\r\n ... return_train_score=True)\r\n >>> print(scores['test_neg_mean_squared_error'])\r\n [-3635.5... -3573.3... 
-6114.7...]\r\n >>> print(scores['train_r2'])\r\n [0.28010158 0.39088426 0.22784852]\r\n\r\n See Also\r\n ---------\r\n cross_val_score : Run cross-validation for single metric evaluation.\r\n\r\n cross_val_predict : Get predictions from each split of cross-validation for\r\n diagnostic purposes.\r\n\r\n sklearn.metrics.make_scorer : Make a scorer from a performance metric or\r\n loss function.\r\n\r\n \"\"\"\r\n X, y, groups = indexable(X, y, groups)\r\n\r\n cv = check_cv(cv, y, classifier=is_classifier(estimator))\r\n\r\n if callable(scoring):\r\n scorers = scoring\r\n elif scoring is None or isinstance(scoring, str):\r\n scorers = check_scoring(estimator, scoring)\r\n else:\r\n scorers = _check_multimetric_scoring(estimator, scoring)\r\n\r\n # We clone the estimator to make sure that all the folds are\r\n # independent, and that it is pickle-able.\r\n parallel = Parallel(n_jobs=n_jobs, verbose=verbose,\r\n pre_dispatch=pre_dispatch)\r\n results = parallel(\r\n delayed(_fit_and_score)(\r\n clone(estimator), X, y, scorers, train, test, verbose, None,\r\n fit_params, return_train_score=return_train_score,\r\n return_times=True, return_estimator=return_estimator,\r\n error_score=error_score)\r\n for train, test in cv.split(X, y, groups))\r\n\r\n # For callabe scoring, the return type is only know after calling. If the\r\n # return type is a dictionary, the error scores can now be inserted with\r\n # the correct key.\r\n if callable(scoring):\r\n _insert_error_scores(results, error_score)\r\n\r\n results = _aggregate_score_dicts(results)\r\n\r\n ret = {}\r\n ret['fit_time'] = results[\"fit_time\"]\r\n ret['score_time'] = results[\"score_time\"]\r\n\r\n if return_estimator:\r\n ret['estimator'] = results[\"estimator\"]\r\n\r\n test_scores_dict = _normalize_score_results(results[\"test_scores\"])\r\n if return_train_score:\r\n train_scores_dict = _normalize_score_results(results[\"train_scores\"])\r\n\r\n for name in test_scores_dict:\r\n ret['test_%s' % name] = test_scores_dict[name]\r\n if return_train_score:\r\n key = 'train_%s' % name\r\n ret[key] = train_scores_dict[name]\r\n\r\n return ret\r\n\r\n\r\ndef _insert_error_scores(results, error_score):\r\n \"\"\"Insert error in `results` by replacing them inplace with `error_score`.\r\n\r\n This only applies to multimetric scores because `_fit_and_score` will\r\n handle the single metric case.\r\n \"\"\"\r\n successful_score = None\r\n failed_indices = []\r\n for i, result in enumerate(results):\r\n if result[\"fit_failed\"]:\r\n failed_indices.append(i)\r\n elif successful_score is None:\r\n successful_score = result[\"test_scores\"]\r\n\r\n if successful_score is None:\r\n raise NotFittedError(\"All estimators failed to fit\")\r\n\r\n if isinstance(successful_score, dict):\r\n formatted_error = {name: error_score for name in successful_score}\r\n for i in failed_indices:\r\n results[i][\"test_scores\"] = formatted_error.copy()\r\n if \"train_scores\" in results[i]:\r\n results[i][\"train_scores\"] = formatted_error.copy()\r\n\r\n\r\ndef _normalize_score_results(scores, scaler_score_key='score'):\r\n \"\"\"Creates a scoring dictionary based on the type of `scores`\"\"\"\r\n if isinstance(scores[0], dict):\r\n # multimetric scoring\r\n return _aggregate_score_dicts(scores)\r\n # scaler\r\n return {scaler_score_key: scores}\r\n\r\n\r\n@_deprecate_positional_args\r\ndef cross_val_score(estimator, X, y=None, *, groups=None, scoring=None,\r\n cv=None, n_jobs=None, verbose=0, fit_params=None,\r\n pre_dispatch='2*n_jobs', error_score=np.nan):\r\n 
\"\"\"Evaluate a score by cross-validation\r\n\r\n Read more in the :ref:`User Guide <cross_validation>`.\r\n\r\n Parameters\r\n ----------\r\n estimator : estimator object implementing 'fit'\r\n The object to use to fit the data.\r\n\r\n X : array-like of shape (n_samples, n_features)\r\n The data to fit. Can be for example a list, or an array.\r\n\r\n y : array-like of shape (n_samples,) or (n_samples, n_outputs), \\\r\n default=None\r\n The target variable to try to predict in the case of\r\n supervised learning.\r\n\r\n groups : array-like of shape (n_samples,), default=None\r\n Group labels for the samples used while splitting the dataset into\r\n train/test set. Only used in conjunction with a \"Group\" :term:`cv`\r\n instance (e.g., :class:`GroupKFold`).\r\n\r\n scoring : str or callable, default=None\r\n A str (see model evaluation documentation) or\r\n a scorer callable object / function with signature\r\n ``scorer(estimator, X, y)`` which should return only\r\n a single value.\r\n\r\n Similar to :func:`cross_validate`\r\n but only a single metric is permitted.\r\n\r\n If None, the estimator's default scorer (if available) is used.\r\n\r\n cv : int, cross-validation generator or an iterable, default=None\r\n Determines the cross-validation splitting strategy.\r\n Possible inputs for cv are:\r\n\r\n - None, to use the default 5-fold cross validation,\r\n - int, to specify the number of folds in a `(Stratified)KFold`,\r\n - :term:`CV splitter`,\r\n - An iterable yielding (train, test) splits as arrays of indices.\r\n\r\n For int/None inputs, if the estimator is a classifier and ``y`` is\r\n either binary or multiclass, :class:`StratifiedKFold` is used. In all\r\n other cases, :class:`KFold` is used.\r\n\r\n Refer :ref:`User Guide <cross_validation>` for the various\r\n cross-validation strategies that can be used here.\r\n\r\n .. versionchanged:: 0.22\r\n ``cv`` default value if None changed from 3-fold to 5-fold.\r\n\r\n n_jobs : int, default=None\r\n Number of jobs to run in parallel. Training the estimator and computing\r\n the score are parallelized over the cross-validation splits.\r\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\r\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\r\n for more details.\r\n\r\n verbose : int, default=0\r\n The verbosity level.\r\n\r\n fit_params : dict, default=None\r\n Parameters to pass to the fit method of the estimator.\r\n\r\n pre_dispatch : int or str, default='2*n_jobs'\r\n Controls the number of jobs that get dispatched during parallel\r\n execution. Reducing this number can be useful to avoid an\r\n explosion of memory consumption when more jobs get dispatched\r\n than CPUs can process. This parameter can be:\r\n\r\n - None, in which case all the jobs are immediately\r\n created and spawned. Use this for lightweight and\r\n fast-running jobs, to avoid delays due to on-demand\r\n spawning of the jobs\r\n\r\n - An int, giving the exact number of total jobs that are\r\n spawned\r\n\r\n - A str, giving an expression as a function of n_jobs,\r\n as in '2*n_jobs'\r\n\r\n error_score : 'raise' or numeric, default=np.nan\r\n Value to assign to the score if an error occurs in estimator fitting.\r\n If set to 'raise', the error is raised.\r\n If a numeric value is given, FitFailedWarning is raised.\r\n\r\n .. 
versionadded:: 0.20\r\n\r\n Returns\r\n -------\r\n scores : ndarray of float of shape=(len(list(cv)),)\r\n Array of scores of the estimator for each run of the cross validation.\r\n\r\n Examples\r\n --------\r\n >>> from sklearn import datasets, linear_model\r\n >>> from sklearn.model_selection import cross_val_score\r\n >>> diabetes = datasets.load_diabetes()\r\n >>> X = diabetes.data[:150]\r\n >>> y = diabetes.target[:150]\r\n >>> lasso = linear_model.Lasso()\r\n >>> print(cross_val_score(lasso, X, y, cv=3))\r\n [0.33150734 0.08022311 0.03531764]\r\n\r\n See Also\r\n ---------\r\n cross_validate : To run cross-validation on multiple metrics and also to\r\n return train scores, fit times and score times.\r\n\r\n cross_val_predict : Get predictions from each split of cross-validation for\r\n diagnostic purposes.\r\n\r\n sklearn.metrics.make_scorer : Make a scorer from a performance metric or\r\n loss function.\r\n\r\n \"\"\"\r\n # To ensure multimetric format is not supported\r\n scorer = check_scoring(estimator, scoring=scoring)\r\n\r\n cv_results = cross_validate(estimator=estimator, X=X, y=y, groups=groups,\r\n scoring={'score': scorer}, cv=cv,\r\n n_jobs=n_jobs, verbose=verbose,\r\n fit_params=fit_params,\r\n pre_dispatch=pre_dispatch,\r\n error_score=error_score)\r\n return cv_results['test_score']\r\n\r\n\r\ndef _fit_and_score(estimator, X, y, scorer, train, test, verbose,\r\n parameters, fit_params, return_train_score=False,\r\n return_parameters=False, return_n_test_samples=False,\r\n return_times=False, return_estimator=False,\r\n split_progress=None, candidate_progress=None,\r\n error_score=np.nan):\r\n\r\n \"\"\"Fit estimator and compute scores for a given dataset split.\r\n\r\n Parameters\r\n ----------\r\n estimator : estimator object implementing 'fit'\r\n The object to use to fit the data.\r\n\r\n X : array-like of shape (n_samples, n_features)\r\n The data to fit.\r\n\r\n y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None\r\n The target variable to try to predict in the case of\r\n supervised learning.\r\n\r\n scorer : A single callable or dict mapping scorer name to the callable\r\n If it is a single callable, the return value for ``train_scores`` and\r\n ``test_scores`` is a single float.\r\n\r\n For a dict, it should be one mapping the scorer name to the scorer\r\n callable object / function.\r\n\r\n The callable object / fn should have signature\r\n ``scorer(estimator, X, y)``.\r\n\r\n train : array-like of shape (n_train_samples,)\r\n Indices of training samples.\r\n\r\n test : array-like of shape (n_test_samples,)\r\n Indices of test samples.\r\n\r\n verbose : int\r\n The verbosity level.\r\n\r\n error_score : 'raise' or numeric, default=np.nan\r\n Value to assign to the score if an error occurs in estimator fitting.\r\n If set to 'raise', the error is raised.\r\n If a numeric value is given, FitFailedWarning is raised.\r\n\r\n parameters : dict or None\r\n Parameters to be set on the estimator.\r\n\r\n fit_params : dict or None\r\n Parameters that will be passed to ``estimator.fit``.\r\n\r\n return_train_score : bool, default=False\r\n Compute and return score on training set.\r\n\r\n return_parameters : bool, default=False\r\n Return parameters that has been used for the estimator.\r\n\r\n split_progress : {list, tuple} of int, default=None\r\n A list or tuple of format (<current_split_id>, <total_num_of_splits>).\r\n\r\n candidate_progress : {list, tuple} of int, default=None\r\n A list or tuple of format\r\n (<current_candidate_id>, 
<total_number_of_candidates>).\r\n\r\n return_n_test_samples : bool, default=False\r\n Whether to return the ``n_test_samples``.\r\n\r\n return_times : bool, default=False\r\n Whether to return the fit/score times.\r\n\r\n return_estimator : bool, default=False\r\n Whether to return the fitted estimator.\r\n\r\n Returns\r\n -------\r\n result : dict with the following attributes\r\n train_scores : dict of scorer name -> float\r\n Score on training set (for all the scorers),\r\n returned only if `return_train_score` is `True`.\r\n test_scores : dict of scorer name -> float\r\n Score on testing set (for all the scorers).\r\n n_test_samples : int\r\n Number of test samples.\r\n fit_time : float\r\n Time spent for fitting in seconds.\r\n score_time : float\r\n Time spent for scoring in seconds.\r\n parameters : dict or None\r\n The parameters that have been evaluated.\r\n estimator : estimator object\r\n The fitted estimator.\r\n fit_failed : bool\r\n The estimator failed to fit.\r\n \"\"\"\r\n if not isinstance(error_score, numbers.Number) and error_score != 'raise':\r\n raise ValueError(\r\n \"error_score must be the string 'raise' or a numeric value. \"\r\n \"(Hint: if using 'raise', please make sure that it has been \"\r\n \"spelled correctly.)\"\r\n )\r\n\r\n progress_msg = \"\"\r\n if verbose > 2:\r\n if split_progress is not None:\r\n progress_msg = f\" {split_progress[0]+1}/{split_progress[1]}\"\r\n if candidate_progress and verbose > 9:\r\n progress_msg += (f\"; {candidate_progress[0]+1}/\"\r\n f\"{candidate_progress[1]}\")\r\n\r\n if verbose > 1:\r\n if parameters is None:\r\n params_msg = ''\r\n else:\r\n sorted_keys = sorted(parameters) # Ensure deterministic o/p\r\n params_msg = (', '.join(f'{k}={parameters[k]}'\r\n for k in sorted_keys))\r\n if verbose > 9:\r\n start_msg = f\"[CV{progress_msg}] START {params_msg}\"\r\n print(f\"{start_msg}{(80 - len(start_msg)) * '.'}\")\r\n\r\n # Adjust length of sample weights\r\n fit_params = fit_params if fit_params is not None else {}\r\n fit_params = _check_fit_params(X, fit_params, train)\r\n\r\n if parameters is not None:\r\n # clone after setting parameters in case any parameters\r\n # are estimators (like pipeline steps)\r\n # because pipeline doesn't clone steps in fit\r\n cloned_parameters = {}\r\n for k, v in parameters.items():\r\n cloned_parameters[k] = clone(v, safe=False)\r\n\r\n estimator = estimator.set_params(**cloned_parameters)\r\n\r\n start_time = time.time()\r\n\r\n X_train, y_train = _safe_split(estimator, X, y, train)\r\n X_test, y_test = _safe_split(estimator, X, y, test, train)\r\n\r\n result = {}\r\n try:\r\n if y_train is None:\r\n estimator.fit(X_train, **fit_params)\r\n else:\r\n estimator.fit(X_train, y_train, **fit_params)\r\n\r\n except Exception as e:\r\n # Note fit time as time until error\r\n fit_time = time.time() - start_time\r\n score_time = 0.0\r\n if error_score == 'raise':\r\n raise\r\n elif isinstance(error_score, numbers.Number):\r\n if isinstance(scorer, dict):\r\n test_scores = {name: error_score for name in scorer}\r\n if return_train_score:\r\n train_scores = test_scores.copy()\r\n else:\r\n test_scores = error_score\r\n if return_train_score:\r\n train_scores = error_score\r\n warnings.warn(\"Estimator fit failed. The score on this train-test\"\r\n \" partition for these parameters will be set to %f. 
\"\r\n \"Details: \\n%s\" %\r\n (error_score, format_exc()),\r\n FitFailedWarning)\r\n result[\"fit_failed\"] = True\r\n else:\r\n result[\"fit_failed\"] = False\r\n\r\n fit_time = time.time() - start_time\r\n test_scores = _score(estimator, X_test, y_test, scorer, error_score)\r\n score_time = time.time() - start_time - fit_time\r\n if return_train_score:\r\n train_scores = _score(\r\n estimator, X_train, y_train, scorer, error_score\r\n )\r\n\r\n if verbose > 1:\r\n total_time = score_time + fit_time\r\n end_msg = f\"[CV{progress_msg}] END \"\r\n result_msg = params_msg + (\";\" if params_msg else \"\")\r\n if verbose > 2 and isinstance(test_scores, dict):\r\n for scorer_name in sorted(test_scores):\r\n result_msg += f\" {scorer_name}: (\"\r\n if return_train_score:\r\n scorer_scores = train_scores[scorer_name]\r\n result_msg += f\"train={scorer_scores:.3f}, \"\r\n result_msg += f\"test={test_scores[scorer_name]:.3f})\"\r\n result_msg += f\" total time={logger.short_format_time(total_time)}\"\r\n\r\n # Right align the result_msg\r\n end_msg += \".\" * (80 - len(end_msg) - len(result_msg))\r\n end_msg += result_msg\r\n print(end_msg)\r\n\r\n result[\"test_scores\"] = test_scores\r\n if return_train_score:\r\n result[\"train_scores\"] = train_scores\r\n if return_n_test_samples:\r\n result[\"n_test_samples\"] = _num_samples(X_test)\r\n if return_times:\r\n result[\"fit_time\"] = fit_time\r\n result[\"score_time\"] = score_time\r\n if return_parameters:\r\n result[\"parameters\"] = parameters\r\n if return_estimator:\r\n result[\"estimator\"] = estimator\r\n return result\r\n\r\n\r\ndef _score(estimator, X_test, y_test, scorer, error_score=\"raise\"):\r\n \"\"\"Compute the score(s) of an estimator on a given test set.\r\n\r\n Will return a dict of floats if `scorer` is a dict, otherwise a single\r\n float is returned.\r\n \"\"\"\r\n if isinstance(scorer, dict):\r\n # will cache method calls if needed. scorer() returns a dict\r\n scorer = _MultimetricScorer(**scorer)\r\n\r\n try:\r\n if y_test is None:\r\n scores = scorer(estimator, X_test)\r\n else:\r\n scores = scorer(estimator, X_test, y_test)\r\n except Exception:\r\n if error_score == 'raise':\r\n raise\r\n else:\r\n if isinstance(scorer, _MultimetricScorer):\r\n scores = {name: error_score for name in scorer._scorers}\r\n else:\r\n scores = error_score\r\n warnings.warn(\r\n f\"Scoring failed. The score on this train-test partition for \"\r\n f\"these parameters will be set to {error_score}. Details: \\n\"\r\n f\"{format_exc()}\",\r\n UserWarning,\r\n )\r\n\r\n error_msg = (\r\n \"scoring must return a number, got %s (%s) instead. (scorer=%s)\"\r\n )\r\n if isinstance(scores, dict):\r\n for name, score in scores.items():\r\n if hasattr(score, 'item'):\r\n with suppress(ValueError):\r\n # e.g. unwrap memmapped scalars\r\n score = score.item()\r\n if not isinstance(score, numbers.Number):\r\n raise ValueError(error_msg % (score, type(score), name))\r\n scores[name] = score\r\n else: # scalar\r\n if hasattr(scores, 'item'):\r\n with suppress(ValueError):\r\n # e.g. 
unwrap memmapped scalars\r\n scores = scores.item()\r\n if not isinstance(scores, numbers.Number):\r\n raise ValueError(error_msg % (scores, type(scores), scorer))\r\n return scores\r\n\r\n\r\n@_deprecate_positional_args\r\ndef cross_val_predict(estimator, X, y=None, *, groups=None, cv=None,\r\n n_jobs=None, verbose=0, fit_params=None,\r\n pre_dispatch='2*n_jobs', method='predict'):\r\n \"\"\"Generate cross-validated estimates for each input data point\r\n\r\n The data is split according to the cv parameter. Each sample belongs\r\n to exactly one test set, and its prediction is computed with an\r\n estimator fitted on the corresponding training set.\r\n\r\n Passing these predictions into an evaluation metric may not be a valid\r\n way to measure generalization performance. Results can differ from\r\n :func:`cross_validate` and :func:`cross_val_score` unless all tests sets\r\n have equal size and the metric decomposes over samples.\r\n\r\n Read more in the :ref:`User Guide <cross_validation>`.\r\n\r\n Parameters\r\n ----------\r\n estimator : estimator object implementing 'fit' and 'predict'\r\n The object to use to fit the data.\r\n\r\n X : array-like of shape (n_samples, n_features)\r\n The data to fit. Can be, for example a list, or an array at least 2d.\r\n\r\n y : array-like of shape (n_samples,) or (n_samples, n_outputs), \\\r\n default=None\r\n The target variable to try to predict in the case of\r\n supervised learning.\r\n\r\n groups : array-like of shape (n_samples,), default=None\r\n Group labels for the samples used while splitting the dataset into\r\n train/test set. Only used in conjunction with a \"Group\" :term:`cv`\r\n instance (e.g., :class:`GroupKFold`).\r\n\r\n cv : int, cross-validation generator or an iterable, default=None\r\n Determines the cross-validation splitting strategy.\r\n Possible inputs for cv are:\r\n\r\n - None, to use the default 5-fold cross validation,\r\n - int, to specify the number of folds in a `(Stratified)KFold`,\r\n - :term:`CV splitter`,\r\n - An iterable yielding (train, test) splits as arrays of indices.\r\n\r\n For int/None inputs, if the estimator is a classifier and ``y`` is\r\n either binary or multiclass, :class:`StratifiedKFold` is used. In all\r\n other cases, :class:`KFold` is used.\r\n\r\n Refer :ref:`User Guide <cross_validation>` for the various\r\n cross-validation strategies that can be used here.\r\n\r\n .. versionchanged:: 0.22\r\n ``cv`` default value if None changed from 3-fold to 5-fold.\r\n\r\n n_jobs : int, default=None\r\n Number of jobs to run in parallel. Training the estimator and\r\n predicting are parallelized over the cross-validation splits.\r\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\r\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\r\n for more details.\r\n\r\n verbose : int, default=0\r\n The verbosity level.\r\n\r\n fit_params : dict, defualt=None\r\n Parameters to pass to the fit method of the estimator.\r\n\r\n pre_dispatch : int or str, default='2*n_jobs'\r\n Controls the number of jobs that get dispatched during parallel\r\n execution. Reducing this number can be useful to avoid an\r\n explosion of memory consumption when more jobs get dispatched\r\n than CPUs can process. This parameter can be:\r\n\r\n - None, in which case all the jobs are immediately\r\n created and spawned. 
Use this for lightweight and\r\n fast-running jobs, to avoid delays due to on-demand\r\n spawning of the jobs\r\n\r\n - An int, giving the exact number of total jobs that are\r\n spawned\r\n\r\n - A str, giving an expression as a function of n_jobs,\r\n as in '2*n_jobs'\r\n\r\n method : {'predict', 'predict_proba', 'predict_log_proba', \\\r\n 'decision_function'}, default='predict'\r\n The method to be invoked by `estimator`.\r\n\r\n Returns\r\n -------\r\n predictions : ndarray\r\n This is the result of calling `method`. Shape:\r\n\r\n - When `method` is 'predict' and in special case where `method` is\r\n 'decision_function' and the target is binary: (n_samples,)\r\n - When `method` is one of {'predict_proba', 'predict_log_proba',\r\n 'decision_function'} (unless special case above):\r\n (n_samples, n_classes)\r\n - If `estimator` is :term:`multioutput`, an extra dimension\r\n 'n_outputs' is added to the end of each shape above.\r\n\r\n See Also\r\n --------\r\n cross_val_score : Calculate score for each CV split.\r\n cross_validate : Calculate one or more scores and timings for each CV\r\n split.\r\n\r\n Notes\r\n -----\r\n In the case that one or more classes are absent in a training portion, a\r\n default score needs to be assigned to all instances for that class if\r\n ``method`` produces columns per class, as in {'decision_function',\r\n 'predict_proba', 'predict_log_proba'}. For ``predict_proba`` this value is\r\n 0. In order to ensure finite output, we approximate negative infinity by\r\n the minimum finite float value for the dtype in other cases.\r\n\r\n Examples\r\n --------\r\n >>> from sklearn import datasets, linear_model\r\n >>> from sklearn.model_selection import cross_val_predict\r\n >>> diabetes = datasets.load_diabetes()\r\n >>> X = diabetes.data[:150]\r\n >>> y = diabetes.target[:150]\r\n >>> lasso = linear_model.Lasso()\r\n >>> y_pred = cross_val_predict(lasso, X, y, cv=3)\r\n \"\"\"\r\n X, y, groups = indexable(X, y, groups)\r\n\r\n cv = check_cv(cv, y, classifier=is_classifier(estimator))\r\n splits = list(cv.split(X, y, groups))\r\n\r\n test_indices = np.concatenate([test for _, test in splits])\r\n if not _check_is_permutation(test_indices, _num_samples(X)):\r\n raise ValueError('cross_val_predict only works for partitions')\r\n\r\n # If classification methods produce multiple columns of output,\r\n # we need to manually encode classes to ensure consistent column ordering.\r\n encode = method in ['decision_function', 'predict_proba',\r\n 'predict_log_proba'] and y is not None\r\n if encode:\r\n y = np.asarray(y)\r\n if y.ndim == 1:\r\n le = LabelEncoder()\r\n y = le.fit_transform(y)\r\n elif y.ndim == 2:\r\n y_enc = np.zeros_like(y, dtype=int)\r\n for i_label in range(y.shape[1]):\r\n y_enc[:, i_label] = LabelEncoder().fit_transform(y[:, i_label])\r\n y = y_enc\r\n\r\n # We clone the estimator to make sure that all the folds are\r\n # independent, and that it is pickle-able.\r\n parallel = Parallel(n_jobs=n_jobs, verbose=verbose,\r\n pre_dispatch=pre_dispatch)\r\n predictions = parallel(delayed(_fit_and_predict)(\r\n clone(estimator), X, y, train, test, verbose, fit_params, method)\r\n for train, test in splits)\r\n\r\n inv_test_indices = np.empty(len(test_indices), dtype=int)\r\n inv_test_indices[test_indices] = np.arange(len(test_indices))\r\n\r\n if sp.issparse(predictions[0]):\r\n predictions = sp.vstack(predictions, format=predictions[0].format)\r\n elif encode and isinstance(predictions[0], list):\r\n # `predictions` is a list of method outputs from each 
fold.\r\n # If each of those is also a list, then treat this as a\r\n # multioutput-multiclass task. We need to separately concatenate\r\n # the method outputs for each label into an `n_labels` long list.\r\n n_labels = y.shape[1]\r\n concat_pred = []\r\n for i_label in range(n_labels):\r\n label_preds = np.concatenate([p[i_label] for p in predictions])\r\n concat_pred.append(label_preds)\r\n predictions = concat_pred\r\n else:\r\n predictions = np.concatenate(predictions)\r\n\r\n if isinstance(predictions, list):\r\n return [p[inv_test_indices] for p in predictions]\r\n else:\r\n return predictions[inv_test_indices]\r\n\r\n\r\ndef _fit_and_predict(estimator, X, y, train, test, verbose, fit_params,\r\n method):\r\n \"\"\"Fit estimator and predict values for a given dataset split.\r\n\r\n Read more in the :ref:`User Guide <cross_validation>`.\r\n\r\n Parameters\r\n ----------\r\n estimator : estimator object implementing 'fit' and 'predict'\r\n The object to use to fit the data.\r\n\r\n X : array-like of shape (n_samples, n_features)\r\n The data to fit.\r\n\r\n .. versionchanged:: 0.20\r\n X is only required to be an object with finite length or shape now\r\n\r\n y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None\r\n The target variable to try to predict in the case of\r\n supervised learning.\r\n\r\n train : array-like of shape (n_train_samples,)\r\n Indices of training samples.\r\n\r\n test : array-like of shape (n_test_samples,)\r\n Indices of test samples.\r\n\r\n verbose : int\r\n The verbosity level.\r\n\r\n fit_params : dict or None\r\n Parameters that will be passed to ``estimator.fit``.\r\n\r\n method : str\r\n Invokes the passed method name of the passed estimator.\r\n\r\n Returns\r\n -------\r\n predictions : sequence\r\n Result of calling 'estimator.method'\r\n \"\"\"\r\n # Adjust length of sample weights\r\n fit_params = fit_params if fit_params is not None else {}\r\n fit_params = _check_fit_params(X, fit_params, train)\r\n\r\n X_train, y_train = _safe_split(estimator, X, y, train)\r\n X_test, _ = _safe_split(estimator, X, y, test, train)\r\n\r\n if y_train is None:\r\n estimator.fit(X_train, **fit_params)\r\n else:\r\n estimator.fit(X_train, y_train, **fit_params)\r\n func = getattr(estimator, method)\r\n predictions = func(X_test)\r\n\r\n encode = method in ['decision_function', 'predict_proba',\r\n 'predict_log_proba'] and y is not None\r\n\r\n if encode:\r\n if isinstance(predictions, list):\r\n predictions = [_enforce_prediction_order(\r\n estimator.classes_[i_label], predictions[i_label],\r\n n_classes=len(set(y[:, i_label])), method=method)\r\n for i_label in range(len(predictions))]\r\n else:\r\n # A 2D y array should be a binary label indicator matrix\r\n n_classes = len(set(y)) if y.ndim == 1 else y.shape[1]\r\n predictions = _enforce_prediction_order(\r\n estimator.classes_, predictions, n_classes, method)\r\n return predictions\r\n\r\n\r\ndef _enforce_prediction_order(classes, predictions, n_classes, method):\r\n \"\"\"Ensure that prediction arrays have correct column order\r\n\r\n When doing cross-validation, if one or more classes are\r\n not present in the subset of data used for training,\r\n then the output prediction array might not have the same\r\n columns as other folds. 
Use the list of class names\r\n (assumed to be ints) to enforce the correct column order.\r\n\r\n Note that `classes` is the list of classes in this fold\r\n (a subset of the classes in the full training set)\r\n and `n_classes` is the number of classes in the full training set.\r\n \"\"\"\r\n if n_classes != len(classes):\r\n recommendation = (\r\n 'To fix this, use a cross-validation '\r\n 'technique resulting in properly '\r\n 'stratified folds')\r\n warnings.warn('Number of classes in training fold ({}) does '\r\n 'not match total number of classes ({}). '\r\n 'Results may not be appropriate for your use case. '\r\n '{}'.format(len(classes), n_classes, recommendation),\r\n RuntimeWarning)\r\n if method == 'decision_function':\r\n if (predictions.ndim == 2 and\r\n predictions.shape[1] != len(classes)):\r\n # This handles the case when the shape of predictions\r\n # does not match the number of classes used to train\r\n # it with. This case is found when sklearn.svm.SVC is\r\n # set to `decision_function_shape='ovo'`.\r\n raise ValueError('Output shape {} of {} does not match '\r\n 'number of classes ({}) in fold. '\r\n 'Irregular decision_function outputs '\r\n 'are not currently supported by '\r\n 'cross_val_predict'.format(\r\n predictions.shape, method, len(classes)))\r\n if len(classes) <= 2:\r\n # In this special case, `predictions` contains a 1D array.\r\n raise ValueError('Only {} class/es in training fold, but {} '\r\n 'in overall dataset. This '\r\n 'is not supported for decision_function '\r\n 'with imbalanced folds. {}'.format(\r\n len(classes), n_classes, recommendation))\r\n\r\n float_min = np.finfo(predictions.dtype).min\r\n default_values = {'decision_function': float_min,\r\n 'predict_log_proba': float_min,\r\n 'predict_proba': 0}\r\n predictions_for_all_classes = np.full((_num_samples(predictions),\r\n n_classes),\r\n default_values[method],\r\n dtype=predictions.dtype)\r\n predictions_for_all_classes[:, classes] = predictions\r\n predictions = predictions_for_all_classes\r\n return predictions\r\n\r\n\r\ndef _check_is_permutation(indices, n_samples):\r\n \"\"\"Check whether indices is a reordering of the array np.arange(n_samples)\r\n\r\n Parameters\r\n ----------\r\n indices : ndarray\r\n int array to test\r\n n_samples : int\r\n number of expected elements\r\n\r\n Returns\r\n -------\r\n is_partition : bool\r\n True iff sorted(indices) is np.arange(n)\r\n \"\"\"\r\n if len(indices) != n_samples:\r\n return False\r\n hit = np.zeros(n_samples, dtype=bool)\r\n hit[indices] = True\r\n if not np.all(hit):\r\n return False\r\n return True\r\n\r\n\r\n@_deprecate_positional_args\r\ndef permutation_test_score(estimator, X, y, *, groups=None, cv=None,\r\n n_permutations=100, n_jobs=None, random_state=0,\r\n verbose=0, scoring=None, fit_params=None):\r\n \"\"\"Evaluate the significance of a cross-validated score with permutations\r\n\r\n Permutes targets to generate 'randomized data' and compute the empirical\r\n p-value against the null hypothesis that features and targets are\r\n independent.\r\n\r\n The p-value represents the fraction of randomized data sets where the\r\n estimator performed as well or better than in the original data. 
A small\r\n p-value suggests that there is a real dependency between features and\r\n targets which has been used by the estimator to give good predictions.\r\n A large p-value may be due to lack of real dependency between features\r\n and targets or the estimator was not able to use the dependency to\r\n give good predictions.\r\n\r\n Read more in the :ref:`User Guide <permutation_test_score>`.\r\n\r\n Parameters\r\n ----------\r\n estimator : estimator object implementing 'fit'\r\n The object to use to fit the data.\r\n\r\n X : array-like of shape at least 2D\r\n The data to fit.\r\n\r\n y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None\r\n The target variable to try to predict in the case of\r\n supervised learning.\r\n\r\n groups : array-like of shape (n_samples,), default=None\r\n Labels to constrain permutation within groups, i.e. ``y`` values\r\n are permuted among samples with the same group identifier.\r\n When not specified, ``y`` values are permuted among all samples.\r\n\r\n When a grouped cross-validator is used, the group labels are\r\n also passed on to the ``split`` method of the cross-validator. The\r\n cross-validator uses them for grouping the samples while splitting\r\n the dataset into train/test set.\r\n\r\n scoring : str or callable, default=None\r\n A single str (see :ref:`scoring_parameter`) or a callable\r\n (see :ref:`scoring`) to evaluate the predictions on the test set.\r\n\r\n If None the estimator's score method is used.\r\n\r\n cv : int, cross-validation generator or an iterable, default=None\r\n Determines the cross-validation splitting strategy.\r\n Possible inputs for cv are:\r\n\r\n - None, to use the default 5-fold cross validation,\r\n - int, to specify the number of folds in a `(Stratified)KFold`,\r\n - :term:`CV splitter`,\r\n - An iterable yielding (train, test) splits as arrays of indices.\r\n\r\n For int/None inputs, if the estimator is a classifier and ``y`` is\r\n either binary or multiclass, :class:`StratifiedKFold` is used. In all\r\n other cases, :class:`KFold` is used.\r\n\r\n Refer :ref:`User Guide <cross_validation>` for the various\r\n cross-validation strategies that can be used here.\r\n\r\n .. versionchanged:: 0.22\r\n ``cv`` default value if None changed from 3-fold to 5-fold.\r\n\r\n n_permutations : int, default=100\r\n Number of times to permute ``y``.\r\n\r\n n_jobs : int, default=None\r\n Number of jobs to run in parallel. Training the estimator and computing\r\n the cross-validated score are parallelized over the permutations.\r\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\r\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\r\n for more details.\r\n\r\n random_state : int, RandomState instance or None, default=0\r\n Pass an int for reproducible output for permutation of\r\n ``y`` values among samples. See :term:`Glossary <random_state>`.\r\n\r\n verbose : int, default=0\r\n The verbosity level.\r\n\r\n fit_params : dict, default=None\r\n Parameters to pass to the fit method of the estimator.\r\n\r\n .. versionadded:: 0.24\r\n\r\n Returns\r\n -------\r\n score : float\r\n The true score without permuting targets.\r\n\r\n permutation_scores : array of shape (n_permutations,)\r\n The scores obtained for each permutations.\r\n\r\n pvalue : float\r\n The p-value, which approximates the probability that the score would\r\n be obtained by chance. 
This is calculated as:\r\n\r\n `(C + 1) / (n_permutations + 1)`\r\n\r\n Where C is the number of permutations whose score >= the true score.\r\n\r\n The best possible p-value is 1/(n_permutations + 1), the worst is 1.0.\r\n\r\n Notes\r\n -----\r\n This function implements Test 1 in:\r\n\r\n Ojala and Garriga. `Permutation Tests for Studying Classifier\r\n Performance\r\n <http://www.jmlr.org/papers/volume11/ojala10a/ojala10a.pdf>`_. The\r\n Journal of Machine Learning Research (2010) vol. 11\r\n\r\n \"\"\"\r\n X, y, groups = indexable(X, y, groups)\r\n\r\n cv = check_cv(cv, y, classifier=is_classifier(estimator))\r\n scorer = check_scoring(estimator, scoring=scoring)\r\n random_state = check_random_state(random_state)\r\n\r\n # We clone the estimator to make sure that all the folds are\r\n # independent, and that it is pickle-able.\r\n score = _permutation_test_score(clone(estimator), X, y, groups, cv, scorer,\r\n fit_params=fit_params)\r\n permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(\r\n delayed(_permutation_test_score)(\r\n clone(estimator), X, _shuffle(y, groups, random_state),\r\n groups, cv, scorer, fit_params=fit_params)\r\n for _ in range(n_permutations))\r\n permutation_scores = np.array(permutation_scores)\r\n pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)\r\n return score, permutation_scores, pvalue\r\n\r\n\r\ndef _permutation_test_score(estimator, X, y, groups, cv, scorer,\r\n fit_params):\r\n \"\"\"Auxiliary function for permutation_test_score\"\"\"\r\n # Adjust length of sample weights\r\n fit_params = fit_params if fit_params is not None else {}\r\n avg_score = []\r\n for train, test in cv.split(X, y, groups):\r\n X_train, y_train = _safe_split(estimator, X, y, train)\r\n X_test, y_test = _safe_split(estimator, X, y, test, train)\r\n fit_params = _check_fit_params(X, fit_params, train)\r\n estimator.fit(X_train, y_train, **fit_params)\r\n avg_score.append(scorer(estimator, X_test, y_test))\r\n return np.mean(avg_score)\r\n\r\n\r\ndef _shuffle(y, groups, random_state):\r\n \"\"\"Return a shuffled copy of y eventually shuffle among same groups.\"\"\"\r\n if groups is None:\r\n indices = random_state.permutation(len(y))\r\n else:\r\n indices = np.arange(len(groups))\r\n for group in np.unique(groups):\r\n this_mask = (groups == group)\r\n indices[this_mask] = random_state.permutation(indices[this_mask])\r\n return _safe_indexing(y, indices)\r\n\r\n\r\n@_deprecate_positional_args\r\ndef learning_curve(estimator, X, y, *, groups=None,\r\n train_sizes=np.linspace(0.1, 1.0, 5), cv=None,\r\n scoring=None, exploit_incremental_learning=False,\r\n n_jobs=None, pre_dispatch=\"all\", verbose=0, shuffle=False,\r\n random_state=None, error_score=np.nan, return_times=False,\r\n fit_params=None):\r\n \"\"\"Learning curve.\r\n\r\n Determines cross-validated training and test scores for different training\r\n set sizes.\r\n\r\n A cross-validation generator splits the whole dataset k times in training\r\n and test data. Subsets of the training set with varying sizes will be used\r\n to train the estimator and a score for each training subset size and the\r\n test set will be computed. 
Afterwards, the scores will be averaged over\r\n all k runs for each training subset size.\r\n\r\n Read more in the :ref:`User Guide <learning_curve>`.\r\n\r\n Parameters\r\n ----------\r\n estimator : object type that implements the \"fit\" and \"predict\" methods\r\n An object of that type which is cloned for each validation.\r\n\r\n X : array-like of shape (n_samples, n_features)\r\n Training vector, where n_samples is the number of samples and\r\n n_features is the number of features.\r\n\r\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\r\n Target relative to X for classification or regression;\r\n None for unsupervised learning.\r\n\r\n groups : array-like of shape (n_samples,), default=None\r\n Group labels for the samples used while splitting the dataset into\r\n train/test set. Only used in conjunction with a \"Group\" :term:`cv`\r\n instance (e.g., :class:`GroupKFold`).\r\n\r\n train_sizes : array-like of shape (n_ticks,), \\\r\n default=np.linspace(0.1, 1.0, 5)\r\n Relative or absolute numbers of training examples that will be used to\r\n generate the learning curve. If the dtype is float, it is regarded as a\r\n fraction of the maximum size of the training set (that is determined\r\n by the selected validation method), i.e. it has to be within (0, 1].\r\n Otherwise it is interpreted as absolute sizes of the training sets.\r\n Note that for classification the number of samples usually have to\r\n be big enough to contain at least one sample from each class.\r\n\r\n cv : int, cross-validation generator or an iterable, default=None\r\n Determines the cross-validation splitting strategy.\r\n Possible inputs for cv are:\r\n\r\n - None, to use the default 5-fold cross validation,\r\n - int, to specify the number of folds in a `(Stratified)KFold`,\r\n - :term:`CV splitter`,\r\n - An iterable yielding (train, test) splits as arrays of indices.\r\n\r\n For int/None inputs, if the estimator is a classifier and ``y`` is\r\n either binary or multiclass, :class:`StratifiedKFold` is used. In all\r\n other cases, :class:`KFold` is used.\r\n\r\n Refer :ref:`User Guide <cross_validation>` for the various\r\n cross-validation strategies that can be used here.\r\n\r\n .. versionchanged:: 0.22\r\n ``cv`` default value if None changed from 3-fold to 5-fold.\r\n\r\n scoring : str or callable, default=None\r\n A str (see model evaluation documentation) or\r\n a scorer callable object / function with signature\r\n ``scorer(estimator, X, y)``.\r\n\r\n exploit_incremental_learning : bool, default=False\r\n If the estimator supports incremental learning, this will be\r\n used to speed up fitting for different training set sizes.\r\n\r\n n_jobs : int, default=None\r\n Number of jobs to run in parallel. Training the estimator and computing\r\n the score are parallelized over the different training and test sets.\r\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\r\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\r\n for more details.\r\n\r\n pre_dispatch : int or str, default='all'\r\n Number of predispatched jobs for parallel execution (default is\r\n all). The option can reduce the allocated memory. 
The str can\r\n be an expression like '2*n_jobs'.\r\n\r\n verbose : int, default=0\r\n Controls the verbosity: the higher, the more messages.\r\n\r\n shuffle : bool, default=False\r\n Whether to shuffle training data before taking prefixes of it\r\n based on``train_sizes``.\r\n\r\n random_state : int, RandomState instance or None, default=None\r\n Used when ``shuffle`` is True. Pass an int for reproducible\r\n output across multiple function calls.\r\n See :term:`Glossary <random_state>`.\r\n\r\n error_score : 'raise' or numeric, default=np.nan\r\n Value to assign to the score if an error occurs in estimator fitting.\r\n If set to 'raise', the error is raised.\r\n If a numeric value is given, FitFailedWarning is raised.\r\n\r\n .. versionadded:: 0.20\r\n\r\n return_times : bool, default=False\r\n Whether to return the fit and score times.\r\n\r\n fit_params : dict, default=None\r\n Parameters to pass to the fit method of the estimator.\r\n\r\n .. versionadded:: 0.24\r\n\r\n Returns\r\n -------\r\n train_sizes_abs : array of shape (n_unique_ticks,)\r\n Numbers of training examples that has been used to generate the\r\n learning curve. Note that the number of ticks might be less\r\n than n_ticks because duplicate entries will be removed.\r\n\r\n train_scores : array of shape (n_ticks, n_cv_folds)\r\n Scores on training sets.\r\n\r\n test_scores : array of shape (n_ticks, n_cv_folds)\r\n Scores on test set.\r\n\r\n fit_times : array of shape (n_ticks, n_cv_folds)\r\n Times spent for fitting in seconds. Only present if ``return_times``\r\n is True.\r\n\r\n score_times : array of shape (n_ticks, n_cv_folds)\r\n Times spent for scoring in seconds. Only present if ``return_times``\r\n is True.\r\n\r\n Notes\r\n -----\r\n See :ref:`examples/model_selection/plot_learning_curve.py\r\n <sphx_glr_auto_examples_model_selection_plot_learning_curve.py>`\r\n \"\"\"\r\n if exploit_incremental_learning and not hasattr(estimator, \"partial_fit\"):\r\n raise ValueError(\"An estimator must support the partial_fit interface \"\r\n \"to exploit incremental learning\")\r\n X, y, groups = indexable(X, y, groups)\r\n\r\n cv = check_cv(cv, y, classifier=is_classifier(estimator))\r\n # Store it as list as we will be iterating over the list multiple times\r\n cv_iter = list(cv.split(X, y, groups))\r\n\r\n scorer = check_scoring(estimator, scoring=scoring)\r\n\r\n n_max_training_samples = len(cv_iter[0][0])\r\n # Because the lengths of folds can be significantly different, it is\r\n # not guaranteed that we use all of the available training data when we\r\n # use the first 'n_max_training_samples' samples.\r\n train_sizes_abs = _translate_train_sizes(train_sizes,\r\n n_max_training_samples)\r\n n_unique_ticks = train_sizes_abs.shape[0]\r\n if verbose > 0:\r\n print(\"[learning_curve] Training set sizes: \" + str(train_sizes_abs))\r\n\r\n parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,\r\n verbose=verbose)\r\n\r\n if shuffle:\r\n rng = check_random_state(random_state)\r\n cv_iter = ((rng.permutation(train), test) for train, test in cv_iter)\r\n\r\n if exploit_incremental_learning:\r\n classes = np.unique(y) if is_classifier(estimator) else None\r\n out = parallel(delayed(_incremental_fit_estimator)(\r\n clone(estimator), X, y, classes, train, test, train_sizes_abs,\r\n scorer, verbose, return_times, error_score=error_score,\r\n fit_params=fit_params)\r\n for train, test in cv_iter\r\n )\r\n out = np.asarray(out).transpose((2, 1, 0))\r\n else:\r\n train_test_proportions = []\r\n for train, test in 
cv_iter:\r\n for n_train_samples in train_sizes_abs:\r\n train_test_proportions.append((train[:n_train_samples], test))\r\n\r\n results = parallel(delayed(_fit_and_score)(\r\n clone(estimator), X, y, scorer, train, test, verbose,\r\n parameters=None, fit_params=fit_params, return_train_score=True,\r\n error_score=error_score, return_times=return_times)\r\n for train, test in train_test_proportions\r\n )\r\n results = _aggregate_score_dicts(results)\r\n train_scores = results[\"train_scores\"].reshape(-1, n_unique_ticks).T\r\n test_scores = results[\"test_scores\"].reshape(-1, n_unique_ticks).T\r\n out = [train_scores, test_scores]\r\n\r\n if return_times:\r\n fit_times = results[\"fit_time\"].reshape(-1, n_unique_ticks).T\r\n score_times = results[\"score_time\"].reshape(-1, n_unique_ticks).T\r\n out.extend([fit_times, score_times])\r\n\r\n ret = train_sizes_abs, out[0], out[1]\r\n\r\n if return_times:\r\n ret = ret + (out[2], out[3])\r\n\r\n return ret\r\n\r\n\r\ndef _translate_train_sizes(train_sizes, n_max_training_samples):\r\n \"\"\"Determine absolute sizes of training subsets and validate 'train_sizes'.\r\n\r\n Examples:\r\n _translate_train_sizes([0.5, 1.0], 10) -> [5, 10]\r\n _translate_train_sizes([5, 10], 10) -> [5, 10]\r\n\r\n Parameters\r\n ----------\r\n train_sizes : array-like of shape (n_ticks,)\r\n Numbers of training examples that will be used to generate the\r\n learning curve. If the dtype is float, it is regarded as a\r\n fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].\r\n\r\n n_max_training_samples : int\r\n Maximum number of training samples (upper bound of 'train_sizes').\r\n\r\n Returns\r\n -------\r\n train_sizes_abs : array of shape (n_unique_ticks,)\r\n Numbers of training examples that will be used to generate the\r\n learning curve. Note that the number of ticks might be less\r\n than n_ticks because duplicate entries will be removed.\r\n \"\"\"\r\n train_sizes_abs = np.asarray(train_sizes)\r\n n_ticks = train_sizes_abs.shape[0]\r\n n_min_required_samples = np.min(train_sizes_abs)\r\n n_max_required_samples = np.max(train_sizes_abs)\r\n if np.issubdtype(train_sizes_abs.dtype, np.floating):\r\n if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:\r\n raise ValueError(\"train_sizes has been interpreted as fractions \"\r\n \"of the maximum number of training samples and \"\r\n \"must be within (0, 1], but is within [%f, %f].\"\r\n % (n_min_required_samples,\r\n n_max_required_samples))\r\n train_sizes_abs = (train_sizes_abs * n_max_training_samples).astype(\r\n dtype=int, copy=False)\r\n train_sizes_abs = np.clip(train_sizes_abs, 1,\r\n n_max_training_samples)\r\n else:\r\n if (n_min_required_samples <= 0 or\r\n n_max_required_samples > n_max_training_samples):\r\n raise ValueError(\"train_sizes has been interpreted as absolute \"\r\n \"numbers of training samples and must be within \"\r\n \"(0, %d], but is within [%d, %d].\"\r\n % (n_max_training_samples,\r\n n_min_required_samples,\r\n n_max_required_samples))\r\n\r\n train_sizes_abs = np.unique(train_sizes_abs)\r\n if n_ticks > train_sizes_abs.shape[0]:\r\n warnings.warn(\"Removed duplicate entries from 'train_sizes'. 
Number \"\r\n \"of ticks will be less than the size of \"\r\n \"'train_sizes' %d instead of %d).\"\r\n % (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)\r\n\r\n return train_sizes_abs\r\n\r\n\r\ndef _incremental_fit_estimator(estimator, X, y, classes, train, test,\r\n train_sizes, scorer, verbose,\r\n return_times, error_score, fit_params):\r\n \"\"\"Train estimator on training subsets incrementally and compute scores.\"\"\"\r\n train_scores, test_scores, fit_times, score_times = [], [], [], []\r\n partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])\r\n if fit_params is None:\r\n fit_params = {}\r\n for n_train_samples, partial_train in partitions:\r\n train_subset = train[:n_train_samples]\r\n X_train, y_train = _safe_split(estimator, X, y, train_subset)\r\n X_partial_train, y_partial_train = _safe_split(estimator, X, y,\r\n partial_train)\r\n X_test, y_test = _safe_split(estimator, X, y, test, train_subset)\r\n start_fit = time.time()\r\n if y_partial_train is None:\r\n estimator.partial_fit(X_partial_train, classes=classes,\r\n **fit_params)\r\n else:\r\n estimator.partial_fit(X_partial_train, y_partial_train,\r\n classes=classes, **fit_params)\r\n fit_time = time.time() - start_fit\r\n fit_times.append(fit_time)\r\n\r\n start_score = time.time()\r\n\r\n test_scores.append(\r\n _score(estimator, X_test, y_test, scorer, error_score)\r\n )\r\n train_scores.append(\r\n _score(estimator, X_train, y_train, scorer, error_score)\r\n )\r\n\r\n score_time = time.time() - start_score\r\n score_times.append(score_time)\r\n\r\n ret = ((train_scores, test_scores, fit_times, score_times)\r\n if return_times else (train_scores, test_scores))\r\n\r\n return np.array(ret).T\r\n\r\n\r\n@_deprecate_positional_args\r\ndef validation_curve(estimator, X, y, *, param_name, param_range, groups=None,\r\n cv=None, scoring=None, n_jobs=None, pre_dispatch=\"all\",\r\n verbose=0, error_score=np.nan, fit_params=None):\r\n \"\"\"Validation curve.\r\n\r\n Determine training and test scores for varying parameter values.\r\n\r\n Compute scores for an estimator with different values of a specified\r\n parameter. This is similar to grid search with one parameter. However, this\r\n will also compute training scores and is merely a utility for plotting the\r\n results.\r\n\r\n Read more in the :ref:`User Guide <validation_curve>`.\r\n\r\n Parameters\r\n ----------\r\n estimator : object type that implements the \"fit\" and \"predict\" methods\r\n An object of that type which is cloned for each validation.\r\n\r\n X : array-like of shape (n_samples, n_features)\r\n Training vector, where n_samples is the number of samples and\r\n n_features is the number of features.\r\n\r\n y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None\r\n Target relative to X for classification or regression;\r\n None for unsupervised learning.\r\n\r\n param_name : str\r\n Name of the parameter that will be varied.\r\n\r\n param_range : array-like of shape (n_values,)\r\n The values of the parameter that will be evaluated.\r\n\r\n groups : array-like of shape (n_samples,), default=None\r\n Group labels for the samples used while splitting the dataset into\r\n train/test set. 
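As a quick orientation, a minimal call sketch of ``validation_curve`` (the estimator and parameter grid are illustrative assumptions only):

>>> from sklearn.datasets import load_iris
>>> from sklearn.svm import SVC
>>> from sklearn.model_selection import validation_curve
>>> X, y = load_iris(return_X_y=True)
>>> train_scores, test_scores = validation_curve(
...     SVC(kernel="linear"), X, y,
...     param_name="C", param_range=[0.1, 1.0, 10.0], cv=5)
>>> train_scores.shape, test_scores.shape   # (n_values, n_cv_folds)
((3, 5), (3, 5))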
Only used in conjunction with a \"Group\" :term:`cv`\r\n instance (e.g., :class:`GroupKFold`).\r\n\r\n cv : int, cross-validation generator or an iterable, default=None\r\n Determines the cross-validation splitting strategy.\r\n Possible inputs for cv are:\r\n\r\n - None, to use the default 5-fold cross validation,\r\n - int, to specify the number of folds in a `(Stratified)KFold`,\r\n - :term:`CV splitter`,\r\n - An iterable yielding (train, test) splits as arrays of indices.\r\n\r\n For int/None inputs, if the estimator is a classifier and ``y`` is\r\n either binary or multiclass, :class:`StratifiedKFold` is used. In all\r\n other cases, :class:`KFold` is used.\r\n\r\n Refer :ref:`User Guide <cross_validation>` for the various\r\n cross-validation strategies that can be used here.\r\n\r\n .. versionchanged:: 0.22\r\n ``cv`` default value if None changed from 3-fold to 5-fold.\r\n\r\n scoring : str or callable, default=None\r\n A str (see model evaluation documentation) or\r\n a scorer callable object / function with signature\r\n ``scorer(estimator, X, y)``.\r\n\r\n n_jobs : int, default=None\r\n Number of jobs to run in parallel. Training the estimator and computing\r\n the score are parallelized over the combinations of each parameter\r\n value and each cross-validation split.\r\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\r\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\r\n for more details.\r\n\r\n pre_dispatch : int or str, default='all'\r\n Number of predispatched jobs for parallel execution (default is\r\n all). The option can reduce the allocated memory. The str can\r\n be an expression like '2*n_jobs'.\r\n\r\n verbose : int, default=0\r\n Controls the verbosity: the higher, the more messages.\r\n\r\n fit_params : dict, default=None\r\n Parameters to pass to the fit method of the estimator.\r\n\r\n .. versionadded:: 0.24\r\n\r\n error_score : 'raise' or numeric, default=np.nan\r\n Value to assign to the score if an error occurs in estimator fitting.\r\n If set to 'raise', the error is raised.\r\n If a numeric value is given, FitFailedWarning is raised.\r\n\r\n .. 
versionadded:: 0.20\r\n\r\n Returns\r\n -------\r\n train_scores : array of shape (n_ticks, n_cv_folds)\r\n Scores on training sets.\r\n\r\n test_scores : array of shape (n_ticks, n_cv_folds)\r\n Scores on test set.\r\n\r\n Notes\r\n -----\r\n See :ref:`sphx_glr_auto_examples_model_selection_plot_validation_curve.py`\r\n\r\n \"\"\"\r\n X, y, groups = indexable(X, y, groups)\r\n\r\n cv = check_cv(cv, y, classifier=is_classifier(estimator))\r\n scorer = check_scoring(estimator, scoring=scoring)\r\n\r\n parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,\r\n verbose=verbose)\r\n results = parallel(delayed(_fit_and_score)(\r\n clone(estimator), X, y, scorer, train, test, verbose,\r\n parameters={param_name: v}, fit_params=fit_params,\r\n return_train_score=True, error_score=error_score)\r\n\r\n # NOTE do not change order of iteration to allow one time cv splitters\r\n for train, test in cv.split(X, y, groups) for v in param_range)\r\n n_params = len(param_range)\r\n\r\n results = _aggregate_score_dicts(results)\r\n train_scores = results[\"train_scores\"].reshape(-1, n_params).T\r\n test_scores = results[\"test_scores\"].reshape(-1, n_params).T\r\n\r\n return train_scores, test_scores\r\n\r\n\r\ndef _aggregate_score_dicts(scores):\r\n \"\"\"Aggregate the list of dict to dict of np ndarray\r\n\r\n The aggregated output of _aggregate_score_dicts will be a list of dict\r\n of form [{'prec': 0.1, 'acc':1.0}, {'prec': 0.1, 'acc':1.0}, ...]\r\n Convert it to a dict of array {'prec': np.array([0.1 ...]), ...}\r\n\r\n Parameters\r\n ----------\r\n\r\n scores : list of dict\r\n List of dicts of the scores for all scorers. This is a flat list,\r\n assumed originally to be of row major order.\r\n\r\n Example\r\n -------\r\n\r\n >>> scores = [{'a': 1, 'b':10}, {'a': 2, 'b':2}, {'a': 3, 'b':3},\r\n ... 
{'a': 10, 'b': 10}] # doctest: +SKIP\r\n >>> _aggregate_score_dicts(scores) # doctest: +SKIP\r\n {'a': array([1, 2, 3, 10]),\r\n 'b': array([10, 2, 3, 10])}\r\n \"\"\"\r\n return {\r\n key: np.asarray([score[key] for score in scores])\r\n if isinstance(scores[0][key], numbers.Number)\r\n else [score[key] for score in scores]\r\n for key in scores[0]\r\n }\r\n", "from datetime import timedelta\r\n\r\nimport numpy as np\r\nimport pytest\r\n\r\nfrom pandas import Categorical, DataFrame, NaT, Period, Series, Timedelta, Timestamp\r\nimport pandas._testing as tm\r\n\r\n\r\nclass TestSeriesFillNA:\r\n def test_fillna_pytimedelta(self):\r\n # GH#8209\r\n ser = Series([np.nan, Timedelta(\"1 days\")], index=[\"A\", \"B\"])\r\n\r\n result = ser.fillna(timedelta(1))\r\n expected = Series(Timedelta(\"1 days\"), index=[\"A\", \"B\"])\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_fillna_period(self):\r\n # GH#13737\r\n ser = Series([Period(\"2011-01\", freq=\"M\"), Period(\"NaT\", freq=\"M\")])\r\n\r\n res = ser.fillna(Period(\"2012-01\", freq=\"M\"))\r\n exp = Series([Period(\"2011-01\", freq=\"M\"), Period(\"2012-01\", freq=\"M\")])\r\n tm.assert_series_equal(res, exp)\r\n assert res.dtype == \"Period[M]\"\r\n\r\n def test_fillna_dt64_timestamp(self):\r\n ser = Series(\r\n [\r\n Timestamp(\"20130101\"),\r\n Timestamp(\"20130101\"),\r\n Timestamp(\"20130102\"),\r\n Timestamp(\"20130103 9:01:01\"),\r\n ]\r\n )\r\n ser[2] = np.nan\r\n\r\n # reg fillna\r\n result = ser.fillna(Timestamp(\"20130104\"))\r\n expected = Series(\r\n [\r\n Timestamp(\"20130101\"),\r\n Timestamp(\"20130101\"),\r\n Timestamp(\"20130104\"),\r\n Timestamp(\"20130103 9:01:01\"),\r\n ]\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = ser.fillna(NaT)\r\n expected = ser\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_fillna_dt64_non_nao(self):\r\n # GH#27419\r\n ser = Series([Timestamp(\"2010-01-01\"), NaT, Timestamp(\"2000-01-01\")])\r\n val = np.datetime64(\"1975-04-05\", \"ms\")\r\n\r\n result = ser.fillna(val)\r\n expected = Series(\r\n [Timestamp(\"2010-01-01\"), Timestamp(\"1975-04-05\"), Timestamp(\"2000-01-01\")]\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_fillna_numeric_inplace(self):\r\n x = Series([np.nan, 1.0, np.nan, 3.0, np.nan], [\"z\", \"a\", \"b\", \"c\", \"d\"])\r\n y = x.copy()\r\n\r\n return_value = y.fillna(value=0, inplace=True)\r\n assert return_value is None\r\n\r\n expected = x.fillna(value=0)\r\n tm.assert_series_equal(y, expected)\r\n\r\n # ---------------------------------------------------------------\r\n # CategoricalDtype\r\n\r\n @pytest.mark.parametrize(\r\n \"fill_value, expected_output\",\r\n [\r\n (\"a\", [\"a\", \"a\", \"b\", \"a\", \"a\"]),\r\n ({1: \"a\", 3: \"b\", 4: \"b\"}, [\"a\", \"a\", \"b\", \"b\", \"b\"]),\r\n ({1: \"a\"}, [\"a\", \"a\", \"b\", np.nan, np.nan]),\r\n ({1: \"a\", 3: \"b\"}, [\"a\", \"a\", \"b\", \"b\", np.nan]),\r\n (Series(\"a\"), [\"a\", np.nan, \"b\", np.nan, np.nan]),\r\n (Series(\"a\", index=[1]), [\"a\", \"a\", \"b\", np.nan, np.nan]),\r\n (Series({1: \"a\", 3: \"b\"}), [\"a\", \"a\", \"b\", \"b\", np.nan]),\r\n (Series([\"a\", \"b\"], index=[3, 4]), [\"a\", np.nan, \"b\", \"a\", \"b\"]),\r\n ],\r\n )\r\n def test_fillna_categorical(self, fill_value, expected_output):\r\n # GH#17033\r\n # Test fillna for a Categorical series\r\n data = [\"a\", np.nan, \"b\", np.nan, np.nan]\r\n ser = Series(Categorical(data, categories=[\"a\", \"b\"]))\r\n exp = Series(Categorical(expected_output, categories=[\"a\", 
\"b\"]))\r\n result = ser.fillna(fill_value)\r\n tm.assert_series_equal(result, exp)\r\n\r\n @pytest.mark.parametrize(\r\n \"fill_value, expected_output\",\r\n [\r\n (Series([\"a\", \"b\", \"c\", \"d\", \"e\"]), [\"a\", \"b\", \"b\", \"d\", \"e\"]),\r\n (Series([\"b\", \"d\", \"a\", \"d\", \"a\"]), [\"a\", \"d\", \"b\", \"d\", \"a\"]),\r\n (\r\n Series(\r\n Categorical(\r\n [\"b\", \"d\", \"a\", \"d\", \"a\"], categories=[\"b\", \"c\", \"d\", \"e\", \"a\"]\r\n )\r\n ),\r\n [\"a\", \"d\", \"b\", \"d\", \"a\"],\r\n ),\r\n ],\r\n )\r\n def test_fillna_categorical_with_new_categories(self, fill_value, expected_output):\r\n # GH#26215\r\n data = [\"a\", np.nan, \"b\", np.nan, np.nan]\r\n ser = Series(Categorical(data, categories=[\"a\", \"b\", \"c\", \"d\", \"e\"]))\r\n exp = Series(Categorical(expected_output, categories=[\"a\", \"b\", \"c\", \"d\", \"e\"]))\r\n result = ser.fillna(fill_value)\r\n tm.assert_series_equal(result, exp)\r\n\r\n def test_fillna_categorical_raises(self):\r\n data = [\"a\", np.nan, \"b\", np.nan, np.nan]\r\n ser = Series(Categorical(data, categories=[\"a\", \"b\"]))\r\n\r\n with pytest.raises(ValueError, match=\"fill value must be in categories\"):\r\n ser.fillna(\"d\")\r\n\r\n with pytest.raises(ValueError, match=\"fill value must be in categories\"):\r\n ser.fillna(Series(\"d\"))\r\n\r\n with pytest.raises(ValueError, match=\"fill value must be in categories\"):\r\n ser.fillna({1: \"d\", 3: \"a\"})\r\n\r\n msg = '\"value\" parameter must be a scalar or dict, but you passed a \"list\"'\r\n with pytest.raises(TypeError, match=msg):\r\n ser.fillna([\"a\", \"b\"])\r\n\r\n msg = '\"value\" parameter must be a scalar or dict, but you passed a \"tuple\"'\r\n with pytest.raises(TypeError, match=msg):\r\n ser.fillna((\"a\", \"b\"))\r\n\r\n msg = (\r\n '\"value\" parameter must be a scalar, dict '\r\n 'or Series, but you passed a \"DataFrame\"'\r\n )\r\n with pytest.raises(TypeError, match=msg):\r\n ser.fillna(DataFrame({1: [\"a\"], 3: [\"b\"]}))\r\n\r\n # ---------------------------------------------------------------\r\n # Invalid Usages\r\n\r\n def test_fillna_listlike_invalid(self):\r\n ser = Series(np.random.randint(-100, 100, 50))\r\n msg = '\"value\" parameter must be a scalar or dict, but you passed a \"list\"'\r\n with pytest.raises(TypeError, match=msg):\r\n ser.fillna([1, 2])\r\n\r\n msg = '\"value\" parameter must be a scalar or dict, but you passed a \"tuple\"'\r\n with pytest.raises(TypeError, match=msg):\r\n ser.fillna((1, 2))\r\n\r\n def test_fillna_method_and_limit_invalid(self):\r\n\r\n # related GH#9217, make sure limit is an int and greater than 0\r\n ser = Series([1, 2, 3, None])\r\n msg = (\r\n r\"Cannot specify both 'value' and 'method'\\.|\"\r\n r\"Limit must be greater than 0|\"\r\n \"Limit must be an integer\"\r\n )\r\n for limit in [-1, 0, 1.0, 2.0]:\r\n for method in [\"backfill\", \"bfill\", \"pad\", \"ffill\", None]:\r\n with pytest.raises(ValueError, match=msg):\r\n ser.fillna(1, limit=limit, method=method)\r\n", "\"\"\"Sparse Dtype\"\"\"\r\n\r\nimport re\r\nfrom typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type\r\nimport warnings\r\n\r\nimport numpy as np\r\n\r\nfrom pandas._typing import Dtype, DtypeObj\r\nfrom pandas.errors import PerformanceWarning\r\n\r\nfrom pandas.core.dtypes.base import ExtensionDtype, register_extension_dtype\r\nfrom pandas.core.dtypes.cast import astype_nansafe\r\nfrom pandas.core.dtypes.common import (\r\n is_bool_dtype,\r\n is_extension_array_dtype,\r\n is_object_dtype,\r\n is_scalar,\r\n 
is_string_dtype,\r\n pandas_dtype,\r\n)\r\nfrom pandas.core.dtypes.missing import isna, na_value_for_dtype\r\n\r\nif TYPE_CHECKING:\r\n from pandas.core.arrays.sparse.array import SparseArray # noqa: F401\r\n\r\n\r\n@register_extension_dtype\r\nclass SparseDtype(ExtensionDtype):\r\n \"\"\"\r\n Dtype for data stored in :class:`SparseArray`.\r\n\r\n This dtype implements the pandas ExtensionDtype interface.\r\n\r\n .. versionadded:: 0.24.0\r\n\r\n Parameters\r\n ----------\r\n dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64\r\n The dtype of the underlying array storing the non-fill value values.\r\n fill_value : scalar, optional\r\n The scalar value not stored in the SparseArray. By default, this\r\n depends on `dtype`.\r\n\r\n =========== ==========\r\n dtype na_value\r\n =========== ==========\r\n float ``np.nan``\r\n int ``0``\r\n bool ``False``\r\n datetime64 ``pd.NaT``\r\n timedelta64 ``pd.NaT``\r\n =========== ==========\r\n\r\n The default value may be overridden by specifying a `fill_value`.\r\n\r\n Attributes\r\n ----------\r\n None\r\n\r\n Methods\r\n -------\r\n None\r\n \"\"\"\r\n\r\n # We include `_is_na_fill_value` in the metadata to avoid hash collisions\r\n # between SparseDtype(float, 0.0) and SparseDtype(float, nan).\r\n # Without is_na_fill_value in the comparison, those would be equal since\r\n # hash(nan) is (sometimes?) 0.\r\n _metadata = (\"_dtype\", \"_fill_value\", \"_is_na_fill_value\")\r\n\r\n def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None):\r\n\r\n if isinstance(dtype, type(self)):\r\n if fill_value is None:\r\n fill_value = dtype.fill_value\r\n dtype = dtype.subtype\r\n\r\n dtype = pandas_dtype(dtype)\r\n if is_string_dtype(dtype):\r\n dtype = np.dtype(\"object\")\r\n\r\n if fill_value is None:\r\n fill_value = na_value_for_dtype(dtype)\r\n\r\n if not is_scalar(fill_value):\r\n raise ValueError(f\"fill_value must be a scalar. Got {fill_value} instead\")\r\n self._dtype = dtype\r\n self._fill_value = fill_value\r\n\r\n def __hash__(self):\r\n # Python3 doesn't inherit __hash__ when a base class overrides\r\n # __eq__, so we explicitly do it here.\r\n return super().__hash__()\r\n\r\n def __eq__(self, other: Any) -> bool:\r\n # We have to override __eq__ to handle NA values in _metadata.\r\n # The base class does simple == checks, which fail for NA.\r\n if isinstance(other, str):\r\n try:\r\n other = self.construct_from_string(other)\r\n except TypeError:\r\n return False\r\n\r\n if isinstance(other, type(self)):\r\n subtype = self.subtype == other.subtype\r\n if self._is_na_fill_value:\r\n # this case is complicated by two things:\r\n # SparseDtype(float, float(nan)) == SparseDtype(float, np.nan)\r\n # SparseDtype(float, np.nan) != SparseDtype(float, pd.NaT)\r\n # i.e. we want to treat any floating-point NaN as equal, but\r\n # not a floating-point NaN and a datetime NaT.\r\n fill_value = (\r\n other._is_na_fill_value\r\n and isinstance(self.fill_value, type(other.fill_value))\r\n or isinstance(other.fill_value, type(self.fill_value))\r\n )\r\n else:\r\n fill_value = self.fill_value == other.fill_value\r\n\r\n return subtype and fill_value\r\n return False\r\n\r\n @property\r\n def fill_value(self):\r\n \"\"\"\r\n The fill value of the array.\r\n\r\n Converting the SparseArray to a dense ndarray will fill the\r\n array with this value.\r\n\r\n .. warning::\r\n\r\n It's possible to end up with a SparseArray that has ``fill_value``\r\n values in ``sp_values``. 
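A short sketch (illustrative, not part of the upstream docstring) of how the default ``fill_value`` tracks the subtype, and how an explicit one overrides it:

>>> import pandas as pd
>>> pd.SparseDtype("int64").fill_value
0
>>> pd.SparseDtype("float64").fill_value
nan
>>> pd.SparseDtype("float64", fill_value=0.0)
Sparse[float64, 0.0]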
This can occur, for example, when setting\r\n ``SparseArray.fill_value`` directly.\r\n \"\"\"\r\n return self._fill_value\r\n\r\n @property\r\n def _is_na_fill_value(self):\r\n return isna(self.fill_value)\r\n\r\n @property\r\n def _is_numeric(self) -> bool:\r\n return not is_object_dtype(self.subtype)\r\n\r\n @property\r\n def _is_boolean(self) -> bool:\r\n return is_bool_dtype(self.subtype)\r\n\r\n @property\r\n def kind(self):\r\n \"\"\"\r\n The sparse kind. Either 'integer', or 'block'.\r\n \"\"\"\r\n return self.subtype.kind\r\n\r\n @property\r\n def type(self):\r\n return self.subtype.type\r\n\r\n @property\r\n def subtype(self):\r\n return self._dtype\r\n\r\n @property\r\n def name(self):\r\n return f\"Sparse[{self.subtype.name}, {repr(self.fill_value)}]\"\r\n\r\n def __repr__(self) -> str:\r\n return self.name\r\n\r\n @classmethod\r\n def construct_array_type(cls) -> Type[\"SparseArray\"]:\r\n \"\"\"\r\n Return the array type associated with this dtype.\r\n\r\n Returns\r\n -------\r\n type\r\n \"\"\"\r\n from pandas.core.arrays.sparse.array import SparseArray # noqa: F811\r\n\r\n return SparseArray\r\n\r\n @classmethod\r\n def construct_from_string(cls, string: str) -> \"SparseDtype\":\r\n \"\"\"\r\n Construct a SparseDtype from a string form.\r\n\r\n Parameters\r\n ----------\r\n string : str\r\n Can take the following forms.\r\n\r\n string dtype\r\n ================ ============================\r\n 'int' SparseDtype[np.int64, 0]\r\n 'Sparse' SparseDtype[np.float64, nan]\r\n 'Sparse[int]' SparseDtype[np.int64, 0]\r\n 'Sparse[int, 0]' SparseDtype[np.int64, 0]\r\n ================ ============================\r\n\r\n It is not possible to specify non-default fill values\r\n with a string. An argument like ``'Sparse[int, 1]'``\r\n will raise a ``TypeError`` because the default fill value\r\n for integers is 0.\r\n\r\n Returns\r\n -------\r\n SparseDtype\r\n \"\"\"\r\n if not isinstance(string, str):\r\n raise TypeError(\r\n f\"'construct_from_string' expects a string, got {type(string)}\"\r\n )\r\n msg = f\"Cannot construct a 'SparseDtype' from '{string}'\"\r\n if string.startswith(\"Sparse\"):\r\n try:\r\n sub_type, has_fill_value = cls._parse_subtype(string)\r\n except ValueError as err:\r\n raise TypeError(msg) from err\r\n else:\r\n result = SparseDtype(sub_type)\r\n msg = (\r\n f\"Cannot construct a 'SparseDtype' from '{string}'.\\n\\nIt \"\r\n \"looks like the fill_value in the string is not \"\r\n \"the default for the dtype. Non-default fill_values \"\r\n \"are not supported. 
Use the 'SparseDtype()' \"\r\n \"constructor instead.\"\r\n )\r\n if has_fill_value and str(result) != string:\r\n raise TypeError(msg)\r\n return result\r\n else:\r\n raise TypeError(msg)\r\n\r\n @staticmethod\r\n def _parse_subtype(dtype: str) -> Tuple[str, bool]:\r\n \"\"\"\r\n Parse a string to get the subtype\r\n\r\n Parameters\r\n ----------\r\n dtype : str\r\n A string like\r\n\r\n * Sparse[subtype]\r\n * Sparse[subtype, fill_value]\r\n\r\n Returns\r\n -------\r\n subtype : str\r\n\r\n Raises\r\n ------\r\n ValueError\r\n When the subtype cannot be extracted.\r\n \"\"\"\r\n xpr = re.compile(r\"Sparse\\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\\]$\")\r\n m = xpr.match(dtype)\r\n has_fill_value = False\r\n if m:\r\n subtype = m.groupdict()[\"subtype\"]\r\n has_fill_value = bool(m.groupdict()[\"fill_value\"])\r\n elif dtype == \"Sparse\":\r\n subtype = \"float64\"\r\n else:\r\n raise ValueError(f\"Cannot parse {dtype}\")\r\n return subtype, has_fill_value\r\n\r\n @classmethod\r\n def is_dtype(cls, dtype: object) -> bool:\r\n dtype = getattr(dtype, \"dtype\", dtype)\r\n if isinstance(dtype, str) and dtype.startswith(\"Sparse\"):\r\n sub_type, _ = cls._parse_subtype(dtype)\r\n dtype = np.dtype(sub_type)\r\n elif isinstance(dtype, cls):\r\n return True\r\n return isinstance(dtype, np.dtype) or dtype == \"Sparse\"\r\n\r\n def update_dtype(self, dtype):\r\n \"\"\"\r\n Convert the SparseDtype to a new dtype.\r\n\r\n This takes care of converting the ``fill_value``.\r\n\r\n Parameters\r\n ----------\r\n dtype : Union[str, numpy.dtype, SparseDtype]\r\n The new dtype to use.\r\n\r\n * For a SparseDtype, it is simply returned\r\n * For a NumPy dtype (or str), the current fill value\r\n is converted to the new dtype, and a SparseDtype\r\n with `dtype` and the new fill value is returned.\r\n\r\n Returns\r\n -------\r\n SparseDtype\r\n A new SparseDtype with the correct `dtype` and fill value\r\n for that `dtype`.\r\n\r\n Raises\r\n ------\r\n ValueError\r\n When the current fill value cannot be converted to the\r\n new `dtype` (e.g. trying to convert ``np.nan`` to an\r\n integer dtype).\r\n\r\n\r\n Examples\r\n --------\r\n >>> SparseDtype(int, 0).update_dtype(float)\r\n Sparse[float64, 0.0]\r\n\r\n >>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan))\r\n Sparse[float64, nan]\r\n \"\"\"\r\n cls = type(self)\r\n dtype = pandas_dtype(dtype)\r\n\r\n if not isinstance(dtype, cls):\r\n if is_extension_array_dtype(dtype):\r\n raise TypeError(\"sparse arrays of extension dtypes not supported\")\r\n\r\n fill_value = astype_nansafe(np.array(self.fill_value), dtype).item()\r\n dtype = cls(dtype, fill_value=fill_value)\r\n\r\n return dtype\r\n\r\n @property\r\n def _subtype_with_str(self):\r\n \"\"\"\r\n Whether the SparseDtype's subtype should be considered ``str``.\r\n\r\n Typically, pandas will store string data in an object-dtype array.\r\n When converting values to a dtype, e.g. 
in ``.astype``, we need to\r\n be more specific, we need the actual underlying type.\r\n\r\n Returns\r\n -------\r\n >>> SparseDtype(int, 1)._subtype_with_str\r\n dtype('int64')\r\n\r\n >>> SparseDtype(object, 1)._subtype_with_str\r\n dtype('O')\r\n\r\n >>> dtype = SparseDtype(str, '')\r\n >>> dtype.subtype\r\n dtype('O')\r\n\r\n >>> dtype._subtype_with_str\r\n <class 'str'>\r\n \"\"\"\r\n if isinstance(self.fill_value, str):\r\n return type(self.fill_value)\r\n return self.subtype\r\n\r\n def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]:\r\n # TODO for now only handle SparseDtypes and numpy dtypes => extend\r\n # with other compatibtle extension dtypes\r\n if any(\r\n isinstance(x, ExtensionDtype) and not isinstance(x, SparseDtype)\r\n for x in dtypes\r\n ):\r\n return None\r\n\r\n fill_values = [x.fill_value for x in dtypes if isinstance(x, SparseDtype)]\r\n fill_value = fill_values[0]\r\n\r\n # np.nan isn't a singleton, so we may end up with multiple\r\n # NaNs here, so we ignore tha all NA case too.\r\n if not (len(set(fill_values)) == 1 or isna(fill_values).all()):\r\n warnings.warn(\r\n \"Concatenating sparse arrays with multiple fill \"\r\n f\"values: '{fill_values}'. Picking the first and \"\r\n \"converting the rest.\",\r\n PerformanceWarning,\r\n stacklevel=6,\r\n )\r\n\r\n np_dtypes = [x.subtype if isinstance(x, SparseDtype) else x for x in dtypes]\r\n return SparseDtype(np.find_common_type(np_dtypes, []), fill_value=fill_value)\r\n", "import numpy as np\r\n\r\nfrom pandas import Series, Timestamp, date_range\r\nimport pandas._testing as tm\r\nfrom pandas.api.types import is_scalar\r\n\r\n\r\nclass TestSeriesSearchSorted:\r\n def test_searchsorted(self):\r\n ser = Series([1, 2, 3])\r\n\r\n result = ser.searchsorted(1, side=\"left\")\r\n assert is_scalar(result)\r\n assert result == 0\r\n\r\n result = ser.searchsorted(1, side=\"right\")\r\n assert is_scalar(result)\r\n assert result == 1\r\n\r\n def test_searchsorted_numeric_dtypes_scalar(self):\r\n ser = Series([1, 2, 90, 1000, 3e9])\r\n res = ser.searchsorted(30)\r\n assert is_scalar(res)\r\n assert res == 2\r\n\r\n res = ser.searchsorted([30])\r\n exp = np.array([2], dtype=np.intp)\r\n tm.assert_numpy_array_equal(res, exp)\r\n\r\n def test_searchsorted_numeric_dtypes_vector(self):\r\n ser = Series([1, 2, 90, 1000, 3e9])\r\n res = ser.searchsorted([91, 2e6])\r\n exp = np.array([3, 4], dtype=np.intp)\r\n tm.assert_numpy_array_equal(res, exp)\r\n\r\n def test_searchsorted_datetime64_scalar(self):\r\n ser = Series(date_range(\"20120101\", periods=10, freq=\"2D\"))\r\n val = Timestamp(\"20120102\")\r\n res = ser.searchsorted(val)\r\n assert is_scalar(res)\r\n assert res == 1\r\n\r\n def test_searchsorted_datetime64_scalar_mixed_timezones(self):\r\n # GH 30086\r\n ser = Series(date_range(\"20120101\", periods=10, freq=\"2D\", tz=\"UTC\"))\r\n val = Timestamp(\"20120102\", tz=\"America/New_York\")\r\n res = ser.searchsorted(val)\r\n assert is_scalar(res)\r\n assert res == 1\r\n\r\n def test_searchsorted_datetime64_list(self):\r\n ser = Series(date_range(\"20120101\", periods=10, freq=\"2D\"))\r\n vals = [Timestamp(\"20120102\"), Timestamp(\"20120104\")]\r\n res = ser.searchsorted(vals)\r\n exp = np.array([1, 2], dtype=np.intp)\r\n tm.assert_numpy_array_equal(res, exp)\r\n\r\n def test_searchsorted_sorter(self):\r\n # GH8490\r\n ser = Series([3, 1, 2])\r\n res = ser.searchsorted([0, 3], sorter=np.argsort(ser))\r\n exp = np.array([0, 2], dtype=np.intp)\r\n tm.assert_numpy_array_equal(res, exp)\r\n", "import 
numpy as np\r\nimport pytest\r\n\r\nfrom pandas.core.dtypes.common import ensure_platform_int\r\n\r\nimport pandas as pd\r\nfrom pandas import Float64Index, Index, Int64Index, RangeIndex\r\nimport pandas._testing as tm\r\n\r\nfrom ..test_numeric import Numeric\r\n\r\n# aliases to make some tests easier to read\r\nRI = RangeIndex\r\nI64 = Int64Index\r\nF64 = Float64Index\r\nOI = Index\r\n\r\n\r\nclass TestRangeIndex(Numeric):\r\n _holder = RangeIndex\r\n _compat_props = [\"shape\", \"ndim\", \"size\"]\r\n\r\n @pytest.fixture(\r\n params=[\r\n RangeIndex(start=0, stop=20, step=2, name=\"foo\"),\r\n RangeIndex(start=18, stop=-1, step=-2, name=\"bar\"),\r\n ],\r\n ids=[\"index_inc\", \"index_dec\"],\r\n )\r\n def index(self, request):\r\n return request.param\r\n\r\n def create_index(self) -> RangeIndex:\r\n return RangeIndex(start=0, stop=20, step=2)\r\n\r\n def test_can_hold_identifiers(self):\r\n idx = self.create_index()\r\n key = idx[0]\r\n assert idx._can_hold_identifiers_and_holds_name(key) is False\r\n\r\n def test_too_many_names(self):\r\n index = self.create_index()\r\n with pytest.raises(ValueError, match=\"^Length\"):\r\n index.names = [\"roger\", \"harold\"]\r\n\r\n @pytest.mark.parametrize(\r\n \"index, start, stop, step\",\r\n [\r\n (RangeIndex(5), 0, 5, 1),\r\n (RangeIndex(0, 5), 0, 5, 1),\r\n (RangeIndex(5, step=2), 0, 5, 2),\r\n (RangeIndex(1, 5, 2), 1, 5, 2),\r\n ],\r\n )\r\n def test_start_stop_step_attrs(self, index, start, stop, step):\r\n # GH 25710\r\n assert index.start == start\r\n assert index.stop == stop\r\n assert index.step == step\r\n\r\n @pytest.mark.parametrize(\"attr_name\", [\"_start\", \"_stop\", \"_step\"])\r\n def test_deprecated_start_stop_step_attrs(self, attr_name):\r\n # GH 26581\r\n idx = self.create_index()\r\n with tm.assert_produces_warning(FutureWarning):\r\n getattr(idx, attr_name)\r\n\r\n def test_copy(self):\r\n i = RangeIndex(5, name=\"Foo\")\r\n i_copy = i.copy()\r\n assert i_copy is not i\r\n assert i_copy.identical(i)\r\n assert i_copy._range == range(0, 5, 1)\r\n assert i_copy.name == \"Foo\"\r\n\r\n def test_repr(self):\r\n i = RangeIndex(5, name=\"Foo\")\r\n result = repr(i)\r\n expected = \"RangeIndex(start=0, stop=5, step=1, name='Foo')\"\r\n assert result == expected\r\n\r\n result = eval(result)\r\n tm.assert_index_equal(result, i, exact=True)\r\n\r\n i = RangeIndex(5, 0, -1)\r\n result = repr(i)\r\n expected = \"RangeIndex(start=5, stop=0, step=-1)\"\r\n assert result == expected\r\n\r\n result = eval(result)\r\n tm.assert_index_equal(result, i, exact=True)\r\n\r\n def test_insert(self):\r\n\r\n idx = RangeIndex(5, name=\"Foo\")\r\n result = idx[1:4]\r\n\r\n # test 0th element\r\n tm.assert_index_equal(idx[0:4], result.insert(0, idx[0]))\r\n\r\n # GH 18295 (test missing)\r\n expected = Float64Index([0, np.nan, 1, 2, 3, 4])\r\n for na in (np.nan, pd.NaT, None):\r\n result = RangeIndex(5).insert(1, na)\r\n tm.assert_index_equal(result, expected)\r\n\r\n def test_delete(self):\r\n\r\n idx = RangeIndex(5, name=\"Foo\")\r\n expected = idx[1:].astype(int)\r\n result = idx.delete(0)\r\n tm.assert_index_equal(result, expected)\r\n assert result.name == expected.name\r\n\r\n expected = idx[:-1].astype(int)\r\n result = idx.delete(-1)\r\n tm.assert_index_equal(result, expected)\r\n assert result.name == expected.name\r\n\r\n msg = \"index 5 is out of bounds for axis 0 with size 5\"\r\n with pytest.raises((IndexError, ValueError), match=msg):\r\n # either depending on numpy version\r\n result = idx.delete(len(idx))\r\n\r\n def 
test_view(self):\r\n i = RangeIndex(0, name=\"Foo\")\r\n i_view = i.view()\r\n assert i_view.name == \"Foo\"\r\n\r\n i_view = i.view(\"i8\")\r\n tm.assert_numpy_array_equal(i.values, i_view)\r\n\r\n i_view = i.view(RangeIndex)\r\n tm.assert_index_equal(i, i_view)\r\n\r\n def test_dtype(self):\r\n index = self.create_index()\r\n assert index.dtype == np.int64\r\n\r\n def test_cached_data(self):\r\n # GH 26565, GH26617\r\n # Calling RangeIndex._data caches an int64 array of the same length at\r\n # self._cached_data. This test checks whether _cached_data has been set\r\n idx = RangeIndex(0, 100, 10)\r\n\r\n assert idx._cached_data is None\r\n\r\n repr(idx)\r\n assert idx._cached_data is None\r\n\r\n str(idx)\r\n assert idx._cached_data is None\r\n\r\n idx.get_loc(20)\r\n assert idx._cached_data is None\r\n\r\n 90 in idx\r\n assert idx._cached_data is None\r\n\r\n 91 in idx\r\n assert idx._cached_data is None\r\n\r\n idx.all()\r\n assert idx._cached_data is None\r\n\r\n idx.any()\r\n assert idx._cached_data is None\r\n\r\n idx.format()\r\n assert idx._cache == {}\r\n\r\n df = pd.DataFrame({\"a\": range(10)}, index=idx)\r\n\r\n str(df)\r\n assert idx._cache == {}\r\n\r\n df.loc[50]\r\n assert idx._cached_data is None\r\n\r\n with pytest.raises(KeyError, match=\"51\"):\r\n df.loc[51]\r\n assert idx._cached_data is None\r\n\r\n df.loc[10:50]\r\n assert idx._cached_data is None\r\n\r\n df.iloc[5:10]\r\n assert idx._cached_data is None\r\n\r\n # actually calling idx._data\r\n assert isinstance(idx._data, np.ndarray)\r\n assert isinstance(idx._cached_data, np.ndarray)\r\n\r\n def test_is_monotonic(self):\r\n index = RangeIndex(0, 20, 2)\r\n assert index.is_monotonic is True\r\n assert index.is_monotonic_increasing is True\r\n assert index.is_monotonic_decreasing is False\r\n assert index._is_strictly_monotonic_increasing is True\r\n assert index._is_strictly_monotonic_decreasing is False\r\n\r\n index = RangeIndex(4, 0, -1)\r\n assert index.is_monotonic is False\r\n assert index._is_strictly_monotonic_increasing is False\r\n assert index.is_monotonic_decreasing is True\r\n assert index._is_strictly_monotonic_decreasing is True\r\n\r\n index = RangeIndex(1, 2)\r\n assert index.is_monotonic is True\r\n assert index.is_monotonic_increasing is True\r\n assert index.is_monotonic_decreasing is True\r\n assert index._is_strictly_monotonic_increasing is True\r\n assert index._is_strictly_monotonic_decreasing is True\r\n\r\n index = RangeIndex(2, 1)\r\n assert index.is_monotonic is True\r\n assert index.is_monotonic_increasing is True\r\n assert index.is_monotonic_decreasing is True\r\n assert index._is_strictly_monotonic_increasing is True\r\n assert index._is_strictly_monotonic_decreasing is True\r\n\r\n index = RangeIndex(1, 1)\r\n assert index.is_monotonic is True\r\n assert index.is_monotonic_increasing is True\r\n assert index.is_monotonic_decreasing is True\r\n assert index._is_strictly_monotonic_increasing is True\r\n assert index._is_strictly_monotonic_decreasing is True\r\n\r\n def test_equals_range(self):\r\n equiv_pairs = [\r\n (RangeIndex(0, 9, 2), RangeIndex(0, 10, 2)),\r\n (RangeIndex(0), RangeIndex(1, -1, 3)),\r\n (RangeIndex(1, 2, 3), RangeIndex(1, 3, 4)),\r\n (RangeIndex(0, -9, -2), RangeIndex(0, -10, -2)),\r\n ]\r\n for left, right in equiv_pairs:\r\n assert left.equals(right)\r\n assert right.equals(left)\r\n\r\n def test_logical_compat(self):\r\n idx = self.create_index()\r\n assert idx.all() == idx.values.all()\r\n assert idx.any() == idx.values.any()\r\n\r\n def 
test_identical(self):\r\n index = self.create_index()\r\n i = Index(index.copy())\r\n assert i.identical(index)\r\n\r\n # we don't allow object dtype for RangeIndex\r\n if isinstance(index, RangeIndex):\r\n return\r\n\r\n same_values_different_type = Index(i, dtype=object)\r\n assert not i.identical(same_values_different_type)\r\n\r\n i = index.copy(dtype=object)\r\n i = i.rename(\"foo\")\r\n same_values = Index(i, dtype=object)\r\n assert same_values.identical(index.copy(dtype=object))\r\n\r\n assert not i.identical(index)\r\n assert Index(same_values, name=\"foo\", dtype=object).identical(i)\r\n\r\n assert not index.copy(dtype=object).identical(index.copy(dtype=\"int64\"))\r\n\r\n def test_nbytes(self):\r\n\r\n # memory savings vs int index\r\n i = RangeIndex(0, 1000)\r\n assert i.nbytes < i._int64index.nbytes / 10\r\n\r\n # constant memory usage\r\n i2 = RangeIndex(0, 10)\r\n assert i.nbytes == i2.nbytes\r\n\r\n @pytest.mark.parametrize(\r\n \"start,stop,step\",\r\n [\r\n # can't\r\n (\"foo\", \"bar\", \"baz\"),\r\n # shouldn't\r\n (\"0\", \"1\", \"2\"),\r\n ],\r\n )\r\n def test_cant_or_shouldnt_cast(self, start, stop, step):\r\n msg = f\"Wrong type {type(start)} for value {start}\"\r\n with pytest.raises(TypeError, match=msg):\r\n RangeIndex(start, stop, step)\r\n\r\n def test_view_index(self):\r\n index = self.create_index()\r\n index.view(Index)\r\n\r\n def test_prevent_casting(self):\r\n index = self.create_index()\r\n result = index.astype(\"O\")\r\n assert result.dtype == np.object_\r\n\r\n def test_repr_roundtrip(self):\r\n index = self.create_index()\r\n tm.assert_index_equal(eval(repr(index)), index)\r\n\r\n def test_slice_keep_name(self):\r\n idx = RangeIndex(1, 2, name=\"asdf\")\r\n assert idx.name == idx[1:].name\r\n\r\n def test_explicit_conversions(self):\r\n\r\n # GH 8608\r\n # add/sub are overridden explicitly for Float/Int Index\r\n idx = RangeIndex(5)\r\n\r\n # float conversions\r\n arr = np.arange(5, dtype=\"int64\") * 3.2\r\n expected = Float64Index(arr)\r\n fidx = idx * 3.2\r\n tm.assert_index_equal(fidx, expected)\r\n fidx = 3.2 * idx\r\n tm.assert_index_equal(fidx, expected)\r\n\r\n # interops with numpy arrays\r\n expected = Float64Index(arr)\r\n a = np.zeros(5, dtype=\"float64\")\r\n result = fidx - a\r\n tm.assert_index_equal(result, expected)\r\n\r\n expected = Float64Index(-arr)\r\n a = np.zeros(5, dtype=\"float64\")\r\n result = a - fidx\r\n tm.assert_index_equal(result, expected)\r\n\r\n def test_has_duplicates(self, index):\r\n assert index.is_unique\r\n assert not index.has_duplicates\r\n\r\n def test_extended_gcd(self):\r\n index = self.create_index()\r\n result = index._extended_gcd(6, 10)\r\n assert result[0] == result[1] * 6 + result[2] * 10\r\n assert 2 == result[0]\r\n\r\n result = index._extended_gcd(10, 6)\r\n assert 2 == result[1] * 10 + result[2] * 6\r\n assert 2 == result[0]\r\n\r\n def test_min_fitting_element(self):\r\n result = RangeIndex(0, 20, 2)._min_fitting_element(1)\r\n assert 2 == result\r\n\r\n result = RangeIndex(1, 6)._min_fitting_element(1)\r\n assert 1 == result\r\n\r\n result = RangeIndex(18, -2, -2)._min_fitting_element(1)\r\n assert 2 == result\r\n\r\n result = RangeIndex(5, 0, -1)._min_fitting_element(1)\r\n assert 1 == result\r\n\r\n big_num = 500000000000000000000000\r\n\r\n result = RangeIndex(5, big_num * 2, 1)._min_fitting_element(big_num)\r\n assert big_num == result\r\n\r\n def test_max_fitting_element(self):\r\n result = RangeIndex(0, 20, 2)._max_fitting_element(17)\r\n assert 16 == result\r\n\r\n result = 
RangeIndex(1, 6)._max_fitting_element(4)\r\n assert 4 == result\r\n\r\n result = RangeIndex(18, -2, -2)._max_fitting_element(17)\r\n assert 16 == result\r\n\r\n result = RangeIndex(5, 0, -1)._max_fitting_element(4)\r\n assert 4 == result\r\n\r\n big_num = 500000000000000000000000\r\n\r\n result = RangeIndex(5, big_num * 2, 1)._max_fitting_element(big_num)\r\n assert big_num == result\r\n\r\n def test_pickle_compat_construction(self):\r\n # RangeIndex() is a valid constructor\r\n pass\r\n\r\n def test_slice_specialised(self):\r\n index = self.create_index()\r\n index.name = \"foo\"\r\n\r\n # scalar indexing\r\n res = index[1]\r\n expected = 2\r\n assert res == expected\r\n\r\n res = index[-1]\r\n expected = 18\r\n assert res == expected\r\n\r\n # slicing\r\n # slice value completion\r\n index_slice = index[:]\r\n expected = index\r\n tm.assert_index_equal(index_slice, expected)\r\n\r\n # positive slice values\r\n index_slice = index[7:10:2]\r\n expected = Index(np.array([14, 18]), name=\"foo\")\r\n tm.assert_index_equal(index_slice, expected)\r\n\r\n # negative slice values\r\n index_slice = index[-1:-5:-2]\r\n expected = Index(np.array([18, 14]), name=\"foo\")\r\n tm.assert_index_equal(index_slice, expected)\r\n\r\n # stop overshoot\r\n index_slice = index[2:100:4]\r\n expected = Index(np.array([4, 12]), name=\"foo\")\r\n tm.assert_index_equal(index_slice, expected)\r\n\r\n # reverse\r\n index_slice = index[::-1]\r\n expected = Index(index.values[::-1], name=\"foo\")\r\n tm.assert_index_equal(index_slice, expected)\r\n\r\n index_slice = index[-8::-1]\r\n expected = Index(np.array([4, 2, 0]), name=\"foo\")\r\n tm.assert_index_equal(index_slice, expected)\r\n\r\n index_slice = index[-40::-1]\r\n expected = Index(np.array([], dtype=np.int64), name=\"foo\")\r\n tm.assert_index_equal(index_slice, expected)\r\n\r\n index_slice = index[40::-1]\r\n expected = Index(index.values[40::-1], name=\"foo\")\r\n tm.assert_index_equal(index_slice, expected)\r\n\r\n index_slice = index[10::-1]\r\n expected = Index(index.values[::-1], name=\"foo\")\r\n tm.assert_index_equal(index_slice, expected)\r\n\r\n @pytest.mark.parametrize(\"step\", set(range(-5, 6)) - {0})\r\n def test_len_specialised(self, step):\r\n # make sure that our len is the same as np.arange calc\r\n start, stop = (0, 5) if step > 0 else (5, 0)\r\n\r\n arr = np.arange(start, stop, step)\r\n index = RangeIndex(start, stop, step)\r\n assert len(index) == len(arr)\r\n\r\n index = RangeIndex(stop, start, step)\r\n assert len(index) == 0\r\n\r\n @pytest.fixture(\r\n params=[\r\n ([RI(1, 12, 5)], RI(1, 12, 5)),\r\n ([RI(0, 6, 4)], RI(0, 6, 4)),\r\n ([RI(1, 3), RI(3, 7)], RI(1, 7)),\r\n ([RI(1, 5, 2), RI(5, 6)], RI(1, 6, 2)),\r\n ([RI(1, 3, 2), RI(4, 7, 3)], RI(1, 7, 3)),\r\n ([RI(-4, 3, 2), RI(4, 7, 2)], RI(-4, 7, 2)),\r\n ([RI(-4, -8), RI(-8, -12)], RI(0, 0)),\r\n ([RI(-4, -8), RI(3, -4)], RI(0, 0)),\r\n ([RI(-4, -8), RI(3, 5)], RI(3, 5)),\r\n ([RI(-4, -2), RI(3, 5)], I64([-4, -3, 3, 4])),\r\n ([RI(-2), RI(3, 5)], RI(3, 5)),\r\n ([RI(2), RI(2)], I64([0, 1, 0, 1])),\r\n ([RI(2), RI(2, 5), RI(5, 8, 4)], RI(0, 6)),\r\n ([RI(2), RI(3, 5), RI(5, 8, 4)], I64([0, 1, 3, 4, 5])),\r\n ([RI(-2, 2), RI(2, 5), RI(5, 8, 4)], RI(-2, 6)),\r\n ([RI(3), I64([-1, 3, 15])], I64([0, 1, 2, -1, 3, 15])),\r\n ([RI(3), F64([-1, 3.1, 15.0])], F64([0, 1, 2, -1, 3.1, 15.0])),\r\n ([RI(3), OI([\"a\", None, 14])], OI([0, 1, 2, \"a\", None, 14])),\r\n ([RI(3, 1), OI([\"a\", None, 14])], OI([\"a\", None, 14])),\r\n ]\r\n )\r\n def appends(self, request):\r\n \"\"\"Inputs and 
expected outputs for RangeIndex.append test\"\"\"\r\n return request.param\r\n\r\n def test_append(self, appends):\r\n # GH16212\r\n\r\n indices, expected = appends\r\n\r\n result = indices[0].append(indices[1:])\r\n tm.assert_index_equal(result, expected, exact=True)\r\n\r\n if len(indices) == 2:\r\n # Append single item rather than list\r\n result2 = indices[0].append(indices[1])\r\n tm.assert_index_equal(result2, expected, exact=True)\r\n\r\n def test_engineless_lookup(self):\r\n # GH 16685\r\n # Standard lookup on RangeIndex should not require the engine to be\r\n # created\r\n idx = RangeIndex(2, 10, 3)\r\n\r\n assert idx.get_loc(5) == 1\r\n tm.assert_numpy_array_equal(\r\n idx.get_indexer([2, 8]), ensure_platform_int(np.array([0, 2]))\r\n )\r\n with pytest.raises(KeyError, match=\"3\"):\r\n idx.get_loc(3)\r\n\r\n assert \"_engine\" not in idx._cache\r\n\r\n # Different types of scalars can be excluded immediately, no need to\r\n # use the _engine\r\n with pytest.raises(KeyError, match=\"'a'\"):\r\n idx.get_loc(\"a\")\r\n\r\n assert \"_engine\" not in idx._cache\r\n\r\n def test_format_empty(self):\r\n # GH35712\r\n empty_idx = self._holder(0)\r\n assert empty_idx.format() == []\r\n assert empty_idx.format(name=True) == [\"\"]\r\n", "\"\"\"\r\nTesting for Isolation Forest algorithm (sklearn.ensemble.iforest).\r\n\"\"\"\r\n\r\n# Authors: Nicolas Goix <[email protected]>\r\n# Alexandre Gramfort <[email protected]>\r\n# License: BSD 3 clause\r\n\r\nimport pytest\r\n\r\nimport numpy as np\r\n\r\nfrom sklearn.utils._testing import assert_array_equal\r\nfrom sklearn.utils._testing import assert_array_almost_equal\r\nfrom sklearn.utils._testing import assert_raises\r\nfrom sklearn.utils._testing import assert_warns_message\r\nfrom sklearn.utils._testing import ignore_warnings\r\nfrom sklearn.utils._testing import assert_allclose\r\n\r\nfrom sklearn.model_selection import ParameterGrid\r\nfrom sklearn.ensemble import IsolationForest\r\nfrom sklearn.ensemble._iforest import _average_path_length\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.datasets import load_diabetes, load_iris\r\nfrom sklearn.utils import check_random_state\r\nfrom sklearn.metrics import roc_auc_score\r\n\r\nfrom scipy.sparse import csc_matrix, csr_matrix\r\nfrom unittest.mock import Mock, patch\r\n\r\nrng = check_random_state(0)\r\n\r\n# load the iris dataset\r\n# and randomly permute it\r\niris = load_iris()\r\nperm = rng.permutation(iris.target.size)\r\niris.data = iris.data[perm]\r\niris.target = iris.target[perm]\r\n\r\n# also load the diabetes dataset\r\n# and randomly permute it\r\ndiabetes = load_diabetes()\r\nperm = rng.permutation(diabetes.target.size)\r\ndiabetes.data = diabetes.data[perm]\r\ndiabetes.target = diabetes.target[perm]\r\n\r\n\r\ndef test_iforest():\r\n \"\"\"Check Isolation Forest for various parameter settings.\"\"\"\r\n X_train = np.array([[0, 1], [1, 2]])\r\n X_test = np.array([[2, 1], [1, 1]])\r\n\r\n grid = ParameterGrid({\"n_estimators\": [3],\r\n \"max_samples\": [0.5, 1.0, 3],\r\n \"bootstrap\": [True, False]})\r\n\r\n with ignore_warnings():\r\n for params in grid:\r\n IsolationForest(random_state=rng,\r\n **params).fit(X_train).predict(X_test)\r\n\r\n\r\ndef test_iforest_sparse():\r\n \"\"\"Check IForest for various parameter settings on sparse input.\"\"\"\r\n rng = check_random_state(0)\r\n X_train, X_test, y_train, y_test = train_test_split(diabetes.data[:50],\r\n diabetes.target[:50],\r\n random_state=rng)\r\n grid = ParameterGrid({\"max_samples\": [0.5, 
1.0],\r\n \"bootstrap\": [True, False]})\r\n\r\n for sparse_format in [csc_matrix, csr_matrix]:\r\n X_train_sparse = sparse_format(X_train)\r\n X_test_sparse = sparse_format(X_test)\r\n\r\n for params in grid:\r\n # Trained on sparse format\r\n sparse_classifier = IsolationForest(\r\n n_estimators=10, random_state=1, **params).fit(X_train_sparse)\r\n sparse_results = sparse_classifier.predict(X_test_sparse)\r\n\r\n # Trained on dense format\r\n dense_classifier = IsolationForest(\r\n n_estimators=10, random_state=1, **params).fit(X_train)\r\n dense_results = dense_classifier.predict(X_test)\r\n\r\n assert_array_equal(sparse_results, dense_results)\r\n\r\n\r\ndef test_iforest_error():\r\n \"\"\"Test that it gives proper exception on deficient input.\"\"\"\r\n X = iris.data\r\n\r\n # Test max_samples\r\n assert_raises(ValueError,\r\n IsolationForest(max_samples=-1).fit, X)\r\n assert_raises(ValueError,\r\n IsolationForest(max_samples=0.0).fit, X)\r\n assert_raises(ValueError,\r\n IsolationForest(max_samples=2.0).fit, X)\r\n # The dataset has less than 256 samples, explicitly setting\r\n # max_samples > n_samples should result in a warning. If not set\r\n # explicitly there should be no warning\r\n assert_warns_message(UserWarning,\r\n \"max_samples will be set to n_samples for estimation\",\r\n IsolationForest(max_samples=1000).fit, X)\r\n # note that assert_no_warnings does not apply since it enables a\r\n # PendingDeprecationWarning triggered by scipy.sparse's use of\r\n # np.matrix. See issue #11251.\r\n with pytest.warns(None) as record:\r\n IsolationForest(max_samples='auto').fit(X)\r\n user_warnings = [each for each in record\r\n if issubclass(each.category, UserWarning)]\r\n assert len(user_warnings) == 0\r\n with pytest.warns(None) as record:\r\n IsolationForest(max_samples=np.int64(2)).fit(X)\r\n user_warnings = [each for each in record\r\n if issubclass(each.category, UserWarning)]\r\n assert len(user_warnings) == 0\r\n\r\n assert_raises(ValueError, IsolationForest(max_samples='foobar').fit, X)\r\n assert_raises(ValueError, IsolationForest(max_samples=1.5).fit, X)\r\n\r\n # test X_test n_features match X_train one:\r\n assert_raises(ValueError, IsolationForest().fit(X).predict, X[:, 1:])\r\n\r\n\r\ndef test_recalculate_max_depth():\r\n \"\"\"Check max_depth recalculation when max_samples is reset to n_samples\"\"\"\r\n X = iris.data\r\n clf = IsolationForest().fit(X)\r\n for est in clf.estimators_:\r\n assert est.max_depth == int(np.ceil(np.log2(X.shape[0])))\r\n\r\n\r\ndef test_max_samples_attribute():\r\n X = iris.data\r\n clf = IsolationForest().fit(X)\r\n assert clf.max_samples_ == X.shape[0]\r\n\r\n clf = IsolationForest(max_samples=500)\r\n assert_warns_message(UserWarning,\r\n \"max_samples will be set to n_samples for estimation\",\r\n clf.fit, X)\r\n assert clf.max_samples_ == X.shape[0]\r\n\r\n clf = IsolationForest(max_samples=0.4).fit(X)\r\n assert clf.max_samples_ == 0.4*X.shape[0]\r\n\r\n\r\ndef test_iforest_parallel_regression():\r\n \"\"\"Check parallel regression.\"\"\"\r\n rng = check_random_state(0)\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(diabetes.data,\r\n diabetes.target,\r\n random_state=rng)\r\n\r\n ensemble = IsolationForest(n_jobs=3,\r\n random_state=0).fit(X_train)\r\n\r\n ensemble.set_params(n_jobs=1)\r\n y1 = ensemble.predict(X_test)\r\n ensemble.set_params(n_jobs=2)\r\n y2 = ensemble.predict(X_test)\r\n assert_array_almost_equal(y1, y2)\r\n\r\n ensemble = IsolationForest(n_jobs=1,\r\n random_state=0).fit(X_train)\r\n\r\n y3 = 
ensemble.predict(X_test)\r\n assert_array_almost_equal(y1, y3)\r\n\r\n\r\ndef test_iforest_performance():\r\n \"\"\"Test Isolation Forest performs well\"\"\"\r\n\r\n # Generate train/test data\r\n rng = check_random_state(2)\r\n X = 0.3 * rng.randn(120, 2)\r\n X_train = np.r_[X + 2, X - 2]\r\n X_train = X[:100]\r\n\r\n # Generate some abnormal novel observations\r\n X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))\r\n X_test = np.r_[X[100:], X_outliers]\r\n y_test = np.array([0] * 20 + [1] * 20)\r\n\r\n # fit the model\r\n clf = IsolationForest(max_samples=100, random_state=rng).fit(X_train)\r\n\r\n # predict scores (the lower, the more normal)\r\n y_pred = - clf.decision_function(X_test)\r\n\r\n # check that there is at most 6 errors (false positive or false negative)\r\n assert roc_auc_score(y_test, y_pred) > 0.98\r\n\r\n\r\[email protected](\"contamination\", [0.25, \"auto\"])\r\ndef test_iforest_works(contamination):\r\n # toy sample (the last two samples are outliers)\r\n X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [6, 3], [-4, 7]]\r\n\r\n # Test IsolationForest\r\n clf = IsolationForest(random_state=rng, contamination=contamination)\r\n clf.fit(X)\r\n decision_func = -clf.decision_function(X)\r\n pred = clf.predict(X)\r\n # assert detect outliers:\r\n assert np.min(decision_func[-2:]) > np.max(decision_func[:-2])\r\n assert_array_equal(pred, 6 * [1] + 2 * [-1])\r\n\r\n\r\ndef test_max_samples_consistency():\r\n # Make sure validated max_samples in iforest and BaseBagging are identical\r\n X = iris.data\r\n clf = IsolationForest().fit(X)\r\n assert clf.max_samples_ == clf._max_samples\r\n\r\n\r\ndef test_iforest_subsampled_features():\r\n # It tests non-regression for #5732 which failed at predict.\r\n rng = check_random_state(0)\r\n X_train, X_test, y_train, y_test = train_test_split(diabetes.data[:50],\r\n diabetes.target[:50],\r\n random_state=rng)\r\n clf = IsolationForest(max_features=0.8)\r\n clf.fit(X_train, y_train)\r\n clf.predict(X_test)\r\n\r\n\r\ndef test_iforest_average_path_length():\r\n # It tests non-regression for #8549 which used the wrong formula\r\n # for average path length, strictly for the integer case\r\n # Updated to check average path length when input is <= 2 (issue #11839)\r\n result_one = 2.0 * (np.log(4.0) + np.euler_gamma) - 2.0 * 4.0 / 5.0\r\n result_two = 2.0 * (np.log(998.0) + np.euler_gamma) - 2.0 * 998.0 / 999.0\r\n assert_allclose(_average_path_length([0]), [0.0])\r\n assert_allclose(_average_path_length([1]), [0.0])\r\n assert_allclose(_average_path_length([2]), [1.0])\r\n assert_allclose(_average_path_length([5]), [result_one])\r\n assert_allclose(_average_path_length([999]), [result_two])\r\n assert_allclose(\r\n _average_path_length(np.array([1, 2, 5, 999])),\r\n [0.0, 1.0, result_one, result_two],\r\n )\r\n # _average_path_length is increasing\r\n avg_path_length = _average_path_length(np.arange(5))\r\n assert_array_equal(avg_path_length, np.sort(avg_path_length))\r\n\r\n\r\ndef test_score_samples():\r\n X_train = [[1, 1], [1, 2], [2, 1]]\r\n clf1 = IsolationForest(contamination=0.1).fit(X_train)\r\n clf2 = IsolationForest().fit(X_train)\r\n assert_array_equal(clf1.score_samples([[2., 2.]]),\r\n clf1.decision_function([[2., 2.]]) + clf1.offset_)\r\n assert_array_equal(clf2.score_samples([[2., 2.]]),\r\n clf2.decision_function([[2., 2.]]) + clf2.offset_)\r\n assert_array_equal(clf1.score_samples([[2., 2.]]),\r\n clf2.score_samples([[2., 2.]]))\r\n\r\n\r\ndef test_iforest_warm_start():\r\n \"\"\"Test iterative addition of 
iTrees to an iForest \"\"\"\r\n\r\n rng = check_random_state(0)\r\n X = rng.randn(20, 2)\r\n\r\n # fit first 10 trees\r\n clf = IsolationForest(n_estimators=10, max_samples=20,\r\n random_state=rng, warm_start=True)\r\n clf.fit(X)\r\n # remember the 1st tree\r\n tree_1 = clf.estimators_[0]\r\n # fit another 10 trees\r\n clf.set_params(n_estimators=20)\r\n clf.fit(X)\r\n # expecting 20 fitted trees and no overwritten trees\r\n assert len(clf.estimators_) == 20\r\n assert clf.estimators_[0] is tree_1\r\n\r\n\r\n# mock get_chunk_n_rows to actually test more than one chunk (here one\r\n# chunk = 3 rows:\r\n@patch(\r\n \"sklearn.ensemble._iforest.get_chunk_n_rows\",\r\n side_effect=Mock(**{\"return_value\": 3}),\r\n)\r\[email protected](\r\n \"contamination, n_predict_calls\", [(0.25, 3), (\"auto\", 2)]\r\n)\r\ndef test_iforest_chunks_works1(\r\n mocked_get_chunk, contamination, n_predict_calls\r\n):\r\n test_iforest_works(contamination)\r\n assert mocked_get_chunk.call_count == n_predict_calls\r\n\r\n\r\n# idem with chunk_size = 5 rows\r\n@patch(\r\n \"sklearn.ensemble._iforest.get_chunk_n_rows\",\r\n side_effect=Mock(**{\"return_value\": 10}),\r\n)\r\[email protected](\r\n \"contamination, n_predict_calls\", [(0.25, 3), (\"auto\", 2)]\r\n)\r\ndef test_iforest_chunks_works2(\r\n mocked_get_chunk, contamination, n_predict_calls\r\n):\r\n test_iforest_works(contamination)\r\n assert mocked_get_chunk.call_count == n_predict_calls\r\n\r\n\r\ndef test_iforest_with_uniform_data():\r\n \"\"\"Test whether iforest predicts inliers when using uniform data\"\"\"\r\n\r\n # 2-d array of all 1s\r\n X = np.ones((100, 10))\r\n iforest = IsolationForest()\r\n iforest.fit(X)\r\n\r\n rng = np.random.RandomState(0)\r\n\r\n assert all(iforest.predict(X) == 1)\r\n assert all(iforest.predict(rng.randn(100, 10)) == 1)\r\n assert all(iforest.predict(X + 1) == 1)\r\n assert all(iforest.predict(X - 1) == 1)\r\n\r\n # 2-d array where columns contain the same value across rows\r\n X = np.repeat(rng.randn(1, 10), 100, 0)\r\n iforest = IsolationForest()\r\n iforest.fit(X)\r\n\r\n assert all(iforest.predict(X) == 1)\r\n assert all(iforest.predict(rng.randn(100, 10)) == 1)\r\n assert all(iforest.predict(np.ones((100, 10))) == 1)\r\n\r\n # Single row\r\n X = rng.randn(1, 10)\r\n iforest = IsolationForest()\r\n iforest.fit(X)\r\n\r\n assert all(iforest.predict(X) == 1)\r\n assert all(iforest.predict(rng.randn(100, 10)) == 1)\r\n assert all(iforest.predict(np.ones((100, 10))) == 1)\r\n", "import numpy as np\r\nimport pytest\r\n\r\nimport pandas as pd\r\nfrom pandas import DataFrame, Series, date_range\r\nimport pandas._testing as tm\r\n\r\n\r\nclass TestDataFrameRound:\r\n def test_round(self):\r\n # GH#2665\r\n\r\n # Test that rounding an empty DataFrame does nothing\r\n df = DataFrame()\r\n tm.assert_frame_equal(df, df.round())\r\n\r\n # Here's the test frame we'll be working with\r\n df = DataFrame({\"col1\": [1.123, 2.123, 3.123], \"col2\": [1.234, 2.234, 3.234]})\r\n\r\n # Default round to integer (i.e. 
decimals=0)\r\n expected_rounded = DataFrame({\"col1\": [1.0, 2.0, 3.0], \"col2\": [1.0, 2.0, 3.0]})\r\n tm.assert_frame_equal(df.round(), expected_rounded)\r\n\r\n # Round with an integer\r\n decimals = 2\r\n expected_rounded = DataFrame(\r\n {\"col1\": [1.12, 2.12, 3.12], \"col2\": [1.23, 2.23, 3.23]}\r\n )\r\n tm.assert_frame_equal(df.round(decimals), expected_rounded)\r\n\r\n # This should also work with np.round (since np.round dispatches to\r\n # df.round)\r\n tm.assert_frame_equal(np.round(df, decimals), expected_rounded)\r\n\r\n # Round with a list\r\n round_list = [1, 2]\r\n msg = \"decimals must be an integer, a dict-like or a Series\"\r\n with pytest.raises(TypeError, match=msg):\r\n df.round(round_list)\r\n\r\n # Round with a dictionary\r\n expected_rounded = DataFrame(\r\n {\"col1\": [1.1, 2.1, 3.1], \"col2\": [1.23, 2.23, 3.23]}\r\n )\r\n round_dict = {\"col1\": 1, \"col2\": 2}\r\n tm.assert_frame_equal(df.round(round_dict), expected_rounded)\r\n\r\n # Incomplete dict\r\n expected_partially_rounded = DataFrame(\r\n {\"col1\": [1.123, 2.123, 3.123], \"col2\": [1.2, 2.2, 3.2]}\r\n )\r\n partial_round_dict = {\"col2\": 1}\r\n tm.assert_frame_equal(df.round(partial_round_dict), expected_partially_rounded)\r\n\r\n # Dict with unknown elements\r\n wrong_round_dict = {\"col3\": 2, \"col2\": 1}\r\n tm.assert_frame_equal(df.round(wrong_round_dict), expected_partially_rounded)\r\n\r\n # float input to `decimals`\r\n non_int_round_dict = {\"col1\": 1, \"col2\": 0.5}\r\n msg = \"integer argument expected, got float\"\r\n with pytest.raises(TypeError, match=msg):\r\n df.round(non_int_round_dict)\r\n\r\n # String input\r\n non_int_round_dict = {\"col1\": 1, \"col2\": \"foo\"}\r\n msg = r\"an integer is required \\(got type str\\)\"\r\n with pytest.raises(TypeError, match=msg):\r\n df.round(non_int_round_dict)\r\n\r\n non_int_round_Series = Series(non_int_round_dict)\r\n with pytest.raises(TypeError, match=msg):\r\n df.round(non_int_round_Series)\r\n\r\n # List input\r\n non_int_round_dict = {\"col1\": 1, \"col2\": [1, 2]}\r\n msg = r\"an integer is required \\(got type list\\)\"\r\n with pytest.raises(TypeError, match=msg):\r\n df.round(non_int_round_dict)\r\n\r\n non_int_round_Series = Series(non_int_round_dict)\r\n with pytest.raises(TypeError, match=msg):\r\n df.round(non_int_round_Series)\r\n\r\n # Non integer Series inputs\r\n non_int_round_Series = Series(non_int_round_dict)\r\n with pytest.raises(TypeError, match=msg):\r\n df.round(non_int_round_Series)\r\n\r\n non_int_round_Series = Series(non_int_round_dict)\r\n with pytest.raises(TypeError, match=msg):\r\n df.round(non_int_round_Series)\r\n\r\n # Negative numbers\r\n negative_round_dict = {\"col1\": -1, \"col2\": -2}\r\n big_df = df * 100\r\n expected_neg_rounded = DataFrame(\r\n {\"col1\": [110.0, 210, 310], \"col2\": [100.0, 200, 300]}\r\n )\r\n tm.assert_frame_equal(big_df.round(negative_round_dict), expected_neg_rounded)\r\n\r\n # nan in Series round\r\n nan_round_Series = Series({\"col1\": np.nan, \"col2\": 1})\r\n\r\n msg = \"integer argument expected, got float\"\r\n with pytest.raises(TypeError, match=msg):\r\n df.round(nan_round_Series)\r\n\r\n # Make sure this doesn't break existing Series.round\r\n tm.assert_series_equal(df[\"col1\"].round(1), expected_rounded[\"col1\"])\r\n\r\n # named columns\r\n # GH#11986\r\n decimals = 2\r\n expected_rounded = DataFrame(\r\n {\"col1\": [1.12, 2.12, 3.12], \"col2\": [1.23, 2.23, 3.23]}\r\n )\r\n df.columns.name = \"cols\"\r\n expected_rounded.columns.name = \"cols\"\r\n 
tm.assert_frame_equal(df.round(decimals), expected_rounded)\r\n\r\n # interaction of named columns & series\r\n tm.assert_series_equal(df[\"col1\"].round(decimals), expected_rounded[\"col1\"])\r\n tm.assert_series_equal(df.round(decimals)[\"col1\"], expected_rounded[\"col1\"])\r\n\r\n def test_round_numpy(self):\r\n # GH#12600\r\n df = DataFrame([[1.53, 1.36], [0.06, 7.01]])\r\n out = np.round(df, decimals=0)\r\n expected = DataFrame([[2.0, 1.0], [0.0, 7.0]])\r\n tm.assert_frame_equal(out, expected)\r\n\r\n msg = \"the 'out' parameter is not supported\"\r\n with pytest.raises(ValueError, match=msg):\r\n np.round(df, decimals=0, out=df)\r\n\r\n def test_round_numpy_with_nan(self):\r\n # See GH#14197\r\n df = Series([1.53, np.nan, 0.06]).to_frame()\r\n with tm.assert_produces_warning(None):\r\n result = df.round()\r\n expected = Series([2.0, np.nan, 0.0]).to_frame()\r\n tm.assert_frame_equal(result, expected)\r\n\r\n def test_round_mixed_type(self):\r\n # GH#11885\r\n df = DataFrame(\r\n {\r\n \"col1\": [1.1, 2.2, 3.3, 4.4],\r\n \"col2\": [\"1\", \"a\", \"c\", \"f\"],\r\n \"col3\": date_range(\"20111111\", periods=4),\r\n }\r\n )\r\n round_0 = DataFrame(\r\n {\r\n \"col1\": [1.0, 2.0, 3.0, 4.0],\r\n \"col2\": [\"1\", \"a\", \"c\", \"f\"],\r\n \"col3\": date_range(\"20111111\", periods=4),\r\n }\r\n )\r\n tm.assert_frame_equal(df.round(), round_0)\r\n tm.assert_frame_equal(df.round(1), df)\r\n tm.assert_frame_equal(df.round({\"col1\": 1}), df)\r\n tm.assert_frame_equal(df.round({\"col1\": 0}), round_0)\r\n tm.assert_frame_equal(df.round({\"col1\": 0, \"col2\": 1}), round_0)\r\n tm.assert_frame_equal(df.round({\"col3\": 1}), df)\r\n\r\n def test_round_with_duplicate_columns(self):\r\n # GH#11611\r\n\r\n df = pd.DataFrame(\r\n np.random.random([3, 3]),\r\n columns=[\"A\", \"B\", \"C\"],\r\n index=[\"first\", \"second\", \"third\"],\r\n )\r\n\r\n dfs = pd.concat((df, df), axis=1)\r\n rounded = dfs.round()\r\n tm.assert_index_equal(rounded.index, dfs.index)\r\n\r\n decimals = pd.Series([1, 0, 2], index=[\"A\", \"B\", \"A\"])\r\n msg = \"Index of decimals must be unique\"\r\n with pytest.raises(ValueError, match=msg):\r\n df.round(decimals)\r\n\r\n def test_round_builtin(self):\r\n # GH#11763\r\n # Here's the test frame we'll be working with\r\n df = DataFrame({\"col1\": [1.123, 2.123, 3.123], \"col2\": [1.234, 2.234, 3.234]})\r\n\r\n # Default round to integer (i.e. 
decimals=0)\r\n expected_rounded = DataFrame({\"col1\": [1.0, 2.0, 3.0], \"col2\": [1.0, 2.0, 3.0]})\r\n tm.assert_frame_equal(round(df), expected_rounded)\r\n\r\n def test_round_nonunique_categorical(self):\r\n # See GH#21809\r\n idx = pd.CategoricalIndex([\"low\"] * 3 + [\"hi\"] * 3)\r\n df = pd.DataFrame(np.random.rand(6, 3), columns=list(\"abc\"))\r\n\r\n expected = df.round(3)\r\n expected.index = idx\r\n\r\n df_categorical = df.copy().set_index(idx)\r\n assert df_categorical.shape == (6, 3)\r\n result = df_categorical.round(3)\r\n assert result.shape == (6, 3)\r\n\r\n tm.assert_frame_equal(result, expected)\r\n\r\n def test_round_interval_category_columns(self):\r\n # GH#30063\r\n columns = pd.CategoricalIndex(pd.interval_range(0, 2))\r\n df = DataFrame([[0.66, 1.1], [0.3, 0.25]], columns=columns)\r\n\r\n result = df.round()\r\n expected = DataFrame([[1.0, 1.0], [0.0, 0.0]], columns=columns)\r\n tm.assert_frame_equal(result, expected)\r\n", "\"\"\"\r\nSequential feature selection\r\n\"\"\"\r\nimport numbers\r\n\r\nimport numpy as np\r\n\r\nfrom ._base import SelectorMixin\r\nfrom ..base import BaseEstimator, MetaEstimatorMixin, clone\r\nfrom ..utils._tags import _safe_tags\r\nfrom ..utils.validation import check_is_fitted\r\nfrom ..model_selection import cross_val_score\r\n\r\n\r\nclass SequentialFeatureSelector(SelectorMixin, MetaEstimatorMixin,\r\n BaseEstimator):\r\n \"\"\"Transformer that performs Sequential Feature Selection.\r\n\r\n This Sequential Feature Selector adds (forward selection) or\r\n removes (backward selection) features to form a feature subset in a\r\n greedy fashion. At each stage, this estimator chooses the best feature to\r\n add or remove based on the cross-validation score of an estimator.\r\n\r\n Read more in the :ref:`User Guide <sequential_feature_selection>`.\r\n\r\n .. versionadded:: 0.24\r\n\r\n Parameters\r\n ----------\r\n estimator : estimator instance\r\n An unfitted estimator.\r\n\r\n n_features_to_select : int or float, default=None\r\n The number of features to select. If `None`, half of the features are\r\n selected. If integer, the parameter is the absolute number of features\r\n to select. If float between 0 and 1, it is the fraction of features to\r\n select.\r\n\r\n direction: {'forward', 'backward'}, default='forward'\r\n Whether to perform forward selection or backward selection.\r\n\r\n scoring : str, callable, list/tuple or dict, default=None\r\n A single str (see :ref:`scoring_parameter`) or a callable\r\n (see :ref:`scoring`) to evaluate the predictions on the test set.\r\n\r\n NOTE that when using custom scorers, each scorer should return a single\r\n value. Metric functions returning a list/array of values can be wrapped\r\n into multiple scorers that return one value each.\r\n\r\n If None, the estimator's score method is used.\r\n\r\n cv : int, cross-validation generator or an iterable, default=None\r\n Determines the cross-validation splitting strategy.\r\n Possible inputs for cv are:\r\n\r\n - None, to use the default 5-fold cross validation,\r\n - integer, to specify the number of folds in a `(Stratified)KFold`,\r\n - :term:`CV splitter`,\r\n - An iterable yielding (train, test) splits as arrays of indices.\r\n\r\n For integer/None inputs, if the estimator is a classifier and ``y`` is\r\n either binary or multiclass, :class:`StratifiedKFold` is used. 
In all\r\n other cases, :class:`KFold` is used.\r\n\r\n Refer :ref:`User Guide <cross_validation>` for the various\r\n cross-validation strategies that can be used here.\r\n\r\n n_jobs : int, default=None\r\n Number of jobs to run in parallel. When evaluating a new feature to\r\n add or remove, the cross-validation procedure is parallel over the\r\n folds.\r\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\r\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\r\n for more details.\r\n\r\n Attributes\r\n ----------\r\n n_features_to_select_ : int\r\n The number of features that were selected.\r\n\r\n support_ : ndarray of shape (n_features,), dtype=bool\r\n The mask of selected features.\r\n\r\n See Also\r\n --------\r\n RFE : Recursive feature elimination based on importance weights.\r\n RFECV : Recursive feature elimination based on importance weights, with\r\n automatic selection of the number of features.\r\n SelectFromModel : Feature selection based on thresholds of importance\r\n weights.\r\n\r\n Examples\r\n --------\r\n >>> from sklearn.feature_selection import SequentialFeatureSelector\r\n >>> from sklearn.neighbors import KNeighborsClassifier\r\n >>> from sklearn.datasets import load_iris\r\n >>> X, y = load_iris(return_X_y=True)\r\n >>> knn = KNeighborsClassifier(n_neighbors=3)\r\n >>> sfs = SequentialFeatureSelector(knn, n_features_to_select=3)\r\n >>> sfs.fit(X, y)\r\n SequentialFeatureSelector(estimator=KNeighborsClassifier(n_neighbors=3),\r\n n_features_to_select=3)\r\n >>> sfs.get_support()\r\n array([ True, False, True, True])\r\n >>> sfs.transform(X).shape\r\n (150, 3)\r\n \"\"\"\r\n def __init__(self, estimator, *, n_features_to_select=None,\r\n direction='forward', scoring=None, cv=5, n_jobs=None):\r\n\r\n self.estimator = estimator\r\n self.n_features_to_select = n_features_to_select\r\n self.direction = direction\r\n self.scoring = scoring\r\n self.cv = cv\r\n self.n_jobs = n_jobs\r\n\r\n def fit(self, X, y):\r\n \"\"\"Learn the features to select.\r\n\r\n Parameters\r\n ----------\r\n X : array-like of shape (n_samples, n_features)\r\n Training vectors.\r\n y : array-like of shape (n_samples,)\r\n Target values.\r\n\r\n Returns\r\n -------\r\n self : object\r\n \"\"\"\r\n tags = self._get_tags()\r\n X, y = self._validate_data(\r\n X, y, accept_sparse=\"csc\",\r\n ensure_min_features=2,\r\n force_all_finite=not tags.get(\"allow_nan\", True),\r\n multi_output=True\r\n )\r\n n_features = X.shape[1]\r\n\r\n error_msg = (\"n_features_to_select must be either None, an \"\r\n \"integer in [1, n_features - 1] \"\r\n \"representing the absolute \"\r\n \"number of features, or a float in (0, 1] \"\r\n \"representing a percentage of features to \"\r\n f\"select. Got {self.n_features_to_select}\")\r\n if self.n_features_to_select is None:\r\n self.n_features_to_select_ = n_features // 2\r\n elif isinstance(self.n_features_to_select, numbers.Integral):\r\n if not 0 < self.n_features_to_select < n_features:\r\n raise ValueError(error_msg)\r\n self.n_features_to_select_ = self.n_features_to_select\r\n elif isinstance(self.n_features_to_select, numbers.Real):\r\n if not 0 < self.n_features_to_select <= 1:\r\n raise ValueError(error_msg)\r\n self.n_features_to_select_ = int(n_features *\r\n self.n_features_to_select)\r\n else:\r\n raise ValueError(error_msg)\r\n\r\n if self.direction not in ('forward', 'backward'):\r\n raise ValueError(\r\n \"direction must be either 'forward' or 'backward'. 
\"\r\n f\"Got {self.direction}.\"\r\n )\r\n\r\n cloned_estimator = clone(self.estimator)\r\n\r\n # the current mask corresponds to the set of features:\r\n # - that we have already *selected* if we do forward selection\r\n # - that we have already *excluded* if we do backward selection\r\n current_mask = np.zeros(shape=n_features, dtype=bool)\r\n n_iterations = (\r\n self.n_features_to_select_ if self.direction == 'forward'\r\n else n_features - self.n_features_to_select_\r\n )\r\n for _ in range(n_iterations):\r\n new_feature_idx = self._get_best_new_feature(cloned_estimator, X,\r\n y, current_mask)\r\n current_mask[new_feature_idx] = True\r\n\r\n if self.direction == 'backward':\r\n current_mask = ~current_mask\r\n self.support_ = current_mask\r\n\r\n return self\r\n\r\n def _get_best_new_feature(self, estimator, X, y, current_mask):\r\n # Return the best new feature to add to the current_mask, i.e. return\r\n # the best new feature to add (resp. remove) when doing forward\r\n # selection (resp. backward selection)\r\n candidate_feature_indices = np.flatnonzero(~current_mask)\r\n scores = {}\r\n for feature_idx in candidate_feature_indices:\r\n candidate_mask = current_mask.copy()\r\n candidate_mask[feature_idx] = True\r\n if self.direction == 'backward':\r\n candidate_mask = ~candidate_mask\r\n X_new = X[:, candidate_mask]\r\n scores[feature_idx] = cross_val_score(\r\n estimator, X_new, y, cv=self.cv, scoring=self.scoring,\r\n n_jobs=self.n_jobs).mean()\r\n return max(scores, key=lambda feature_idx: scores[feature_idx])\r\n\r\n def _get_support_mask(self):\r\n check_is_fitted(self)\r\n return self.support_\r\n\r\n def _more_tags(self):\r\n return {\r\n 'allow_nan': _safe_tags(self.estimator, key=\"allow_nan\"),\r\n 'requires_y': True,\r\n }\r\n", "import operator\r\nfrom typing import TYPE_CHECKING, Type, Union\r\n\r\nimport numpy as np\r\n\r\nfrom pandas._libs import lib, missing as libmissing\r\n\r\nfrom pandas.core.dtypes.base import ExtensionDtype, register_extension_dtype\r\nfrom pandas.core.dtypes.common import pandas_dtype\r\nfrom pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries\r\nfrom pandas.core.dtypes.inference import is_array_like\r\n\r\nfrom pandas import compat\r\nfrom pandas.core import ops\r\nfrom pandas.core.arrays import IntegerArray, PandasArray\r\nfrom pandas.core.arrays.integer import _IntegerDtype\r\nfrom pandas.core.construction import extract_array\r\nfrom pandas.core.indexers import check_array_indexer\r\nfrom pandas.core.missing import isna\r\n\r\nif TYPE_CHECKING:\r\n import pyarrow # noqa: F401\r\n\r\n\r\n@register_extension_dtype\r\nclass StringDtype(ExtensionDtype):\r\n \"\"\"\r\n Extension dtype for string data.\r\n\r\n .. versionadded:: 1.0.0\r\n\r\n .. warning::\r\n\r\n StringDtype is considered experimental. 
The implementation and\r\n parts of the API may change without warning.\r\n\r\n In particular, StringDtype.na_value may change to no longer be\r\n ``numpy.nan``.\r\n\r\n Attributes\r\n ----------\r\n None\r\n\r\n Methods\r\n -------\r\n None\r\n\r\n Examples\r\n --------\r\n >>> pd.StringDtype()\r\n StringDtype\r\n \"\"\"\r\n\r\n name = \"string\"\r\n\r\n #: StringDtype.na_value uses pandas.NA\r\n na_value = libmissing.NA\r\n\r\n @property\r\n def type(self) -> Type[str]:\r\n return str\r\n\r\n @classmethod\r\n def construct_array_type(cls) -> Type[\"StringArray\"]:\r\n \"\"\"\r\n Return the array type associated with this dtype.\r\n\r\n Returns\r\n -------\r\n type\r\n \"\"\"\r\n return StringArray\r\n\r\n def __repr__(self) -> str:\r\n return \"StringDtype\"\r\n\r\n def __from_arrow__(\r\n self, array: Union[\"pyarrow.Array\", \"pyarrow.ChunkedArray\"]\r\n ) -> \"StringArray\":\r\n \"\"\"\r\n Construct StringArray from pyarrow Array/ChunkedArray.\r\n \"\"\"\r\n import pyarrow # noqa: F811\r\n\r\n if isinstance(array, pyarrow.Array):\r\n chunks = [array]\r\n else:\r\n # pyarrow.ChunkedArray\r\n chunks = array.chunks\r\n\r\n results = []\r\n for arr in chunks:\r\n # using _from_sequence to ensure None is converted to NA\r\n str_arr = StringArray._from_sequence(np.array(arr))\r\n results.append(str_arr)\r\n\r\n return StringArray._concat_same_type(results)\r\n\r\n\r\nclass StringArray(PandasArray):\r\n \"\"\"\r\n Extension array for string data.\r\n\r\n .. versionadded:: 1.0.0\r\n\r\n .. warning::\r\n\r\n StringArray is considered experimental. The implementation and\r\n parts of the API may change without warning.\r\n\r\n Parameters\r\n ----------\r\n values : array-like\r\n The array of data.\r\n\r\n .. warning::\r\n\r\n Currently, this expects an object-dtype ndarray\r\n where the elements are Python strings or :attr:`pandas.NA`.\r\n This may change without warning in the future. 
Use\r\n :meth:`pandas.array` with ``dtype=\"string\"`` for a stable way of\r\n creating a `StringArray` from any sequence.\r\n\r\n copy : bool, default False\r\n Whether to copy the array of data.\r\n\r\n Attributes\r\n ----------\r\n None\r\n\r\n Methods\r\n -------\r\n None\r\n\r\n See Also\r\n --------\r\n array\r\n The recommended function for creating a StringArray.\r\n Series.str\r\n The string methods are available on Series backed by\r\n a StringArray.\r\n\r\n Notes\r\n -----\r\n StringArray returns a BooleanArray for comparison methods.\r\n\r\n Examples\r\n --------\r\n >>> pd.array(['This is', 'some text', None, 'data.'], dtype=\"string\")\r\n <StringArray>\r\n ['This is', 'some text', <NA>, 'data.']\r\n Length: 4, dtype: string\r\n\r\n Unlike arrays instantiated with ``dtype=\"object\"``, ``StringArray``\r\n will convert the values to strings.\r\n\r\n >>> pd.array(['1', 1], dtype=\"object\")\r\n <PandasArray>\r\n ['1', 1]\r\n Length: 2, dtype: object\r\n >>> pd.array(['1', 1], dtype=\"string\")\r\n <StringArray>\r\n ['1', '1']\r\n Length: 2, dtype: string\r\n\r\n However, instantiating StringArrays directly with non-strings will raise an error.\r\n\r\n For comparison methods, `StringArray` returns a :class:`pandas.BooleanArray`:\r\n\r\n >>> pd.array([\"a\", None, \"c\"], dtype=\"string\") == \"a\"\r\n <BooleanArray>\r\n [True, <NA>, False]\r\n Length: 3, dtype: boolean\r\n \"\"\"\r\n\r\n # undo the PandasArray hack\r\n _typ = \"extension\"\r\n\r\n def __init__(self, values, copy=False):\r\n values = extract_array(values)\r\n\r\n super().__init__(values, copy=copy)\r\n self._dtype = StringDtype()\r\n if not isinstance(values, type(self)):\r\n self._validate()\r\n\r\n def _validate(self):\r\n \"\"\"Validate that we only store NA or strings.\"\"\"\r\n if len(self._ndarray) and not lib.is_string_array(self._ndarray, skipna=True):\r\n raise ValueError(\"StringArray requires a sequence of strings or pandas.NA\")\r\n if self._ndarray.dtype != \"object\":\r\n raise ValueError(\r\n \"StringArray requires a sequence of strings or pandas.NA. 
Got \"\r\n f\"'{self._ndarray.dtype}' dtype instead.\"\r\n )\r\n\r\n @classmethod\r\n def _from_sequence(cls, scalars, dtype=None, copy=False):\r\n if dtype:\r\n assert dtype == \"string\"\r\n\r\n # convert non-na-likes to str, and nan-likes to StringDtype.na_value\r\n result = lib.ensure_string_array(\r\n scalars, na_value=StringDtype.na_value, copy=copy\r\n )\r\n\r\n return cls(result)\r\n\r\n @classmethod\r\n def _from_sequence_of_strings(cls, strings, dtype=None, copy=False):\r\n return cls._from_sequence(strings, dtype=dtype, copy=copy)\r\n\r\n def __arrow_array__(self, type=None):\r\n \"\"\"\r\n Convert myself into a pyarrow Array.\r\n \"\"\"\r\n import pyarrow as pa\r\n\r\n if type is None:\r\n type = pa.string()\r\n\r\n values = self._ndarray.copy()\r\n values[self.isna()] = None\r\n return pa.array(values, type=type, from_pandas=True)\r\n\r\n def _values_for_factorize(self):\r\n arr = self._ndarray.copy()\r\n mask = self.isna()\r\n arr[mask] = -1\r\n return arr, -1\r\n\r\n def __setitem__(self, key, value):\r\n value = extract_array(value, extract_numpy=True)\r\n if isinstance(value, type(self)):\r\n # extract_array doesn't extract PandasArray subclasses\r\n value = value._ndarray\r\n\r\n key = check_array_indexer(self, key)\r\n scalar_key = lib.is_scalar(key)\r\n scalar_value = lib.is_scalar(value)\r\n if scalar_key and not scalar_value:\r\n raise ValueError(\"setting an array element with a sequence.\")\r\n\r\n # validate new items\r\n if scalar_value:\r\n if isna(value):\r\n value = StringDtype.na_value\r\n elif not isinstance(value, str):\r\n raise ValueError(\r\n f\"Cannot set non-string value '{value}' into a StringArray.\"\r\n )\r\n else:\r\n if not is_array_like(value):\r\n value = np.asarray(value, dtype=object)\r\n if len(value) and not lib.is_string_array(value, skipna=True):\r\n raise ValueError(\"Must provide strings.\")\r\n\r\n super().__setitem__(key, value)\r\n\r\n def fillna(self, value=None, method=None, limit=None):\r\n # TODO: validate dtype\r\n return super().fillna(value, method, limit)\r\n\r\n def astype(self, dtype, copy=True):\r\n dtype = pandas_dtype(dtype)\r\n if isinstance(dtype, StringDtype):\r\n if copy:\r\n return self.copy()\r\n return self\r\n elif isinstance(dtype, _IntegerDtype):\r\n arr = self._ndarray.copy()\r\n mask = self.isna()\r\n arr[mask] = 0\r\n values = arr.astype(dtype.numpy_dtype)\r\n return IntegerArray(values, mask, copy=False)\r\n\r\n return super().astype(dtype, copy)\r\n\r\n def _reduce(self, name: str, skipna: bool = True, **kwargs):\r\n if name in [\"min\", \"max\"]:\r\n return getattr(self, name)(skipna=skipna)\r\n\r\n raise TypeError(f\"Cannot perform reduction '{name}' with string dtype\")\r\n\r\n def value_counts(self, dropna=False):\r\n from pandas import value_counts\r\n\r\n return value_counts(self._ndarray, dropna=dropna).astype(\"Int64\")\r\n\r\n def memory_usage(self, deep=False):\r\n result = self._ndarray.nbytes\r\n if deep:\r\n return result + lib.memory_usage_of_objects(self._ndarray)\r\n return result\r\n\r\n # Override parent because we have different return types.\r\n @classmethod\r\n def _create_arithmetic_method(cls, op):\r\n # Note: this handles both arithmetic and comparison methods.\r\n def method(self, other):\r\n from pandas.arrays import BooleanArray\r\n\r\n assert op.__name__ in ops.ARITHMETIC_BINOPS | ops.COMPARISON_BINOPS\r\n\r\n if isinstance(other, (ABCIndexClass, ABCSeries, ABCDataFrame)):\r\n return NotImplemented\r\n\r\n elif isinstance(other, cls):\r\n other = other._ndarray\r\n\r\n mask = 
isna(self) | isna(other)\r\n valid = ~mask\r\n\r\n if not lib.is_scalar(other):\r\n if len(other) != len(self):\r\n # prevent improper broadcasting when other is 2D\r\n raise ValueError(\r\n f\"Lengths of operands do not match: {len(self)} != {len(other)}\"\r\n )\r\n\r\n other = np.asarray(other)\r\n other = other[valid]\r\n\r\n if op.__name__ in ops.ARITHMETIC_BINOPS:\r\n result = np.empty_like(self._ndarray, dtype=\"object\")\r\n result[mask] = StringDtype.na_value\r\n result[valid] = op(self._ndarray[valid], other)\r\n return StringArray(result)\r\n else:\r\n # logical\r\n result = np.zeros(len(self._ndarray), dtype=\"bool\")\r\n result[valid] = op(self._ndarray[valid], other)\r\n return BooleanArray(result, mask)\r\n\r\n return compat.set_function_name(method, f\"__{op.__name__}__\", cls)\r\n\r\n @classmethod\r\n def _add_arithmetic_ops(cls):\r\n cls.__add__ = cls._create_arithmetic_method(operator.add)\r\n cls.__radd__ = cls._create_arithmetic_method(ops.radd)\r\n\r\n cls.__mul__ = cls._create_arithmetic_method(operator.mul)\r\n cls.__rmul__ = cls._create_arithmetic_method(ops.rmul)\r\n\r\n _create_comparison_method = _create_arithmetic_method\r\n\r\n\r\nStringArray._add_arithmetic_ops()\r\nStringArray._add_comparison_ops()\r\n", "\"\"\" test get/set & misc \"\"\"\r\n\r\n\r\nimport pandas as pd\r\nfrom pandas import MultiIndex, Series\r\n\r\n\r\ndef test_access_none_value_in_multiindex():\r\n # GH34318: test that you can access a None value using .loc through a Multiindex\r\n\r\n s = Series([None], pd.MultiIndex.from_arrays([[\"Level1\"], [\"Level2\"]]))\r\n result = s.loc[(\"Level1\", \"Level2\")]\r\n assert result is None\r\n\r\n midx = MultiIndex.from_product([[\"Level1\"], [\"Level2_a\", \"Level2_b\"]])\r\n s = Series([None] * len(midx), dtype=object, index=midx)\r\n result = s.loc[(\"Level1\", \"Level2_a\")]\r\n assert result is None\r\n\r\n s = Series([1] * len(midx), dtype=object, index=midx)\r\n result = s.loc[(\"Level1\", \"Level2_a\")]\r\n assert result == 1\r\n", "# -*- coding: utf-8 -*-\r\n\r\n# Authors: Alexandre Gramfort <[email protected]>\r\n# Mathieu Blondel <[email protected]>\r\n# Robert Layton <[email protected]>\r\n# Andreas Mueller <[email protected]>\r\n# Philippe Gervais <[email protected]>\r\n# Lars Buitinck\r\n# Joel Nothman <[email protected]>\r\n# License: BSD 3 clause\r\n\r\nimport itertools\r\nfrom functools import partial\r\nimport warnings\r\n\r\nimport numpy as np\r\nfrom scipy.spatial import distance\r\nfrom scipy.sparse import csr_matrix\r\nfrom scipy.sparse import issparse\r\nfrom joblib import Parallel, effective_n_jobs\r\n\r\nfrom ..utils.validation import _num_samples\r\nfrom ..utils.validation import check_non_negative\r\nfrom ..utils import check_array\r\nfrom ..utils import gen_even_slices\r\nfrom ..utils import gen_batches, get_chunk_n_rows\r\nfrom ..utils import is_scalar_nan\r\nfrom ..utils.extmath import row_norms, safe_sparse_dot\r\nfrom ..preprocessing import normalize\r\nfrom ..utils._mask import _get_mask\r\nfrom ..utils.validation import _deprecate_positional_args\r\nfrom ..utils.fixes import delayed\r\nfrom ..utils.fixes import sp_version, parse_version\r\n\r\nfrom ._pairwise_fast import _chi2_kernel_fast, _sparse_manhattan\r\nfrom ..exceptions import DataConversionWarning\r\n\r\n\r\n# Utility Functions\r\ndef _return_float_dtype(X, Y):\r\n \"\"\"\r\n 1. If dtype of X and Y is float32, then dtype float32 is returned.\r\n 2. 
Else dtype float is returned.\r\n \"\"\"\r\n if not issparse(X) and not isinstance(X, np.ndarray):\r\n X = np.asarray(X)\r\n\r\n if Y is None:\r\n Y_dtype = X.dtype\r\n elif not issparse(Y) and not isinstance(Y, np.ndarray):\r\n Y = np.asarray(Y)\r\n Y_dtype = Y.dtype\r\n else:\r\n Y_dtype = Y.dtype\r\n\r\n if X.dtype == Y_dtype == np.float32:\r\n dtype = np.float32\r\n else:\r\n dtype = float\r\n\r\n return X, Y, dtype\r\n\r\n\r\n@_deprecate_positional_args\r\ndef check_pairwise_arrays(X, Y, *, precomputed=False, dtype=None,\r\n accept_sparse='csr', force_all_finite=True,\r\n copy=False):\r\n \"\"\"Set X and Y appropriately and checks inputs.\r\n\r\n If Y is None, it is set as a pointer to X (i.e. not a copy).\r\n If Y is given, this does not happen.\r\n All distance metrics should use this function first to assert that the\r\n given parameters are correct and safe to use.\r\n\r\n Specifically, this function first ensures that both X and Y are arrays,\r\n then checks that they are at least two dimensional while ensuring that\r\n their elements are floats (or dtype if provided). Finally, the function\r\n checks that the size of the second dimension of the two arrays is equal, or\r\n the equivalent check for a precomputed distance matrix.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} of shape (n_samples_X, n_features)\r\n\r\n Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)\r\n\r\n precomputed : bool, default=False\r\n True if X is to be treated as precomputed distances to the samples in\r\n Y.\r\n\r\n dtype : str, type, list of type, default=None\r\n Data type required for X and Y. If None, the dtype will be an\r\n appropriate float type selected by _return_float_dtype.\r\n\r\n .. versionadded:: 0.18\r\n\r\n accept_sparse : str, bool or list/tuple of str, default='csr'\r\n String[s] representing allowed sparse matrix formats, such as 'csc',\r\n 'csr', etc. If the input is sparse but not in the allowed format,\r\n it will be converted to the first listed format. True allows the input\r\n to be any format. False means that a sparse matrix input will\r\n raise an error.\r\n\r\n force_all_finite : bool or 'allow-nan', default=True\r\n Whether to raise an error on np.inf, np.nan, pd.NA in array. The\r\n possibilities are:\r\n\r\n - True: Force all values of array to be finite.\r\n - False: accepts np.inf, np.nan, pd.NA in array.\r\n - 'allow-nan': accepts only np.nan and pd.NA values in array. Values\r\n cannot be infinite.\r\n\r\n .. versionadded:: 0.22\r\n ``force_all_finite`` accepts the string ``'allow-nan'``.\r\n\r\n .. versionchanged:: 0.23\r\n Accepts `pd.NA` and converts it into `np.nan`.\r\n\r\n copy : bool, default=False\r\n Whether a forced copy will be triggered. If copy=False, a copy might\r\n be triggered by a conversion.\r\n\r\n .. 
versionadded:: 0.22\r\n\r\n Returns\r\n -------\r\n safe_X : {array-like, sparse matrix} of shape (n_samples_X, n_features)\r\n An array equal to X, guaranteed to be a numpy array.\r\n\r\n safe_Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)\r\n An array equal to Y if Y was not None, guaranteed to be a numpy array.\r\n If Y was None, safe_Y will be a pointer to X.\r\n\r\n \"\"\"\r\n X, Y, dtype_float = _return_float_dtype(X, Y)\r\n\r\n estimator = 'check_pairwise_arrays'\r\n if dtype is None:\r\n dtype = dtype_float\r\n\r\n if Y is X or Y is None:\r\n X = Y = check_array(X, accept_sparse=accept_sparse, dtype=dtype,\r\n copy=copy, force_all_finite=force_all_finite,\r\n estimator=estimator)\r\n else:\r\n X = check_array(X, accept_sparse=accept_sparse, dtype=dtype,\r\n copy=copy, force_all_finite=force_all_finite,\r\n estimator=estimator)\r\n Y = check_array(Y, accept_sparse=accept_sparse, dtype=dtype,\r\n copy=copy, force_all_finite=force_all_finite,\r\n estimator=estimator)\r\n\r\n if precomputed:\r\n if X.shape[1] != Y.shape[0]:\r\n raise ValueError(\"Precomputed metric requires shape \"\r\n \"(n_queries, n_indexed). Got (%d, %d) \"\r\n \"for %d indexed.\" %\r\n (X.shape[0], X.shape[1], Y.shape[0]))\r\n elif X.shape[1] != Y.shape[1]:\r\n raise ValueError(\"Incompatible dimension for X and Y matrices: \"\r\n \"X.shape[1] == %d while Y.shape[1] == %d\" % (\r\n X.shape[1], Y.shape[1]))\r\n\r\n return X, Y\r\n\r\n\r\ndef check_paired_arrays(X, Y):\r\n \"\"\"Set X and Y appropriately and checks inputs for paired distances.\r\n\r\n All paired distance metrics should use this function first to assert that\r\n the given parameters are correct and safe to use.\r\n\r\n Specifically, this function first ensures that both X and Y are arrays,\r\n then checks that they are at least two dimensional while ensuring that\r\n their elements are floats. Finally, the function checks that the size\r\n of the dimensions of the two arrays are equal.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} of shape (n_samples_X, n_features)\r\n\r\n Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)\r\n\r\n Returns\r\n -------\r\n safe_X : {array-like, sparse matrix} of shape (n_samples_X, n_features)\r\n An array equal to X, guaranteed to be a numpy array.\r\n\r\n safe_Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)\r\n An array equal to Y if Y was not None, guaranteed to be a numpy array.\r\n If Y was None, safe_Y will be a pointer to X.\r\n\r\n \"\"\"\r\n X, Y = check_pairwise_arrays(X, Y)\r\n if X.shape != Y.shape:\r\n raise ValueError(\"X and Y should be of same shape. 
They were \"\r\n \"respectively %r and %r long.\" % (X.shape, Y.shape))\r\n return X, Y\r\n\r\n\r\n# Pairwise distances\r\n@_deprecate_positional_args\r\ndef euclidean_distances(X, Y=None, *, Y_norm_squared=None, squared=False,\r\n X_norm_squared=None):\r\n \"\"\"\r\n Considering the rows of X (and Y=X) as vectors, compute the\r\n distance matrix between each pair of vectors.\r\n\r\n For efficiency reasons, the euclidean distance between a pair of row\r\n vector x and y is computed as::\r\n\r\n dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))\r\n\r\n This formulation has two advantages over other ways of computing distances.\r\n First, it is computationally efficient when dealing with sparse data.\r\n Second, if one argument varies but the other remains unchanged, then\r\n `dot(x, x)` and/or `dot(y, y)` can be pre-computed.\r\n\r\n However, this is not the most precise way of doing this computation,\r\n because this equation potentially suffers from \"catastrophic cancellation\".\r\n Also, the distance matrix returned by this function may not be exactly\r\n symmetric as required by, e.g., ``scipy.spatial.distance`` functions.\r\n\r\n Read more in the :ref:`User Guide <metrics>`.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} of shape (n_samples_X, n_features)\r\n\r\n Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), \\\r\n default=None\r\n\r\n Y_norm_squared : array-like of shape (n_samples_Y,), default=None\r\n Pre-computed dot-products of vectors in Y (e.g.,\r\n ``(Y**2).sum(axis=1)``)\r\n May be ignored in some cases, see the note below.\r\n\r\n squared : bool, default=False\r\n Return squared Euclidean distances.\r\n\r\n X_norm_squared : array-like of shape (n_samples,), default=None\r\n Pre-computed dot-products of vectors in X (e.g.,\r\n ``(X**2).sum(axis=1)``)\r\n May be ignored in some cases, see the note below.\r\n\r\n Notes\r\n -----\r\n To achieve better accuracy, `X_norm_squared` and `Y_norm_squared` may be\r\n unused if they are passed as ``float32``.\r\n\r\n Returns\r\n -------\r\n distances : ndarray of shape (n_samples_X, n_samples_Y)\r\n\r\n See Also\r\n --------\r\n paired_distances : Distances betweens pairs of elements of X and Y.\r\n\r\n Examples\r\n --------\r\n >>> from sklearn.metrics.pairwise import euclidean_distances\r\n >>> X = [[0, 1], [1, 1]]\r\n >>> # distance between rows of X\r\n >>> euclidean_distances(X, X)\r\n array([[0., 1.],\r\n [1., 0.]])\r\n >>> # get distance to origin\r\n >>> euclidean_distances(X, [[0, 0]])\r\n array([[1. ],\r\n [1.41421356]])\r\n \"\"\"\r\n X, Y = check_pairwise_arrays(X, Y)\r\n\r\n # If norms are passed as float32, they are unused. 
If arrays are passed as\r\n # float32, norms needs to be recomputed on upcast chunks.\r\n # TODO: use a float64 accumulator in row_norms to avoid the latter.\r\n if X_norm_squared is not None:\r\n XX = check_array(X_norm_squared)\r\n if XX.shape == (1, X.shape[0]):\r\n XX = XX.T\r\n elif XX.shape != (X.shape[0], 1):\r\n raise ValueError(\r\n \"Incompatible dimensions for X and X_norm_squared\")\r\n if XX.dtype == np.float32:\r\n XX = None\r\n elif X.dtype == np.float32:\r\n XX = None\r\n else:\r\n XX = row_norms(X, squared=True)[:, np.newaxis]\r\n\r\n if X is Y and XX is not None:\r\n # shortcut in the common case euclidean_distances(X, X)\r\n YY = XX.T\r\n elif Y_norm_squared is not None:\r\n YY = np.atleast_2d(Y_norm_squared)\r\n\r\n if YY.shape != (1, Y.shape[0]):\r\n raise ValueError(\r\n \"Incompatible dimensions for Y and Y_norm_squared\")\r\n if YY.dtype == np.float32:\r\n YY = None\r\n elif Y.dtype == np.float32:\r\n YY = None\r\n else:\r\n YY = row_norms(Y, squared=True)[np.newaxis, :]\r\n\r\n if X.dtype == np.float32:\r\n # To minimize precision issues with float32, we compute the distance\r\n # matrix on chunks of X and Y upcast to float64\r\n distances = _euclidean_distances_upcast(X, XX, Y, YY)\r\n else:\r\n # if dtype is already float64, no need to chunk and upcast\r\n distances = - 2 * safe_sparse_dot(X, Y.T, dense_output=True)\r\n distances += XX\r\n distances += YY\r\n np.maximum(distances, 0, out=distances)\r\n\r\n # Ensure that distances between vectors and themselves are set to 0.0.\r\n # This may not be the case due to floating point rounding errors.\r\n if X is Y:\r\n np.fill_diagonal(distances, 0)\r\n\r\n return distances if squared else np.sqrt(distances, out=distances)\r\n\r\n\r\n@_deprecate_positional_args\r\ndef nan_euclidean_distances(X, Y=None, *, squared=False,\r\n missing_values=np.nan, copy=True):\r\n \"\"\"Calculate the euclidean distances in the presence of missing values.\r\n\r\n Compute the euclidean distance between each pair of samples in X and Y,\r\n where Y=X is assumed if Y=None. When calculating the distance between a\r\n pair of samples, this formulation ignores feature coordinates with a\r\n missing value in either sample and scales up the weight of the remaining\r\n coordinates:\r\n\r\n dist(x,y) = sqrt(weight * sq. distance from present coordinates)\r\n where,\r\n weight = Total # of coordinates / # of present coordinates\r\n\r\n For example, the distance between ``[3, na, na, 6]`` and ``[1, na, 4, 5]``\r\n is:\r\n\r\n .. math::\r\n \\\\sqrt{\\\\frac{4}{2}((3-1)^2 + (6-5)^2)}\r\n\r\n If all the coordinates are missing or if there are no common present\r\n coordinates then NaN is returned for that pair.\r\n\r\n Read more in the :ref:`User Guide <metrics>`.\r\n\r\n .. 
versionadded:: 0.22\r\n\r\n Parameters\r\n ----------\r\n X : array-like of shape=(n_samples_X, n_features)\r\n\r\n Y : array-like of shape=(n_samples_Y, n_features), default=None\r\n\r\n squared : bool, default=False\r\n Return squared Euclidean distances.\r\n\r\n missing_values : np.nan or int, default=np.nan\r\n Representation of missing value.\r\n\r\n copy : bool, default=True\r\n Make and use a deep copy of X and Y (if Y exists).\r\n\r\n Returns\r\n -------\r\n distances : ndarray of shape (n_samples_X, n_samples_Y)\r\n\r\n See Also\r\n --------\r\n paired_distances : Distances between pairs of elements of X and Y.\r\n\r\n Examples\r\n --------\r\n >>> from sklearn.metrics.pairwise import nan_euclidean_distances\r\n >>> nan = float(\"NaN\")\r\n >>> X = [[0, 1], [1, nan]]\r\n >>> nan_euclidean_distances(X, X) # distance between rows of X\r\n array([[0. , 1.41421356],\r\n [1.41421356, 0. ]])\r\n\r\n >>> # get distance to origin\r\n >>> nan_euclidean_distances(X, [[0, 0]])\r\n array([[1. ],\r\n [1.41421356]])\r\n\r\n References\r\n ----------\r\n * John K. Dixon, \"Pattern Recognition with Partly Missing Data\",\r\n IEEE Transactions on Systems, Man, and Cybernetics, Volume: 9, Issue:\r\n 10, pp. 617 - 621, Oct. 1979.\r\n http://ieeexplore.ieee.org/abstract/document/4310090/\r\n \"\"\"\r\n\r\n force_all_finite = 'allow-nan' if is_scalar_nan(missing_values) else True\r\n X, Y = check_pairwise_arrays(X, Y, accept_sparse=False,\r\n force_all_finite=force_all_finite, copy=copy)\r\n # Get missing mask for X\r\n missing_X = _get_mask(X, missing_values)\r\n\r\n # Get missing mask for Y\r\n missing_Y = missing_X if Y is X else _get_mask(Y, missing_values)\r\n\r\n # set missing values to zero\r\n X[missing_X] = 0\r\n Y[missing_Y] = 0\r\n\r\n distances = euclidean_distances(X, Y, squared=True)\r\n\r\n # Adjust distances for missing values\r\n XX = X * X\r\n YY = Y * Y\r\n distances -= np.dot(XX, missing_Y.T)\r\n distances -= np.dot(missing_X, YY.T)\r\n\r\n np.clip(distances, 0, None, out=distances)\r\n\r\n if X is Y:\r\n # Ensure that distances between vectors and themselves are set to 0.0.\r\n # This may not be the case due to floating point rounding errors.\r\n np.fill_diagonal(distances, 0.0)\r\n\r\n present_X = 1 - missing_X\r\n present_Y = present_X if Y is X else ~missing_Y\r\n present_count = np.dot(present_X, present_Y.T)\r\n distances[present_count == 0] = np.nan\r\n # avoid divide by zero\r\n np.maximum(1, present_count, out=present_count)\r\n distances /= present_count\r\n distances *= X.shape[1]\r\n\r\n if not squared:\r\n np.sqrt(distances, out=distances)\r\n\r\n return distances\r\n\r\n\r\ndef _euclidean_distances_upcast(X, XX=None, Y=None, YY=None, batch_size=None):\r\n \"\"\"Euclidean distances between X and Y.\r\n\r\n Assumes X and Y have float32 dtype.\r\n Assumes XX and YY have float64 dtype or are None.\r\n\r\n X and Y are upcast to float64 by chunks, which size is chosen to limit\r\n memory increase by approximately 10% (at least 10MiB).\r\n \"\"\"\r\n n_samples_X = X.shape[0]\r\n n_samples_Y = Y.shape[0]\r\n n_features = X.shape[1]\r\n\r\n distances = np.empty((n_samples_X, n_samples_Y), dtype=np.float32)\r\n\r\n if batch_size is None:\r\n x_density = X.nnz / np.prod(X.shape) if issparse(X) else 1\r\n y_density = Y.nnz / np.prod(Y.shape) if issparse(Y) else 1\r\n\r\n # Allow 10% more memory than X, Y and the distance matrix take (at\r\n # least 10MiB)\r\n maxmem = max(\r\n ((x_density * n_samples_X + y_density * n_samples_Y) * n_features\r\n + (x_density * n_samples_X * 
y_density * n_samples_Y)) / 10,\r\n 10 * 2 ** 17)\r\n\r\n # The increase amount of memory in 8-byte blocks is:\r\n # - x_density * batch_size * n_features (copy of chunk of X)\r\n # - y_density * batch_size * n_features (copy of chunk of Y)\r\n # - batch_size * batch_size (chunk of distance matrix)\r\n # Hence x² + (xd+yd)kx = M, where x=batch_size, k=n_features, M=maxmem\r\n # xd=x_density and yd=y_density\r\n tmp = (x_density + y_density) * n_features\r\n batch_size = (-tmp + np.sqrt(tmp ** 2 + 4 * maxmem)) / 2\r\n batch_size = max(int(batch_size), 1)\r\n\r\n x_batches = gen_batches(n_samples_X, batch_size)\r\n\r\n for i, x_slice in enumerate(x_batches):\r\n X_chunk = X[x_slice].astype(np.float64)\r\n if XX is None:\r\n XX_chunk = row_norms(X_chunk, squared=True)[:, np.newaxis]\r\n else:\r\n XX_chunk = XX[x_slice]\r\n\r\n y_batches = gen_batches(n_samples_Y, batch_size)\r\n\r\n for j, y_slice in enumerate(y_batches):\r\n if X is Y and j < i:\r\n # when X is Y the distance matrix is symmetric so we only need\r\n # to compute half of it.\r\n d = distances[y_slice, x_slice].T\r\n\r\n else:\r\n Y_chunk = Y[y_slice].astype(np.float64)\r\n if YY is None:\r\n YY_chunk = row_norms(Y_chunk, squared=True)[np.newaxis, :]\r\n else:\r\n YY_chunk = YY[:, y_slice]\r\n\r\n d = -2 * safe_sparse_dot(X_chunk, Y_chunk.T, dense_output=True)\r\n d += XX_chunk\r\n d += YY_chunk\r\n\r\n distances[x_slice, y_slice] = d.astype(np.float32, copy=False)\r\n\r\n return distances\r\n\r\n\r\ndef _argmin_min_reduce(dist, start):\r\n indices = dist.argmin(axis=1)\r\n values = dist[np.arange(dist.shape[0]), indices]\r\n return indices, values\r\n\r\n\r\n@_deprecate_positional_args\r\ndef pairwise_distances_argmin_min(X, Y, *, axis=1, metric=\"euclidean\",\r\n metric_kwargs=None):\r\n \"\"\"Compute minimum distances between one point and a set of points.\r\n\r\n This function computes for each row in X, the index of the row of Y which\r\n is closest (according to the specified distance). The minimal distances are\r\n also returned.\r\n\r\n This is mostly equivalent to calling:\r\n\r\n (pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),\r\n pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))\r\n\r\n but uses much less memory, and is faster for large arrays.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} of shape (n_samples_X, n_features)\r\n Array containing points.\r\n\r\n Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)\r\n Array containing points.\r\n\r\n axis : int, default=1\r\n Axis along which the argmin and distances are to be computed.\r\n\r\n metric : str or callable, default='euclidean'\r\n Metric to use for distance computation. Any metric from scikit-learn\r\n or scipy.spatial.distance can be used.\r\n\r\n If metric is a callable function, it is called on each\r\n pair of instances (rows) and the resulting value recorded. The callable\r\n should take two arrays as input and return one value indicating the\r\n distance between them. 
This works for Scipy's metrics, but is less\r\n efficient than passing the metric name as a string.\r\n\r\n Distance matrices are not supported.\r\n\r\n Valid values for metric are:\r\n\r\n - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',\r\n 'manhattan']\r\n\r\n - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',\r\n 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',\r\n 'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',\r\n 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',\r\n 'yule']\r\n\r\n See the documentation for scipy.spatial.distance for details on these\r\n metrics.\r\n\r\n metric_kwargs : dict, default=None\r\n Keyword arguments to pass to specified metric function.\r\n\r\n Returns\r\n -------\r\n argmin : ndarray\r\n Y[argmin[i], :] is the row in Y that is closest to X[i, :].\r\n\r\n distances : ndarray\r\n distances[i] is the distance between the i-th row in X and the\r\n argmin[i]-th row in Y.\r\n\r\n See Also\r\n --------\r\n sklearn.metrics.pairwise_distances\r\n sklearn.metrics.pairwise_distances_argmin\r\n \"\"\"\r\n X, Y = check_pairwise_arrays(X, Y)\r\n\r\n if metric_kwargs is None:\r\n metric_kwargs = {}\r\n\r\n if axis == 0:\r\n X, Y = Y, X\r\n\r\n indices, values = zip(*pairwise_distances_chunked(\r\n X, Y, reduce_func=_argmin_min_reduce, metric=metric,\r\n **metric_kwargs))\r\n indices = np.concatenate(indices)\r\n values = np.concatenate(values)\r\n\r\n return indices, values\r\n\r\n\r\n@_deprecate_positional_args\r\ndef pairwise_distances_argmin(X, Y, *, axis=1, metric=\"euclidean\",\r\n metric_kwargs=None):\r\n \"\"\"Compute minimum distances between one point and a set of points.\r\n\r\n This function computes for each row in X, the index of the row of Y which\r\n is closest (according to the specified distance).\r\n\r\n This is mostly equivalent to calling:\r\n\r\n pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)\r\n\r\n but uses much less memory, and is faster for large arrays.\r\n\r\n This function works with dense 2D arrays only.\r\n\r\n Parameters\r\n ----------\r\n X : array-like of shape (n_samples_X, n_features)\r\n Array containing points.\r\n\r\n Y : array-like of shape (n_samples_Y, n_features)\r\n Arrays containing points.\r\n\r\n axis : int, default=1\r\n Axis along which the argmin and distances are to be computed.\r\n\r\n metric : str or callable, default=\"euclidean\"\r\n Metric to use for distance computation. Any metric from scikit-learn\r\n or scipy.spatial.distance can be used.\r\n\r\n If metric is a callable function, it is called on each\r\n pair of instances (rows) and the resulting value recorded. The callable\r\n should take two arrays as input and return one value indicating the\r\n distance between them. 
This works for Scipy's metrics, but is less\r\n efficient than passing the metric name as a string.\r\n\r\n Distance matrices are not supported.\r\n\r\n Valid values for metric are:\r\n\r\n - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',\r\n 'manhattan']\r\n\r\n - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',\r\n 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',\r\n 'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',\r\n 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',\r\n 'yule']\r\n\r\n See the documentation for scipy.spatial.distance for details on these\r\n metrics.\r\n\r\n metric_kwargs : dict, default=None\r\n Keyword arguments to pass to specified metric function.\r\n\r\n Returns\r\n -------\r\n argmin : numpy.ndarray\r\n Y[argmin[i], :] is the row in Y that is closest to X[i, :].\r\n\r\n See Also\r\n --------\r\n sklearn.metrics.pairwise_distances\r\n sklearn.metrics.pairwise_distances_argmin_min\r\n \"\"\"\r\n if metric_kwargs is None:\r\n metric_kwargs = {}\r\n\r\n return pairwise_distances_argmin_min(X, Y, axis=axis, metric=metric,\r\n metric_kwargs=metric_kwargs)[0]\r\n\r\n\r\ndef haversine_distances(X, Y=None):\r\n \"\"\"Compute the Haversine distance between samples in X and Y.\r\n\r\n The Haversine (or great circle) distance is the angular distance between\r\n two points on the surface of a sphere. The first coordinate of each point\r\n is assumed to be the latitude, the second is the longitude, given\r\n in radians. The dimension of the data must be 2.\r\n\r\n .. math::\r\n D(x, y) = 2\\\\arcsin[\\\\sqrt{\\\\sin^2((x1 - y1) / 2)\r\n + \\\\cos(x1)\\\\cos(y1)\\\\sin^2((x2 - y2) / 2)}]\r\n\r\n Parameters\r\n ----------\r\n X : array-like of shape (n_samples_X, 2)\r\n\r\n Y : array-like of shape (n_samples_Y, 2), default=None\r\n\r\n Returns\r\n -------\r\n distance : ndarray of shape (n_samples_X, n_samples_Y)\r\n\r\n Notes\r\n -----\r\n As the Earth is nearly spherical, the haversine formula provides a good\r\n approximation of the distance between two points of the Earth surface, with\r\n a less than 1% error on average.\r\n\r\n Examples\r\n --------\r\n We want to calculate the distance between the Ezeiza Airport\r\n (Buenos Aires, Argentina) and the Charles de Gaulle Airport (Paris,\r\n France).\r\n\r\n >>> from sklearn.metrics.pairwise import haversine_distances\r\n >>> from math import radians\r\n >>> bsas = [-34.83333, -58.5166646]\r\n >>> paris = [49.0083899664, 2.53844117956]\r\n >>> bsas_in_radians = [radians(_) for _ in bsas]\r\n >>> paris_in_radians = [radians(_) for _ in paris]\r\n >>> result = haversine_distances([bsas_in_radians, paris_in_radians])\r\n >>> result * 6371000/1000 # multiply by Earth radius to get kilometers\r\n array([[ 0. , 11099.54035582],\r\n [11099.54035582, 0. 
]])\r\n \"\"\"\r\n from sklearn.neighbors import DistanceMetric\r\n return DistanceMetric.get_metric('haversine').pairwise(X, Y)\r\n\r\n\r\n@_deprecate_positional_args\r\ndef manhattan_distances(X, Y=None, *, sum_over_features=True):\r\n \"\"\"Compute the L1 distances between the vectors in X and Y.\r\n\r\n With sum_over_features equal to False it returns the componentwise\r\n distances.\r\n\r\n Read more in the :ref:`User Guide <metrics>`.\r\n\r\n Parameters\r\n ----------\r\n X : array-like of shape (n_samples_X, n_features)\r\n\r\n Y : array-like of shape (n_samples_Y, n_features), default=None\r\n\r\n sum_over_features : bool, default=True\r\n If True the function returns the pairwise distance matrix\r\n else it returns the componentwise L1 pairwise-distances.\r\n Not supported for sparse matrix inputs.\r\n\r\n Returns\r\n -------\r\n D : ndarray of shape (n_samples_X * n_samples_Y, n_features) or \\\r\n (n_samples_X, n_samples_Y)\r\n If sum_over_features is False shape is\r\n (n_samples_X * n_samples_Y, n_features) and D contains the\r\n componentwise L1 pairwise-distances (ie. absolute difference),\r\n else shape is (n_samples_X, n_samples_Y) and D contains\r\n the pairwise L1 distances.\r\n\r\n Notes\r\n --------\r\n When X and/or Y are CSR sparse matrices and they are not already\r\n in canonical format, this function modifies them in-place to\r\n make them canonical.\r\n\r\n Examples\r\n --------\r\n >>> from sklearn.metrics.pairwise import manhattan_distances\r\n >>> manhattan_distances([[3]], [[3]])\r\n array([[0.]])\r\n >>> manhattan_distances([[3]], [[2]])\r\n array([[1.]])\r\n >>> manhattan_distances([[2]], [[3]])\r\n array([[1.]])\r\n >>> manhattan_distances([[1, 2], [3, 4]],\\\r\n [[1, 2], [0, 3]])\r\n array([[0., 2.],\r\n [4., 4.]])\r\n >>> import numpy as np\r\n >>> X = np.ones((1, 2))\r\n >>> y = np.full((2, 2), 2.)\r\n >>> manhattan_distances(X, y, sum_over_features=False)\r\n array([[1., 1.],\r\n [1., 1.]])\r\n \"\"\"\r\n X, Y = check_pairwise_arrays(X, Y)\r\n\r\n if issparse(X) or issparse(Y):\r\n if not sum_over_features:\r\n raise TypeError(\"sum_over_features=%r not supported\"\r\n \" for sparse matrices\" % sum_over_features)\r\n\r\n X = csr_matrix(X, copy=False)\r\n Y = csr_matrix(Y, copy=False)\r\n X.sum_duplicates() # this also sorts indices in-place\r\n Y.sum_duplicates()\r\n D = np.zeros((X.shape[0], Y.shape[0]))\r\n _sparse_manhattan(X.data, X.indices, X.indptr,\r\n Y.data, Y.indices, Y.indptr,\r\n D)\r\n return D\r\n\r\n if sum_over_features:\r\n return distance.cdist(X, Y, 'cityblock')\r\n\r\n D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]\r\n D = np.abs(D, D)\r\n return D.reshape((-1, X.shape[1]))\r\n\r\n\r\ndef cosine_distances(X, Y=None):\r\n \"\"\"Compute cosine distance between samples in X and Y.\r\n\r\n Cosine distance is defined as 1.0 minus the cosine similarity.\r\n\r\n Read more in the :ref:`User Guide <metrics>`.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} of shape (n_samples_X, n_features)\r\n Matrix `X`.\r\n\r\n Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), \\\r\n default=None\r\n Matrix `Y`.\r\n\r\n Returns\r\n -------\r\n distance matrix : ndarray of shape (n_samples_X, n_samples_Y)\r\n\r\n See Also\r\n --------\r\n cosine_similarity\r\n scipy.spatial.distance.cosine : Dense matrices only.\r\n \"\"\"\r\n # 1.0 - cosine_similarity(X, Y) without copy\r\n S = cosine_similarity(X, Y)\r\n S *= -1\r\n S += 1\r\n np.clip(S, 0, 2, out=S)\r\n if X is Y or Y is None:\r\n # Ensure that distances between 
vectors and themselves are set to 0.0.\r\n # This may not be the case due to floating point rounding errors.\r\n S[np.diag_indices_from(S)] = 0.0\r\n return S\r\n\r\n\r\n# Paired distances\r\ndef paired_euclidean_distances(X, Y):\r\n \"\"\"\r\n Computes the paired euclidean distances between X and Y.\r\n\r\n Read more in the :ref:`User Guide <metrics>`.\r\n\r\n Parameters\r\n ----------\r\n X : array-like of shape (n_samples, n_features)\r\n\r\n Y : array-like of shape (n_samples, n_features)\r\n\r\n Returns\r\n -------\r\n distances : ndarray of shape (n_samples,)\r\n \"\"\"\r\n X, Y = check_paired_arrays(X, Y)\r\n return row_norms(X - Y)\r\n\r\n\r\ndef paired_manhattan_distances(X, Y):\r\n \"\"\"Compute the L1 distances between the vectors in X and Y.\r\n\r\n Read more in the :ref:`User Guide <metrics>`.\r\n\r\n Parameters\r\n ----------\r\n X : array-like of shape (n_samples, n_features)\r\n\r\n Y : array-like of shape (n_samples, n_features)\r\n\r\n Returns\r\n -------\r\n distances : ndarray of shape (n_samples,)\r\n \"\"\"\r\n X, Y = check_paired_arrays(X, Y)\r\n diff = X - Y\r\n if issparse(diff):\r\n diff.data = np.abs(diff.data)\r\n return np.squeeze(np.array(diff.sum(axis=1)))\r\n else:\r\n return np.abs(diff).sum(axis=-1)\r\n\r\n\r\ndef paired_cosine_distances(X, Y):\r\n \"\"\"\r\n Computes the paired cosine distances between X and Y.\r\n\r\n Read more in the :ref:`User Guide <metrics>`.\r\n\r\n Parameters\r\n ----------\r\n X : array-like of shape (n_samples, n_features)\r\n\r\n Y : array-like of shape (n_samples, n_features)\r\n\r\n Returns\r\n -------\r\n distances : ndarray of shape (n_samples,)\r\n\r\n Notes\r\n -----\r\n The cosine distance is equivalent to the half the squared\r\n euclidean distance if each sample is normalized to unit norm.\r\n \"\"\"\r\n X, Y = check_paired_arrays(X, Y)\r\n return .5 * row_norms(normalize(X) - normalize(Y), squared=True)\r\n\r\n\r\nPAIRED_DISTANCES = {\r\n 'cosine': paired_cosine_distances,\r\n 'euclidean': paired_euclidean_distances,\r\n 'l2': paired_euclidean_distances,\r\n 'l1': paired_manhattan_distances,\r\n 'manhattan': paired_manhattan_distances,\r\n 'cityblock': paired_manhattan_distances}\r\n\r\n\r\n@_deprecate_positional_args\r\ndef paired_distances(X, Y, *, metric=\"euclidean\", **kwds):\r\n \"\"\"\r\n Computes the paired distances between X and Y.\r\n\r\n Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...\r\n\r\n Read more in the :ref:`User Guide <metrics>`.\r\n\r\n Parameters\r\n ----------\r\n X : ndarray of shape (n_samples, n_features)\r\n Array 1 for distance computation.\r\n\r\n Y : ndarray of shape (n_samples, n_features)\r\n Array 2 for distance computation.\r\n\r\n metric : str or callable, default=\"euclidean\"\r\n The metric to use when calculating distance between instances in a\r\n feature array. If metric is a string, it must be one of the options\r\n specified in PAIRED_DISTANCES, including \"euclidean\",\r\n \"manhattan\", or \"cosine\".\r\n Alternatively, if metric is a callable function, it is called on each\r\n pair of instances (rows) and the resulting value recorded. 
The callable\r\n should take two arrays from X as input and return a value indicating\r\n the distance between them.\r\n\r\n Returns\r\n -------\r\n distances : ndarray of shape (n_samples,)\r\n\r\n See Also\r\n --------\r\n pairwise_distances : Computes the distance between every pair of samples.\r\n\r\n Examples\r\n --------\r\n >>> from sklearn.metrics.pairwise import paired_distances\r\n >>> X = [[0, 1], [1, 1]]\r\n >>> Y = [[0, 1], [2, 1]]\r\n >>> paired_distances(X, Y)\r\n array([0., 1.])\r\n \"\"\"\r\n\r\n if metric in PAIRED_DISTANCES:\r\n func = PAIRED_DISTANCES[metric]\r\n return func(X, Y)\r\n elif callable(metric):\r\n # Check the matrix first (it is usually done by the metric)\r\n X, Y = check_paired_arrays(X, Y)\r\n distances = np.zeros(len(X))\r\n for i in range(len(X)):\r\n distances[i] = metric(X[i], Y[i])\r\n return distances\r\n else:\r\n raise ValueError('Unknown distance %s' % metric)\r\n\r\n\r\n# Kernels\r\ndef linear_kernel(X, Y=None, dense_output=True):\r\n \"\"\"\r\n Compute the linear kernel between X and Y.\r\n\r\n Read more in the :ref:`User Guide <linear_kernel>`.\r\n\r\n Parameters\r\n ----------\r\n X : ndarray of shape (n_samples_X, n_features)\r\n\r\n Y : ndarray of shape (n_samples_Y, n_features), default=None\r\n\r\n dense_output : bool, default=True\r\n Whether to return dense output even when the input is sparse. If\r\n ``False``, the output is sparse if both input arrays are sparse.\r\n\r\n .. versionadded:: 0.20\r\n\r\n Returns\r\n -------\r\n Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)\r\n \"\"\"\r\n X, Y = check_pairwise_arrays(X, Y)\r\n return safe_sparse_dot(X, Y.T, dense_output=dense_output)\r\n\r\n\r\ndef polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):\r\n \"\"\"\r\n Compute the polynomial kernel between X and Y::\r\n\r\n K(X, Y) = (gamma <X, Y> + coef0)^degree\r\n\r\n Read more in the :ref:`User Guide <polynomial_kernel>`.\r\n\r\n Parameters\r\n ----------\r\n X : ndarray of shape (n_samples_X, n_features)\r\n\r\n Y : ndarray of shape (n_samples_Y, n_features), default=None\r\n\r\n degree : int, default=3\r\n\r\n gamma : float, default=None\r\n If None, defaults to 1.0 / n_features.\r\n\r\n coef0 : float, default=1\r\n\r\n Returns\r\n -------\r\n Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)\r\n \"\"\"\r\n X, Y = check_pairwise_arrays(X, Y)\r\n if gamma is None:\r\n gamma = 1.0 / X.shape[1]\r\n\r\n K = safe_sparse_dot(X, Y.T, dense_output=True)\r\n K *= gamma\r\n K += coef0\r\n K **= degree\r\n return K\r\n\r\n\r\ndef sigmoid_kernel(X, Y=None, gamma=None, coef0=1):\r\n \"\"\"\r\n Compute the sigmoid kernel between X and Y::\r\n\r\n K(X, Y) = tanh(gamma <X, Y> + coef0)\r\n\r\n Read more in the :ref:`User Guide <sigmoid_kernel>`.\r\n\r\n Parameters\r\n ----------\r\n X : ndarray of shape (n_samples_X, n_features)\r\n\r\n Y : ndarray of shape (n_samples_Y, n_features), default=None\r\n\r\n gamma : float, default=None\r\n If None, defaults to 1.0 / n_features.\r\n\r\n coef0 : float, default=1\r\n\r\n Returns\r\n -------\r\n Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)\r\n \"\"\"\r\n X, Y = check_pairwise_arrays(X, Y)\r\n if gamma is None:\r\n gamma = 1.0 / X.shape[1]\r\n\r\n K = safe_sparse_dot(X, Y.T, dense_output=True)\r\n K *= gamma\r\n K += coef0\r\n np.tanh(K, K) # compute tanh in-place\r\n return K\r\n\r\n\r\ndef rbf_kernel(X, Y=None, gamma=None):\r\n \"\"\"\r\n Compute the rbf (gaussian) kernel between X and Y::\r\n\r\n K(x, y) = exp(-gamma ||x-y||^2)\r\n\r\n for each pair of rows x in X 
and y in Y.\r\n\r\n Read more in the :ref:`User Guide <rbf_kernel>`.\r\n\r\n Parameters\r\n ----------\r\n X : ndarray of shape (n_samples_X, n_features)\r\n\r\n Y : ndarray of shape (n_samples_Y, n_features), default=None\r\n\r\n gamma : float, default=None\r\n If None, defaults to 1.0 / n_features.\r\n\r\n Returns\r\n -------\r\n kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)\r\n \"\"\"\r\n X, Y = check_pairwise_arrays(X, Y)\r\n if gamma is None:\r\n gamma = 1.0 / X.shape[1]\r\n\r\n K = euclidean_distances(X, Y, squared=True)\r\n K *= -gamma\r\n np.exp(K, K) # exponentiate K in-place\r\n return K\r\n\r\n\r\ndef laplacian_kernel(X, Y=None, gamma=None):\r\n \"\"\"Compute the laplacian kernel between X and Y.\r\n\r\n The laplacian kernel is defined as::\r\n\r\n K(x, y) = exp(-gamma ||x-y||_1)\r\n\r\n for each pair of rows x in X and y in Y.\r\n Read more in the :ref:`User Guide <laplacian_kernel>`.\r\n\r\n .. versionadded:: 0.17\r\n\r\n Parameters\r\n ----------\r\n X : ndarray of shape (n_samples_X, n_features)\r\n\r\n Y : ndarray of shape (n_samples_Y, n_features), default=None\r\n\r\n gamma : float, default=None\r\n If None, defaults to 1.0 / n_features.\r\n\r\n Returns\r\n -------\r\n kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)\r\n \"\"\"\r\n X, Y = check_pairwise_arrays(X, Y)\r\n if gamma is None:\r\n gamma = 1.0 / X.shape[1]\r\n\r\n K = -gamma * manhattan_distances(X, Y)\r\n np.exp(K, K) # exponentiate K in-place\r\n return K\r\n\r\n\r\ndef cosine_similarity(X, Y=None, dense_output=True):\r\n \"\"\"Compute cosine similarity between samples in X and Y.\r\n\r\n Cosine similarity, or the cosine kernel, computes similarity as the\r\n normalized dot product of X and Y:\r\n\r\n K(X, Y) = <X, Y> / (||X||*||Y||)\r\n\r\n On L2-normalized data, this function is equivalent to linear_kernel.\r\n\r\n Read more in the :ref:`User Guide <cosine_similarity>`.\r\n\r\n Parameters\r\n ----------\r\n X : {ndarray, sparse matrix} of shape (n_samples_X, n_features)\r\n Input data.\r\n\r\n Y : {ndarray, sparse matrix} of shape (n_samples_Y, n_features), \\\r\n default=None\r\n Input data. If ``None``, the output will be the pairwise\r\n similarities between all samples in ``X``.\r\n\r\n dense_output : bool, default=True\r\n Whether to return dense output even when the input is sparse. If\r\n ``False``, the output is sparse if both input arrays are sparse.\r\n\r\n .. versionadded:: 0.17\r\n parameter ``dense_output`` for dense output.\r\n\r\n Returns\r\n -------\r\n kernel matrix : ndarray of shape (n_samples_X, n_samples_Y)\r\n \"\"\"\r\n # to avoid recursive import\r\n\r\n X, Y = check_pairwise_arrays(X, Y)\r\n\r\n X_normalized = normalize(X, copy=True)\r\n if X is Y:\r\n Y_normalized = X_normalized\r\n else:\r\n Y_normalized = normalize(Y, copy=True)\r\n\r\n K = safe_sparse_dot(X_normalized, Y_normalized.T,\r\n dense_output=dense_output)\r\n\r\n return K\r\n\r\n\r\ndef additive_chi2_kernel(X, Y=None):\r\n \"\"\"Computes the additive chi-squared kernel between observations in X and\r\n Y.\r\n\r\n The chi-squared kernel is computed between each pair of rows in X and Y. X\r\n and Y have to be non-negative. 
This kernel is most commonly applied to\r\n histograms.\r\n\r\n The chi-squared kernel is given by::\r\n\r\n k(x, y) = -Sum [(x - y)^2 / (x + y)]\r\n\r\n It can be interpreted as a weighted difference per entry.\r\n\r\n Read more in the :ref:`User Guide <chi2_kernel>`.\r\n\r\n Notes\r\n -----\r\n As the negative of a distance, this kernel is only conditionally positive\r\n definite.\r\n\r\n\r\n Parameters\r\n ----------\r\n X : array-like of shape (n_samples_X, n_features)\r\n\r\n Y : ndarray of shape (n_samples_Y, n_features), default=None\r\n\r\n Returns\r\n -------\r\n kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)\r\n\r\n See Also\r\n --------\r\n chi2_kernel : The exponentiated version of the kernel, which is usually\r\n preferable.\r\n sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation\r\n to this kernel.\r\n\r\n References\r\n ----------\r\n * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.\r\n Local features and kernels for classification of texture and object\r\n categories: A comprehensive study\r\n International Journal of Computer Vision 2007\r\n https://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf\r\n \"\"\"\r\n if issparse(X) or issparse(Y):\r\n raise ValueError(\"additive_chi2 does not support sparse matrices.\")\r\n X, Y = check_pairwise_arrays(X, Y)\r\n if (X < 0).any():\r\n raise ValueError(\"X contains negative values.\")\r\n if Y is not X and (Y < 0).any():\r\n raise ValueError(\"Y contains negative values.\")\r\n\r\n result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)\r\n _chi2_kernel_fast(X, Y, result)\r\n return result\r\n\r\n\r\ndef chi2_kernel(X, Y=None, gamma=1.):\r\n \"\"\"Computes the exponential chi-squared kernel X and Y.\r\n\r\n The chi-squared kernel is computed between each pair of rows in X and Y. X\r\n and Y have to be non-negative. This kernel is most commonly applied to\r\n histograms.\r\n\r\n The chi-squared kernel is given by::\r\n\r\n k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])\r\n\r\n It can be interpreted as a weighted difference per entry.\r\n\r\n Read more in the :ref:`User Guide <chi2_kernel>`.\r\n\r\n Parameters\r\n ----------\r\n X : array-like of shape (n_samples_X, n_features)\r\n\r\n Y : ndarray of shape (n_samples_Y, n_features), default=None\r\n\r\n gamma : float, default=1.\r\n Scaling parameter of the chi2 kernel.\r\n\r\n Returns\r\n -------\r\n kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)\r\n\r\n See Also\r\n --------\r\n additive_chi2_kernel : The additive version of this kernel.\r\n sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation\r\n to the additive version of this kernel.\r\n\r\n References\r\n ----------\r\n * Zhang, J. and Marszalek, M. and Lazebnik, S. 
and Schmid, C.\r\n Local features and kernels for classification of texture and object\r\n categories: A comprehensive study\r\n International Journal of Computer Vision 2007\r\n https://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf\r\n \"\"\"\r\n K = additive_chi2_kernel(X, Y)\r\n K *= gamma\r\n return np.exp(K, K)\r\n\r\n\r\n# Helper functions - distance\r\nPAIRWISE_DISTANCE_FUNCTIONS = {\r\n # If updating this dictionary, update the doc in both distance_metrics()\r\n # and also in pairwise_distances()!\r\n 'cityblock': manhattan_distances,\r\n 'cosine': cosine_distances,\r\n 'euclidean': euclidean_distances,\r\n 'haversine': haversine_distances,\r\n 'l2': euclidean_distances,\r\n 'l1': manhattan_distances,\r\n 'manhattan': manhattan_distances,\r\n 'precomputed': None, # HACK: precomputed is always allowed, never called\r\n 'nan_euclidean': nan_euclidean_distances,\r\n}\r\n\r\n\r\ndef distance_metrics():\r\n \"\"\"Valid metrics for pairwise_distances.\r\n\r\n This function simply returns the valid pairwise distance metrics.\r\n It exists to allow for a description of the mapping for\r\n each of the valid strings.\r\n\r\n The valid distance metrics, and the function they map to, are:\r\n\r\n =============== ========================================\r\n metric Function\r\n =============== ========================================\r\n 'cityblock' metrics.pairwise.manhattan_distances\r\n 'cosine' metrics.pairwise.cosine_distances\r\n 'euclidean' metrics.pairwise.euclidean_distances\r\n 'haversine' metrics.pairwise.haversine_distances\r\n 'l1' metrics.pairwise.manhattan_distances\r\n 'l2' metrics.pairwise.euclidean_distances\r\n 'manhattan' metrics.pairwise.manhattan_distances\r\n 'nan_euclidean' metrics.pairwise.nan_euclidean_distances\r\n =============== ========================================\r\n\r\n Read more in the :ref:`User Guide <metrics>`.\r\n\r\n \"\"\"\r\n return PAIRWISE_DISTANCE_FUNCTIONS\r\n\r\n\r\ndef _dist_wrapper(dist_func, dist_matrix, slice_, *args, **kwargs):\r\n \"\"\"Write in-place to a slice of a distance matrix.\"\"\"\r\n dist_matrix[:, slice_] = dist_func(*args, **kwargs)\r\n\r\n\r\ndef _parallel_pairwise(X, Y, func, n_jobs, **kwds):\r\n \"\"\"Break the pairwise matrix in n_jobs even slices\r\n and compute them in parallel.\"\"\"\r\n\r\n if Y is None:\r\n Y = X\r\n X, Y, dtype = _return_float_dtype(X, Y)\r\n\r\n if effective_n_jobs(n_jobs) == 1:\r\n return func(X, Y, **kwds)\r\n\r\n # enforce a threading backend to prevent data communication overhead\r\n fd = delayed(_dist_wrapper)\r\n ret = np.empty((X.shape[0], Y.shape[0]), dtype=dtype, order='F')\r\n Parallel(backend=\"threading\", n_jobs=n_jobs)(\r\n fd(func, ret, s, X, Y[s], **kwds)\r\n for s in gen_even_slices(_num_samples(Y), effective_n_jobs(n_jobs)))\r\n\r\n if (X is Y or Y is None) and func is euclidean_distances:\r\n # zeroing diagonal for euclidean norm.\r\n # TODO: do it also for other norms.\r\n np.fill_diagonal(ret, 0)\r\n\r\n return ret\r\n\r\n\r\ndef _pairwise_callable(X, Y, metric, force_all_finite=True, **kwds):\r\n \"\"\"Handle the callable case for pairwise_{distances,kernels}.\r\n \"\"\"\r\n X, Y = check_pairwise_arrays(X, Y, force_all_finite=force_all_finite)\r\n\r\n if X is Y:\r\n # Only calculate metric for upper triangle\r\n out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')\r\n iterator = itertools.combinations(range(X.shape[0]), 2)\r\n for i, j in iterator:\r\n out[i, j] = metric(X[i], Y[j], **kwds)\r\n\r\n # Make symmetric\r\n # NB: out += out.T 
will produce incorrect results\r\n out = out + out.T\r\n\r\n # Calculate diagonal\r\n # NB: nonzero diagonals are allowed for both metrics and kernels\r\n for i in range(X.shape[0]):\r\n x = X[i]\r\n out[i, i] = metric(x, x, **kwds)\r\n\r\n else:\r\n # Calculate all cells\r\n out = np.empty((X.shape[0], Y.shape[0]), dtype='float')\r\n iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))\r\n for i, j in iterator:\r\n out[i, j] = metric(X[i], Y[j], **kwds)\r\n\r\n return out\r\n\r\n\r\n_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',\r\n 'braycurtis', 'canberra', 'chebyshev', 'correlation',\r\n 'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',\r\n 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',\r\n 'russellrao', 'seuclidean', 'sokalmichener',\r\n 'sokalsneath', 'sqeuclidean', 'yule', \"wminkowski\",\r\n 'nan_euclidean', 'haversine']\r\n\r\n_NAN_METRICS = ['nan_euclidean']\r\n\r\n\r\ndef _check_chunk_size(reduced, chunk_size):\r\n \"\"\"Checks chunk is a sequence of expected size or a tuple of same.\r\n \"\"\"\r\n if reduced is None:\r\n return\r\n is_tuple = isinstance(reduced, tuple)\r\n if not is_tuple:\r\n reduced = (reduced,)\r\n if any(isinstance(r, tuple) or not hasattr(r, '__iter__')\r\n for r in reduced):\r\n raise TypeError('reduce_func returned %r. '\r\n 'Expected sequence(s) of length %d.' %\r\n (reduced if is_tuple else reduced[0], chunk_size))\r\n if any(_num_samples(r) != chunk_size for r in reduced):\r\n actual_size = tuple(_num_samples(r) for r in reduced)\r\n raise ValueError('reduce_func returned object of length %s. '\r\n 'Expected same length as input: %d.' %\r\n (actual_size if is_tuple else actual_size[0],\r\n chunk_size))\r\n\r\n\r\ndef _precompute_metric_params(X, Y, metric=None, **kwds):\r\n \"\"\"Precompute data-derived metric parameters if not provided.\r\n \"\"\"\r\n if metric == \"seuclidean\" and 'V' not in kwds:\r\n # There is a bug in scipy < 1.5 that will cause a crash if\r\n # X.dtype != np.double (float64). See PR #15730\r\n dtype = np.float64 if sp_version < parse_version('1.5') else None\r\n if X is Y:\r\n V = np.var(X, axis=0, ddof=1, dtype=dtype)\r\n else:\r\n warnings.warn(\r\n \"from version 1.0 (renaming of 0.25), pairwise_distances for \"\r\n \"metric='seuclidean' will require V to be specified if Y is \"\r\n \"passed.\",\r\n FutureWarning\r\n )\r\n V = np.var(np.vstack([X, Y]), axis=0, ddof=1, dtype=dtype)\r\n return {'V': V}\r\n if metric == \"mahalanobis\" and 'VI' not in kwds:\r\n if X is Y:\r\n VI = np.linalg.inv(np.cov(X.T)).T\r\n else:\r\n warnings.warn(\r\n \"from version 1.0 (renaming of 0.25), pairwise_distances for \"\r\n \"metric='mahalanobis' will require VI to be specified if Y \"\r\n \"is passed.\",\r\n FutureWarning\r\n )\r\n VI = np.linalg.inv(np.cov(np.vstack([X, Y]).T)).T\r\n return {'VI': VI}\r\n return {}\r\n\r\n\r\n@_deprecate_positional_args\r\ndef pairwise_distances_chunked(X, Y=None, *, reduce_func=None,\r\n metric='euclidean', n_jobs=None,\r\n working_memory=None, **kwds):\r\n \"\"\"Generate a distance matrix chunk by chunk with optional reduction.\r\n\r\n In cases where not all of a pairwise distance matrix needs to be stored at\r\n once, this is used to calculate pairwise distances in\r\n ``working_memory``-sized chunks. 
If ``reduce_func`` is given, it is run\r\n on each chunk and its return values are concatenated into lists, arrays\r\n or sparse matrices.\r\n\r\n Parameters\r\n ----------\r\n X : ndarray of shape (n_samples_X, n_samples_X) or \\\r\n (n_samples_X, n_features)\r\n Array of pairwise distances between samples, or a feature array.\r\n The shape the array should be (n_samples_X, n_samples_X) if\r\n metric='precomputed' and (n_samples_X, n_features) otherwise.\r\n\r\n Y : ndarray of shape (n_samples_Y, n_features), default=None\r\n An optional second feature array. Only allowed if\r\n metric != \"precomputed\".\r\n\r\n reduce_func : callable, default=None\r\n The function which is applied on each chunk of the distance matrix,\r\n reducing it to needed values. ``reduce_func(D_chunk, start)``\r\n is called repeatedly, where ``D_chunk`` is a contiguous vertical\r\n slice of the pairwise distance matrix, starting at row ``start``.\r\n It should return one of: None; an array, a list, or a sparse matrix\r\n of length ``D_chunk.shape[0]``; or a tuple of such objects. Returning\r\n None is useful for in-place operations, rather than reductions.\r\n\r\n If None, pairwise_distances_chunked returns a generator of vertical\r\n chunks of the distance matrix.\r\n\r\n metric : str or callable, default='euclidean'\r\n The metric to use when calculating distance between instances in a\r\n feature array. If metric is a string, it must be one of the options\r\n allowed by scipy.spatial.distance.pdist for its metric parameter, or\r\n a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.\r\n If metric is \"precomputed\", X is assumed to be a distance matrix.\r\n Alternatively, if metric is a callable function, it is called on each\r\n pair of instances (rows) and the resulting value recorded. The callable\r\n should take two arrays from X as input and return a value indicating\r\n the distance between them.\r\n\r\n n_jobs : int, default=None\r\n The number of jobs to use for the computation. This works by breaking\r\n down the pairwise matrix into n_jobs even slices and computing them in\r\n parallel.\r\n\r\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\r\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\r\n for more details.\r\n\r\n working_memory : int, default=None\r\n The sought maximum memory for temporary distance matrix chunks.\r\n When None (default), the value of\r\n ``sklearn.get_config()['working_memory']`` is used.\r\n\r\n `**kwds` : optional keyword parameters\r\n Any further parameters are passed directly to the distance function.\r\n If using a scipy.spatial.distance metric, the parameters are still\r\n metric dependent. See the scipy docs for usage examples.\r\n\r\n Yields\r\n ------\r\n D_chunk : {ndarray, sparse matrix}\r\n A contiguous slice of distance matrix, optionally processed by\r\n ``reduce_func``.\r\n\r\n Examples\r\n --------\r\n Without reduce_func:\r\n\r\n >>> import numpy as np\r\n >>> from sklearn.metrics import pairwise_distances_chunked\r\n >>> X = np.random.RandomState(0).rand(5, 3)\r\n >>> D_chunk = next(pairwise_distances_chunked(X))\r\n >>> D_chunk\r\n array([[0. ..., 0.29..., 0.41..., 0.19..., 0.57...],\r\n [0.29..., 0. ..., 0.57..., 0.41..., 0.76...],\r\n [0.41..., 0.57..., 0. ..., 0.44..., 0.90...],\r\n [0.19..., 0.41..., 0.44..., 0. ..., 0.51...],\r\n [0.57..., 0.76..., 0.90..., 0.51..., 0. 
...]])\r\n\r\n Retrieve all neighbors and average distance within radius r:\r\n\r\n >>> r = .2\r\n >>> def reduce_func(D_chunk, start):\r\n ... neigh = [np.flatnonzero(d < r) for d in D_chunk]\r\n ... avg_dist = (D_chunk * (D_chunk < r)).mean(axis=1)\r\n ... return neigh, avg_dist\r\n >>> gen = pairwise_distances_chunked(X, reduce_func=reduce_func)\r\n >>> neigh, avg_dist = next(gen)\r\n >>> neigh\r\n [array([0, 3]), array([1]), array([2]), array([0, 3]), array([4])]\r\n >>> avg_dist\r\n array([0.039..., 0. , 0. , 0.039..., 0. ])\r\n\r\n Where r is defined per sample, we need to make use of ``start``:\r\n\r\n >>> r = [.2, .4, .4, .3, .1]\r\n >>> def reduce_func(D_chunk, start):\r\n ... neigh = [np.flatnonzero(d < r[i])\r\n ... for i, d in enumerate(D_chunk, start)]\r\n ... return neigh\r\n >>> neigh = next(pairwise_distances_chunked(X, reduce_func=reduce_func))\r\n >>> neigh\r\n [array([0, 3]), array([0, 1]), array([2]), array([0, 3]), array([4])]\r\n\r\n Force row-by-row generation by reducing ``working_memory``:\r\n\r\n >>> gen = pairwise_distances_chunked(X, reduce_func=reduce_func,\r\n ... working_memory=0)\r\n >>> next(gen)\r\n [array([0, 3])]\r\n >>> next(gen)\r\n [array([0, 1])]\r\n \"\"\"\r\n n_samples_X = _num_samples(X)\r\n if metric == 'precomputed':\r\n slices = (slice(0, n_samples_X),)\r\n else:\r\n if Y is None:\r\n Y = X\r\n # We get as many rows as possible within our working_memory budget to\r\n # store len(Y) distances in each row of output.\r\n #\r\n # Note:\r\n # - this will get at least 1 row, even if 1 row of distances will\r\n # exceed working_memory.\r\n # - this does not account for any temporary memory usage while\r\n # calculating distances (e.g. difference of vectors in manhattan\r\n # distance.\r\n chunk_n_rows = get_chunk_n_rows(row_bytes=8 * _num_samples(Y),\r\n max_n_rows=n_samples_X,\r\n working_memory=working_memory)\r\n slices = gen_batches(n_samples_X, chunk_n_rows)\r\n\r\n # precompute data-derived metric params\r\n params = _precompute_metric_params(X, Y, metric=metric, **kwds)\r\n kwds.update(**params)\r\n\r\n for sl in slices:\r\n if sl.start == 0 and sl.stop == n_samples_X:\r\n X_chunk = X # enable optimised paths for X is Y\r\n else:\r\n X_chunk = X[sl]\r\n D_chunk = pairwise_distances(X_chunk, Y, metric=metric,\r\n n_jobs=n_jobs, **kwds)\r\n if ((X is Y or Y is None)\r\n and PAIRWISE_DISTANCE_FUNCTIONS.get(metric, None)\r\n is euclidean_distances):\r\n # zeroing diagonal, taking care of aliases of \"euclidean\",\r\n # i.e. \"l2\"\r\n D_chunk.flat[sl.start::_num_samples(X) + 1] = 0\r\n if reduce_func is not None:\r\n chunk_size = D_chunk.shape[0]\r\n D_chunk = reduce_func(D_chunk, sl.start)\r\n _check_chunk_size(D_chunk, chunk_size)\r\n yield D_chunk\r\n\r\n\r\n@_deprecate_positional_args\r\ndef pairwise_distances(X, Y=None, metric=\"euclidean\", *, n_jobs=None,\r\n force_all_finite=True, **kwds):\r\n \"\"\"Compute the distance matrix from a vector array X and optional Y.\r\n\r\n This method takes either a vector array or a distance matrix, and returns\r\n a distance matrix. If the input is a vector array, the distances are\r\n computed. 
If the input is a distances matrix, it is returned instead.\r\n\r\n This method provides a safe way to take a distance matrix as input, while\r\n preserving compatibility with many other algorithms that take a vector\r\n array.\r\n\r\n If Y is given (default is None), then the returned matrix is the pairwise\r\n distance between the arrays from both X and Y.\r\n\r\n Valid values for metric are:\r\n\r\n - From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',\r\n 'manhattan']. These metrics support sparse matrix\r\n inputs.\r\n ['nan_euclidean'] but it does not yet support sparse matrices.\r\n\r\n - From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',\r\n 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',\r\n 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',\r\n 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']\r\n See the documentation for scipy.spatial.distance for details on these\r\n metrics. These metrics do not support sparse matrix inputs.\r\n\r\n Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are\r\n valid scipy.spatial.distance metrics), the scikit-learn implementation\r\n will be used, which is faster and has support for sparse matrices (except\r\n for 'cityblock'). For a verbose description of the metrics from\r\n scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics\r\n function.\r\n\r\n Read more in the :ref:`User Guide <metrics>`.\r\n\r\n Parameters\r\n ----------\r\n X : ndarray of shape (n_samples_X, n_samples_X) or \\\r\n (n_samples_X, n_features)\r\n Array of pairwise distances between samples, or a feature array.\r\n The shape of the array should be (n_samples_X, n_samples_X) if\r\n metric == \"precomputed\" and (n_samples_X, n_features) otherwise.\r\n\r\n Y : ndarray of shape (n_samples_Y, n_features), default=None\r\n An optional second feature array. Only allowed if\r\n metric != \"precomputed\".\r\n\r\n metric : str or callable, default='euclidean'\r\n The metric to use when calculating distance between instances in a\r\n feature array. If metric is a string, it must be one of the options\r\n allowed by scipy.spatial.distance.pdist for its metric parameter, or\r\n a metric listed in ``pairwise.PAIRWISE_DISTANCE_FUNCTIONS``.\r\n If metric is \"precomputed\", X is assumed to be a distance matrix.\r\n Alternatively, if metric is a callable function, it is called on each\r\n pair of instances (rows) and the resulting value recorded. The callable\r\n should take two arrays from X as input and return a value indicating\r\n the distance between them.\r\n\r\n n_jobs : int, default=None\r\n The number of jobs to use for the computation. This works by breaking\r\n down the pairwise matrix into n_jobs even slices and computing them in\r\n parallel.\r\n\r\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\r\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\r\n for more details.\r\n\r\n force_all_finite : bool or 'allow-nan', default=True\r\n Whether to raise an error on np.inf, np.nan, pd.NA in array. Ignored\r\n for a metric listed in ``pairwise.PAIRWISE_DISTANCE_FUNCTIONS``. The\r\n possibilities are:\r\n\r\n - True: Force all values of array to be finite.\r\n - False: accepts np.inf, np.nan, pd.NA in array.\r\n - 'allow-nan': accepts only np.nan and pd.NA values in array. Values\r\n cannot be infinite.\r\n\r\n .. versionadded:: 0.22\r\n ``force_all_finite`` accepts the string ``'allow-nan'``.\r\n\r\n .. 
versionchanged:: 0.23\r\n Accepts `pd.NA` and converts it into `np.nan`.\r\n\r\n **kwds : optional keyword parameters\r\n Any further parameters are passed directly to the distance function.\r\n If using a scipy.spatial.distance metric, the parameters are still\r\n metric dependent. See the scipy docs for usage examples.\r\n\r\n Returns\r\n -------\r\n D : ndarray of shape (n_samples_X, n_samples_X) or \\\r\n (n_samples_X, n_samples_Y)\r\n A distance matrix D such that D_{i, j} is the distance between the\r\n ith and jth vectors of the given matrix X, if Y is None.\r\n If Y is not None, then D_{i, j} is the distance between the ith array\r\n from X and the jth array from Y.\r\n\r\n See Also\r\n --------\r\n pairwise_distances_chunked : Performs the same calculation as this\r\n function, but returns a generator of chunks of the distance matrix, in\r\n order to limit memory usage.\r\n paired_distances : Computes the distances between corresponding elements\r\n of two arrays.\r\n \"\"\"\r\n if (metric not in _VALID_METRICS and\r\n not callable(metric) and metric != \"precomputed\"):\r\n raise ValueError(\"Unknown metric %s. \"\r\n \"Valid metrics are %s, or 'precomputed', or a \"\r\n \"callable\" % (metric, _VALID_METRICS))\r\n\r\n if metric == \"precomputed\":\r\n X, _ = check_pairwise_arrays(X, Y, precomputed=True,\r\n force_all_finite=force_all_finite)\r\n\r\n whom = (\"`pairwise_distances`. Precomputed distance \"\r\n \" need to have non-negative values.\")\r\n check_non_negative(X, whom=whom)\r\n return X\r\n elif metric in PAIRWISE_DISTANCE_FUNCTIONS:\r\n func = PAIRWISE_DISTANCE_FUNCTIONS[metric]\r\n elif callable(metric):\r\n func = partial(_pairwise_callable, metric=metric,\r\n force_all_finite=force_all_finite, **kwds)\r\n else:\r\n if issparse(X) or issparse(Y):\r\n raise TypeError(\"scipy distance metrics do not\"\r\n \" support sparse matrices.\")\r\n\r\n dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None\r\n\r\n if (dtype == bool and\r\n (X.dtype != bool or (Y is not None and Y.dtype != bool))):\r\n msg = \"Data was converted to boolean for metric %s\" % metric\r\n warnings.warn(msg, DataConversionWarning)\r\n\r\n X, Y = check_pairwise_arrays(X, Y, dtype=dtype,\r\n force_all_finite=force_all_finite)\r\n\r\n # precompute data-derived metric params\r\n params = _precompute_metric_params(X, Y, metric=metric, **kwds)\r\n kwds.update(**params)\r\n\r\n if effective_n_jobs(n_jobs) == 1 and X is Y:\r\n return distance.squareform(distance.pdist(X, metric=metric,\r\n **kwds))\r\n func = partial(distance.cdist, metric=metric, **kwds)\r\n\r\n return _parallel_pairwise(X, Y, func, n_jobs, **kwds)\r\n\r\n\r\n# These distances require boolean arrays, when using scipy.spatial.distance\r\nPAIRWISE_BOOLEAN_FUNCTIONS = [\r\n 'dice',\r\n 'jaccard',\r\n 'kulsinski',\r\n 'matching',\r\n 'rogerstanimoto',\r\n 'russellrao',\r\n 'sokalmichener',\r\n 'sokalsneath',\r\n 'yule',\r\n]\r\n\r\n# Helper functions - distance\r\nPAIRWISE_KERNEL_FUNCTIONS = {\r\n # If updating this dictionary, update the doc in both distance_metrics()\r\n # and also in pairwise_distances()!\r\n 'additive_chi2': additive_chi2_kernel,\r\n 'chi2': chi2_kernel,\r\n 'linear': linear_kernel,\r\n 'polynomial': polynomial_kernel,\r\n 'poly': polynomial_kernel,\r\n 'rbf': rbf_kernel,\r\n 'laplacian': laplacian_kernel,\r\n 'sigmoid': sigmoid_kernel,\r\n 'cosine': cosine_similarity, }\r\n\r\n\r\ndef kernel_metrics():\r\n \"\"\"Valid metrics for pairwise_kernels.\r\n\r\n This function simply returns the valid pairwise distance 
metrics.\r\n It exists, however, to allow for a verbose description of the mapping for\r\n each of the valid strings.\r\n\r\n The valid distance metrics, and the function they map to, are:\r\n =============== ========================================\r\n metric Function\r\n =============== ========================================\r\n 'additive_chi2' sklearn.pairwise.additive_chi2_kernel\r\n 'chi2' sklearn.pairwise.chi2_kernel\r\n 'linear' sklearn.pairwise.linear_kernel\r\n 'poly' sklearn.pairwise.polynomial_kernel\r\n 'polynomial' sklearn.pairwise.polynomial_kernel\r\n 'rbf' sklearn.pairwise.rbf_kernel\r\n 'laplacian' sklearn.pairwise.laplacian_kernel\r\n 'sigmoid' sklearn.pairwise.sigmoid_kernel\r\n 'cosine' sklearn.pairwise.cosine_similarity\r\n =============== ========================================\r\n\r\n Read more in the :ref:`User Guide <metrics>`.\r\n \"\"\"\r\n return PAIRWISE_KERNEL_FUNCTIONS\r\n\r\n\r\nKERNEL_PARAMS = {\r\n \"additive_chi2\": (),\r\n \"chi2\": frozenset([\"gamma\"]),\r\n \"cosine\": (),\r\n \"linear\": (),\r\n \"poly\": frozenset([\"gamma\", \"degree\", \"coef0\"]),\r\n \"polynomial\": frozenset([\"gamma\", \"degree\", \"coef0\"]),\r\n \"rbf\": frozenset([\"gamma\"]),\r\n \"laplacian\": frozenset([\"gamma\"]),\r\n \"sigmoid\": frozenset([\"gamma\", \"coef0\"]),\r\n}\r\n\r\n\r\n@_deprecate_positional_args\r\ndef pairwise_kernels(X, Y=None, metric=\"linear\", *, filter_params=False,\r\n n_jobs=None, **kwds):\r\n \"\"\"Compute the kernel between arrays X and optional array Y.\r\n\r\n This method takes either a vector array or a kernel matrix, and returns\r\n a kernel matrix. If the input is a vector array, the kernels are\r\n computed. If the input is a kernel matrix, it is returned instead.\r\n\r\n This method provides a safe way to take a kernel matrix as input, while\r\n preserving compatibility with many other algorithms that take a vector\r\n array.\r\n\r\n If Y is given (default is None), then the returned matrix is the pairwise\r\n kernel between the arrays from both X and Y.\r\n\r\n Valid values for metric are:\r\n ['additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf',\r\n 'laplacian', 'sigmoid', 'cosine']\r\n\r\n Read more in the :ref:`User Guide <metrics>`.\r\n\r\n Parameters\r\n ----------\r\n X : ndarray of shape (n_samples_X, n_samples_X) or \\\r\n (n_samples_X, n_features)\r\n Array of pairwise kernels between samples, or a feature array.\r\n The shape of the array should be (n_samples_X, n_samples_X) if\r\n metric == \"precomputed\" and (n_samples_X, n_features) otherwise.\r\n\r\n Y : ndarray of shape (n_samples_Y, n_features), default=None\r\n A second feature array only if X has shape (n_samples_X, n_features).\r\n\r\n metric : str or callable, default=\"linear\"\r\n The metric to use when calculating kernel between instances in a\r\n feature array. If metric is a string, it must be one of the metrics\r\n in pairwise.PAIRWISE_KERNEL_FUNCTIONS.\r\n If metric is \"precomputed\", X is assumed to be a kernel matrix.\r\n Alternatively, if metric is a callable function, it is called on each\r\n pair of instances (rows) and the resulting value recorded. The callable\r\n should take two rows from X as input and return the corresponding\r\n kernel value as a single number. This means that callables from\r\n :mod:`sklearn.metrics.pairwise` are not allowed, as they operate on\r\n matrices, not single samples. 
Use the string identifying the kernel\r\n instead.\r\n\r\n filter_params : bool, default=False\r\n Whether to filter invalid parameters or not.\r\n\r\n n_jobs : int, default=None\r\n The number of jobs to use for the computation. This works by breaking\r\n down the pairwise matrix into n_jobs even slices and computing them in\r\n parallel.\r\n\r\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\r\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\r\n for more details.\r\n\r\n **kwds : optional keyword parameters\r\n Any further parameters are passed directly to the kernel function.\r\n\r\n Returns\r\n -------\r\n K : ndarray of shape (n_samples_X, n_samples_X) or \\\r\n (n_samples_X, n_samples_Y)\r\n A kernel matrix K such that K_{i, j} is the kernel between the\r\n ith and jth vectors of the given matrix X, if Y is None.\r\n If Y is not None, then K_{i, j} is the kernel between the ith array\r\n from X and the jth array from Y.\r\n\r\n Notes\r\n -----\r\n If metric is 'precomputed', Y is ignored and X is returned.\r\n\r\n \"\"\"\r\n # import GPKernel locally to prevent circular imports\r\n from ..gaussian_process.kernels import Kernel as GPKernel\r\n\r\n if metric == \"precomputed\":\r\n X, _ = check_pairwise_arrays(X, Y, precomputed=True)\r\n return X\r\n elif isinstance(metric, GPKernel):\r\n func = metric.__call__\r\n elif metric in PAIRWISE_KERNEL_FUNCTIONS:\r\n if filter_params:\r\n kwds = {k: kwds[k] for k in kwds\r\n if k in KERNEL_PARAMS[metric]}\r\n func = PAIRWISE_KERNEL_FUNCTIONS[metric]\r\n elif callable(metric):\r\n func = partial(_pairwise_callable, metric=metric, **kwds)\r\n else:\r\n raise ValueError(\"Unknown kernel %r\" % metric)\r\n\r\n return _parallel_pairwise(X, Y, func, n_jobs, **kwds)\r\n", "from datetime import date, datetime, timedelta\r\n\r\nfrom dateutil import tz\r\nimport numpy as np\r\nimport pytest\r\n\r\nimport pandas as pd\r\nfrom pandas import DataFrame, Index, Series, Timestamp, date_range\r\nimport pandas._testing as tm\r\n\r\n\r\nclass TestDatetimeIndex:\r\n def test_setitem_with_datetime_tz(self):\r\n # 16889\r\n # support .loc with alignment and tz-aware DatetimeIndex\r\n mask = np.array([True, False, True, False])\r\n\r\n idx = date_range(\"20010101\", periods=4, tz=\"UTC\")\r\n df = DataFrame({\"a\": np.arange(4)}, index=idx).astype(\"float64\")\r\n\r\n result = df.copy()\r\n result.loc[mask, :] = df.loc[mask, :]\r\n tm.assert_frame_equal(result, df)\r\n\r\n result = df.copy()\r\n result.loc[mask] = df.loc[mask]\r\n tm.assert_frame_equal(result, df)\r\n\r\n idx = date_range(\"20010101\", periods=4)\r\n df = DataFrame({\"a\": np.arange(4)}, index=idx).astype(\"float64\")\r\n\r\n result = df.copy()\r\n result.loc[mask, :] = df.loc[mask, :]\r\n tm.assert_frame_equal(result, df)\r\n\r\n result = df.copy()\r\n result.loc[mask] = df.loc[mask]\r\n tm.assert_frame_equal(result, df)\r\n\r\n def test_indexing_with_datetime_tz(self):\r\n\r\n # GH#8260\r\n # support datetime64 with tz\r\n\r\n idx = Index(date_range(\"20130101\", periods=3, tz=\"US/Eastern\"), name=\"foo\")\r\n dr = date_range(\"20130110\", periods=3)\r\n df = DataFrame({\"A\": idx, \"B\": dr})\r\n df[\"C\"] = idx\r\n df.iloc[1, 1] = pd.NaT\r\n df.iloc[1, 2] = pd.NaT\r\n\r\n # indexing\r\n result = df.iloc[1]\r\n expected = Series(\r\n [Timestamp(\"2013-01-02 00:00:00-0500\", tz=\"US/Eastern\"), pd.NaT, pd.NaT],\r\n index=list(\"ABC\"),\r\n dtype=\"object\",\r\n name=1,\r\n )\r\n tm.assert_series_equal(result, expected)\r\n result = df.loc[1]\r\n expected = 
Series(\r\n [Timestamp(\"2013-01-02 00:00:00-0500\", tz=\"US/Eastern\"), pd.NaT, pd.NaT],\r\n index=list(\"ABC\"),\r\n dtype=\"object\",\r\n name=1,\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n # indexing - fast_xs\r\n df = DataFrame({\"a\": date_range(\"2014-01-01\", periods=10, tz=\"UTC\")})\r\n result = df.iloc[5]\r\n expected = Series(\r\n [Timestamp(\"2014-01-06 00:00:00+0000\", tz=\"UTC\")], index=[\"a\"], name=5\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = df.loc[5]\r\n tm.assert_series_equal(result, expected)\r\n\r\n # indexing - boolean\r\n result = df[df.a > df.a[3]]\r\n expected = df.iloc[4:]\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # indexing - setting an element\r\n df = DataFrame(\r\n data=pd.to_datetime([\"2015-03-30 20:12:32\", \"2015-03-12 00:11:11\"]),\r\n columns=[\"time\"],\r\n )\r\n df[\"new_col\"] = [\"new\", \"old\"]\r\n df.time = df.set_index(\"time\").index.tz_localize(\"UTC\")\r\n v = df[df.new_col == \"new\"].set_index(\"time\").index.tz_convert(\"US/Pacific\")\r\n\r\n # trying to set a single element on a part of a different timezone\r\n # this converts to object\r\n df2 = df.copy()\r\n df2.loc[df2.new_col == \"new\", \"time\"] = v\r\n\r\n expected = Series([v[0], df.loc[1, \"time\"]], name=\"time\")\r\n tm.assert_series_equal(df2.time, expected)\r\n\r\n v = df.loc[df.new_col == \"new\", \"time\"] + pd.Timedelta(\"1s\")\r\n df.loc[df.new_col == \"new\", \"time\"] = v\r\n tm.assert_series_equal(df.loc[df.new_col == \"new\", \"time\"], v)\r\n\r\n def test_consistency_with_tz_aware_scalar(self):\r\n # xef gh-12938\r\n # various ways of indexing the same tz-aware scalar\r\n df = Series([Timestamp(\"2016-03-30 14:35:25\", tz=\"Europe/Brussels\")]).to_frame()\r\n\r\n df = pd.concat([df, df]).reset_index(drop=True)\r\n expected = Timestamp(\"2016-03-30 14:35:25+0200\", tz=\"Europe/Brussels\")\r\n\r\n result = df[0][0]\r\n assert result == expected\r\n\r\n result = df.iloc[0, 0]\r\n assert result == expected\r\n\r\n result = df.loc[0, 0]\r\n assert result == expected\r\n\r\n result = df.iat[0, 0]\r\n assert result == expected\r\n\r\n result = df.at[0, 0]\r\n assert result == expected\r\n\r\n result = df[0].loc[0]\r\n assert result == expected\r\n\r\n result = df[0].at[0]\r\n assert result == expected\r\n\r\n def test_indexing_with_datetimeindex_tz(self):\r\n\r\n # GH 12050\r\n # indexing on a series with a datetimeindex with tz\r\n index = date_range(\"2015-01-01\", periods=2, tz=\"utc\")\r\n\r\n ser = Series(range(2), index=index, dtype=\"int64\")\r\n\r\n # list-like indexing\r\n\r\n for sel in (index, list(index)):\r\n # getitem\r\n result = ser[sel]\r\n expected = ser.copy()\r\n if sel is not index:\r\n expected.index = expected.index._with_freq(None)\r\n tm.assert_series_equal(result, expected)\r\n\r\n # setitem\r\n result = ser.copy()\r\n result[sel] = 1\r\n expected = Series(1, index=index)\r\n tm.assert_series_equal(result, expected)\r\n\r\n # .loc getitem\r\n result = ser.loc[sel]\r\n expected = ser.copy()\r\n if sel is not index:\r\n expected.index = expected.index._with_freq(None)\r\n tm.assert_series_equal(result, expected)\r\n\r\n # .loc setitem\r\n result = ser.copy()\r\n result.loc[sel] = 1\r\n expected = Series(1, index=index)\r\n tm.assert_series_equal(result, expected)\r\n\r\n # single element indexing\r\n\r\n # getitem\r\n assert ser[index[1]] == 1\r\n\r\n # setitem\r\n result = ser.copy()\r\n result[index[1]] = 5\r\n expected = Series([0, 5], index=index)\r\n tm.assert_series_equal(result, 
expected)\r\n\r\n # .loc getitem\r\n assert ser.loc[index[1]] == 1\r\n\r\n # .loc setitem\r\n result = ser.copy()\r\n result.loc[index[1]] = 5\r\n expected = Series([0, 5], index=index)\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_partial_setting_with_datetimelike_dtype(self):\r\n\r\n # GH9478\r\n # a datetimeindex alignment issue with partial setting\r\n df = DataFrame(\r\n np.arange(6.0).reshape(3, 2),\r\n columns=list(\"AB\"),\r\n index=date_range(\"1/1/2000\", periods=3, freq=\"1H\"),\r\n )\r\n expected = df.copy()\r\n expected[\"C\"] = [expected.index[0]] + [pd.NaT, pd.NaT]\r\n\r\n mask = df.A < 1\r\n df.loc[mask, \"C\"] = df.loc[mask].index\r\n tm.assert_frame_equal(df, expected)\r\n\r\n def test_loc_setitem_datetime(self):\r\n\r\n # GH 9516\r\n dt1 = Timestamp(\"20130101 09:00:00\")\r\n dt2 = Timestamp(\"20130101 10:00:00\")\r\n\r\n for conv in [\r\n lambda x: x,\r\n lambda x: x.to_datetime64(),\r\n lambda x: x.to_pydatetime(),\r\n lambda x: np.datetime64(x),\r\n ]:\r\n\r\n df = DataFrame()\r\n df.loc[conv(dt1), \"one\"] = 100\r\n df.loc[conv(dt2), \"one\"] = 200\r\n\r\n expected = DataFrame({\"one\": [100.0, 200.0]}, index=[dt1, dt2])\r\n tm.assert_frame_equal(df, expected)\r\n\r\n def test_series_partial_set_datetime(self):\r\n # GH 11497\r\n\r\n idx = date_range(\"2011-01-01\", \"2011-01-02\", freq=\"D\", name=\"idx\")\r\n ser = Series([0.1, 0.2], index=idx, name=\"s\")\r\n\r\n result = ser.loc[[Timestamp(\"2011-01-01\"), Timestamp(\"2011-01-02\")]]\r\n exp = Series([0.1, 0.2], index=idx, name=\"s\")\r\n exp.index = exp.index._with_freq(None)\r\n tm.assert_series_equal(result, exp, check_index_type=True)\r\n\r\n keys = [\r\n Timestamp(\"2011-01-02\"),\r\n Timestamp(\"2011-01-02\"),\r\n Timestamp(\"2011-01-01\"),\r\n ]\r\n exp = Series(\r\n [0.2, 0.2, 0.1], index=pd.DatetimeIndex(keys, name=\"idx\"), name=\"s\"\r\n )\r\n tm.assert_series_equal(ser.loc[keys], exp, check_index_type=True)\r\n\r\n keys = [\r\n Timestamp(\"2011-01-03\"),\r\n Timestamp(\"2011-01-02\"),\r\n Timestamp(\"2011-01-03\"),\r\n ]\r\n with pytest.raises(KeyError, match=\"with any missing labels\"):\r\n ser.loc[keys]\r\n\r\n def test_series_partial_set_period(self):\r\n # GH 11497\r\n\r\n idx = pd.period_range(\"2011-01-01\", \"2011-01-02\", freq=\"D\", name=\"idx\")\r\n ser = Series([0.1, 0.2], index=idx, name=\"s\")\r\n\r\n result = ser.loc[\r\n [pd.Period(\"2011-01-01\", freq=\"D\"), pd.Period(\"2011-01-02\", freq=\"D\")]\r\n ]\r\n exp = Series([0.1, 0.2], index=idx, name=\"s\")\r\n tm.assert_series_equal(result, exp, check_index_type=True)\r\n\r\n keys = [\r\n pd.Period(\"2011-01-02\", freq=\"D\"),\r\n pd.Period(\"2011-01-02\", freq=\"D\"),\r\n pd.Period(\"2011-01-01\", freq=\"D\"),\r\n ]\r\n exp = Series([0.2, 0.2, 0.1], index=pd.PeriodIndex(keys, name=\"idx\"), name=\"s\")\r\n tm.assert_series_equal(ser.loc[keys], exp, check_index_type=True)\r\n\r\n keys = [\r\n pd.Period(\"2011-01-03\", freq=\"D\"),\r\n pd.Period(\"2011-01-02\", freq=\"D\"),\r\n pd.Period(\"2011-01-03\", freq=\"D\"),\r\n ]\r\n with pytest.raises(KeyError, match=\"with any missing labels\"):\r\n ser.loc[keys]\r\n\r\n def test_nanosecond_getitem_setitem_with_tz(self):\r\n # GH 11679\r\n data = [\"2016-06-28 08:30:00.123456789\"]\r\n index = pd.DatetimeIndex(data, dtype=\"datetime64[ns, America/Chicago]\")\r\n df = DataFrame({\"a\": [10]}, index=index)\r\n result = df.loc[df.index[0]]\r\n expected = Series(10, index=[\"a\"], name=df.index[0])\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = df.copy()\r\n 
result.loc[df.index[0], \"a\"] = -1\r\n expected = DataFrame(-1, index=index, columns=[\"a\"])\r\n tm.assert_frame_equal(result, expected)\r\n\r\n def test_loc_getitem_across_dst(self):\r\n # GH 21846\r\n idx = pd.date_range(\r\n \"2017-10-29 01:30:00\", tz=\"Europe/Berlin\", periods=5, freq=\"30 min\"\r\n )\r\n series2 = pd.Series([0, 1, 2, 3, 4], index=idx)\r\n\r\n t_1 = pd.Timestamp(\r\n \"2017-10-29 02:30:00+02:00\", tz=\"Europe/Berlin\", freq=\"30min\"\r\n )\r\n t_2 = pd.Timestamp(\r\n \"2017-10-29 02:00:00+01:00\", tz=\"Europe/Berlin\", freq=\"30min\"\r\n )\r\n result = series2.loc[t_1:t_2]\r\n expected = pd.Series([2, 3], index=idx[2:4])\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = series2[t_1]\r\n expected = 2\r\n assert result == expected\r\n\r\n def test_loc_incremental_setitem_with_dst(self):\r\n # GH 20724\r\n base = datetime(2015, 11, 1, tzinfo=tz.gettz(\"US/Pacific\"))\r\n idxs = [base + timedelta(seconds=i * 900) for i in range(16)]\r\n result = pd.Series([0], index=[idxs[0]])\r\n for ts in idxs:\r\n result.loc[ts] = 1\r\n expected = pd.Series(1, index=idxs)\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_loc_setitem_with_existing_dst(self):\r\n # GH 18308\r\n start = pd.Timestamp(\"2017-10-29 00:00:00+0200\", tz=\"Europe/Madrid\")\r\n end = pd.Timestamp(\"2017-10-29 03:00:00+0100\", tz=\"Europe/Madrid\")\r\n ts = pd.Timestamp(\"2016-10-10 03:00:00\", tz=\"Europe/Madrid\")\r\n idx = pd.date_range(start, end, closed=\"left\", freq=\"H\")\r\n result = pd.DataFrame(index=idx, columns=[\"value\"])\r\n result.loc[ts, \"value\"] = 12\r\n expected = pd.DataFrame(\r\n [np.nan] * len(idx) + [12],\r\n index=idx.append(pd.DatetimeIndex([ts])),\r\n columns=[\"value\"],\r\n dtype=object,\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n def test_loc_str_slicing(self):\r\n ix = pd.period_range(start=\"2017-01-01\", end=\"2018-01-01\", freq=\"M\")\r\n ser = ix.to_series()\r\n result = ser.loc[:\"2017-12\"]\r\n expected = ser.iloc[:-1]\r\n\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_loc_label_slicing(self):\r\n ix = pd.period_range(start=\"2017-01-01\", end=\"2018-01-01\", freq=\"M\")\r\n ser = ix.to_series()\r\n result = ser.loc[: ix[-2]]\r\n expected = ser.iloc[:-1]\r\n\r\n tm.assert_series_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\r\n \"slice_, positions\",\r\n [\r\n [slice(date(2018, 1, 1), None), [0, 1, 2]],\r\n [slice(date(2019, 1, 2), None), [2]],\r\n [slice(date(2020, 1, 1), None), []],\r\n [slice(None, date(2020, 1, 1)), [0, 1, 2]],\r\n [slice(None, date(2019, 1, 1)), [0]],\r\n ],\r\n )\r\n def test_getitem_slice_date(self, slice_, positions):\r\n # https://github.com/pandas-dev/pandas/issues/31501\r\n s = pd.Series(\r\n [0, 1, 2],\r\n pd.DatetimeIndex([\"2019-01-01\", \"2019-01-01T06:00:00\", \"2019-01-02\"]),\r\n )\r\n result = s[slice_]\r\n expected = s.take(positions)\r\n tm.assert_series_equal(result, expected)\r\n", "from datetime import datetime\r\nimport sys\r\n\r\nimport numpy as np\r\nimport pytest\r\n\r\nfrom pandas.compat import PYPY\r\n\r\nimport pandas as pd\r\nfrom pandas import DataFrame, Index, Series\r\nimport pandas._testing as tm\r\nfrom pandas.core.accessor import PandasDelegate\r\nfrom pandas.core.base import NoNewAttributesMixin, PandasObject\r\n\r\n\r\[email protected](\r\n params=[\r\n Series,\r\n lambda x, **kwargs: DataFrame({\"a\": x}, **kwargs)[\"a\"],\r\n lambda x, **kwargs: DataFrame(x, **kwargs)[0],\r\n Index,\r\n ],\r\n ids=[\"Series\", \"DataFrame-dict\", \"DataFrame-array\", 
\"Index\"],\r\n)\r\ndef constructor(request):\r\n return request.param\r\n\r\n\r\nclass TestPandasDelegate:\r\n class Delegator:\r\n _properties = [\"foo\"]\r\n _methods = [\"bar\"]\r\n\r\n def _set_foo(self, value):\r\n self.foo = value\r\n\r\n def _get_foo(self):\r\n return self.foo\r\n\r\n foo = property(_get_foo, _set_foo, doc=\"foo property\")\r\n\r\n def bar(self, *args, **kwargs):\r\n \"\"\" a test bar method \"\"\"\r\n pass\r\n\r\n class Delegate(PandasDelegate, PandasObject):\r\n def __init__(self, obj):\r\n self.obj = obj\r\n\r\n def setup_method(self, method):\r\n pass\r\n\r\n def test_invalid_delegation(self):\r\n # these show that in order for the delegation to work\r\n # the _delegate_* methods need to be overridden to not raise\r\n # a TypeError\r\n\r\n self.Delegate._add_delegate_accessors(\r\n delegate=self.Delegator,\r\n accessors=self.Delegator._properties,\r\n typ=\"property\",\r\n )\r\n self.Delegate._add_delegate_accessors(\r\n delegate=self.Delegator, accessors=self.Delegator._methods, typ=\"method\"\r\n )\r\n\r\n delegate = self.Delegate(self.Delegator())\r\n\r\n msg = \"You cannot access the property foo\"\r\n with pytest.raises(TypeError, match=msg):\r\n delegate.foo\r\n\r\n msg = \"The property foo cannot be set\"\r\n with pytest.raises(TypeError, match=msg):\r\n delegate.foo = 5\r\n\r\n msg = \"You cannot access the property foo\"\r\n with pytest.raises(TypeError, match=msg):\r\n delegate.foo()\r\n\r\n @pytest.mark.skipif(PYPY, reason=\"not relevant for PyPy\")\r\n def test_memory_usage(self):\r\n # Delegate does not implement memory_usage.\r\n # Check that we fall back to in-built `__sizeof__`\r\n # GH 12924\r\n delegate = self.Delegate(self.Delegator())\r\n sys.getsizeof(delegate)\r\n\r\n\r\nclass TestNoNewAttributesMixin:\r\n def test_mixin(self):\r\n class T(NoNewAttributesMixin):\r\n pass\r\n\r\n t = T()\r\n assert not hasattr(t, \"__frozen\")\r\n\r\n t.a = \"test\"\r\n assert t.a == \"test\"\r\n\r\n t._freeze()\r\n assert \"__frozen\" in dir(t)\r\n assert getattr(t, \"__frozen\")\r\n msg = \"You cannot add any new attribute\"\r\n with pytest.raises(AttributeError, match=msg):\r\n t.b = \"test\"\r\n\r\n assert not hasattr(t, \"b\")\r\n\r\n\r\nclass TestConstruction:\r\n # test certain constructor behaviours on dtype inference across Series,\r\n # Index and DataFrame\r\n\r\n @pytest.mark.parametrize(\r\n \"klass\",\r\n [\r\n Series,\r\n lambda x, **kwargs: DataFrame({\"a\": x}, **kwargs)[\"a\"],\r\n pytest.param(\r\n lambda x, **kwargs: DataFrame(x, **kwargs)[0], marks=pytest.mark.xfail\r\n ),\r\n Index,\r\n ],\r\n )\r\n @pytest.mark.parametrize(\r\n \"a\",\r\n [\r\n np.array([\"2263-01-01\"], dtype=\"datetime64[D]\"),\r\n np.array([datetime(2263, 1, 1)], dtype=object),\r\n np.array([np.datetime64(\"2263-01-01\", \"D\")], dtype=object),\r\n np.array([\"2263-01-01\"], dtype=object),\r\n ],\r\n ids=[\r\n \"datetime64[D]\",\r\n \"object-datetime.datetime\",\r\n \"object-numpy-scalar\",\r\n \"object-string\",\r\n ],\r\n )\r\n def test_constructor_datetime_outofbound(self, a, klass):\r\n # GH-26853 (+ bug GH-26206 out of bound non-ns unit)\r\n\r\n # No dtype specified (dtype inference)\r\n # datetime64[non-ns] raise error, other cases result in object dtype\r\n # and preserve original data\r\n if a.dtype.kind == \"M\":\r\n msg = \"Out of bounds\"\r\n with pytest.raises(pd.errors.OutOfBoundsDatetime, match=msg):\r\n klass(a)\r\n else:\r\n result = klass(a)\r\n assert result.dtype == \"object\"\r\n tm.assert_numpy_array_equal(result.to_numpy(), a)\r\n\r\n # 
Explicit dtype specified\r\n # Forced conversion fails for all -> all cases raise error\r\n msg = \"Out of bounds\"\r\n with pytest.raises(pd.errors.OutOfBoundsDatetime, match=msg):\r\n klass(a, dtype=\"datetime64[ns]\")\r\n\r\n def test_constructor_datetime_nonns(self, constructor):\r\n arr = np.array([\"2020-01-01T00:00:00.000000\"], dtype=\"datetime64[us]\")\r\n expected = constructor(pd.to_datetime([\"2020-01-01\"]))\r\n result = constructor(arr)\r\n tm.assert_equal(result, expected)\r\n\r\n # https://github.com/pandas-dev/pandas/issues/34843\r\n arr.flags.writeable = False\r\n result = constructor(arr)\r\n tm.assert_equal(result, expected)\r\n", "import numpy as np\r\nimport pytest\r\n\r\nfrom pandas.core.dtypes.common import is_datetime64_dtype, is_timedelta64_dtype\r\nfrom pandas.core.dtypes.dtypes import DatetimeTZDtype\r\n\r\nimport pandas as pd\r\nfrom pandas import CategoricalIndex, Series, Timedelta, Timestamp\r\nimport pandas._testing as tm\r\nfrom pandas.core.arrays import (\r\n DatetimeArray,\r\n IntervalArray,\r\n PandasArray,\r\n PeriodArray,\r\n SparseArray,\r\n TimedeltaArray,\r\n)\r\n\r\n\r\nclass TestToIterable:\r\n # test that we convert an iterable to python types\r\n\r\n dtypes = [\r\n (\"int8\", int),\r\n (\"int16\", int),\r\n (\"int32\", int),\r\n (\"int64\", int),\r\n (\"uint8\", int),\r\n (\"uint16\", int),\r\n (\"uint32\", int),\r\n (\"uint64\", int),\r\n (\"float16\", float),\r\n (\"float32\", float),\r\n (\"float64\", float),\r\n (\"datetime64[ns]\", Timestamp),\r\n (\"datetime64[ns, US/Eastern]\", Timestamp),\r\n (\"timedelta64[ns]\", Timedelta),\r\n ]\r\n\r\n @pytest.mark.parametrize(\"dtype, rdtype\", dtypes)\r\n @pytest.mark.parametrize(\r\n \"method\",\r\n [\r\n lambda x: x.tolist(),\r\n lambda x: x.to_list(),\r\n lambda x: list(x),\r\n lambda x: list(x.__iter__()),\r\n ],\r\n ids=[\"tolist\", \"to_list\", \"list\", \"iter\"],\r\n )\r\n def test_iterable(self, index_or_series, method, dtype, rdtype):\r\n # gh-10904\r\n # gh-13258\r\n # coerce iteration to underlying python / pandas types\r\n typ = index_or_series\r\n s = typ([1], dtype=dtype)\r\n result = method(s)[0]\r\n assert isinstance(result, rdtype)\r\n\r\n @pytest.mark.parametrize(\r\n \"dtype, rdtype, obj\",\r\n [\r\n (\"object\", object, \"a\"),\r\n (\"object\", int, 1),\r\n (\"category\", object, \"a\"),\r\n (\"category\", int, 1),\r\n ],\r\n )\r\n @pytest.mark.parametrize(\r\n \"method\",\r\n [\r\n lambda x: x.tolist(),\r\n lambda x: x.to_list(),\r\n lambda x: list(x),\r\n lambda x: list(x.__iter__()),\r\n ],\r\n ids=[\"tolist\", \"to_list\", \"list\", \"iter\"],\r\n )\r\n def test_iterable_object_and_category(\r\n self, index_or_series, method, dtype, rdtype, obj\r\n ):\r\n # gh-10904\r\n # gh-13258\r\n # coerce iteration to underlying python / pandas types\r\n typ = index_or_series\r\n s = typ([obj], dtype=dtype)\r\n result = method(s)[0]\r\n assert isinstance(result, rdtype)\r\n\r\n @pytest.mark.parametrize(\"dtype, rdtype\", dtypes)\r\n def test_iterable_items(self, dtype, rdtype):\r\n # gh-13258\r\n # test if items yields the correct boxed scalars\r\n # this only applies to series\r\n s = Series([1], dtype=dtype)\r\n _, result = list(s.items())[0]\r\n assert isinstance(result, rdtype)\r\n\r\n _, result = list(s.items())[0]\r\n assert isinstance(result, rdtype)\r\n\r\n @pytest.mark.parametrize(\r\n \"dtype, rdtype\", dtypes + [(\"object\", int), (\"category\", int)]\r\n )\r\n def test_iterable_map(self, index_or_series, dtype, rdtype):\r\n # gh-13236\r\n # coerce iteration to underlying 
python / pandas types\r\n typ = index_or_series\r\n s = typ([1], dtype=dtype)\r\n result = s.map(type)[0]\r\n if not isinstance(rdtype, tuple):\r\n rdtype = tuple([rdtype])\r\n assert result in rdtype\r\n\r\n @pytest.mark.parametrize(\r\n \"method\",\r\n [\r\n lambda x: x.tolist(),\r\n lambda x: x.to_list(),\r\n lambda x: list(x),\r\n lambda x: list(x.__iter__()),\r\n ],\r\n ids=[\"tolist\", \"to_list\", \"list\", \"iter\"],\r\n )\r\n def test_categorial_datetimelike(self, method):\r\n i = CategoricalIndex([Timestamp(\"1999-12-31\"), Timestamp(\"2000-12-31\")])\r\n\r\n result = method(i)[0]\r\n assert isinstance(result, Timestamp)\r\n\r\n def test_iter_box(self):\r\n vals = [Timestamp(\"2011-01-01\"), Timestamp(\"2011-01-02\")]\r\n s = Series(vals)\r\n assert s.dtype == \"datetime64[ns]\"\r\n for res, exp in zip(s, vals):\r\n assert isinstance(res, Timestamp)\r\n assert res.tz is None\r\n assert res == exp\r\n\r\n vals = [\r\n Timestamp(\"2011-01-01\", tz=\"US/Eastern\"),\r\n Timestamp(\"2011-01-02\", tz=\"US/Eastern\"),\r\n ]\r\n s = Series(vals)\r\n\r\n assert s.dtype == \"datetime64[ns, US/Eastern]\"\r\n for res, exp in zip(s, vals):\r\n assert isinstance(res, Timestamp)\r\n assert res.tz == exp.tz\r\n assert res == exp\r\n\r\n # timedelta\r\n vals = [Timedelta(\"1 days\"), Timedelta(\"2 days\")]\r\n s = Series(vals)\r\n assert s.dtype == \"timedelta64[ns]\"\r\n for res, exp in zip(s, vals):\r\n assert isinstance(res, Timedelta)\r\n assert res == exp\r\n\r\n # period\r\n vals = [pd.Period(\"2011-01-01\", freq=\"M\"), pd.Period(\"2011-01-02\", freq=\"M\")]\r\n s = Series(vals)\r\n assert s.dtype == \"Period[M]\"\r\n for res, exp in zip(s, vals):\r\n assert isinstance(res, pd.Period)\r\n assert res.freq == \"M\"\r\n assert res == exp\r\n\r\n\r\[email protected](\r\n \"array, expected_type, dtype\",\r\n [\r\n (np.array([0, 1], dtype=np.int64), np.ndarray, \"int64\"),\r\n (np.array([\"a\", \"b\"]), np.ndarray, \"object\"),\r\n (pd.Categorical([\"a\", \"b\"]), pd.Categorical, \"category\"),\r\n (\r\n pd.DatetimeIndex([\"2017\", \"2018\"], tz=\"US/Central\"),\r\n DatetimeArray,\r\n \"datetime64[ns, US/Central]\",\r\n ),\r\n (\r\n pd.PeriodIndex([2018, 2019], freq=\"A\"),\r\n PeriodArray,\r\n pd.core.dtypes.dtypes.PeriodDtype(\"A-DEC\"),\r\n ),\r\n (pd.IntervalIndex.from_breaks([0, 1, 2]), IntervalArray, \"interval\",),\r\n # This test is currently failing for datetime64[ns] and timedelta64[ns].\r\n # The NumPy type system is sufficient for representing these types, so\r\n # we just use NumPy for Series / DataFrame columns of these types (so\r\n # we get consolidation and so on).\r\n # However, DatetimeIndex and TimedeltaIndex use the DateLikeArray\r\n # abstraction to for code reuse.\r\n # At the moment, we've judged that allowing this test to fail is more\r\n # practical that overriding Series._values to special case\r\n # Series[M8[ns]] and Series[m8[ns]] to return a DateLikeArray.\r\n pytest.param(\r\n pd.DatetimeIndex([\"2017\", \"2018\"]),\r\n np.ndarray,\r\n \"datetime64[ns]\",\r\n marks=[pytest.mark.xfail(reason=\"datetime _values\", strict=True)],\r\n ),\r\n pytest.param(\r\n pd.TimedeltaIndex([10 ** 10]),\r\n np.ndarray,\r\n \"m8[ns]\",\r\n marks=[pytest.mark.xfail(reason=\"timedelta _values\", strict=True)],\r\n ),\r\n ],\r\n)\r\ndef test_values_consistent(array, expected_type, dtype):\r\n l_values = pd.Series(array)._values\r\n r_values = pd.Index(array)._values\r\n assert type(l_values) is expected_type\r\n assert type(l_values) is type(r_values)\r\n\r\n tm.assert_equal(l_values, 
r_values)\r\n\r\n\r\[email protected](\"arr\", [np.array([1, 2, 3])])\r\ndef test_numpy_array(arr):\r\n ser = pd.Series(arr)\r\n result = ser.array\r\n expected = PandasArray(arr)\r\n tm.assert_extension_array_equal(result, expected)\r\n\r\n\r\ndef test_numpy_array_all_dtypes(any_numpy_dtype):\r\n ser = pd.Series(dtype=any_numpy_dtype)\r\n result = ser.array\r\n if is_datetime64_dtype(any_numpy_dtype):\r\n assert isinstance(result, DatetimeArray)\r\n elif is_timedelta64_dtype(any_numpy_dtype):\r\n assert isinstance(result, TimedeltaArray)\r\n else:\r\n assert isinstance(result, PandasArray)\r\n\r\n\r\[email protected](\r\n \"array, attr\",\r\n [\r\n (pd.Categorical([\"a\", \"b\"]), \"_codes\"),\r\n (pd.core.arrays.period_array([\"2000\", \"2001\"], freq=\"D\"), \"_data\"),\r\n (pd.core.arrays.integer_array([0, np.nan]), \"_data\"),\r\n (IntervalArray.from_breaks([0, 1]), \"_left\"),\r\n (SparseArray([0, 1]), \"_sparse_values\"),\r\n (DatetimeArray(np.array([1, 2], dtype=\"datetime64[ns]\")), \"_data\"),\r\n # tz-aware Datetime\r\n (\r\n DatetimeArray(\r\n np.array(\r\n [\"2000-01-01T12:00:00\", \"2000-01-02T12:00:00\"], dtype=\"M8[ns]\"\r\n ),\r\n dtype=DatetimeTZDtype(tz=\"US/Central\"),\r\n ),\r\n \"_data\",\r\n ),\r\n ],\r\n)\r\ndef test_array(array, attr, index_or_series):\r\n box = index_or_series\r\n if array.dtype.name in (\"Int64\", \"Sparse[int64, 0]\") and box is pd.Index:\r\n pytest.skip(f\"No index type for {array.dtype}\")\r\n result = box(array, copy=False).array\r\n\r\n if attr:\r\n array = getattr(array, attr)\r\n result = getattr(result, attr)\r\n\r\n assert result is array\r\n\r\n\r\ndef test_array_multiindex_raises():\r\n idx = pd.MultiIndex.from_product([[\"A\"], [\"a\", \"b\"]])\r\n msg = \"MultiIndex has no single backing array\"\r\n with pytest.raises(ValueError, match=msg):\r\n idx.array\r\n\r\n\r\[email protected](\r\n \"array, expected\",\r\n [\r\n (np.array([1, 2], dtype=np.int64), np.array([1, 2], dtype=np.int64)),\r\n (pd.Categorical([\"a\", \"b\"]), np.array([\"a\", \"b\"], dtype=object)),\r\n (\r\n pd.core.arrays.period_array([\"2000\", \"2001\"], freq=\"D\"),\r\n np.array([pd.Period(\"2000\", freq=\"D\"), pd.Period(\"2001\", freq=\"D\")]),\r\n ),\r\n (\r\n pd.core.arrays.integer_array([0, np.nan]),\r\n np.array([0, pd.NA], dtype=object),\r\n ),\r\n (\r\n IntervalArray.from_breaks([0, 1, 2]),\r\n np.array([pd.Interval(0, 1), pd.Interval(1, 2)], dtype=object),\r\n ),\r\n (SparseArray([0, 1]), np.array([0, 1], dtype=np.int64)),\r\n # tz-naive datetime\r\n (\r\n DatetimeArray(np.array([\"2000\", \"2001\"], dtype=\"M8[ns]\")),\r\n np.array([\"2000\", \"2001\"], dtype=\"M8[ns]\"),\r\n ),\r\n # tz-aware stays tz`-aware\r\n (\r\n DatetimeArray(\r\n np.array(\r\n [\"2000-01-01T06:00:00\", \"2000-01-02T06:00:00\"], dtype=\"M8[ns]\"\r\n ),\r\n dtype=DatetimeTZDtype(tz=\"US/Central\"),\r\n ),\r\n np.array(\r\n [\r\n pd.Timestamp(\"2000-01-01\", tz=\"US/Central\"),\r\n pd.Timestamp(\"2000-01-02\", tz=\"US/Central\"),\r\n ]\r\n ),\r\n ),\r\n # Timedelta\r\n (\r\n TimedeltaArray(np.array([0, 3600000000000], dtype=\"i8\"), freq=\"H\"),\r\n np.array([0, 3600000000000], dtype=\"m8[ns]\"),\r\n ),\r\n ],\r\n)\r\ndef test_to_numpy(array, expected, index_or_series):\r\n box = index_or_series\r\n thing = box(array)\r\n\r\n if array.dtype.name in (\"Int64\", \"Sparse[int64, 0]\") and box is pd.Index:\r\n pytest.skip(f\"No index type for {array.dtype}\")\r\n\r\n result = thing.to_numpy()\r\n tm.assert_numpy_array_equal(result, expected)\r\n\r\n\r\[email protected](\"as_series\", 
[True, False])\r\[email protected](\r\n \"arr\", [np.array([1, 2, 3], dtype=\"int64\"), np.array([\"a\", \"b\", \"c\"], dtype=object)]\r\n)\r\ndef test_to_numpy_copy(arr, as_series):\r\n obj = pd.Index(arr, copy=False)\r\n if as_series:\r\n obj = pd.Series(obj.values, copy=False)\r\n\r\n # no copy by default\r\n result = obj.to_numpy()\r\n assert np.shares_memory(arr, result) is True\r\n\r\n result = obj.to_numpy(copy=False)\r\n assert np.shares_memory(arr, result) is True\r\n\r\n # copy=True\r\n result = obj.to_numpy(copy=True)\r\n assert np.shares_memory(arr, result) is False\r\n\r\n\r\[email protected](\"as_series\", [True, False])\r\ndef test_to_numpy_dtype(as_series):\r\n tz = \"US/Eastern\"\r\n obj = pd.DatetimeIndex([\"2000\", \"2001\"], tz=tz)\r\n if as_series:\r\n obj = pd.Series(obj)\r\n\r\n # preserve tz by default\r\n result = obj.to_numpy()\r\n expected = np.array(\r\n [pd.Timestamp(\"2000\", tz=tz), pd.Timestamp(\"2001\", tz=tz)], dtype=object\r\n )\r\n tm.assert_numpy_array_equal(result, expected)\r\n\r\n result = obj.to_numpy(dtype=\"object\")\r\n tm.assert_numpy_array_equal(result, expected)\r\n\r\n result = obj.to_numpy(dtype=\"M8[ns]\")\r\n expected = np.array([\"2000-01-01T05\", \"2001-01-01T05\"], dtype=\"M8[ns]\")\r\n tm.assert_numpy_array_equal(result, expected)\r\n\r\n\r\[email protected](\r\n \"values, dtype, na_value, expected\",\r\n [\r\n ([1, 2, None], \"float64\", 0, [1.0, 2.0, 0.0]),\r\n (\r\n [pd.Timestamp(\"2000\"), pd.Timestamp(\"2000\"), pd.NaT],\r\n None,\r\n pd.Timestamp(\"2000\"),\r\n [np.datetime64(\"2000-01-01T00:00:00.000000000\")] * 3,\r\n ),\r\n ],\r\n)\r\ndef test_to_numpy_na_value_numpy_dtype(\r\n index_or_series, values, dtype, na_value, expected\r\n):\r\n obj = index_or_series(values)\r\n result = obj.to_numpy(dtype=dtype, na_value=na_value)\r\n expected = np.array(expected)\r\n tm.assert_numpy_array_equal(result, expected)\r\n\r\n\r\ndef test_to_numpy_kwargs_raises():\r\n # numpy\r\n s = pd.Series([1, 2, 3])\r\n msg = r\"to_numpy\\(\\) got an unexpected keyword argument 'foo'\"\r\n with pytest.raises(TypeError, match=msg):\r\n s.to_numpy(foo=True)\r\n\r\n # extension\r\n s = pd.Series([1, 2, 3], dtype=\"Int64\")\r\n with pytest.raises(TypeError, match=msg):\r\n s.to_numpy(foo=True)\r\n\r\n\r\[email protected](\r\n \"data\",\r\n [\r\n {\"a\": [1, 2, 3], \"b\": [1, 2, None]},\r\n {\"a\": np.array([1, 2, 3]), \"b\": np.array([1, 2, np.nan])},\r\n {\"a\": pd.array([1, 2, 3]), \"b\": pd.array([1, 2, None])},\r\n ],\r\n)\r\[email protected](\"dtype, na_value\", [(float, np.nan), (object, None)])\r\ndef test_to_numpy_dataframe_na_value(data, dtype, na_value):\r\n # https://github.com/pandas-dev/pandas/issues/33820\r\n df = pd.DataFrame(data)\r\n result = df.to_numpy(dtype=dtype, na_value=na_value)\r\n expected = np.array([[1, 1], [2, 2], [3, na_value]], dtype=dtype)\r\n tm.assert_numpy_array_equal(result, expected)\r\n\r\n\r\[email protected](\r\n \"data, expected\",\r\n [\r\n (\r\n {\"a\": pd.array([1, 2, None])},\r\n np.array([[1.0], [2.0], [np.nan]], dtype=float),\r\n ),\r\n (\r\n {\"a\": [1, 2, 3], \"b\": [1, 2, 3]},\r\n np.array([[1, 1], [2, 2], [3, 3]], dtype=float),\r\n ),\r\n ],\r\n)\r\ndef test_to_numpy_dataframe_single_block(data, expected):\r\n # https://github.com/pandas-dev/pandas/issues/33820\r\n df = pd.DataFrame(data)\r\n result = df.to_numpy(dtype=float, na_value=np.nan)\r\n tm.assert_numpy_array_equal(result, expected)\r\n\r\n\r\ndef test_to_numpy_dataframe_single_block_no_mutate():\r\n # 
https://github.com/pandas-dev/pandas/issues/33820\r\n result = pd.DataFrame(np.array([1.0, 2.0, np.nan]))\r\n expected = pd.DataFrame(np.array([1.0, 2.0, np.nan]))\r\n result.to_numpy(na_value=0.0)\r\n tm.assert_frame_equal(result, expected)\r\n", "from sklearn import get_config, set_config, config_context\r\nfrom sklearn.utils._testing import assert_raises\r\n\r\n\r\ndef test_config_context():\r\n assert get_config() == {'assume_finite': False, 'working_memory': 1024,\r\n 'print_changed_only': True,\r\n 'display': 'text'}\r\n\r\n # Not using as a context manager affects nothing\r\n config_context(assume_finite=True)\r\n assert get_config()['assume_finite'] is False\r\n\r\n with config_context(assume_finite=True):\r\n assert get_config() == {'assume_finite': True, 'working_memory': 1024,\r\n 'print_changed_only': True,\r\n 'display': 'text'}\r\n assert get_config()['assume_finite'] is False\r\n\r\n with config_context(assume_finite=True):\r\n with config_context(assume_finite=None):\r\n assert get_config()['assume_finite'] is True\r\n\r\n assert get_config()['assume_finite'] is True\r\n\r\n with config_context(assume_finite=False):\r\n assert get_config()['assume_finite'] is False\r\n\r\n with config_context(assume_finite=None):\r\n assert get_config()['assume_finite'] is False\r\n\r\n # global setting will not be retained outside of context that\r\n # did not modify this setting\r\n set_config(assume_finite=True)\r\n assert get_config()['assume_finite'] is True\r\n\r\n assert get_config()['assume_finite'] is False\r\n\r\n assert get_config()['assume_finite'] is True\r\n\r\n assert get_config() == {'assume_finite': False, 'working_memory': 1024,\r\n 'print_changed_only': True,\r\n 'display': 'text'}\r\n\r\n # No positional arguments\r\n assert_raises(TypeError, config_context, True)\r\n # No unknown arguments\r\n assert_raises(TypeError, config_context(do_something_else=True).__enter__)\r\n\r\n\r\ndef test_config_context_exception():\r\n assert get_config()['assume_finite'] is False\r\n try:\r\n with config_context(assume_finite=True):\r\n assert get_config()['assume_finite'] is True\r\n raise ValueError()\r\n except ValueError:\r\n pass\r\n assert get_config()['assume_finite'] is False\r\n\r\n\r\ndef test_set_config():\r\n assert get_config()['assume_finite'] is False\r\n set_config(assume_finite=None)\r\n assert get_config()['assume_finite'] is False\r\n set_config(assume_finite=True)\r\n assert get_config()['assume_finite'] is True\r\n set_config(assume_finite=None)\r\n assert get_config()['assume_finite'] is True\r\n set_config(assume_finite=False)\r\n assert get_config()['assume_finite'] is False\r\n\r\n # No unknown arguments\r\n assert_raises(TypeError, set_config, do_something_else=True)\r\n", "import os\r\n\r\nimport numpy as np\r\nimport pytest\r\n\r\nimport pandas as pd\r\nfrom pandas import DataFrame\r\nimport pandas._testing as tm\r\n\r\nfrom pandas.io.excel import ExcelWriter, _OpenpyxlWriter\r\n\r\nopenpyxl = pytest.importorskip(\"openpyxl\")\r\n\r\npytestmark = pytest.mark.parametrize(\"ext\", [\".xlsx\"])\r\n\r\n\r\ndef test_to_excel_styleconverter(ext):\r\n from openpyxl import styles\r\n\r\n hstyle = {\r\n \"font\": {\"color\": \"00FF0000\", \"bold\": True},\r\n \"borders\": {\"top\": \"thin\", \"right\": \"thin\", \"bottom\": \"thin\", \"left\": \"thin\"},\r\n \"alignment\": {\"horizontal\": \"center\", \"vertical\": \"top\"},\r\n \"fill\": {\"patternType\": \"solid\", \"fgColor\": {\"rgb\": \"006666FF\", \"tint\": 0.3}},\r\n \"number_format\": {\"format_code\": 
\"0.00\"},\r\n \"protection\": {\"locked\": True, \"hidden\": False},\r\n }\r\n\r\n font_color = styles.Color(\"00FF0000\")\r\n font = styles.Font(bold=True, color=font_color)\r\n side = styles.Side(style=styles.borders.BORDER_THIN)\r\n border = styles.Border(top=side, right=side, bottom=side, left=side)\r\n alignment = styles.Alignment(horizontal=\"center\", vertical=\"top\")\r\n fill_color = styles.Color(rgb=\"006666FF\", tint=0.3)\r\n fill = styles.PatternFill(patternType=\"solid\", fgColor=fill_color)\r\n\r\n number_format = \"0.00\"\r\n\r\n protection = styles.Protection(locked=True, hidden=False)\r\n\r\n kw = _OpenpyxlWriter._convert_to_style_kwargs(hstyle)\r\n assert kw[\"font\"] == font\r\n assert kw[\"border\"] == border\r\n assert kw[\"alignment\"] == alignment\r\n assert kw[\"fill\"] == fill\r\n assert kw[\"number_format\"] == number_format\r\n assert kw[\"protection\"] == protection\r\n\r\n\r\ndef test_write_cells_merge_styled(ext):\r\n from pandas.io.formats.excel import ExcelCell\r\n\r\n sheet_name = \"merge_styled\"\r\n\r\n sty_b1 = {\"font\": {\"color\": \"00FF0000\"}}\r\n sty_a2 = {\"font\": {\"color\": \"0000FF00\"}}\r\n\r\n initial_cells = [\r\n ExcelCell(col=1, row=0, val=42, style=sty_b1),\r\n ExcelCell(col=0, row=1, val=99, style=sty_a2),\r\n ]\r\n\r\n sty_merged = {\"font\": {\"color\": \"000000FF\", \"bold\": True}}\r\n sty_kwargs = _OpenpyxlWriter._convert_to_style_kwargs(sty_merged)\r\n openpyxl_sty_merged = sty_kwargs[\"font\"]\r\n merge_cells = [\r\n ExcelCell(\r\n col=0, row=0, val=\"pandas\", mergestart=1, mergeend=1, style=sty_merged\r\n )\r\n ]\r\n\r\n with tm.ensure_clean(ext) as path:\r\n writer = _OpenpyxlWriter(path)\r\n writer.write_cells(initial_cells, sheet_name=sheet_name)\r\n writer.write_cells(merge_cells, sheet_name=sheet_name)\r\n\r\n wks = writer.sheets[sheet_name]\r\n xcell_b1 = wks[\"B1\"]\r\n xcell_a2 = wks[\"A2\"]\r\n assert xcell_b1.font == openpyxl_sty_merged\r\n assert xcell_a2.font == openpyxl_sty_merged\r\n\r\n\r\[email protected](\r\n \"mode,expected\", [(\"w\", [\"baz\"]), (\"a\", [\"foo\", \"bar\", \"baz\"])]\r\n)\r\ndef test_write_append_mode(ext, mode, expected):\r\n df = DataFrame([1], columns=[\"baz\"])\r\n\r\n with tm.ensure_clean(ext) as f:\r\n wb = openpyxl.Workbook()\r\n wb.worksheets[0].title = \"foo\"\r\n wb.worksheets[0][\"A1\"].value = \"foo\"\r\n wb.create_sheet(\"bar\")\r\n wb.worksheets[1][\"A1\"].value = \"bar\"\r\n wb.save(f)\r\n\r\n writer = ExcelWriter(f, engine=\"openpyxl\", mode=mode)\r\n df.to_excel(writer, sheet_name=\"baz\", index=False)\r\n writer.save()\r\n\r\n wb2 = openpyxl.load_workbook(f)\r\n result = [sheet.title for sheet in wb2.worksheets]\r\n assert result == expected\r\n\r\n for index, cell_value in enumerate(expected):\r\n assert wb2.worksheets[index][\"A1\"].value == cell_value\r\n\r\n\r\ndef test_to_excel_with_openpyxl_engine(ext, tmpdir):\r\n # GH 29854\r\n df1 = DataFrame({\"A\": np.linspace(1, 10, 10)})\r\n df2 = DataFrame({\"B\": np.linspace(1, 20, 10)})\r\n df = pd.concat([df1, df2], axis=1)\r\n styled = df.style.applymap(\r\n lambda val: \"color: %s\" % (\"red\" if val < 0 else \"black\")\r\n ).highlight_max()\r\n\r\n filename = tmpdir / \"styled.xlsx\"\r\n styled.to_excel(filename, engine=\"openpyxl\")\r\n\r\n assert filename.exists()\r\n os.remove(filename)\r\n", "\"\"\"Sparse accessor\"\"\"\r\n\r\nimport numpy as np\r\n\r\nfrom pandas.compat._optional import import_optional_dependency\r\n\r\nfrom pandas.core.dtypes.cast import find_common_type\r\n\r\nfrom pandas.core.accessor import 
PandasDelegate, delegate_names\r\nfrom pandas.core.arrays.sparse.array import SparseArray\r\nfrom pandas.core.arrays.sparse.dtype import SparseDtype\r\n\r\n\r\nclass BaseAccessor:\r\n _validation_msg = \"Can only use the '.sparse' accessor with Sparse data.\"\r\n\r\n def __init__(self, data=None):\r\n self._parent = data\r\n self._validate(data)\r\n\r\n def _validate(self, data):\r\n raise NotImplementedError\r\n\r\n\r\n@delegate_names(\r\n SparseArray, [\"npoints\", \"density\", \"fill_value\", \"sp_values\"], typ=\"property\"\r\n)\r\nclass SparseAccessor(BaseAccessor, PandasDelegate):\r\n \"\"\"\r\n Accessor for SparseSparse from other sparse matrix data types.\r\n \"\"\"\r\n\r\n def _validate(self, data):\r\n if not isinstance(data.dtype, SparseDtype):\r\n raise AttributeError(self._validation_msg)\r\n\r\n def _delegate_property_get(self, name, *args, **kwargs):\r\n return getattr(self._parent.array, name)\r\n\r\n def _delegate_method(self, name, *args, **kwargs):\r\n if name == \"from_coo\":\r\n return self.from_coo(*args, **kwargs)\r\n elif name == \"to_coo\":\r\n return self.to_coo(*args, **kwargs)\r\n else:\r\n raise ValueError\r\n\r\n @classmethod\r\n def from_coo(cls, A, dense_index=False):\r\n \"\"\"\r\n Create a Series with sparse values from a scipy.sparse.coo_matrix.\r\n\r\n Parameters\r\n ----------\r\n A : scipy.sparse.coo_matrix\r\n dense_index : bool, default False\r\n If False (default), the SparseSeries index consists of only the\r\n coords of the non-null entries of the original coo_matrix.\r\n If True, the SparseSeries index consists of the full sorted\r\n (row, col) coordinates of the coo_matrix.\r\n\r\n Returns\r\n -------\r\n s : Series\r\n A Series with sparse values.\r\n\r\n Examples\r\n --------\r\n >>> from scipy import sparse\r\n\r\n >>> A = sparse.coo_matrix(\r\n ... ([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4)\r\n ... )\r\n >>> A\r\n <3x4 sparse matrix of type '<class 'numpy.float64'>'\r\n with 3 stored elements in COOrdinate format>\r\n\r\n >>> A.todense()\r\n matrix([[0., 0., 1., 2.],\r\n [3., 0., 0., 0.],\r\n [0., 0., 0., 0.]])\r\n\r\n >>> ss = pd.Series.sparse.from_coo(A)\r\n >>> ss\r\n 0 2 1.0\r\n 3 2.0\r\n 1 0 3.0\r\n dtype: Sparse[float64, nan]\r\n \"\"\"\r\n from pandas import Series\r\n from pandas.core.arrays.sparse.scipy_sparse import _coo_to_sparse_series\r\n\r\n result = _coo_to_sparse_series(A, dense_index=dense_index)\r\n result = Series(result.array, index=result.index, copy=False)\r\n\r\n return result\r\n\r\n def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels=False):\r\n \"\"\"\r\n Create a scipy.sparse.coo_matrix from a Series with MultiIndex.\r\n\r\n Use row_levels and column_levels to determine the row and column\r\n coordinates respectively. row_levels and column_levels are the names\r\n (labels) or numbers of the levels. {row_levels, column_levels} must be\r\n a partition of the MultiIndex level names (or numbers).\r\n\r\n Parameters\r\n ----------\r\n row_levels : tuple/list\r\n column_levels : tuple/list\r\n sort_labels : bool, default False\r\n Sort the row and column labels before forming the sparse matrix.\r\n\r\n Returns\r\n -------\r\n y : scipy.sparse.coo_matrix\r\n rows : list (row labels)\r\n columns : list (column labels)\r\n\r\n Examples\r\n --------\r\n >>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])\r\n >>> s.index = pd.MultiIndex.from_tuples(\r\n ... [\r\n ... (1, 2, \"a\", 0),\r\n ... (1, 2, \"a\", 1),\r\n ... (1, 1, \"b\", 0),\r\n ... (1, 1, \"b\", 1),\r\n ... 
(2, 1, \"b\", 0),\r\n ... (2, 1, \"b\", 1)\r\n ... ],\r\n ... names=[\"A\", \"B\", \"C\", \"D\"],\r\n ... )\r\n >>> s\r\n A B C D\r\n 1 2 a 0 3.0\r\n 1 NaN\r\n 1 b 0 1.0\r\n 1 3.0\r\n 2 1 b 0 NaN\r\n 1 NaN\r\n dtype: float64\r\n\r\n >>> ss = s.astype(\"Sparse\")\r\n >>> ss\r\n A B C D\r\n 1 2 a 0 3.0\r\n 1 NaN\r\n 1 b 0 1.0\r\n 1 3.0\r\n 2 1 b 0 NaN\r\n 1 NaN\r\n dtype: Sparse[float64, nan]\r\n\r\n >>> A, rows, columns = ss.sparse.to_coo(\r\n ... row_levels=[\"A\", \"B\"], column_levels=[\"C\", \"D\"], sort_labels=True\r\n ... )\r\n >>> A\r\n <3x4 sparse matrix of type '<class 'numpy.float64'>'\r\n with 3 stored elements in COOrdinate format>\r\n >>> A.todense()\r\n matrix([[0., 0., 1., 3.],\r\n [3., 0., 0., 0.],\r\n [0., 0., 0., 0.]])\r\n\r\n >>> rows\r\n [(1, 1), (1, 2), (2, 1)]\r\n >>> columns\r\n [('a', 0), ('a', 1), ('b', 0), ('b', 1)]\r\n \"\"\"\r\n from pandas.core.arrays.sparse.scipy_sparse import _sparse_series_to_coo\r\n\r\n A, rows, columns = _sparse_series_to_coo(\r\n self._parent, row_levels, column_levels, sort_labels=sort_labels\r\n )\r\n return A, rows, columns\r\n\r\n def to_dense(self):\r\n \"\"\"\r\n Convert a Series from sparse values to dense.\r\n\r\n .. versionadded:: 0.25.0\r\n\r\n Returns\r\n -------\r\n Series:\r\n A Series with the same values, stored as a dense array.\r\n\r\n Examples\r\n --------\r\n >>> series = pd.Series(pd.arrays.SparseArray([0, 1, 0]))\r\n >>> series\r\n 0 0\r\n 1 1\r\n 2 0\r\n dtype: Sparse[int64, 0]\r\n\r\n >>> series.sparse.to_dense()\r\n 0 0\r\n 1 1\r\n 2 0\r\n dtype: int64\r\n \"\"\"\r\n from pandas import Series\r\n\r\n return Series(\r\n self._parent.array.to_dense(),\r\n index=self._parent.index,\r\n name=self._parent.name,\r\n )\r\n\r\n\r\nclass SparseFrameAccessor(BaseAccessor, PandasDelegate):\r\n \"\"\"\r\n DataFrame accessor for sparse data.\r\n\r\n .. versionadded:: 0.25.0\r\n \"\"\"\r\n\r\n def _validate(self, data):\r\n dtypes = data.dtypes\r\n if not all(isinstance(t, SparseDtype) for t in dtypes):\r\n raise AttributeError(self._validation_msg)\r\n\r\n @classmethod\r\n def from_spmatrix(cls, data, index=None, columns=None):\r\n \"\"\"\r\n Create a new DataFrame from a scipy sparse matrix.\r\n\r\n .. versionadded:: 0.25.0\r\n\r\n Parameters\r\n ----------\r\n data : scipy.sparse.spmatrix\r\n Must be convertible to csc format.\r\n index, columns : Index, optional\r\n Row and column labels to use for the resulting DataFrame.\r\n Defaults to a RangeIndex.\r\n\r\n Returns\r\n -------\r\n DataFrame\r\n Each column of the DataFrame is stored as a\r\n :class:`arrays.SparseArray`.\r\n\r\n Examples\r\n --------\r\n >>> import scipy.sparse\r\n >>> mat = scipy.sparse.eye(3)\r\n >>> pd.DataFrame.sparse.from_spmatrix(mat)\r\n 0 1 2\r\n 0 1.0 0.0 0.0\r\n 1 0.0 1.0 0.0\r\n 2 0.0 0.0 1.0\r\n \"\"\"\r\n from pandas._libs.sparse import IntIndex\r\n\r\n from pandas import DataFrame\r\n\r\n data = data.tocsc()\r\n index, columns = cls._prep_index(data, index, columns)\r\n n_rows, n_columns = data.shape\r\n # We need to make sure indices are sorted, as we create\r\n # IntIndex with no input validation (i.e. 
check_integrity=False ).\r\n # Indices may already be sorted in scipy in which case this adds\r\n # a small overhead.\r\n data.sort_indices()\r\n indices = data.indices\r\n indptr = data.indptr\r\n array_data = data.data\r\n dtype = SparseDtype(array_data.dtype, 0)\r\n arrays = []\r\n for i in range(n_columns):\r\n sl = slice(indptr[i], indptr[i + 1])\r\n idx = IntIndex(n_rows, indices[sl], check_integrity=False)\r\n arr = SparseArray._simple_new(array_data[sl], idx, dtype)\r\n arrays.append(arr)\r\n return DataFrame._from_arrays(\r\n arrays, columns=columns, index=index, verify_integrity=False\r\n )\r\n\r\n def to_dense(self):\r\n \"\"\"\r\n Convert a DataFrame with sparse values to dense.\r\n\r\n .. versionadded:: 0.25.0\r\n\r\n Returns\r\n -------\r\n DataFrame\r\n A DataFrame with the same values stored as dense arrays.\r\n\r\n Examples\r\n --------\r\n >>> df = pd.DataFrame({\"A\": pd.arrays.SparseArray([0, 1, 0])})\r\n >>> df.sparse.to_dense()\r\n A\r\n 0 0\r\n 1 1\r\n 2 0\r\n \"\"\"\r\n from pandas import DataFrame\r\n\r\n data = {k: v.array.to_dense() for k, v in self._parent.items()}\r\n return DataFrame(data, index=self._parent.index, columns=self._parent.columns)\r\n\r\n def to_coo(self):\r\n \"\"\"\r\n Return the contents of the frame as a sparse SciPy COO matrix.\r\n\r\n .. versionadded:: 0.25.0\r\n\r\n Returns\r\n -------\r\n coo_matrix : scipy.sparse.spmatrix\r\n If the caller is heterogeneous and contains booleans or objects,\r\n the result will be of dtype=object. See Notes.\r\n\r\n Notes\r\n -----\r\n The dtype will be the lowest-common-denominator type (implicit\r\n upcasting); that is to say if the dtypes (even of numeric types)\r\n are mixed, the one that accommodates all will be chosen.\r\n\r\n e.g. If the dtypes are float16 and float32, dtype will be upcast to\r\n float32. By numpy.find_common_type convention, mixing int64 and\r\n and uint64 will result in a float64 dtype.\r\n \"\"\"\r\n import_optional_dependency(\"scipy\")\r\n from scipy.sparse import coo_matrix\r\n\r\n dtype = find_common_type(self._parent.dtypes)\r\n if isinstance(dtype, SparseDtype):\r\n dtype = dtype.subtype\r\n\r\n cols, rows, datas = [], [], []\r\n for col, name in enumerate(self._parent):\r\n s = self._parent[name]\r\n row = s.array.sp_index.to_int_index().indices\r\n cols.append(np.repeat(col, len(row)))\r\n rows.append(row)\r\n datas.append(s.array.sp_values.astype(dtype, copy=False))\r\n\r\n cols = np.concatenate(cols)\r\n rows = np.concatenate(rows)\r\n datas = np.concatenate(datas)\r\n return coo_matrix((datas, (rows, cols)), shape=self._parent.shape)\r\n\r\n @property\r\n def density(self) -> float:\r\n \"\"\"\r\n Ratio of non-sparse points to total (dense) data points.\r\n \"\"\"\r\n return np.mean([column.array.density for _, column in self._parent.items()])\r\n\r\n @staticmethod\r\n def _prep_index(data, index, columns):\r\n from pandas.core.indexes.api import ensure_index\r\n import pandas.core.indexes.base as ibase\r\n\r\n N, K = data.shape\r\n if index is None:\r\n index = ibase.default_index(N)\r\n else:\r\n index = ensure_index(index)\r\n if columns is None:\r\n columns = ibase.default_index(K)\r\n else:\r\n columns = ensure_index(columns)\r\n\r\n if len(columns) != K:\r\n raise ValueError(f\"Column length mismatch: {len(columns)} vs. {K}\")\r\n if len(index) != N:\r\n raise ValueError(f\"Index length mismatch: {len(index)} vs. 
{N}\")\r\n return index, columns\r\n", "\"\"\"\r\nBoilerplate functions used in defining binary operations.\r\n\"\"\"\r\nfrom functools import wraps\r\nfrom typing import Callable\r\n\r\nfrom pandas._libs.lib import item_from_zerodim\r\nfrom pandas._typing import F\r\n\r\nfrom pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries\r\n\r\n\r\ndef unpack_zerodim_and_defer(name: str) -> Callable[[F], F]:\r\n \"\"\"\r\n Boilerplate for pandas conventions in arithmetic and comparison methods.\r\n\r\n Parameters\r\n ----------\r\n name : str\r\n\r\n Returns\r\n -------\r\n decorator\r\n \"\"\"\r\n\r\n def wrapper(method: F) -> F:\r\n return _unpack_zerodim_and_defer(method, name)\r\n\r\n return wrapper\r\n\r\n\r\ndef _unpack_zerodim_and_defer(method, name: str):\r\n \"\"\"\r\n Boilerplate for pandas conventions in arithmetic and comparison methods.\r\n\r\n Ensure method returns NotImplemented when operating against \"senior\"\r\n classes. Ensure zero-dimensional ndarrays are always unpacked.\r\n\r\n Parameters\r\n ----------\r\n method : binary method\r\n name : str\r\n\r\n Returns\r\n -------\r\n method\r\n \"\"\"\r\n is_cmp = name.strip(\"__\") in {\"eq\", \"ne\", \"lt\", \"le\", \"gt\", \"ge\"}\r\n\r\n @wraps(method)\r\n def new_method(self, other):\r\n\r\n if is_cmp and isinstance(self, ABCIndexClass) and isinstance(other, ABCSeries):\r\n # For comparison ops, Index does *not* defer to Series\r\n pass\r\n else:\r\n for cls in [ABCDataFrame, ABCSeries, ABCIndexClass]:\r\n if isinstance(self, cls):\r\n break\r\n if isinstance(other, cls):\r\n return NotImplemented\r\n\r\n other = item_from_zerodim(other)\r\n\r\n return method(self, other)\r\n\r\n return new_method\r\n", "\"\"\" Test cases for Series.plot \"\"\"\r\n\r\n\r\nfrom datetime import datetime\r\nfrom itertools import chain\r\n\r\nimport numpy as np\r\nfrom numpy.random import randn\r\nimport pytest\r\n\r\nimport pandas.util._test_decorators as td\r\n\r\nimport pandas as pd\r\nfrom pandas import DataFrame, Series, date_range\r\nimport pandas._testing as tm\r\nfrom pandas.tests.plotting.common import TestPlotBase, _check_plot_works\r\n\r\nimport pandas.plotting as plotting\r\n\r\n\r\[email protected]_if_no_mpl\r\nclass TestSeriesPlots(TestPlotBase):\r\n def setup_method(self, method):\r\n TestPlotBase.setup_method(self, method)\r\n import matplotlib as mpl\r\n\r\n mpl.rcdefaults()\r\n\r\n self.ts = tm.makeTimeSeries()\r\n self.ts.name = \"ts\"\r\n\r\n self.series = tm.makeStringSeries()\r\n self.series.name = \"series\"\r\n\r\n self.iseries = tm.makePeriodSeries()\r\n self.iseries.name = \"iseries\"\r\n\r\n @pytest.mark.slow\r\n def test_plot(self):\r\n _check_plot_works(self.ts.plot, label=\"foo\")\r\n _check_plot_works(self.ts.plot, use_index=False)\r\n axes = _check_plot_works(self.ts.plot, rot=0)\r\n self._check_ticks_props(axes, xrot=0)\r\n\r\n ax = _check_plot_works(self.ts.plot, style=\".\", logy=True)\r\n self._check_ax_scales(ax, yaxis=\"log\")\r\n\r\n ax = _check_plot_works(self.ts.plot, style=\".\", logx=True)\r\n self._check_ax_scales(ax, xaxis=\"log\")\r\n\r\n ax = _check_plot_works(self.ts.plot, style=\".\", loglog=True)\r\n self._check_ax_scales(ax, xaxis=\"log\", yaxis=\"log\")\r\n\r\n _check_plot_works(self.ts[:10].plot.bar)\r\n _check_plot_works(self.ts.plot.area, stacked=False)\r\n _check_plot_works(self.iseries.plot)\r\n\r\n for kind in [\"line\", \"bar\", \"barh\", \"kde\", \"hist\", \"box\"]:\r\n _check_plot_works(self.series[:5].plot, kind=kind)\r\n\r\n 
_check_plot_works(self.series[:10].plot.barh)\r\n ax = _check_plot_works(Series(randn(10)).plot.bar, color=\"black\")\r\n self._check_colors([ax.patches[0]], facecolors=[\"black\"])\r\n\r\n # GH 6951\r\n ax = _check_plot_works(self.ts.plot, subplots=True)\r\n self._check_axes_shape(ax, axes_num=1, layout=(1, 1))\r\n\r\n ax = _check_plot_works(self.ts.plot, subplots=True, layout=(-1, 1))\r\n self._check_axes_shape(ax, axes_num=1, layout=(1, 1))\r\n ax = _check_plot_works(self.ts.plot, subplots=True, layout=(1, -1))\r\n self._check_axes_shape(ax, axes_num=1, layout=(1, 1))\r\n\r\n @pytest.mark.slow\r\n def test_plot_figsize_and_title(self):\r\n # figsize and title\r\n _, ax = self.plt.subplots()\r\n ax = self.series.plot(title=\"Test\", figsize=(16, 8), ax=ax)\r\n self._check_text_labels(ax.title, \"Test\")\r\n self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16, 8))\r\n\r\n def test_dont_modify_rcParams(self):\r\n # GH 8242\r\n key = \"axes.prop_cycle\"\r\n colors = self.plt.rcParams[key]\r\n _, ax = self.plt.subplots()\r\n Series([1, 2, 3]).plot(ax=ax)\r\n assert colors == self.plt.rcParams[key]\r\n\r\n def test_ts_line_lim(self):\r\n fig, ax = self.plt.subplots()\r\n ax = self.ts.plot(ax=ax)\r\n xmin, xmax = ax.get_xlim()\r\n lines = ax.get_lines()\r\n assert xmin <= lines[0].get_data(orig=False)[0][0]\r\n assert xmax >= lines[0].get_data(orig=False)[0][-1]\r\n tm.close()\r\n\r\n ax = self.ts.plot(secondary_y=True, ax=ax)\r\n xmin, xmax = ax.get_xlim()\r\n lines = ax.get_lines()\r\n assert xmin <= lines[0].get_data(orig=False)[0][0]\r\n assert xmax >= lines[0].get_data(orig=False)[0][-1]\r\n\r\n def test_ts_area_lim(self):\r\n _, ax = self.plt.subplots()\r\n ax = self.ts.plot.area(stacked=False, ax=ax)\r\n xmin, xmax = ax.get_xlim()\r\n line = ax.get_lines()[0].get_data(orig=False)[0]\r\n assert xmin <= line[0]\r\n assert xmax >= line[-1]\r\n tm.close()\r\n\r\n # GH 7471\r\n _, ax = self.plt.subplots()\r\n ax = self.ts.plot.area(stacked=False, x_compat=True, ax=ax)\r\n xmin, xmax = ax.get_xlim()\r\n line = ax.get_lines()[0].get_data(orig=False)[0]\r\n assert xmin <= line[0]\r\n assert xmax >= line[-1]\r\n tm.close()\r\n\r\n tz_ts = self.ts.copy()\r\n tz_ts.index = tz_ts.tz_localize(\"GMT\").tz_convert(\"CET\")\r\n _, ax = self.plt.subplots()\r\n ax = tz_ts.plot.area(stacked=False, x_compat=True, ax=ax)\r\n xmin, xmax = ax.get_xlim()\r\n line = ax.get_lines()[0].get_data(orig=False)[0]\r\n assert xmin <= line[0]\r\n assert xmax >= line[-1]\r\n tm.close()\r\n\r\n _, ax = self.plt.subplots()\r\n ax = tz_ts.plot.area(stacked=False, secondary_y=True, ax=ax)\r\n xmin, xmax = ax.get_xlim()\r\n line = ax.get_lines()[0].get_data(orig=False)[0]\r\n assert xmin <= line[0]\r\n assert xmax >= line[-1]\r\n\r\n def test_label(self):\r\n s = Series([1, 2])\r\n _, ax = self.plt.subplots()\r\n ax = s.plot(label=\"LABEL\", legend=True, ax=ax)\r\n self._check_legend_labels(ax, labels=[\"LABEL\"])\r\n self.plt.close()\r\n _, ax = self.plt.subplots()\r\n ax = s.plot(legend=True, ax=ax)\r\n self._check_legend_labels(ax, labels=[\"None\"])\r\n self.plt.close()\r\n # get name from index\r\n s.name = \"NAME\"\r\n _, ax = self.plt.subplots()\r\n ax = s.plot(legend=True, ax=ax)\r\n self._check_legend_labels(ax, labels=[\"NAME\"])\r\n self.plt.close()\r\n # override the default\r\n _, ax = self.plt.subplots()\r\n ax = s.plot(legend=True, label=\"LABEL\", ax=ax)\r\n self._check_legend_labels(ax, labels=[\"LABEL\"])\r\n self.plt.close()\r\n # Add lebel info, but don't draw\r\n _, ax = 
self.plt.subplots()\r\n ax = s.plot(legend=False, label=\"LABEL\", ax=ax)\r\n assert ax.get_legend() is None # Hasn't been drawn\r\n ax.legend() # draw it\r\n self._check_legend_labels(ax, labels=[\"LABEL\"])\r\n\r\n def test_boolean(self):\r\n # GH 23719\r\n s = Series([False, False, True])\r\n _check_plot_works(s.plot, include_bool=True)\r\n\r\n msg = \"no numeric data to plot\"\r\n with pytest.raises(TypeError, match=msg):\r\n _check_plot_works(s.plot)\r\n\r\n def test_line_area_nan_series(self):\r\n values = [1, 2, np.nan, 3]\r\n s = Series(values)\r\n ts = Series(values, index=tm.makeDateIndex(k=4))\r\n\r\n for d in [s, ts]:\r\n ax = _check_plot_works(d.plot)\r\n masked = ax.lines[0].get_ydata()\r\n # remove nan for comparison purpose\r\n exp = np.array([1, 2, 3], dtype=np.float64)\r\n tm.assert_numpy_array_equal(np.delete(masked.data, 2), exp)\r\n tm.assert_numpy_array_equal(\r\n masked.mask, np.array([False, False, True, False])\r\n )\r\n\r\n expected = np.array([1, 2, 0, 3], dtype=np.float64)\r\n ax = _check_plot_works(d.plot, stacked=True)\r\n tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)\r\n ax = _check_plot_works(d.plot.area)\r\n tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)\r\n ax = _check_plot_works(d.plot.area, stacked=False)\r\n tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)\r\n\r\n def test_line_use_index_false(self):\r\n s = Series([1, 2, 3], index=[\"a\", \"b\", \"c\"])\r\n s.index.name = \"The Index\"\r\n _, ax = self.plt.subplots()\r\n ax = s.plot(use_index=False, ax=ax)\r\n label = ax.get_xlabel()\r\n assert label == \"\"\r\n _, ax = self.plt.subplots()\r\n ax2 = s.plot.bar(use_index=False, ax=ax)\r\n label2 = ax2.get_xlabel()\r\n assert label2 == \"\"\r\n\r\n @pytest.mark.slow\r\n def test_bar_log(self):\r\n expected = np.array([1e-1, 1e0, 1e1, 1e2, 1e3, 1e4])\r\n\r\n _, ax = self.plt.subplots()\r\n ax = Series([200, 500]).plot.bar(log=True, ax=ax)\r\n tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)\r\n tm.close()\r\n\r\n _, ax = self.plt.subplots()\r\n ax = Series([200, 500]).plot.barh(log=True, ax=ax)\r\n tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)\r\n tm.close()\r\n\r\n # GH 9905\r\n expected = np.array([1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1])\r\n\r\n _, ax = self.plt.subplots()\r\n ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind=\"bar\", ax=ax)\r\n ymin = 0.0007943282347242822\r\n ymax = 0.12589254117941673\r\n res = ax.get_ylim()\r\n tm.assert_almost_equal(res[0], ymin)\r\n tm.assert_almost_equal(res[1], ymax)\r\n tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)\r\n tm.close()\r\n\r\n _, ax = self.plt.subplots()\r\n ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind=\"barh\", ax=ax)\r\n res = ax.get_xlim()\r\n tm.assert_almost_equal(res[0], ymin)\r\n tm.assert_almost_equal(res[1], ymax)\r\n tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)\r\n\r\n @pytest.mark.slow\r\n def test_bar_ignore_index(self):\r\n df = Series([1, 2, 3, 4], index=[\"a\", \"b\", \"c\", \"d\"])\r\n _, ax = self.plt.subplots()\r\n ax = df.plot.bar(use_index=False, ax=ax)\r\n self._check_text_labels(ax.get_xticklabels(), [\"0\", \"1\", \"2\", \"3\"])\r\n\r\n def test_bar_user_colors(self):\r\n s = Series([1, 2, 3, 4])\r\n ax = s.plot.bar(color=[\"red\", \"blue\", \"blue\", \"red\"])\r\n result = [p.get_facecolor() for p in ax.patches]\r\n expected = [\r\n (1.0, 0.0, 0.0, 1.0),\r\n (0.0, 0.0, 1.0, 1.0),\r\n (0.0, 0.0, 1.0, 1.0),\r\n (1.0, 0.0, 0.0, 1.0),\r\n ]\r\n assert result 
== expected\r\n\r\n def test_rotation(self):\r\n df = DataFrame(randn(5, 5))\r\n # Default rot 0\r\n _, ax = self.plt.subplots()\r\n axes = df.plot(ax=ax)\r\n self._check_ticks_props(axes, xrot=0)\r\n\r\n _, ax = self.plt.subplots()\r\n axes = df.plot(rot=30, ax=ax)\r\n self._check_ticks_props(axes, xrot=30)\r\n\r\n def test_irregular_datetime(self):\r\n from pandas.plotting._matplotlib.converter import DatetimeConverter\r\n\r\n rng = date_range(\"1/1/2000\", \"3/1/2000\")\r\n rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]]\r\n ser = Series(randn(len(rng)), rng)\r\n _, ax = self.plt.subplots()\r\n ax = ser.plot(ax=ax)\r\n xp = DatetimeConverter.convert(datetime(1999, 1, 1), \"\", ax)\r\n ax.set_xlim(\"1/1/1999\", \"1/1/2001\")\r\n assert xp == ax.get_xlim()[0]\r\n\r\n def test_unsorted_index_xlim(self):\r\n ser = Series(\r\n [0.0, 1.0, np.nan, 3.0, 4.0, 5.0, 6.0],\r\n index=[1.0, 0.0, 3.0, 2.0, np.nan, 3.0, 2.0],\r\n )\r\n _, ax = self.plt.subplots()\r\n ax = ser.plot(ax=ax)\r\n xmin, xmax = ax.get_xlim()\r\n lines = ax.get_lines()\r\n assert xmin <= np.nanmin(lines[0].get_data(orig=False)[0])\r\n assert xmax >= np.nanmax(lines[0].get_data(orig=False)[0])\r\n\r\n @pytest.mark.slow\r\n def test_pie_series(self):\r\n # if sum of values is less than 1.0, pie handle them as rate and draw\r\n # semicircle.\r\n series = Series(\r\n np.random.randint(1, 5), index=[\"a\", \"b\", \"c\", \"d\", \"e\"], name=\"YLABEL\"\r\n )\r\n ax = _check_plot_works(series.plot.pie)\r\n self._check_text_labels(ax.texts, series.index)\r\n assert ax.get_ylabel() == \"YLABEL\"\r\n\r\n # without wedge labels\r\n ax = _check_plot_works(series.plot.pie, labels=None)\r\n self._check_text_labels(ax.texts, [\"\"] * 5)\r\n\r\n # with less colors than elements\r\n color_args = [\"r\", \"g\", \"b\"]\r\n ax = _check_plot_works(series.plot.pie, colors=color_args)\r\n\r\n color_expected = [\"r\", \"g\", \"b\", \"r\", \"g\"]\r\n self._check_colors(ax.patches, facecolors=color_expected)\r\n\r\n # with labels and colors\r\n labels = [\"A\", \"B\", \"C\", \"D\", \"E\"]\r\n color_args = [\"r\", \"g\", \"b\", \"c\", \"m\"]\r\n ax = _check_plot_works(series.plot.pie, labels=labels, colors=color_args)\r\n self._check_text_labels(ax.texts, labels)\r\n self._check_colors(ax.patches, facecolors=color_args)\r\n\r\n # with autopct and fontsize\r\n ax = _check_plot_works(\r\n series.plot.pie, colors=color_args, autopct=\"%.2f\", fontsize=7\r\n )\r\n pcts = [f\"{s*100:.2f}\" for s in series.values / float(series.sum())]\r\n expected_texts = list(chain.from_iterable(zip(series.index, pcts)))\r\n self._check_text_labels(ax.texts, expected_texts)\r\n for t in ax.texts:\r\n assert t.get_fontsize() == 7\r\n\r\n # includes negative value\r\n with pytest.raises(ValueError):\r\n series = Series([1, 2, 0, 4, -1], index=[\"a\", \"b\", \"c\", \"d\", \"e\"])\r\n series.plot.pie()\r\n\r\n # includes nan\r\n series = Series([1, 2, np.nan, 4], index=[\"a\", \"b\", \"c\", \"d\"], name=\"YLABEL\")\r\n ax = _check_plot_works(series.plot.pie)\r\n self._check_text_labels(ax.texts, [\"a\", \"b\", \"\", \"d\"])\r\n\r\n def test_pie_nan(self):\r\n s = Series([1, np.nan, 1, 1])\r\n _, ax = self.plt.subplots()\r\n ax = s.plot.pie(legend=True, ax=ax)\r\n expected = [\"0\", \"\", \"2\", \"3\"]\r\n result = [x.get_text() for x in ax.texts]\r\n assert result == expected\r\n\r\n @pytest.mark.slow\r\n def test_hist_df_kwargs(self):\r\n df = DataFrame(np.random.randn(10, 2))\r\n _, ax = self.plt.subplots()\r\n ax = df.plot.hist(bins=5, ax=ax)\r\n assert len(ax.patches) == 
10\r\n\r\n @pytest.mark.slow\r\n def test_hist_df_with_nonnumerics(self):\r\n # GH 9853\r\n with tm.RNGContext(1):\r\n df = DataFrame(np.random.randn(10, 4), columns=[\"A\", \"B\", \"C\", \"D\"])\r\n df[\"E\"] = [\"x\", \"y\"] * 5\r\n _, ax = self.plt.subplots()\r\n ax = df.plot.hist(bins=5, ax=ax)\r\n assert len(ax.patches) == 20\r\n\r\n _, ax = self.plt.subplots()\r\n ax = df.plot.hist(ax=ax) # bins=10\r\n assert len(ax.patches) == 40\r\n\r\n @pytest.mark.slow\r\n def test_hist_legacy(self):\r\n _check_plot_works(self.ts.hist)\r\n _check_plot_works(self.ts.hist, grid=False)\r\n _check_plot_works(self.ts.hist, figsize=(8, 10))\r\n # _check_plot_works adds an ax so catch warning. see GH #13188\r\n with tm.assert_produces_warning(UserWarning):\r\n _check_plot_works(self.ts.hist, by=self.ts.index.month)\r\n with tm.assert_produces_warning(UserWarning):\r\n _check_plot_works(self.ts.hist, by=self.ts.index.month, bins=5)\r\n\r\n fig, ax = self.plt.subplots(1, 1)\r\n _check_plot_works(self.ts.hist, ax=ax)\r\n _check_plot_works(self.ts.hist, ax=ax, figure=fig)\r\n _check_plot_works(self.ts.hist, figure=fig)\r\n tm.close()\r\n\r\n fig, (ax1, ax2) = self.plt.subplots(1, 2)\r\n _check_plot_works(self.ts.hist, figure=fig, ax=ax1)\r\n _check_plot_works(self.ts.hist, figure=fig, ax=ax2)\r\n\r\n with pytest.raises(ValueError):\r\n self.ts.hist(by=self.ts.index, figure=fig)\r\n\r\n @pytest.mark.slow\r\n def test_hist_bins_legacy(self):\r\n df = DataFrame(np.random.randn(10, 2))\r\n ax = df.hist(bins=2)[0][0]\r\n assert len(ax.patches) == 2\r\n\r\n @pytest.mark.slow\r\n def test_hist_layout(self):\r\n df = self.hist_df\r\n with pytest.raises(ValueError):\r\n df.height.hist(layout=(1, 1))\r\n\r\n with pytest.raises(ValueError):\r\n df.height.hist(layout=[1, 1])\r\n\r\n @pytest.mark.slow\r\n def test_hist_layout_with_by(self):\r\n df = self.hist_df\r\n\r\n # _check_plot_works adds an ax so catch warning. 
see GH #13188\r\n with tm.assert_produces_warning(UserWarning):\r\n axes = _check_plot_works(df.height.hist, by=df.gender, layout=(2, 1))\r\n self._check_axes_shape(axes, axes_num=2, layout=(2, 1))\r\n\r\n with tm.assert_produces_warning(UserWarning):\r\n axes = _check_plot_works(df.height.hist, by=df.gender, layout=(3, -1))\r\n self._check_axes_shape(axes, axes_num=2, layout=(3, 1))\r\n\r\n with tm.assert_produces_warning(UserWarning):\r\n axes = _check_plot_works(df.height.hist, by=df.category, layout=(4, 1))\r\n self._check_axes_shape(axes, axes_num=4, layout=(4, 1))\r\n\r\n with tm.assert_produces_warning(UserWarning):\r\n axes = _check_plot_works(df.height.hist, by=df.category, layout=(2, -1))\r\n self._check_axes_shape(axes, axes_num=4, layout=(2, 2))\r\n\r\n with tm.assert_produces_warning(UserWarning):\r\n axes = _check_plot_works(df.height.hist, by=df.category, layout=(3, -1))\r\n self._check_axes_shape(axes, axes_num=4, layout=(3, 2))\r\n\r\n with tm.assert_produces_warning(UserWarning):\r\n axes = _check_plot_works(df.height.hist, by=df.category, layout=(-1, 4))\r\n self._check_axes_shape(axes, axes_num=4, layout=(1, 4))\r\n\r\n with tm.assert_produces_warning(UserWarning):\r\n axes = _check_plot_works(df.height.hist, by=df.classroom, layout=(2, 2))\r\n self._check_axes_shape(axes, axes_num=3, layout=(2, 2))\r\n\r\n axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7))\r\n self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 7))\r\n\r\n @pytest.mark.slow\r\n def test_hist_no_overlap(self):\r\n from matplotlib.pyplot import gcf, subplot\r\n\r\n x = Series(randn(2))\r\n y = Series(randn(2))\r\n subplot(121)\r\n x.hist()\r\n subplot(122)\r\n y.hist()\r\n fig = gcf()\r\n axes = fig.axes\r\n assert len(axes) == 2\r\n\r\n @pytest.mark.slow\r\n def test_hist_secondary_legend(self):\r\n # GH 9610\r\n df = DataFrame(np.random.randn(30, 4), columns=list(\"abcd\"))\r\n\r\n # primary -> secondary\r\n _, ax = self.plt.subplots()\r\n ax = df[\"a\"].plot.hist(legend=True, ax=ax)\r\n df[\"b\"].plot.hist(ax=ax, legend=True, secondary_y=True)\r\n # both legends are dran on left ax\r\n # left and right axis must be visible\r\n self._check_legend_labels(ax, labels=[\"a\", \"b (right)\"])\r\n assert ax.get_yaxis().get_visible()\r\n assert ax.right_ax.get_yaxis().get_visible()\r\n tm.close()\r\n\r\n # secondary -> secondary\r\n _, ax = self.plt.subplots()\r\n ax = df[\"a\"].plot.hist(legend=True, secondary_y=True, ax=ax)\r\n df[\"b\"].plot.hist(ax=ax, legend=True, secondary_y=True)\r\n # both legends are draw on left ax\r\n # left axis must be invisible, right axis must be visible\r\n self._check_legend_labels(ax.left_ax, labels=[\"a (right)\", \"b (right)\"])\r\n assert not ax.left_ax.get_yaxis().get_visible()\r\n assert ax.get_yaxis().get_visible()\r\n tm.close()\r\n\r\n # secondary -> primary\r\n _, ax = self.plt.subplots()\r\n ax = df[\"a\"].plot.hist(legend=True, secondary_y=True, ax=ax)\r\n # right axes is returned\r\n df[\"b\"].plot.hist(ax=ax, legend=True)\r\n # both legends are draw on left ax\r\n # left and right axis must be visible\r\n self._check_legend_labels(ax.left_ax, labels=[\"a (right)\", \"b\"])\r\n assert ax.left_ax.get_yaxis().get_visible()\r\n assert ax.get_yaxis().get_visible()\r\n tm.close()\r\n\r\n @pytest.mark.slow\r\n def test_df_series_secondary_legend(self):\r\n # GH 9779\r\n df = DataFrame(np.random.randn(30, 3), columns=list(\"abc\"))\r\n s = Series(np.random.randn(30), name=\"x\")\r\n\r\n # primary -> secondary (without passing ax)\r\n 
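# --- Illustrative aside, not part of the original test module ---
# A hedged sketch of the behaviour these secondary-legend tests describe:
# plotting with `secondary_y=True` draws the series against a right-hand
# axis and, unless `mark_right=False` is passed, appends "(right)" to its
# legend label, with the combined legend kept on the left axes. Assumes
# pandas + matplotlib ("Agg" backend); names are illustrative only.

import matplotlib

matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(30, 2), columns=["a", "b"])
extra = pd.Series(np.random.randn(30), name="x")

fig, ax = plt.subplots()
ax = df.plot(ax=ax)                               # "a", "b" on the left axis
extra.plot(ax=ax, legend=True, secondary_y=True)  # "x (right)" on the right axis
legend = ax.get_legend()
if legend is not None:  # pandas places the combined legend on the left axes
    print([t.get_text() for t in legend.get_texts()])  # expect 'x (right)' among the labels
plt.close(fig)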
_, ax = self.plt.subplots()\r\n ax = df.plot(ax=ax)\r\n s.plot(legend=True, secondary_y=True, ax=ax)\r\n # both legends are dran on left ax\r\n # left and right axis must be visible\r\n self._check_legend_labels(ax, labels=[\"a\", \"b\", \"c\", \"x (right)\"])\r\n assert ax.get_yaxis().get_visible()\r\n assert ax.right_ax.get_yaxis().get_visible()\r\n tm.close()\r\n\r\n # primary -> secondary (with passing ax)\r\n _, ax = self.plt.subplots()\r\n ax = df.plot(ax=ax)\r\n s.plot(ax=ax, legend=True, secondary_y=True)\r\n # both legends are dran on left ax\r\n # left and right axis must be visible\r\n self._check_legend_labels(ax, labels=[\"a\", \"b\", \"c\", \"x (right)\"])\r\n assert ax.get_yaxis().get_visible()\r\n assert ax.right_ax.get_yaxis().get_visible()\r\n tm.close()\r\n\r\n # secondary -> secondary (without passing ax)\r\n _, ax = self.plt.subplots()\r\n ax = df.plot(secondary_y=True, ax=ax)\r\n s.plot(legend=True, secondary_y=True, ax=ax)\r\n # both legends are dran on left ax\r\n # left axis must be invisible and right axis must be visible\r\n expected = [\"a (right)\", \"b (right)\", \"c (right)\", \"x (right)\"]\r\n self._check_legend_labels(ax.left_ax, labels=expected)\r\n assert not ax.left_ax.get_yaxis().get_visible()\r\n assert ax.get_yaxis().get_visible()\r\n tm.close()\r\n\r\n # secondary -> secondary (with passing ax)\r\n _, ax = self.plt.subplots()\r\n ax = df.plot(secondary_y=True, ax=ax)\r\n s.plot(ax=ax, legend=True, secondary_y=True)\r\n # both legends are dran on left ax\r\n # left axis must be invisible and right axis must be visible\r\n expected = [\"a (right)\", \"b (right)\", \"c (right)\", \"x (right)\"]\r\n self._check_legend_labels(ax.left_ax, expected)\r\n assert not ax.left_ax.get_yaxis().get_visible()\r\n assert ax.get_yaxis().get_visible()\r\n tm.close()\r\n\r\n # secondary -> secondary (with passing ax)\r\n _, ax = self.plt.subplots()\r\n ax = df.plot(secondary_y=True, mark_right=False, ax=ax)\r\n s.plot(ax=ax, legend=True, secondary_y=True)\r\n # both legends are dran on left ax\r\n # left axis must be invisible and right axis must be visible\r\n expected = [\"a\", \"b\", \"c\", \"x (right)\"]\r\n self._check_legend_labels(ax.left_ax, expected)\r\n assert not ax.left_ax.get_yaxis().get_visible()\r\n assert ax.get_yaxis().get_visible()\r\n tm.close()\r\n\r\n @pytest.mark.slow\r\n @pytest.mark.parametrize(\r\n \"input_logy, expected_scale\", [(True, \"log\"), (\"sym\", \"symlog\")]\r\n )\r\n def test_secondary_logy(self, input_logy, expected_scale):\r\n # GH 25545\r\n s1 = Series(np.random.randn(30))\r\n s2 = Series(np.random.randn(30))\r\n\r\n # GH 24980\r\n ax1 = s1.plot(logy=input_logy)\r\n ax2 = s2.plot(secondary_y=True, logy=input_logy)\r\n\r\n assert ax1.get_yscale() == expected_scale\r\n assert ax2.get_yscale() == expected_scale\r\n\r\n @pytest.mark.slow\r\n def test_plot_fails_with_dupe_color_and_style(self):\r\n x = Series(randn(2))\r\n with pytest.raises(ValueError):\r\n _, ax = self.plt.subplots()\r\n x.plot(style=\"k--\", color=\"k\", ax=ax)\r\n\r\n @pytest.mark.slow\r\n @td.skip_if_no_scipy\r\n def test_hist_kde(self):\r\n\r\n _, ax = self.plt.subplots()\r\n ax = self.ts.plot.hist(logy=True, ax=ax)\r\n self._check_ax_scales(ax, yaxis=\"log\")\r\n xlabels = ax.get_xticklabels()\r\n # ticks are values, thus ticklabels are blank\r\n self._check_text_labels(xlabels, [\"\"] * len(xlabels))\r\n ylabels = ax.get_yticklabels()\r\n self._check_text_labels(ylabels, [\"\"] * len(ylabels))\r\n\r\n _check_plot_works(self.ts.plot.kde)\r\n 
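# --- Illustrative aside, not part of the original test module ---
# A small sketch of the two density views exercised here: `.plot.hist()`
# bins the values, while `.plot.kde()` (alias `.plot.density()`) fits a
# Gaussian kernel density estimate and therefore needs scipy. Assumes
# pandas, numpy, scipy and matplotlib ("Agg" backend); variable names are
# illustrative.

import matplotlib

matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

ts = pd.Series(np.random.randn(200))

fig, (ax1, ax2) = plt.subplots(1, 2)
ts.plot.hist(bins=10, logy=True, ax=ax1)  # log-scaled counts, 10 bars
ts.plot.kde(ax=ax2)                       # smooth estimate, a single line
print(len(ax1.patches), len(ax2.get_lines()))  # e.g. 10 and 1
plt.close(fig)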
_check_plot_works(self.ts.plot.density)\r\n _, ax = self.plt.subplots()\r\n ax = self.ts.plot.kde(logy=True, ax=ax)\r\n self._check_ax_scales(ax, yaxis=\"log\")\r\n xlabels = ax.get_xticklabels()\r\n self._check_text_labels(xlabels, [\"\"] * len(xlabels))\r\n ylabels = ax.get_yticklabels()\r\n self._check_text_labels(ylabels, [\"\"] * len(ylabels))\r\n\r\n @pytest.mark.slow\r\n @td.skip_if_no_scipy\r\n def test_kde_kwargs(self):\r\n sample_points = np.linspace(-100, 100, 20)\r\n _check_plot_works(self.ts.plot.kde, bw_method=\"scott\", ind=20)\r\n _check_plot_works(self.ts.plot.kde, bw_method=None, ind=20)\r\n _check_plot_works(self.ts.plot.kde, bw_method=None, ind=np.int_(20))\r\n _check_plot_works(self.ts.plot.kde, bw_method=0.5, ind=sample_points)\r\n _check_plot_works(self.ts.plot.density, bw_method=0.5, ind=sample_points)\r\n _, ax = self.plt.subplots()\r\n ax = self.ts.plot.kde(logy=True, bw_method=0.5, ind=sample_points, ax=ax)\r\n self._check_ax_scales(ax, yaxis=\"log\")\r\n self._check_text_labels(ax.yaxis.get_label(), \"Density\")\r\n\r\n @pytest.mark.slow\r\n @td.skip_if_no_scipy\r\n def test_kde_missing_vals(self):\r\n s = Series(np.random.uniform(size=50))\r\n s[0] = np.nan\r\n axes = _check_plot_works(s.plot.kde)\r\n\r\n # gh-14821: check if the values have any missing values\r\n assert any(~np.isnan(axes.lines[0].get_xdata()))\r\n\r\n @pytest.mark.slow\r\n def test_hist_kwargs(self):\r\n _, ax = self.plt.subplots()\r\n ax = self.ts.plot.hist(bins=5, ax=ax)\r\n assert len(ax.patches) == 5\r\n self._check_text_labels(ax.yaxis.get_label(), \"Frequency\")\r\n tm.close()\r\n\r\n _, ax = self.plt.subplots()\r\n ax = self.ts.plot.hist(orientation=\"horizontal\", ax=ax)\r\n self._check_text_labels(ax.xaxis.get_label(), \"Frequency\")\r\n tm.close()\r\n\r\n _, ax = self.plt.subplots()\r\n ax = self.ts.plot.hist(align=\"left\", stacked=True, ax=ax)\r\n tm.close()\r\n\r\n @pytest.mark.slow\r\n @td.skip_if_no_scipy\r\n def test_hist_kde_color(self):\r\n _, ax = self.plt.subplots()\r\n ax = self.ts.plot.hist(logy=True, bins=10, color=\"b\", ax=ax)\r\n self._check_ax_scales(ax, yaxis=\"log\")\r\n assert len(ax.patches) == 10\r\n self._check_colors(ax.patches, facecolors=[\"b\"] * 10)\r\n\r\n _, ax = self.plt.subplots()\r\n ax = self.ts.plot.kde(logy=True, color=\"r\", ax=ax)\r\n self._check_ax_scales(ax, yaxis=\"log\")\r\n lines = ax.get_lines()\r\n assert len(lines) == 1\r\n self._check_colors(lines, [\"r\"])\r\n\r\n @pytest.mark.slow\r\n def test_boxplot_series(self):\r\n _, ax = self.plt.subplots()\r\n ax = self.ts.plot.box(logy=True, ax=ax)\r\n self._check_ax_scales(ax, yaxis=\"log\")\r\n xlabels = ax.get_xticklabels()\r\n self._check_text_labels(xlabels, [self.ts.name])\r\n ylabels = ax.get_yticklabels()\r\n self._check_text_labels(ylabels, [\"\"] * len(ylabels))\r\n\r\n @pytest.mark.slow\r\n def test_kind_both_ways(self):\r\n s = Series(range(3))\r\n kinds = (\r\n plotting.PlotAccessor._common_kinds + plotting.PlotAccessor._series_kinds\r\n )\r\n for kind in kinds:\r\n _, ax = self.plt.subplots()\r\n s.plot(kind=kind, ax=ax)\r\n self.plt.close()\r\n _, ax = self.plt.subplots()\r\n getattr(s.plot, kind)()\r\n self.plt.close()\r\n\r\n @pytest.mark.slow\r\n def test_invalid_plot_data(self):\r\n s = Series(list(\"abcd\"))\r\n _, ax = self.plt.subplots()\r\n for kind in plotting.PlotAccessor._common_kinds:\r\n\r\n msg = \"no numeric data to plot\"\r\n with pytest.raises(TypeError, match=msg):\r\n s.plot(kind=kind, ax=ax)\r\n\r\n @pytest.mark.slow\r\n def test_valid_object_plot(self):\r\n 
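# --- Illustrative aside, not part of the original test module ---
# A brief sketch of the rule the surrounding tests pin down: an object-dtype
# Series whose values are numbers still plots, while genuinely non-numeric
# data raises TypeError with "no numeric data to plot". Assumes pandas +
# matplotlib ("Agg" backend); names are illustrative only.

import matplotlib

matplotlib.use("Agg")
import matplotlib.pyplot as plt
import pandas as pd

numeric_as_object = pd.Series(range(10), dtype=object)
ax = numeric_as_object.plot()  # fine: the underlying values are ints
plt.close(ax.get_figure())

letters = pd.Series(list("abcd"))
try:
    letters.plot()
except TypeError as err:  # "no numeric data to plot"
    print(err)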
s = Series(range(10), dtype=object)\r\n for kind in plotting.PlotAccessor._common_kinds:\r\n _check_plot_works(s.plot, kind=kind)\r\n\r\n def test_partially_invalid_plot_data(self):\r\n s = Series([\"a\", \"b\", 1.0, 2])\r\n _, ax = self.plt.subplots()\r\n for kind in plotting.PlotAccessor._common_kinds:\r\n\r\n msg = \"no numeric data to plot\"\r\n with pytest.raises(TypeError, match=msg):\r\n s.plot(kind=kind, ax=ax)\r\n\r\n def test_invalid_kind(self):\r\n s = Series([1, 2])\r\n with pytest.raises(ValueError):\r\n s.plot(kind=\"aasdf\")\r\n\r\n @pytest.mark.slow\r\n def test_dup_datetime_index_plot(self):\r\n dr1 = date_range(\"1/1/2009\", periods=4)\r\n dr2 = date_range(\"1/2/2009\", periods=4)\r\n index = dr1.append(dr2)\r\n values = randn(index.size)\r\n s = Series(values, index=index)\r\n _check_plot_works(s.plot)\r\n\r\n def test_errorbar_asymmetrical(self):\r\n # GH9536\r\n s = Series(np.arange(10), name=\"x\")\r\n err = np.random.rand(2, 10)\r\n\r\n ax = s.plot(yerr=err, xerr=err)\r\n\r\n result = np.vstack([i.vertices[:, 1] for i in ax.collections[1].get_paths()])\r\n expected = (err.T * np.array([-1, 1])) + s.to_numpy().reshape(-1, 1)\r\n tm.assert_numpy_array_equal(result, expected)\r\n\r\n msg = (\r\n \"Asymmetrical error bars should be provided \"\r\n f\"with the shape \\\\(2, {len(s)}\\\\)\"\r\n )\r\n with pytest.raises(ValueError, match=msg):\r\n s.plot(yerr=np.random.rand(2, 11))\r\n\r\n tm.close()\r\n\r\n @pytest.mark.slow\r\n def test_errorbar_plot(self):\r\n\r\n s = Series(np.arange(10), name=\"x\")\r\n s_err = np.random.randn(10)\r\n d_err = DataFrame(randn(10, 2), index=s.index, columns=[\"x\", \"y\"])\r\n # test line and bar plots\r\n kinds = [\"line\", \"bar\"]\r\n for kind in kinds:\r\n ax = _check_plot_works(s.plot, yerr=Series(s_err), kind=kind)\r\n self._check_has_errorbars(ax, xerr=0, yerr=1)\r\n ax = _check_plot_works(s.plot, yerr=s_err, kind=kind)\r\n self._check_has_errorbars(ax, xerr=0, yerr=1)\r\n ax = _check_plot_works(s.plot, yerr=s_err.tolist(), kind=kind)\r\n self._check_has_errorbars(ax, xerr=0, yerr=1)\r\n ax = _check_plot_works(s.plot, yerr=d_err, kind=kind)\r\n self._check_has_errorbars(ax, xerr=0, yerr=1)\r\n ax = _check_plot_works(s.plot, xerr=0.2, yerr=0.2, kind=kind)\r\n self._check_has_errorbars(ax, xerr=1, yerr=1)\r\n\r\n ax = _check_plot_works(s.plot, xerr=s_err)\r\n self._check_has_errorbars(ax, xerr=1, yerr=0)\r\n\r\n # test time series plotting\r\n ix = date_range(\"1/1/2000\", \"1/1/2001\", freq=\"M\")\r\n ts = Series(np.arange(12), index=ix, name=\"x\")\r\n ts_err = Series(np.random.randn(12), index=ix)\r\n td_err = DataFrame(randn(12, 2), index=ix, columns=[\"x\", \"y\"])\r\n\r\n ax = _check_plot_works(ts.plot, yerr=ts_err)\r\n self._check_has_errorbars(ax, xerr=0, yerr=1)\r\n ax = _check_plot_works(ts.plot, yerr=td_err)\r\n self._check_has_errorbars(ax, xerr=0, yerr=1)\r\n\r\n # check incorrect lengths and types\r\n with pytest.raises(ValueError):\r\n s.plot(yerr=np.arange(11))\r\n\r\n s_err = [\"zzz\"] * 10\r\n with pytest.raises(TypeError):\r\n s.plot(yerr=s_err)\r\n\r\n def test_table(self):\r\n _check_plot_works(self.series.plot, table=True)\r\n _check_plot_works(self.series.plot, table=self.series)\r\n\r\n @pytest.mark.slow\r\n def test_series_grid_settings(self):\r\n # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792\r\n self._check_grid_settings(\r\n Series([1, 2, 3]),\r\n plotting.PlotAccessor._series_kinds + plotting.PlotAccessor._common_kinds,\r\n )\r\n\r\n @pytest.mark.slow\r\n def 
test_standard_colors(self):\r\n from pandas.plotting._matplotlib.style import _get_standard_colors\r\n\r\n for c in [\"r\", \"red\", \"green\", \"#FF0000\"]:\r\n result = _get_standard_colors(1, color=c)\r\n assert result == [c]\r\n\r\n result = _get_standard_colors(1, color=[c])\r\n assert result == [c]\r\n\r\n result = _get_standard_colors(3, color=c)\r\n assert result == [c] * 3\r\n\r\n result = _get_standard_colors(3, color=[c])\r\n assert result == [c] * 3\r\n\r\n @pytest.mark.slow\r\n def test_standard_colors_all(self):\r\n import matplotlib.colors as colors\r\n\r\n from pandas.plotting._matplotlib.style import _get_standard_colors\r\n\r\n # multiple colors like mediumaquamarine\r\n for c in colors.cnames:\r\n result = _get_standard_colors(num_colors=1, color=c)\r\n assert result == [c]\r\n\r\n result = _get_standard_colors(num_colors=1, color=[c])\r\n assert result == [c]\r\n\r\n result = _get_standard_colors(num_colors=3, color=c)\r\n assert result == [c] * 3\r\n\r\n result = _get_standard_colors(num_colors=3, color=[c])\r\n assert result == [c] * 3\r\n\r\n # single letter colors like k\r\n for c in colors.ColorConverter.colors:\r\n result = _get_standard_colors(num_colors=1, color=c)\r\n assert result == [c]\r\n\r\n result = _get_standard_colors(num_colors=1, color=[c])\r\n assert result == [c]\r\n\r\n result = _get_standard_colors(num_colors=3, color=c)\r\n assert result == [c] * 3\r\n\r\n result = _get_standard_colors(num_colors=3, color=[c])\r\n assert result == [c] * 3\r\n\r\n def test_series_plot_color_kwargs(self):\r\n # GH1890\r\n _, ax = self.plt.subplots()\r\n ax = Series(np.arange(12) + 1).plot(color=\"green\", ax=ax)\r\n self._check_colors(ax.get_lines(), linecolors=[\"green\"])\r\n\r\n def test_time_series_plot_color_kwargs(self):\r\n # #1890\r\n _, ax = self.plt.subplots()\r\n ax = Series(np.arange(12) + 1, index=date_range(\"1/1/2000\", periods=12)).plot(\r\n color=\"green\", ax=ax\r\n )\r\n self._check_colors(ax.get_lines(), linecolors=[\"green\"])\r\n\r\n def test_time_series_plot_color_with_empty_kwargs(self):\r\n import matplotlib as mpl\r\n\r\n def_colors = self._unpack_cycler(mpl.rcParams)\r\n index = date_range(\"1/1/2000\", periods=12)\r\n s = Series(np.arange(1, 13), index=index)\r\n\r\n ncolors = 3\r\n\r\n _, ax = self.plt.subplots()\r\n for i in range(ncolors):\r\n ax = s.plot(ax=ax)\r\n self._check_colors(ax.get_lines(), linecolors=def_colors[:ncolors])\r\n\r\n def test_xticklabels(self):\r\n # GH11529\r\n s = Series(np.arange(10), index=[f\"P{i:02d}\" for i in range(10)])\r\n _, ax = self.plt.subplots()\r\n ax = s.plot(xticks=[0, 3, 5, 9], ax=ax)\r\n exp = [f\"P{i:02d}\" for i in [0, 3, 5, 9]]\r\n self._check_text_labels(ax.get_xticklabels(), exp)\r\n\r\n def test_xtick_barPlot(self):\r\n # GH28172\r\n s = pd.Series(range(10), index=[f\"P{i:02d}\" for i in range(10)])\r\n ax = s.plot.bar(xticks=range(0, 11, 2))\r\n exp = np.array(list(range(0, 11, 2)))\r\n tm.assert_numpy_array_equal(exp, ax.get_xticks())\r\n\r\n def test_custom_business_day_freq(self):\r\n # GH7222\r\n from pandas.tseries.offsets import CustomBusinessDay\r\n\r\n s = Series(\r\n range(100, 121),\r\n index=pd.bdate_range(\r\n start=\"2014-05-01\",\r\n end=\"2014-06-01\",\r\n freq=CustomBusinessDay(holidays=[\"2014-05-26\"]),\r\n ),\r\n )\r\n\r\n _check_plot_works(s.plot)\r\n\r\n @pytest.mark.xfail\r\n def test_plot_accessor_updates_on_inplace(self):\r\n s = Series([1, 2, 3, 4])\r\n _, ax = self.plt.subplots()\r\n ax = s.plot(ax=ax)\r\n before = ax.xaxis.get_ticklocs()\r\n\r\n s.drop([0, 
1], inplace=True)\r\n _, ax = self.plt.subplots()\r\n after = ax.xaxis.get_ticklocs()\r\n tm.assert_numpy_array_equal(before, after)\r\n\r\n @pytest.mark.parametrize(\"kind\", [\"line\", \"area\"])\r\n def test_plot_xlim_for_series(self, kind):\r\n # test if xlim is also correctly plotted in Series for line and area\r\n # GH 27686\r\n s = Series([2, 3])\r\n _, ax = self.plt.subplots()\r\n s.plot(kind=kind, ax=ax)\r\n xlims = ax.get_xlim()\r\n\r\n assert xlims[0] < 0\r\n assert xlims[1] > 1\r\n\r\n def test_plot_no_rows(self):\r\n # GH 27758\r\n df = pd.Series(dtype=int)\r\n assert df.empty\r\n ax = df.plot()\r\n assert len(ax.get_lines()) == 1\r\n line = ax.get_lines()[0]\r\n assert len(line.get_xdata()) == 0\r\n assert len(line.get_ydata()) == 0\r\n\r\n def test_plot_no_numeric_data(self):\r\n df = pd.Series([\"a\", \"b\", \"c\"])\r\n with pytest.raises(TypeError):\r\n df.plot()\r\n\r\n def test_style_single_ok(self):\r\n s = pd.Series([1, 2])\r\n ax = s.plot(style=\"s\", color=\"C3\")\r\n assert ax.lines[0].get_color() == [\"C3\"]\r\n\r\n @pytest.mark.parametrize(\r\n \"index_name, old_label, new_label\",\r\n [(None, \"\", \"new\"), (\"old\", \"old\", \"new\"), (None, \"\", \"\")],\r\n )\r\n @pytest.mark.parametrize(\"kind\", [\"line\", \"area\", \"bar\"])\r\n def test_xlabel_ylabel_series(self, kind, index_name, old_label, new_label):\r\n # GH 9093\r\n ser = pd.Series([1, 2, 3, 4])\r\n ser.index.name = index_name\r\n\r\n # default is the ylabel is not shown and xlabel is index name\r\n ax = ser.plot(kind=kind)\r\n assert ax.get_ylabel() == \"\"\r\n assert ax.get_xlabel() == old_label\r\n\r\n # old xlabel will be overriden and assigned ylabel will be used as ylabel\r\n ax = ser.plot(kind=kind, ylabel=new_label, xlabel=new_label)\r\n assert ax.get_ylabel() == new_label\r\n assert ax.get_xlabel() == new_label\r\n", "import pytest\r\n\r\nimport numpy as np\r\nfrom functools import partial\r\nimport itertools\r\n\r\nfrom sklearn.base import clone\r\n\r\nfrom sklearn.exceptions import ConvergenceWarning\r\n\r\nfrom sklearn.utils import check_array\r\n\r\nfrom sklearn.utils._testing import assert_array_almost_equal\r\nfrom sklearn.utils._testing import assert_array_equal\r\nfrom sklearn.utils._testing import ignore_warnings\r\nfrom sklearn.utils._testing import TempMemmap\r\n\r\nfrom sklearn.decomposition import DictionaryLearning\r\nfrom sklearn.decomposition import MiniBatchDictionaryLearning\r\nfrom sklearn.decomposition import SparseCoder\r\nfrom sklearn.decomposition import dict_learning\r\nfrom sklearn.decomposition import dict_learning_online\r\nfrom sklearn.decomposition import sparse_encode\r\nfrom sklearn.utils.estimator_checks import check_transformer_data_not_an_array\r\nfrom sklearn.utils.estimator_checks import check_transformer_general\r\nfrom sklearn.utils.estimator_checks import check_transformers_unfitted\r\n\r\n\r\nrng_global = np.random.RandomState(0)\r\nn_samples, n_features = 10, 8\r\nX = rng_global.randn(n_samples, n_features)\r\n\r\n\r\ndef test_sparse_encode_shapes_omp():\r\n rng = np.random.RandomState(0)\r\n algorithms = ['omp', 'lasso_lars', 'lasso_cd', 'lars', 'threshold']\r\n for n_components, n_samples in itertools.product([1, 5], [1, 9]):\r\n X_ = rng.randn(n_samples, n_features)\r\n dictionary = rng.randn(n_components, n_features)\r\n for algorithm, n_jobs in itertools.product(algorithms, [1, 3]):\r\n code = sparse_encode(X_, dictionary, algorithm=algorithm,\r\n n_jobs=n_jobs)\r\n assert code.shape == (n_samples, n_components)\r\n\r\n\r\ndef 
test_dict_learning_shapes():\r\n n_components = 5\r\n dico = DictionaryLearning(n_components, random_state=0).fit(X)\r\n assert dico.components_.shape == (n_components, n_features)\r\n\r\n n_components = 1\r\n dico = DictionaryLearning(n_components, random_state=0).fit(X)\r\n assert dico.components_.shape == (n_components, n_features)\r\n assert dico.transform(X).shape == (X.shape[0], n_components)\r\n\r\n\r\ndef test_dict_learning_overcomplete():\r\n n_components = 12\r\n dico = DictionaryLearning(n_components, random_state=0).fit(X)\r\n assert dico.components_.shape == (n_components, n_features)\r\n\r\n\r\ndef test_max_iter():\r\n def ricker_function(resolution, center, width):\r\n \"\"\"Discrete sub-sampled Ricker (Mexican hat) wavelet\"\"\"\r\n x = np.linspace(0, resolution - 1, resolution)\r\n x = ((2 / (np.sqrt(3 * width) * np.pi ** .25))\r\n * (1 - (x - center) ** 2 / width ** 2)\r\n * np.exp(-(x - center) ** 2 / (2 * width ** 2)))\r\n return x\r\n\r\n def ricker_matrix(width, resolution, n_components):\r\n \"\"\"Dictionary of Ricker (Mexican hat) wavelets\"\"\"\r\n centers = np.linspace(0, resolution - 1, n_components)\r\n D = np.empty((n_components, resolution))\r\n for i, center in enumerate(centers):\r\n D[i] = ricker_function(resolution, center, width)\r\n D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]\r\n return D\r\n\r\n transform_algorithm = 'lasso_cd'\r\n resolution = 1024\r\n subsampling = 3 # subsampling factor\r\n n_components = resolution // subsampling\r\n\r\n # Compute a wavelet dictionary\r\n D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,\r\n n_components=n_components // 5)\r\n for w in (10, 50, 100, 500, 1000))]\r\n\r\n X = np.linspace(0, resolution - 1, resolution)\r\n first_quarter = X < resolution / 4\r\n X[first_quarter] = 3.\r\n X[np.logical_not(first_quarter)] = -1.\r\n X = X.reshape(1, -1)\r\n\r\n # check that the underlying model fails to converge\r\n with pytest.warns(ConvergenceWarning):\r\n model = SparseCoder(D_multi, transform_algorithm=transform_algorithm,\r\n transform_max_iter=1)\r\n model.fit_transform(X)\r\n\r\n # check that the underlying model converges w/o warnings\r\n with pytest.warns(None) as record:\r\n model = SparseCoder(D_multi, transform_algorithm=transform_algorithm,\r\n transform_max_iter=2000)\r\n model.fit_transform(X)\r\n assert not record.list\r\n\r\n\r\ndef test_dict_learning_lars_positive_parameter():\r\n n_components = 5\r\n alpha = 1\r\n err_msg = \"Positive constraint not supported for 'lars' coding method.\"\r\n with pytest.raises(ValueError, match=err_msg):\r\n dict_learning(X, n_components, alpha=alpha, positive_code=True)\r\n\r\n\r\[email protected](\"transform_algorithm\", [\r\n \"lasso_lars\",\r\n \"lasso_cd\",\r\n \"threshold\",\r\n])\r\[email protected](\"positive_code\", [False, True])\r\[email protected](\"positive_dict\", [False, True])\r\ndef test_dict_learning_positivity(transform_algorithm,\r\n positive_code,\r\n positive_dict):\r\n n_components = 5\r\n dico = DictionaryLearning(\r\n n_components, transform_algorithm=transform_algorithm, random_state=0,\r\n positive_code=positive_code, positive_dict=positive_dict,\r\n fit_algorithm=\"cd\").fit(X)\r\n\r\n code = dico.transform(X)\r\n if positive_dict:\r\n assert (dico.components_ >= 0).all()\r\n else:\r\n assert (dico.components_ < 0).any()\r\n if positive_code:\r\n assert (code >= 0).all()\r\n else:\r\n assert (code < 0).any()\r\n\r\n\r\[email protected](\"positive_dict\", [False, True])\r\ndef 
test_dict_learning_lars_dict_positivity(positive_dict):\r\n n_components = 5\r\n dico = DictionaryLearning(\r\n n_components, transform_algorithm=\"lars\", random_state=0,\r\n positive_dict=positive_dict, fit_algorithm=\"cd\").fit(X)\r\n\r\n if positive_dict:\r\n assert (dico.components_ >= 0).all()\r\n else:\r\n assert (dico.components_ < 0).any()\r\n\r\n\r\ndef test_dict_learning_lars_code_positivity():\r\n n_components = 5\r\n dico = DictionaryLearning(\r\n n_components, transform_algorithm=\"lars\", random_state=0,\r\n positive_code=True, fit_algorithm=\"cd\").fit(X)\r\n\r\n err_msg = \"Positive constraint not supported for '{}' coding method.\"\r\n err_msg = err_msg.format(\"lars\")\r\n with pytest.raises(ValueError, match=err_msg):\r\n dico.transform(X)\r\n\r\n\r\ndef test_dict_learning_reconstruction():\r\n n_components = 12\r\n dico = DictionaryLearning(n_components, transform_algorithm='omp',\r\n transform_alpha=0.001, random_state=0)\r\n code = dico.fit(X).transform(X)\r\n assert_array_almost_equal(np.dot(code, dico.components_), X)\r\n\r\n dico.set_params(transform_algorithm='lasso_lars')\r\n code = dico.transform(X)\r\n assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)\r\n\r\n # used to test lars here too, but there's no guarantee the number of\r\n # nonzero atoms is right.\r\n\r\n\r\ndef test_dict_learning_reconstruction_parallel():\r\n # regression test that parallel reconstruction works with n_jobs>1\r\n n_components = 12\r\n dico = DictionaryLearning(n_components, transform_algorithm='omp',\r\n transform_alpha=0.001, random_state=0, n_jobs=4)\r\n code = dico.fit(X).transform(X)\r\n assert_array_almost_equal(np.dot(code, dico.components_), X)\r\n\r\n dico.set_params(transform_algorithm='lasso_lars')\r\n code = dico.transform(X)\r\n assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)\r\n\r\n\r\ndef test_dict_learning_lassocd_readonly_data():\r\n n_components = 12\r\n with TempMemmap(X) as X_read_only:\r\n dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',\r\n transform_alpha=0.001, random_state=0,\r\n n_jobs=4)\r\n with ignore_warnings(category=ConvergenceWarning):\r\n code = dico.fit(X_read_only).transform(X_read_only)\r\n assert_array_almost_equal(np.dot(code, dico.components_), X_read_only,\r\n decimal=2)\r\n\r\n\r\ndef test_dict_learning_nonzero_coefs():\r\n n_components = 4\r\n dico = DictionaryLearning(n_components, transform_algorithm='lars',\r\n transform_n_nonzero_coefs=3, random_state=0)\r\n code = dico.fit(X).transform(X[np.newaxis, 1])\r\n assert len(np.flatnonzero(code)) == 3\r\n\r\n dico.set_params(transform_algorithm='omp')\r\n code = dico.transform(X[np.newaxis, 1])\r\n assert len(np.flatnonzero(code)) == 3\r\n\r\n\r\ndef test_dict_learning_unknown_fit_algorithm():\r\n n_components = 5\r\n dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')\r\n with pytest.raises(ValueError):\r\n dico.fit(X)\r\n\r\n\r\ndef test_dict_learning_split():\r\n n_components = 5\r\n dico = DictionaryLearning(n_components, transform_algorithm='threshold',\r\n random_state=0)\r\n code = dico.fit(X).transform(X)\r\n dico.split_sign = True\r\n split_code = dico.transform(X)\r\n\r\n assert_array_almost_equal(split_code[:, :n_components] -\r\n split_code[:, n_components:], code)\r\n\r\n\r\ndef test_dict_learning_online_shapes():\r\n rng = np.random.RandomState(0)\r\n n_components = 8\r\n code, dictionary = dict_learning_online(X, n_components=n_components,\r\n alpha=1, random_state=rng)\r\n assert code.shape == 
(n_samples, n_components)\r\n assert dictionary.shape == (n_components, n_features)\r\n assert np.dot(code, dictionary).shape == X.shape\r\n\r\n\r\ndef test_dict_learning_online_lars_positive_parameter():\r\n alpha = 1\r\n err_msg = \"Positive constraint not supported for 'lars' coding method.\"\r\n with pytest.raises(ValueError, match=err_msg):\r\n dict_learning_online(X, alpha=alpha, positive_code=True)\r\n\r\n\r\[email protected](\"transform_algorithm\", [\r\n \"lasso_lars\",\r\n \"lasso_cd\",\r\n \"threshold\",\r\n])\r\[email protected](\"positive_code\", [False, True])\r\[email protected](\"positive_dict\", [False, True])\r\ndef test_minibatch_dictionary_learning_positivity(transform_algorithm,\r\n positive_code,\r\n positive_dict):\r\n n_components = 8\r\n dico = MiniBatchDictionaryLearning(\r\n n_components, transform_algorithm=transform_algorithm, random_state=0,\r\n positive_code=positive_code, positive_dict=positive_dict,\r\n fit_algorithm='cd').fit(X)\r\n\r\n code = dico.transform(X)\r\n if positive_dict:\r\n assert (dico.components_ >= 0).all()\r\n else:\r\n assert (dico.components_ < 0).any()\r\n if positive_code:\r\n assert (code >= 0).all()\r\n else:\r\n assert (code < 0).any()\r\n\r\n\r\[email protected](\"positive_dict\", [False, True])\r\ndef test_minibatch_dictionary_learning_lars(positive_dict):\r\n n_components = 8\r\n\r\n dico = MiniBatchDictionaryLearning(\r\n n_components, transform_algorithm=\"lars\", random_state=0,\r\n positive_dict=positive_dict, fit_algorithm='cd').fit(X)\r\n\r\n if positive_dict:\r\n assert (dico.components_ >= 0).all()\r\n else:\r\n assert (dico.components_ < 0).any()\r\n\r\n\r\[email protected](\"positive_code\", [False, True])\r\[email protected](\"positive_dict\", [False, True])\r\ndef test_dict_learning_online_positivity(positive_code,\r\n positive_dict):\r\n rng = np.random.RandomState(0)\r\n n_components = 8\r\n\r\n code, dictionary = dict_learning_online(X, n_components=n_components,\r\n method=\"cd\",\r\n alpha=1, random_state=rng,\r\n positive_dict=positive_dict,\r\n positive_code=positive_code)\r\n if positive_dict:\r\n assert (dictionary >= 0).all()\r\n else:\r\n assert (dictionary < 0).any()\r\n if positive_code:\r\n assert (code >= 0).all()\r\n else:\r\n assert (code < 0).any()\r\n\r\n\r\ndef test_dict_learning_online_verbosity():\r\n n_components = 5\r\n # test verbosity\r\n from io import StringIO\r\n import sys\r\n\r\n old_stdout = sys.stdout\r\n try:\r\n sys.stdout = StringIO()\r\n dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,\r\n random_state=0)\r\n dico.fit(X)\r\n dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,\r\n random_state=0)\r\n dico.fit(X)\r\n dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,\r\n random_state=0)\r\n dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,\r\n random_state=0)\r\n finally:\r\n sys.stdout = old_stdout\r\n\r\n assert dico.components_.shape == (n_components, n_features)\r\n\r\n\r\ndef test_dict_learning_online_estimator_shapes():\r\n n_components = 5\r\n dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)\r\n dico.fit(X)\r\n assert dico.components_.shape == (n_components, n_features)\r\n\r\n\r\ndef test_dict_learning_online_overcomplete():\r\n n_components = 12\r\n dico = MiniBatchDictionaryLearning(n_components, n_iter=20,\r\n random_state=0).fit(X)\r\n assert dico.components_.shape == (n_components, n_features)\r\n\r\n\r\ndef test_dict_learning_online_initialization():\r\n 
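    # ------------------------------------------------------------------
    # Editor's illustrative sketch (not part of the original scikit-learn
    # test): the online variant exercised throughout this file learns a
    # dictionary from mini-batches via ``dict_learning_online`` (already
    # imported at the top of this module) and returns ``(code, dictionary)``.
    # A minimal, hedged usage example on the module-level toy data ``X``:
    demo_code, demo_dict = dict_learning_online(
        X, n_components=4, alpha=1, random_state=0)
    assert demo_code.shape == (X.shape[0], 4)   # one code row per sample
    assert demo_dict.shape == (4, X.shape[1])   # one atom per component
    # ------------------------------------------------------------------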
n_components = 12\r\n rng = np.random.RandomState(0)\r\n V = rng.randn(n_components, n_features)\r\n dico = MiniBatchDictionaryLearning(n_components, n_iter=0,\r\n dict_init=V, random_state=0).fit(X)\r\n assert_array_equal(dico.components_, V)\r\n\r\n\r\ndef test_dict_learning_online_readonly_initialization():\r\n n_components = 12\r\n rng = np.random.RandomState(0)\r\n V = rng.randn(n_components, n_features)\r\n V.setflags(write=False)\r\n MiniBatchDictionaryLearning(n_components, n_iter=1, dict_init=V,\r\n random_state=0, shuffle=False).fit(X)\r\n\r\n\r\ndef test_dict_learning_online_partial_fit():\r\n n_components = 12\r\n rng = np.random.RandomState(0)\r\n V = rng.randn(n_components, n_features) # random init\r\n V /= np.sum(V ** 2, axis=1)[:, np.newaxis]\r\n dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),\r\n batch_size=1,\r\n alpha=1, shuffle=False, dict_init=V,\r\n random_state=0).fit(X)\r\n dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,\r\n n_iter=1, dict_init=V,\r\n random_state=0)\r\n for i in range(10):\r\n for sample in X:\r\n dict2.partial_fit(sample[np.newaxis, :])\r\n\r\n assert not np.all(sparse_encode(X, dict1.components_, alpha=1) == 0)\r\n assert_array_almost_equal(dict1.components_, dict2.components_,\r\n decimal=2)\r\n\r\n\r\ndef test_dict_learning_iter_offset():\r\n n_components = 12\r\n rng = np.random.RandomState(0)\r\n V = rng.randn(n_components, n_features)\r\n dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10,\r\n dict_init=V, random_state=0,\r\n shuffle=False)\r\n dict2 = MiniBatchDictionaryLearning(n_components, n_iter=10,\r\n dict_init=V, random_state=0,\r\n shuffle=False)\r\n dict1.fit(X)\r\n for sample in X:\r\n dict2.partial_fit(sample[np.newaxis, :])\r\n\r\n assert dict1.iter_offset_ == dict2.iter_offset_\r\n\r\n\r\ndef test_sparse_encode_shapes():\r\n n_components = 12\r\n rng = np.random.RandomState(0)\r\n V = rng.randn(n_components, n_features) # random init\r\n V /= np.sum(V ** 2, axis=1)[:, np.newaxis]\r\n for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):\r\n code = sparse_encode(X, V, algorithm=algo)\r\n assert code.shape == (n_samples, n_components)\r\n\r\n\r\[email protected](\"algo\", [\r\n 'lasso_lars',\r\n 'lasso_cd',\r\n 'threshold'\r\n])\r\[email protected](\"positive\", [False, True])\r\ndef test_sparse_encode_positivity(algo, positive):\r\n n_components = 12\r\n rng = np.random.RandomState(0)\r\n V = rng.randn(n_components, n_features) # random init\r\n V /= np.sum(V ** 2, axis=1)[:, np.newaxis]\r\n code = sparse_encode(X, V, algorithm=algo, positive=positive)\r\n if positive:\r\n assert (code >= 0).all()\r\n else:\r\n assert (code < 0).any()\r\n\r\n\r\[email protected](\"algo\", ['lars', 'omp'])\r\ndef test_sparse_encode_unavailable_positivity(algo):\r\n n_components = 12\r\n rng = np.random.RandomState(0)\r\n V = rng.randn(n_components, n_features) # random init\r\n V /= np.sum(V ** 2, axis=1)[:, np.newaxis]\r\n err_msg = \"Positive constraint not supported for '{}' coding method.\"\r\n err_msg = err_msg.format(algo)\r\n with pytest.raises(ValueError, match=err_msg):\r\n sparse_encode(X, V, algorithm=algo, positive=True)\r\n\r\n\r\ndef test_sparse_encode_input():\r\n n_components = 100\r\n rng = np.random.RandomState(0)\r\n V = rng.randn(n_components, n_features) # random init\r\n V /= np.sum(V ** 2, axis=1)[:, np.newaxis]\r\n Xf = check_array(X, order='F')\r\n for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):\r\n a = sparse_encode(X, V, algorithm=algo)\r\n b = 
sparse_encode(Xf, V, algorithm=algo)\r\n assert_array_almost_equal(a, b)\r\n\r\n\r\ndef test_sparse_encode_error():\r\n n_components = 12\r\n rng = np.random.RandomState(0)\r\n V = rng.randn(n_components, n_features) # random init\r\n V /= np.sum(V ** 2, axis=1)[:, np.newaxis]\r\n code = sparse_encode(X, V, alpha=0.001)\r\n assert not np.all(code == 0)\r\n assert np.sqrt(np.sum((np.dot(code, V) - X) ** 2)) < 0.1\r\n\r\n\r\ndef test_sparse_encode_error_default_sparsity():\r\n rng = np.random.RandomState(0)\r\n X = rng.randn(100, 64)\r\n D = rng.randn(2, 64)\r\n code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',\r\n n_nonzero_coefs=None)\r\n assert code.shape == (100, 2)\r\n\r\n\r\ndef test_unknown_method():\r\n n_components = 12\r\n rng = np.random.RandomState(0)\r\n V = rng.randn(n_components, n_features) # random init\r\n with pytest.raises(ValueError):\r\n sparse_encode(X, V, algorithm=\"<unknown>\")\r\n\r\n\r\ndef test_sparse_coder_estimator():\r\n n_components = 12\r\n rng = np.random.RandomState(0)\r\n V = rng.randn(n_components, n_features) # random init\r\n V /= np.sum(V ** 2, axis=1)[:, np.newaxis]\r\n coder = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',\r\n transform_alpha=0.001).transform(X)\r\n assert not np.all(coder == 0)\r\n assert np.sqrt(np.sum((np.dot(coder, V) - X) ** 2)) < 0.1\r\n\r\n\r\ndef test_sparse_coder_estimator_clone():\r\n n_components = 12\r\n rng = np.random.RandomState(0)\r\n V = rng.randn(n_components, n_features) # random init\r\n V /= np.sum(V ** 2, axis=1)[:, np.newaxis]\r\n coder = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',\r\n transform_alpha=0.001)\r\n cloned = clone(coder)\r\n assert id(cloned) != id(coder)\r\n np.testing.assert_allclose(cloned.dictionary, coder.dictionary)\r\n assert id(cloned.dictionary) != id(coder.dictionary)\r\n assert cloned.n_components_ == coder.n_components_\r\n assert cloned.n_features_in_ == coder.n_features_in_\r\n data = np.random.rand(n_samples, n_features).astype(np.float32)\r\n np.testing.assert_allclose(cloned.transform(data),\r\n coder.transform(data))\r\n\r\n\r\ndef test_sparse_coder_parallel_mmap():\r\n # Non-regression test for:\r\n # https://github.com/scikit-learn/scikit-learn/issues/5956\r\n # Test that SparseCoder does not error by passing reading only\r\n # arrays to child processes\r\n\r\n rng = np.random.RandomState(777)\r\n n_components, n_features = 40, 64\r\n init_dict = rng.rand(n_components, n_features)\r\n # Ensure that `data` is >2M. Joblib memory maps arrays\r\n # if they are larger than 1MB. 
The 4 accounts for float32\r\n # data type\r\n n_samples = int(2e6) // (4 * n_features)\r\n data = np.random.rand(n_samples, n_features).astype(np.float32)\r\n\r\n sc = SparseCoder(init_dict, transform_algorithm='omp', n_jobs=2)\r\n sc.fit_transform(data)\r\n\r\n\r\ndef test_sparse_coder_common_transformer():\r\n rng = np.random.RandomState(777)\r\n n_components, n_features = 40, 3\r\n init_dict = rng.rand(n_components, n_features)\r\n\r\n sc = SparseCoder(init_dict)\r\n\r\n check_transformer_data_not_an_array(sc.__class__.__name__, sc)\r\n check_transformer_general(sc.__class__.__name__, sc)\r\n check_transformer_general_memmap = partial(\r\n check_transformer_general, readonly_memmap=True\r\n )\r\n check_transformer_general_memmap(sc.__class__.__name__, sc)\r\n check_transformers_unfitted(sc.__class__.__name__, sc)\r\n\r\n\r\n# TODO: remove in 1.1\r\ndef test_sparse_coder_deprecation():\r\n # check that we raise a deprecation warning when accessing `components_`\r\n rng = np.random.RandomState(777)\r\n n_components, n_features = 40, 64\r\n init_dict = rng.rand(n_components, n_features)\r\n sc = SparseCoder(init_dict)\r\n\r\n with pytest.warns(FutureWarning, match=\"'components_' is deprecated\"):\r\n sc.components_\r\n\r\n\r\ndef test_sparse_coder_n_features_in():\r\n d = np.array([[1, 2, 3], [1, 2, 3]])\r\n sc = SparseCoder(d)\r\n assert sc.n_features_in_ == d.shape[1]\r\n", "\"\"\"Modified Olivetti faces dataset.\r\n\r\nThe original database was available from (now defunct)\r\n\r\n https://www.cl.cam.ac.uk/research/dtg/attarchive/facedatabase.html\r\n\r\nThe version retrieved here comes in MATLAB format from the personal\r\nweb page of Sam Roweis:\r\n\r\n https://cs.nyu.edu/~roweis/\r\n\"\"\"\r\n\r\n# Copyright (c) 2011 David Warde-Farley <wardefar at iro dot umontreal dot ca>\r\n# License: BSD 3 clause\r\n\r\nfrom os.path import dirname, exists, join\r\nfrom os import makedirs, remove\r\n\r\nimport numpy as np\r\nfrom scipy.io.matlab import loadmat\r\nimport joblib\r\n\r\nfrom . import get_data_home\r\nfrom ._base import _fetch_remote\r\nfrom ._base import RemoteFileMetadata\r\nfrom ._base import _pkl_filepath\r\nfrom ..utils import check_random_state, Bunch\r\nfrom ..utils.validation import _deprecate_positional_args\r\n\r\n# The original data can be found at:\r\n# https://cs.nyu.edu/~roweis/data/olivettifaces.mat\r\nFACES = RemoteFileMetadata(\r\n filename='olivettifaces.mat',\r\n url='https://ndownloader.figshare.com/files/5976027',\r\n checksum=('b612fb967f2dc77c9c62d3e1266e0c73'\r\n 'd5fca46a4b8906c18e454d41af987794'))\r\n\r\n\r\n@_deprecate_positional_args\r\ndef fetch_olivetti_faces(*, data_home=None, shuffle=False, random_state=0,\r\n download_if_missing=True, return_X_y=False):\r\n \"\"\"Load the Olivetti faces data-set from AT&T (classification).\r\n\r\n Download it if necessary.\r\n\r\n ================= =====================\r\n Classes 40\r\n Samples total 400\r\n Dimensionality 4096\r\n Features real, between 0 and 1\r\n ================= =====================\r\n\r\n Read more in the :ref:`User Guide <olivetti_faces_dataset>`.\r\n\r\n Parameters\r\n ----------\r\n data_home : str, default=None\r\n Specify another download and cache folder for the datasets. 
By default\r\n all scikit-learn data is stored in '~/scikit_learn_data' subfolders.\r\n\r\n shuffle : bool, default=False\r\n If True the order of the dataset is shuffled to avoid having\r\n images of the same person grouped.\r\n\r\n random_state : int, RandomState instance or None, default=0\r\n Determines random number generation for dataset shuffling. Pass an int\r\n for reproducible output across multiple function calls.\r\n See :term:`Glossary <random_state>`.\r\n\r\n download_if_missing : bool, default=True\r\n If False, raise a IOError if the data is not locally available\r\n instead of trying to download the data from the source site.\r\n\r\n return_X_y : bool, default=False\r\n If True, returns `(data, target)` instead of a `Bunch` object. See\r\n below for more information about the `data` and `target` object.\r\n\r\n .. versionadded:: 0.22\r\n\r\n Returns\r\n -------\r\n data : :class:`~sklearn.utils.Bunch`\r\n Dictionary-like object, with the following attributes.\r\n\r\n data: ndarray, shape (400, 4096)\r\n Each row corresponds to a ravelled\r\n face image of original size 64 x 64 pixels.\r\n images : ndarray, shape (400, 64, 64)\r\n Each row is a face image\r\n corresponding to one of the 40 subjects of the dataset.\r\n target : ndarray, shape (400,)\r\n Labels associated to each face image.\r\n Those labels are ranging from 0-39 and correspond to the\r\n Subject IDs.\r\n DESCR : str\r\n Description of the modified Olivetti Faces Dataset.\r\n\r\n (data, target) : tuple if `return_X_y=True`\r\n .. versionadded:: 0.22\r\n \"\"\"\r\n data_home = get_data_home(data_home=data_home)\r\n if not exists(data_home):\r\n makedirs(data_home)\r\n filepath = _pkl_filepath(data_home, 'olivetti.pkz')\r\n if not exists(filepath):\r\n if not download_if_missing:\r\n raise IOError(\"Data not found and `download_if_missing` is False\")\r\n\r\n print('downloading Olivetti faces from %s to %s'\r\n % (FACES.url, data_home))\r\n mat_path = _fetch_remote(FACES, dirname=data_home)\r\n mfile = loadmat(file_name=mat_path)\r\n # delete raw .mat data\r\n remove(mat_path)\r\n\r\n faces = mfile['faces'].T.copy()\r\n joblib.dump(faces, filepath, compress=6)\r\n del mfile\r\n else:\r\n faces = joblib.load(filepath)\r\n\r\n # We want floating point data, but float32 is enough (there is only\r\n # one byte of precision in the original uint8s anyway)\r\n faces = np.float32(faces)\r\n faces = faces - faces.min()\r\n faces /= faces.max()\r\n faces = faces.reshape((400, 64, 64)).transpose(0, 2, 1)\r\n # 10 images per class, 400 images total, each class is contiguous.\r\n target = np.array([i // 10 for i in range(400)])\r\n if shuffle:\r\n random_state = check_random_state(random_state)\r\n order = random_state.permutation(len(faces))\r\n faces = faces[order]\r\n target = target[order]\r\n faces_vectorized = faces.reshape(len(faces), -1)\r\n\r\n module_path = dirname(__file__)\r\n with open(join(module_path, 'descr', 'olivetti_faces.rst')) as rst_file:\r\n fdescr = rst_file.read()\r\n\r\n if return_X_y:\r\n return faces_vectorized, target\r\n\r\n return Bunch(data=faces_vectorized,\r\n images=faces,\r\n target=target,\r\n DESCR=fdescr)\r\n", "\"\"\" test fancy indexing & misc \"\"\"\r\n\r\nfrom datetime import datetime\r\nimport re\r\nimport weakref\r\n\r\nimport numpy as np\r\nimport pytest\r\n\r\nfrom pandas.core.dtypes.common import is_float_dtype, is_integer_dtype\r\n\r\nimport pandas as pd\r\nfrom pandas import DataFrame, Index, NaT, Series\r\nimport pandas._testing as tm\r\nfrom pandas.core.indexing import 
_maybe_numeric_slice, _non_reducing_slice\r\nfrom pandas.tests.indexing.common import _mklbl\r\n\r\n# ------------------------------------------------------------------------\r\n# Indexing test cases\r\n\r\n\r\nclass TestFancy:\r\n \"\"\" pure get/set item & fancy indexing \"\"\"\r\n\r\n def test_setitem_ndarray_1d(self):\r\n # GH5508\r\n\r\n # len of indexer vs length of the 1d ndarray\r\n df = DataFrame(index=Index(np.arange(1, 11)))\r\n df[\"foo\"] = np.zeros(10, dtype=np.float64)\r\n df[\"bar\"] = np.zeros(10, dtype=complex)\r\n\r\n # invalid\r\n with pytest.raises(ValueError):\r\n df.loc[df.index[2:5], \"bar\"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])\r\n\r\n # valid\r\n df.loc[df.index[2:6], \"bar\"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])\r\n\r\n result = df.loc[df.index[2:6], \"bar\"]\r\n expected = Series(\r\n [2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6], name=\"bar\"\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n # dtype getting changed?\r\n df = DataFrame(index=Index(np.arange(1, 11)))\r\n df[\"foo\"] = np.zeros(10, dtype=np.float64)\r\n df[\"bar\"] = np.zeros(10, dtype=complex)\r\n\r\n with pytest.raises(ValueError):\r\n df[2:5] = np.arange(1, 4) * 1j\r\n\r\n @pytest.mark.parametrize(\r\n \"obj\",\r\n [\r\n lambda i: Series(np.arange(len(i)), index=i),\r\n lambda i: DataFrame(np.random.randn(len(i), len(i)), index=i, columns=i),\r\n ],\r\n ids=[\"Series\", \"DataFrame\"],\r\n )\r\n @pytest.mark.parametrize(\r\n \"idxr, idxr_id\",\r\n [\r\n (lambda x: x, \"getitem\"),\r\n (lambda x: x.loc, \"loc\"),\r\n (lambda x: x.iloc, \"iloc\"),\r\n ],\r\n )\r\n def test_getitem_ndarray_3d(self, index, obj, idxr, idxr_id):\r\n # GH 25567\r\n obj = obj(index)\r\n idxr = idxr(obj)\r\n nd3 = np.random.randint(5, size=(2, 2, 2))\r\n\r\n msg = \"|\".join(\r\n [\r\n r\"Buffer has wrong number of dimensions \\(expected 1, got 3\\)\",\r\n \"Cannot index with multidimensional key\",\r\n r\"Wrong number of dimensions. values.ndim != ndim \\[3 != 1\\]\",\r\n \"Index data must be 1-dimensional\",\r\n \"positional indexers are out-of-bounds\",\r\n \"Indexing a MultiIndex with a multidimensional key is not implemented\",\r\n ]\r\n )\r\n\r\n potential_errors = (IndexError, ValueError, NotImplementedError)\r\n with pytest.raises(potential_errors, match=msg):\r\n with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):\r\n idxr[nd3]\r\n\r\n @pytest.mark.parametrize(\r\n \"obj\",\r\n [\r\n lambda i: Series(np.arange(len(i)), index=i),\r\n lambda i: DataFrame(np.random.randn(len(i), len(i)), index=i, columns=i),\r\n ],\r\n ids=[\"Series\", \"DataFrame\"],\r\n )\r\n @pytest.mark.parametrize(\r\n \"idxr, idxr_id\",\r\n [\r\n (lambda x: x, \"setitem\"),\r\n (lambda x: x.loc, \"loc\"),\r\n (lambda x: x.iloc, \"iloc\"),\r\n ],\r\n )\r\n def test_setitem_ndarray_3d(self, index, obj, idxr, idxr_id):\r\n # GH 25567\r\n obj = obj(index)\r\n idxr = idxr(obj)\r\n nd3 = np.random.randint(5, size=(2, 2, 2))\r\n\r\n if (len(index) == 0) and (idxr_id == \"iloc\") and isinstance(obj, pd.DataFrame):\r\n # gh-32896\r\n pytest.skip(\"This is currently failing. 
There's an xfailed test below.\")\r\n\r\n if idxr_id == \"iloc\":\r\n err = ValueError\r\n msg = f\"Cannot set values with ndim > {obj.ndim}\"\r\n elif (\r\n isinstance(index, pd.IntervalIndex)\r\n and idxr_id == \"setitem\"\r\n and obj.ndim == 1\r\n ):\r\n err = AttributeError\r\n msg = (\r\n \"'pandas._libs.interval.IntervalTree' object has no attribute 'get_loc'\"\r\n )\r\n else:\r\n err = ValueError\r\n msg = r\"Buffer has wrong number of dimensions \\(expected 1, got 3\\)|\"\r\n\r\n with pytest.raises(err, match=msg):\r\n idxr[nd3] = 0\r\n\r\n @pytest.mark.xfail(reason=\"gh-32896\")\r\n def test_setitem_ndarray_3d_does_not_fail_for_iloc_empty_dataframe(self):\r\n # when fixing this, please remove the pytest.skip in test_setitem_ndarray_3d\r\n i = Index([])\r\n obj = DataFrame(np.random.randn(len(i), len(i)), index=i, columns=i)\r\n nd3 = np.random.randint(5, size=(2, 2, 2))\r\n\r\n msg = f\"Cannot set values with ndim > {obj.ndim}\"\r\n with pytest.raises(ValueError, match=msg):\r\n obj.iloc[nd3] = 0\r\n\r\n def test_inf_upcast(self):\r\n # GH 16957\r\n # We should be able to use np.inf as a key\r\n # np.inf should cause an index to convert to float\r\n\r\n # Test with np.inf in rows\r\n df = DataFrame(columns=[0])\r\n df.loc[1] = 1\r\n df.loc[2] = 2\r\n df.loc[np.inf] = 3\r\n\r\n # make sure we can look up the value\r\n assert df.loc[np.inf, 0] == 3\r\n\r\n result = df.index\r\n expected = pd.Float64Index([1, 2, np.inf])\r\n tm.assert_index_equal(result, expected)\r\n\r\n # Test with np.inf in columns\r\n df = DataFrame()\r\n df.loc[0, 0] = 1\r\n df.loc[1, 1] = 2\r\n df.loc[0, np.inf] = 3\r\n\r\n result = df.columns\r\n expected = pd.Float64Index([0, 1, np.inf])\r\n tm.assert_index_equal(result, expected)\r\n\r\n def test_setitem_dtype_upcast(self):\r\n\r\n # GH3216\r\n df = DataFrame([{\"a\": 1}, {\"a\": 3, \"b\": 2}])\r\n df[\"c\"] = np.nan\r\n assert df[\"c\"].dtype == np.float64\r\n\r\n df.loc[0, \"c\"] = \"foo\"\r\n expected = DataFrame(\r\n [{\"a\": 1, \"b\": np.nan, \"c\": \"foo\"}, {\"a\": 3, \"b\": 2, \"c\": np.nan}]\r\n )\r\n tm.assert_frame_equal(df, expected)\r\n\r\n # GH10280\r\n df = DataFrame(\r\n np.arange(6, dtype=\"int64\").reshape(2, 3),\r\n index=list(\"ab\"),\r\n columns=[\"foo\", \"bar\", \"baz\"],\r\n )\r\n\r\n for val in [3.14, \"wxyz\"]:\r\n left = df.copy()\r\n left.loc[\"a\", \"bar\"] = val\r\n right = DataFrame(\r\n [[0, val, 2], [3, 4, 5]],\r\n index=list(\"ab\"),\r\n columns=[\"foo\", \"bar\", \"baz\"],\r\n )\r\n\r\n tm.assert_frame_equal(left, right)\r\n assert is_integer_dtype(left[\"foo\"])\r\n assert is_integer_dtype(left[\"baz\"])\r\n\r\n left = DataFrame(\r\n np.arange(6, dtype=\"int64\").reshape(2, 3) / 10.0,\r\n index=list(\"ab\"),\r\n columns=[\"foo\", \"bar\", \"baz\"],\r\n )\r\n left.loc[\"a\", \"bar\"] = \"wxyz\"\r\n\r\n right = DataFrame(\r\n [[0, \"wxyz\", 0.2], [0.3, 0.4, 0.5]],\r\n index=list(\"ab\"),\r\n columns=[\"foo\", \"bar\", \"baz\"],\r\n )\r\n\r\n tm.assert_frame_equal(left, right)\r\n assert is_float_dtype(left[\"foo\"])\r\n assert is_float_dtype(left[\"baz\"])\r\n\r\n def test_dups_fancy_indexing(self):\r\n\r\n # GH 3455\r\n\r\n df = tm.makeCustomDataframe(10, 3)\r\n df.columns = [\"a\", \"a\", \"b\"]\r\n result = df[[\"b\", \"a\"]].columns\r\n expected = Index([\"b\", \"a\", \"a\"])\r\n tm.assert_index_equal(result, expected)\r\n\r\n # across dtypes\r\n df = DataFrame([[1, 2, 1.0, 2.0, 3.0, \"foo\", \"bar\"]], columns=list(\"aaaaaaa\"))\r\n df.head()\r\n str(df)\r\n result = DataFrame([[1, 2, 1.0, 2.0, 3.0, \"foo\", 
\"bar\"]])\r\n result.columns = list(\"aaaaaaa\")\r\n\r\n # TODO(wesm): unused?\r\n df_v = df.iloc[:, 4] # noqa\r\n res_v = result.iloc[:, 4] # noqa\r\n\r\n tm.assert_frame_equal(df, result)\r\n\r\n # GH 3561, dups not in selected order\r\n df = DataFrame(\r\n {\"test\": [5, 7, 9, 11], \"test1\": [4.0, 5, 6, 7], \"other\": list(\"abcd\")},\r\n index=[\"A\", \"A\", \"B\", \"C\"],\r\n )\r\n rows = [\"C\", \"B\"]\r\n expected = DataFrame(\r\n {\"test\": [11, 9], \"test1\": [7.0, 6], \"other\": [\"d\", \"c\"]}, index=rows\r\n )\r\n result = df.loc[rows]\r\n tm.assert_frame_equal(result, expected)\r\n\r\n result = df.loc[Index(rows)]\r\n tm.assert_frame_equal(result, expected)\r\n\r\n rows = [\"C\", \"B\", \"E\"]\r\n with pytest.raises(KeyError, match=\"with any missing labels\"):\r\n df.loc[rows]\r\n\r\n # see GH5553, make sure we use the right indexer\r\n rows = [\"F\", \"G\", \"H\", \"C\", \"B\", \"E\"]\r\n with pytest.raises(KeyError, match=\"with any missing labels\"):\r\n df.loc[rows]\r\n\r\n # List containing only missing label\r\n dfnu = DataFrame(np.random.randn(5, 3), index=list(\"AABCD\"))\r\n with pytest.raises(\r\n KeyError,\r\n match=re.escape(\r\n \"\\\"None of [Index(['E'], dtype='object')] are in the [index]\\\"\"\r\n ),\r\n ):\r\n dfnu.loc[[\"E\"]]\r\n\r\n # ToDo: check_index_type can be True after GH 11497\r\n\r\n # GH 4619; duplicate indexer with missing label\r\n df = DataFrame({\"A\": [0, 1, 2]})\r\n with pytest.raises(KeyError, match=\"with any missing labels\"):\r\n df.loc[[0, 8, 0]]\r\n\r\n df = DataFrame({\"A\": list(\"abc\")})\r\n with pytest.raises(KeyError, match=\"with any missing labels\"):\r\n df.loc[[0, 8, 0]]\r\n\r\n # non unique with non unique selector\r\n df = DataFrame({\"test\": [5, 7, 9, 11]}, index=[\"A\", \"A\", \"B\", \"C\"])\r\n with pytest.raises(KeyError, match=\"with any missing labels\"):\r\n df.loc[[\"A\", \"A\", \"E\"]]\r\n\r\n def test_dups_fancy_indexing2(self):\r\n # GH 5835\r\n # dups on index and missing values\r\n df = DataFrame(np.random.randn(5, 5), columns=[\"A\", \"B\", \"B\", \"B\", \"A\"])\r\n\r\n with pytest.raises(KeyError, match=\"with any missing labels\"):\r\n df.loc[:, [\"A\", \"B\", \"C\"]]\r\n\r\n # GH 6504, multi-axis indexing\r\n df = DataFrame(\r\n np.random.randn(9, 2), index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=[\"a\", \"b\"]\r\n )\r\n\r\n expected = df.iloc[0:6]\r\n result = df.loc[[1, 2]]\r\n tm.assert_frame_equal(result, expected)\r\n\r\n expected = df\r\n result = df.loc[:, [\"a\", \"b\"]]\r\n tm.assert_frame_equal(result, expected)\r\n\r\n expected = df.iloc[0:6, :]\r\n result = df.loc[[1, 2], [\"a\", \"b\"]]\r\n tm.assert_frame_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\"case\", [lambda s: s, lambda s: s.loc])\r\n def test_duplicate_int_indexing(self, case):\r\n # GH 17347\r\n s = pd.Series(range(3), index=[1, 1, 3])\r\n expected = s[1]\r\n result = case(s)[[1]]\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_indexing_mixed_frame_bug(self):\r\n\r\n # GH3492\r\n df = DataFrame(\r\n {\"a\": {1: \"aaa\", 2: \"bbb\", 3: \"ccc\"}, \"b\": {1: 111, 2: 222, 3: 333}}\r\n )\r\n\r\n # this works, new column is created correctly\r\n df[\"test\"] = df[\"a\"].apply(lambda x: \"_\" if x == \"aaa\" else x)\r\n\r\n # this does not work, ie column test is not changed\r\n idx = df[\"test\"] == \"_\"\r\n temp = df.loc[idx, \"a\"].apply(lambda x: \"-----\" if x == \"aaa\" else x)\r\n df.loc[idx, \"test\"] = temp\r\n assert df.iloc[0, 2] == \"-----\"\r\n\r\n def test_multitype_list_index_access(self):\r\n # 
GH 10610\r\n df = DataFrame(np.random.random((10, 5)), columns=[\"a\"] + [20, 21, 22, 23])\r\n\r\n with pytest.raises(KeyError, match=re.escape(\"'[-8, 26] not in index'\")):\r\n df[[22, 26, -8]]\r\n assert df[21].shape[0] == df.shape[0]\r\n\r\n def test_set_index_nan(self):\r\n\r\n # GH 3586\r\n df = DataFrame(\r\n {\r\n \"PRuid\": {\r\n 17: \"nonQC\",\r\n 18: \"nonQC\",\r\n 19: \"nonQC\",\r\n 20: \"10\",\r\n 21: \"11\",\r\n 22: \"12\",\r\n 23: \"13\",\r\n 24: \"24\",\r\n 25: \"35\",\r\n 26: \"46\",\r\n 27: \"47\",\r\n 28: \"48\",\r\n 29: \"59\",\r\n 30: \"10\",\r\n },\r\n \"QC\": {\r\n 17: 0.0,\r\n 18: 0.0,\r\n 19: 0.0,\r\n 20: np.nan,\r\n 21: np.nan,\r\n 22: np.nan,\r\n 23: np.nan,\r\n 24: 1.0,\r\n 25: np.nan,\r\n 26: np.nan,\r\n 27: np.nan,\r\n 28: np.nan,\r\n 29: np.nan,\r\n 30: np.nan,\r\n },\r\n \"data\": {\r\n 17: 7.9544899999999998,\r\n 18: 8.0142609999999994,\r\n 19: 7.8591520000000008,\r\n 20: 0.86140349999999999,\r\n 21: 0.87853110000000001,\r\n 22: 0.8427041999999999,\r\n 23: 0.78587700000000005,\r\n 24: 0.73062459999999996,\r\n 25: 0.81668560000000001,\r\n 26: 0.81927080000000008,\r\n 27: 0.80705009999999999,\r\n 28: 0.81440240000000008,\r\n 29: 0.80140849999999997,\r\n 30: 0.81307740000000006,\r\n },\r\n \"year\": {\r\n 17: 2006,\r\n 18: 2007,\r\n 19: 2008,\r\n 20: 1985,\r\n 21: 1985,\r\n 22: 1985,\r\n 23: 1985,\r\n 24: 1985,\r\n 25: 1985,\r\n 26: 1985,\r\n 27: 1985,\r\n 28: 1985,\r\n 29: 1985,\r\n 30: 1986,\r\n },\r\n }\r\n ).reset_index()\r\n\r\n result = (\r\n df.set_index([\"year\", \"PRuid\", \"QC\"])\r\n .reset_index()\r\n .reindex(columns=df.columns)\r\n )\r\n tm.assert_frame_equal(result, df)\r\n\r\n def test_multi_assign(self):\r\n\r\n # GH 3626, an assignment of a sub-df to a df\r\n df = DataFrame(\r\n {\r\n \"FC\": [\"a\", \"b\", \"a\", \"b\", \"a\", \"b\"],\r\n \"PF\": [0, 0, 0, 0, 1, 1],\r\n \"col1\": list(range(6)),\r\n \"col2\": list(range(6, 12)),\r\n }\r\n )\r\n df.iloc[1, 0] = np.nan\r\n df2 = df.copy()\r\n\r\n mask = ~df2.FC.isna()\r\n cols = [\"col1\", \"col2\"]\r\n\r\n dft = df2 * 2\r\n dft.iloc[3, 3] = np.nan\r\n\r\n expected = DataFrame(\r\n {\r\n \"FC\": [\"a\", np.nan, \"a\", \"b\", \"a\", \"b\"],\r\n \"PF\": [0, 0, 0, 0, 1, 1],\r\n \"col1\": Series([0, 1, 4, 6, 8, 10]),\r\n \"col2\": [12, 7, 16, np.nan, 20, 22],\r\n }\r\n )\r\n\r\n # frame on rhs\r\n df2.loc[mask, cols] = dft.loc[mask, cols]\r\n tm.assert_frame_equal(df2, expected)\r\n\r\n df2.loc[mask, cols] = dft.loc[mask, cols]\r\n tm.assert_frame_equal(df2, expected)\r\n\r\n # with an ndarray on rhs\r\n # coerces to float64 because values has float64 dtype\r\n # GH 14001\r\n expected = DataFrame(\r\n {\r\n \"FC\": [\"a\", np.nan, \"a\", \"b\", \"a\", \"b\"],\r\n \"PF\": [0, 0, 0, 0, 1, 1],\r\n \"col1\": [0.0, 1.0, 4.0, 6.0, 8.0, 10.0],\r\n \"col2\": [12, 7, 16, np.nan, 20, 22],\r\n }\r\n )\r\n df2 = df.copy()\r\n df2.loc[mask, cols] = dft.loc[mask, cols].values\r\n tm.assert_frame_equal(df2, expected)\r\n df2.loc[mask, cols] = dft.loc[mask, cols].values\r\n tm.assert_frame_equal(df2, expected)\r\n\r\n # broadcasting on the rhs is required\r\n df = DataFrame(\r\n dict(\r\n A=[1, 2, 0, 0, 0],\r\n B=[0, 0, 0, 10, 11],\r\n C=[0, 0, 0, 10, 11],\r\n D=[3, 4, 5, 6, 7],\r\n )\r\n )\r\n\r\n expected = df.copy()\r\n mask = expected[\"A\"] == 0\r\n for col in [\"A\", \"B\"]:\r\n expected.loc[mask, col] = df[\"D\"]\r\n\r\n df.loc[df[\"A\"] == 0, [\"A\", \"B\"]] = df[\"D\"]\r\n tm.assert_frame_equal(df, expected)\r\n\r\n def test_setitem_list(self):\r\n\r\n # GH 6043\r\n # iloc with a list\r\n df = 
DataFrame(index=[0, 1], columns=[0])\r\n df.iloc[1, 0] = [1, 2, 3]\r\n df.iloc[1, 0] = [1, 2]\r\n\r\n result = DataFrame(index=[0, 1], columns=[0])\r\n result.iloc[1, 0] = [1, 2]\r\n\r\n tm.assert_frame_equal(result, df)\r\n\r\n # iloc with an object\r\n class TO:\r\n def __init__(self, value):\r\n self.value = value\r\n\r\n def __str__(self) -> str:\r\n return f\"[{self.value}]\"\r\n\r\n __repr__ = __str__\r\n\r\n def __eq__(self, other) -> bool:\r\n return self.value == other.value\r\n\r\n def view(self):\r\n return self\r\n\r\n df = DataFrame(index=[0, 1], columns=[0])\r\n df.iloc[1, 0] = TO(1)\r\n df.iloc[1, 0] = TO(2)\r\n\r\n result = DataFrame(index=[0, 1], columns=[0])\r\n result.iloc[1, 0] = TO(2)\r\n\r\n tm.assert_frame_equal(result, df)\r\n\r\n # remains object dtype even after setting it back\r\n df = DataFrame(index=[0, 1], columns=[0])\r\n df.iloc[1, 0] = TO(1)\r\n df.iloc[1, 0] = np.nan\r\n result = DataFrame(index=[0, 1], columns=[0])\r\n\r\n tm.assert_frame_equal(result, df)\r\n\r\n def test_string_slice(self):\r\n # GH 14424\r\n # string indexing against datetimelike with object\r\n # dtype should properly raises KeyError\r\n df = DataFrame([1], Index([pd.Timestamp(\"2011-01-01\")], dtype=object))\r\n assert df.index.is_all_dates\r\n with pytest.raises(KeyError, match=\"'2011'\"):\r\n df[\"2011\"]\r\n\r\n with pytest.raises(KeyError, match=\"'2011'\"):\r\n df.loc[\"2011\", 0]\r\n\r\n df = DataFrame()\r\n assert not df.index.is_all_dates\r\n with pytest.raises(KeyError, match=\"'2011'\"):\r\n df[\"2011\"]\r\n\r\n with pytest.raises(KeyError, match=\"'2011'\"):\r\n df.loc[\"2011\", 0]\r\n\r\n def test_astype_assignment(self):\r\n\r\n # GH4312 (iloc)\r\n df_orig = DataFrame(\r\n [[\"1\", \"2\", \"3\", \".4\", 5, 6.0, \"foo\"]], columns=list(\"ABCDEFG\")\r\n )\r\n\r\n df = df_orig.copy()\r\n df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)\r\n expected = DataFrame(\r\n [[1, 2, \"3\", \".4\", 5, 6.0, \"foo\"]], columns=list(\"ABCDEFG\")\r\n )\r\n tm.assert_frame_equal(df, expected)\r\n\r\n df = df_orig.copy()\r\n df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)\r\n expected = DataFrame(\r\n [[1, 2, \"3\", \".4\", 5, 6.0, \"foo\"]], columns=list(\"ABCDEFG\")\r\n )\r\n tm.assert_frame_equal(df, expected)\r\n\r\n # GH5702 (loc)\r\n df = df_orig.copy()\r\n df.loc[:, \"A\"] = df.loc[:, \"A\"].astype(np.int64)\r\n expected = DataFrame(\r\n [[1, \"2\", \"3\", \".4\", 5, 6.0, \"foo\"]], columns=list(\"ABCDEFG\")\r\n )\r\n tm.assert_frame_equal(df, expected)\r\n\r\n df = df_orig.copy()\r\n df.loc[:, [\"B\", \"C\"]] = df.loc[:, [\"B\", \"C\"]].astype(np.int64)\r\n expected = DataFrame(\r\n [[\"1\", 2, 3, \".4\", 5, 6.0, \"foo\"]], columns=list(\"ABCDEFG\")\r\n )\r\n tm.assert_frame_equal(df, expected)\r\n\r\n # full replacements / no nans\r\n df = DataFrame({\"A\": [1.0, 2.0, 3.0, 4.0]})\r\n df.iloc[:, 0] = df[\"A\"].astype(np.int64)\r\n expected = DataFrame({\"A\": [1, 2, 3, 4]})\r\n tm.assert_frame_equal(df, expected)\r\n\r\n df = DataFrame({\"A\": [1.0, 2.0, 3.0, 4.0]})\r\n df.loc[:, \"A\"] = df[\"A\"].astype(np.int64)\r\n expected = DataFrame({\"A\": [1, 2, 3, 4]})\r\n tm.assert_frame_equal(df, expected)\r\n\r\n def test_index_type_coercion(self):\r\n\r\n # GH 11836\r\n # if we have an index type and set it with something that looks\r\n # to numpy like the same, but is actually, not\r\n # (e.g. 
setting with a float or string '0')\r\n # then we need to coerce to object\r\n\r\n # integer indexes\r\n for s in [Series(range(5)), Series(range(5), index=range(1, 6))]:\r\n\r\n assert s.index.is_integer()\r\n\r\n for indexer in [lambda x: x.loc, lambda x: x]:\r\n s2 = s.copy()\r\n indexer(s2)[0.1] = 0\r\n assert s2.index.is_floating()\r\n assert indexer(s2)[0.1] == 0\r\n\r\n s2 = s.copy()\r\n indexer(s2)[0.0] = 0\r\n exp = s.index\r\n if 0 not in s:\r\n exp = Index(s.index.tolist() + [0])\r\n tm.assert_index_equal(s2.index, exp)\r\n\r\n s2 = s.copy()\r\n indexer(s2)[\"0\"] = 0\r\n assert s2.index.is_object()\r\n\r\n for s in [Series(range(5), index=np.arange(5.0))]:\r\n\r\n assert s.index.is_floating()\r\n\r\n for idxr in [lambda x: x.loc, lambda x: x]:\r\n\r\n s2 = s.copy()\r\n idxr(s2)[0.1] = 0\r\n assert s2.index.is_floating()\r\n assert idxr(s2)[0.1] == 0\r\n\r\n s2 = s.copy()\r\n idxr(s2)[0.0] = 0\r\n tm.assert_index_equal(s2.index, s.index)\r\n\r\n s2 = s.copy()\r\n idxr(s2)[\"0\"] = 0\r\n assert s2.index.is_object()\r\n\r\n\r\nclass TestMisc:\r\n def test_float_index_to_mixed(self):\r\n df = DataFrame({0.0: np.random.rand(10), 1.0: np.random.rand(10)})\r\n df[\"a\"] = 10\r\n tm.assert_frame_equal(\r\n DataFrame({0.0: df[0.0], 1.0: df[1.0], \"a\": [10] * 10}), df\r\n )\r\n\r\n def test_float_index_non_scalar_assignment(self):\r\n df = DataFrame({\"a\": [1, 2, 3], \"b\": [3, 4, 5]}, index=[1.0, 2.0, 3.0])\r\n df.loc[df.index[:2]] = 1\r\n expected = DataFrame({\"a\": [1, 1, 3], \"b\": [1, 1, 5]}, index=df.index)\r\n tm.assert_frame_equal(expected, df)\r\n\r\n df = DataFrame({\"a\": [1, 2, 3], \"b\": [3, 4, 5]}, index=[1.0, 2.0, 3.0])\r\n df2 = df.copy()\r\n df.loc[df.index] = df.loc[df.index]\r\n tm.assert_frame_equal(df, df2)\r\n\r\n def test_float_index_at_iat(self):\r\n s = Series([1, 2, 3], index=[0.1, 0.2, 0.3])\r\n for el, item in s.items():\r\n assert s.at[el] == item\r\n for i in range(len(s)):\r\n assert s.iat[i] == i + 1\r\n\r\n def test_mixed_index_assignment(self):\r\n # GH 19860\r\n s = Series([1, 2, 3, 4, 5], index=[\"a\", \"b\", \"c\", 1, 2])\r\n s.at[\"a\"] = 11\r\n assert s.iat[0] == 11\r\n s.at[1] = 22\r\n assert s.iat[3] == 22\r\n\r\n def test_mixed_index_no_fallback(self):\r\n # GH 19860\r\n s = Series([1, 2, 3, 4, 5], index=[\"a\", \"b\", \"c\", 1, 2])\r\n with pytest.raises(KeyError, match=\"^0$\"):\r\n s.at[0]\r\n with pytest.raises(KeyError, match=\"^4$\"):\r\n s.at[4]\r\n\r\n def test_rhs_alignment(self):\r\n # GH8258, tests that both rows & columns are aligned to what is\r\n # assigned to. 
covers both uniform data-type & multi-type cases\r\n def run_tests(df, rhs, right):\r\n # label, index, slice\r\n lbl_one, idx_one, slice_one = list(\"bcd\"), [1, 2, 3], slice(1, 4)\r\n lbl_two, idx_two, slice_two = [\"joe\", \"jolie\"], [1, 2], slice(1, 3)\r\n\r\n left = df.copy()\r\n left.loc[lbl_one, lbl_two] = rhs\r\n tm.assert_frame_equal(left, right)\r\n\r\n left = df.copy()\r\n left.iloc[idx_one, idx_two] = rhs\r\n tm.assert_frame_equal(left, right)\r\n\r\n left = df.copy()\r\n left.iloc[slice_one, slice_two] = rhs\r\n tm.assert_frame_equal(left, right)\r\n\r\n xs = np.arange(20).reshape(5, 4)\r\n cols = [\"jim\", \"joe\", \"jolie\", \"joline\"]\r\n df = DataFrame(xs, columns=cols, index=list(\"abcde\"))\r\n\r\n # right hand side; permute the indices and multiplpy by -2\r\n rhs = -2 * df.iloc[3:0:-1, 2:0:-1]\r\n\r\n # expected `right` result; just multiply by -2\r\n right = df.copy()\r\n right.iloc[1:4, 1:3] *= -2\r\n\r\n # run tests with uniform dtypes\r\n run_tests(df, rhs, right)\r\n\r\n # make frames multi-type & re-run tests\r\n for frame in [df, rhs, right]:\r\n frame[\"joe\"] = frame[\"joe\"].astype(\"float64\")\r\n frame[\"jolie\"] = frame[\"jolie\"].map(\"@{0}\".format)\r\n\r\n run_tests(df, rhs, right)\r\n\r\n def test_str_label_slicing_with_negative_step(self):\r\n SLC = pd.IndexSlice\r\n\r\n def assert_slices_equivalent(l_slc, i_slc):\r\n tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])\r\n\r\n if not idx.is_integer:\r\n # For integer indices, .loc and plain getitem are position-based.\r\n tm.assert_series_equal(s[l_slc], s.iloc[i_slc])\r\n tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])\r\n\r\n for idx in [_mklbl(\"A\", 20), np.arange(20) + 100, np.linspace(100, 150, 20)]:\r\n idx = Index(idx)\r\n s = Series(np.arange(20), index=idx)\r\n assert_slices_equivalent(SLC[idx[9] :: -1], SLC[9::-1])\r\n assert_slices_equivalent(SLC[: idx[9] : -1], SLC[:8:-1])\r\n assert_slices_equivalent(SLC[idx[13] : idx[9] : -1], SLC[13:8:-1])\r\n assert_slices_equivalent(SLC[idx[9] : idx[13] : -1], SLC[:0])\r\n\r\n def test_slice_with_zero_step_raises(self):\r\n s = Series(np.arange(20), index=_mklbl(\"A\", 20))\r\n with pytest.raises(ValueError, match=\"slice step cannot be zero\"):\r\n s[::0]\r\n with pytest.raises(ValueError, match=\"slice step cannot be zero\"):\r\n s.loc[::0]\r\n\r\n def test_indexing_assignment_dict_already_exists(self):\r\n df = DataFrame({\"x\": [1, 2, 6], \"y\": [2, 2, 8], \"z\": [-5, 0, 5]}).set_index(\"z\")\r\n expected = df.copy()\r\n rhs = dict(x=9, y=99)\r\n df.loc[5] = rhs\r\n expected.loc[5] = [9, 99]\r\n tm.assert_frame_equal(df, expected)\r\n\r\n def test_indexing_dtypes_on_empty(self):\r\n # Check that .iloc returns correct dtypes GH9983\r\n df = DataFrame({\"a\": [1, 2, 3], \"b\": [\"b\", \"b2\", \"b3\"]})\r\n df2 = df.iloc[[], :]\r\n\r\n assert df2.loc[:, \"a\"].dtype == np.int64\r\n tm.assert_series_equal(df2.loc[:, \"a\"], df2.iloc[:, 0])\r\n\r\n @pytest.mark.parametrize(\"size\", [5, 999999, 1000000])\r\n def test_range_in_series_indexing(self, size):\r\n # range can cause an indexing error\r\n # GH 11652\r\n s = Series(index=range(size), dtype=np.float64)\r\n s.loc[range(1)] = 42\r\n tm.assert_series_equal(s.loc[range(1)], Series(42.0, index=[0]))\r\n\r\n s.loc[range(2)] = 43\r\n tm.assert_series_equal(s.loc[range(2)], Series(43.0, index=[0, 1]))\r\n\r\n @pytest.mark.parametrize(\r\n \"slc\",\r\n [\r\n pd.IndexSlice[:, :],\r\n pd.IndexSlice[:, 1],\r\n pd.IndexSlice[1, :],\r\n pd.IndexSlice[[1], [1]],\r\n pd.IndexSlice[1, [1]],\r\n 
pd.IndexSlice[[1], 1],\r\n pd.IndexSlice[1],\r\n pd.IndexSlice[1, 1],\r\n slice(None, None, None),\r\n [0, 1],\r\n np.array([0, 1]),\r\n Series([0, 1]),\r\n ],\r\n )\r\n def test_non_reducing_slice(self, slc):\r\n df = DataFrame([[0, 1], [2, 3]])\r\n\r\n tslice_ = _non_reducing_slice(slc)\r\n assert isinstance(df.loc[tslice_], DataFrame)\r\n\r\n def test_list_slice(self):\r\n # like dataframe getitem\r\n slices = [[\"A\"], Series([\"A\"]), np.array([\"A\"])]\r\n df = DataFrame({\"A\": [1, 2], \"B\": [3, 4]}, index=[\"A\", \"B\"])\r\n expected = pd.IndexSlice[:, [\"A\"]]\r\n for subset in slices:\r\n result = _non_reducing_slice(subset)\r\n tm.assert_frame_equal(df.loc[result], df.loc[expected])\r\n\r\n def test_maybe_numeric_slice(self):\r\n df = DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"], \"C\": [True, False]})\r\n result = _maybe_numeric_slice(df, slice_=None)\r\n expected = pd.IndexSlice[:, [\"A\"]]\r\n assert result == expected\r\n\r\n result = _maybe_numeric_slice(df, None, include_bool=True)\r\n expected = pd.IndexSlice[:, [\"A\", \"C\"]]\r\n result = _maybe_numeric_slice(df, [1])\r\n expected = [1]\r\n assert result == expected\r\n\r\n def test_partial_boolean_frame_indexing(self):\r\n # GH 17170\r\n df = DataFrame(\r\n np.arange(9.0).reshape(3, 3), index=list(\"abc\"), columns=list(\"ABC\")\r\n )\r\n index_df = DataFrame(1, index=list(\"ab\"), columns=list(\"AB\"))\r\n result = df[index_df.notnull()]\r\n expected = DataFrame(\r\n np.array([[0.0, 1.0, np.nan], [3.0, 4.0, np.nan], [np.nan] * 3]),\r\n index=list(\"abc\"),\r\n columns=list(\"ABC\"),\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n def test_no_reference_cycle(self):\r\n df = DataFrame({\"a\": [0, 1], \"b\": [2, 3]})\r\n for name in (\"loc\", \"iloc\", \"at\", \"iat\"):\r\n getattr(df, name)\r\n wr = weakref.ref(df)\r\n del df\r\n assert wr() is None\r\n\r\n\r\nclass TestSeriesNoneCoercion:\r\n EXPECTED_RESULTS = [\r\n # For numeric series, we should coerce to NaN.\r\n ([1, 2, 3], [np.nan, 2, 3]),\r\n ([1.0, 2.0, 3.0], [np.nan, 2.0, 3.0]),\r\n # For datetime series, we should coerce to NaT.\r\n (\r\n [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],\r\n [NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)],\r\n ),\r\n # For objects, we should preserve the None value.\r\n ([\"foo\", \"bar\", \"baz\"], [None, \"bar\", \"baz\"]),\r\n ]\r\n\r\n @pytest.mark.parametrize(\"start_data,expected_result\", EXPECTED_RESULTS)\r\n def test_coercion_with_setitem(self, start_data, expected_result):\r\n start_series = Series(start_data)\r\n start_series[0] = None\r\n\r\n expected_series = Series(expected_result)\r\n tm.assert_series_equal(start_series, expected_series)\r\n\r\n @pytest.mark.parametrize(\"start_data,expected_result\", EXPECTED_RESULTS)\r\n def test_coercion_with_loc_setitem(self, start_data, expected_result):\r\n start_series = Series(start_data)\r\n start_series.loc[0] = None\r\n\r\n expected_series = Series(expected_result)\r\n tm.assert_series_equal(start_series, expected_series)\r\n\r\n @pytest.mark.parametrize(\"start_data,expected_result\", EXPECTED_RESULTS)\r\n def test_coercion_with_setitem_and_series(self, start_data, expected_result):\r\n start_series = Series(start_data)\r\n start_series[start_series == start_series[0]] = None\r\n\r\n expected_series = Series(expected_result)\r\n tm.assert_series_equal(start_series, expected_series)\r\n\r\n @pytest.mark.parametrize(\"start_data,expected_result\", EXPECTED_RESULTS)\r\n def test_coercion_with_loc_and_series(self, start_data, 
expected_result):\r\n start_series = Series(start_data)\r\n start_series.loc[start_series == start_series[0]] = None\r\n\r\n expected_series = Series(expected_result)\r\n tm.assert_series_equal(start_series, expected_series)\r\n\r\n\r\nclass TestDataframeNoneCoercion:\r\n EXPECTED_SINGLE_ROW_RESULTS = [\r\n # For numeric series, we should coerce to NaN.\r\n ([1, 2, 3], [np.nan, 2, 3]),\r\n ([1.0, 2.0, 3.0], [np.nan, 2.0, 3.0]),\r\n # For datetime series, we should coerce to NaT.\r\n (\r\n [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],\r\n [NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)],\r\n ),\r\n # For objects, we should preserve the None value.\r\n ([\"foo\", \"bar\", \"baz\"], [None, \"bar\", \"baz\"]),\r\n ]\r\n\r\n @pytest.mark.parametrize(\"expected\", EXPECTED_SINGLE_ROW_RESULTS)\r\n def test_coercion_with_loc(self, expected):\r\n start_data, expected_result = expected\r\n\r\n start_dataframe = DataFrame({\"foo\": start_data})\r\n start_dataframe.loc[0, [\"foo\"]] = None\r\n\r\n expected_dataframe = DataFrame({\"foo\": expected_result})\r\n tm.assert_frame_equal(start_dataframe, expected_dataframe)\r\n\r\n @pytest.mark.parametrize(\"expected\", EXPECTED_SINGLE_ROW_RESULTS)\r\n def test_coercion_with_setitem_and_dataframe(self, expected):\r\n start_data, expected_result = expected\r\n\r\n start_dataframe = DataFrame({\"foo\": start_data})\r\n start_dataframe[start_dataframe[\"foo\"] == start_dataframe[\"foo\"][0]] = None\r\n\r\n expected_dataframe = DataFrame({\"foo\": expected_result})\r\n tm.assert_frame_equal(start_dataframe, expected_dataframe)\r\n\r\n @pytest.mark.parametrize(\"expected\", EXPECTED_SINGLE_ROW_RESULTS)\r\n def test_none_coercion_loc_and_dataframe(self, expected):\r\n start_data, expected_result = expected\r\n\r\n start_dataframe = DataFrame({\"foo\": start_data})\r\n start_dataframe.loc[start_dataframe[\"foo\"] == start_dataframe[\"foo\"][0]] = None\r\n\r\n expected_dataframe = DataFrame({\"foo\": expected_result})\r\n tm.assert_frame_equal(start_dataframe, expected_dataframe)\r\n\r\n def test_none_coercion_mixed_dtypes(self):\r\n start_dataframe = DataFrame(\r\n {\r\n \"a\": [1, 2, 3],\r\n \"b\": [1.0, 2.0, 3.0],\r\n \"c\": [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],\r\n \"d\": [\"a\", \"b\", \"c\"],\r\n }\r\n )\r\n start_dataframe.iloc[0] = None\r\n\r\n exp = DataFrame(\r\n {\r\n \"a\": [np.nan, 2, 3],\r\n \"b\": [np.nan, 2.0, 3.0],\r\n \"c\": [NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)],\r\n \"d\": [None, \"b\", \"c\"],\r\n }\r\n )\r\n tm.assert_frame_equal(start_dataframe, exp)\r\n\r\n\r\ndef test_extension_array_cross_section():\r\n # A cross-section of a homogeneous EA should be an EA\r\n df = pd.DataFrame(\r\n {\r\n \"A\": pd.core.arrays.integer_array([1, 2]),\r\n \"B\": pd.core.arrays.integer_array([3, 4]),\r\n },\r\n index=[\"a\", \"b\"],\r\n )\r\n expected = pd.Series(\r\n pd.core.arrays.integer_array([1, 3]), index=[\"A\", \"B\"], name=\"a\"\r\n )\r\n result = df.loc[\"a\"]\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = df.iloc[0]\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\ndef test_extension_array_cross_section_converts():\r\n # all numeric columns -> numeric series\r\n df = pd.DataFrame(\r\n {\"A\": pd.array([1, 2], dtype=\"Int64\"), \"B\": np.array([1, 2])}, index=[\"a\", \"b\"],\r\n )\r\n result = df.loc[\"a\"]\r\n expected = pd.Series([1, 1], dtype=\"Int64\", index=[\"A\", \"B\"], name=\"a\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = df.iloc[0]\r\n 
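    # ------------------------------------------------------------------
    # Editor's illustrative note (not part of the original pandas test):
    # with all-numeric columns the positional cross-section keeps the
    # nullable extension dtype instead of decaying to ``object``, so the
    # row returned by ``iloc[0]`` is itself an Int64 Series, matching the
    # ``expected`` Series constructed above.
    assert str(result.dtype) == "Int64"
    assert str(expected.dtype) == "Int64"
    # ------------------------------------------------------------------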
tm.assert_series_equal(result, expected)\r\n\r\n # mixed columns -> object series\r\n df = pd.DataFrame(\r\n {\"A\": pd.array([1, 2], dtype=\"Int64\"), \"B\": np.array([\"a\", \"b\"])},\r\n index=[\"a\", \"b\"],\r\n )\r\n result = df.loc[\"a\"]\r\n expected = pd.Series([1, \"a\"], dtype=object, index=[\"A\", \"B\"], name=\"a\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = df.iloc[0]\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\ndef test_readonly_indices():\r\n # GH#17192 iloc with read-only array raising TypeError\r\n df = pd.DataFrame({\"data\": np.ones(100, dtype=\"float64\")})\r\n indices = np.array([1, 3, 6])\r\n indices.flags.writeable = False\r\n\r\n result = df.iloc[indices]\r\n expected = df.loc[[1, 3, 6]]\r\n tm.assert_frame_equal(result, expected)\r\n\r\n result = df[\"data\"].iloc[indices]\r\n expected = df[\"data\"].loc[[1, 3, 6]]\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\ndef test_1tuple_without_multiindex():\r\n ser = pd.Series(range(5))\r\n key = (slice(3),)\r\n\r\n result = ser[key]\r\n expected = ser[key[0]]\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\ndef test_duplicate_index_mistyped_key_raises_keyerror():\r\n # GH#29189 float_index.get_loc(None) should raise KeyError, not TypeError\r\n ser = pd.Series([2, 5, 6, 8], index=[2.0, 4.0, 4.0, 5.0])\r\n with pytest.raises(KeyError):\r\n ser[None]\r\n\r\n with pytest.raises(KeyError):\r\n ser.index.get_loc(None)\r\n\r\n with pytest.raises(KeyError):\r\n ser.index._engine.get_loc(None)\r\n\r\n\r\ndef test_setitem_with_bool_mask_and_values_matching_n_trues_in_length():\r\n # GH 30567\r\n ser = pd.Series([None] * 10)\r\n mask = [False] * 3 + [True] * 5 + [False] * 2\r\n ser[mask] = range(5)\r\n result = ser\r\n expected = pd.Series([None] * 3 + list(range(5)) + [None] * 2).astype(\"object\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\ndef test_missing_labels_inside_loc_matched_in_error_message():\r\n # GH34272\r\n s = pd.Series({\"a\": 1, \"b\": 2, \"c\": 3})\r\n error_message_regex = \"missing_0.*missing_1.*missing_2\"\r\n with pytest.raises(KeyError, match=error_message_regex):\r\n s.loc[[\"a\", \"b\", \"missing_0\", \"c\", \"missing_1\", \"missing_2\"]]\r\n\r\n\r\ndef test_many_missing_labels_inside_loc_error_message_limited():\r\n # GH34272\r\n n = 10000\r\n missing_labels = [f\"missing_{label}\" for label in range(n)]\r\n s = pd.Series({\"a\": 1, \"b\": 2, \"c\": 3})\r\n # regex checks labels between 4 and 9995 are replaced with ellipses\r\n error_message_regex = \"missing_4.*\\\\.\\\\.\\\\..*missing_9995\"\r\n with pytest.raises(KeyError, match=error_message_regex):\r\n s.loc[[\"a\", \"c\"] + missing_labels]\r\n\r\n\r\ndef test_long_text_missing_labels_inside_loc_error_message_limited():\r\n # GH34272\r\n s = pd.Series({\"a\": 1, \"b\": 2, \"c\": 3})\r\n missing_labels = [f\"long_missing_label_text_{i}\" * 5 for i in range(3)]\r\n # regex checks for very long labels there are new lines between each\r\n error_message_regex = \"long_missing_label_text_0.*\\\\\\\\n.*long_missing_label_text_1\"\r\n with pytest.raises(KeyError, match=error_message_regex):\r\n s.loc[[\"a\", \"c\"] + missing_labels]\r\n\r\n\r\ndef test_setitem_categorical():\r\n # https://github.com/pandas-dev/pandas/issues/35369\r\n df = pd.DataFrame({\"h\": pd.Series(list(\"mn\")).astype(\"category\")})\r\n df.h = df.h.cat.reorder_categories([\"n\", \"m\"])\r\n expected = pd.DataFrame(\r\n {\"h\": pd.Categorical([\"m\", \"n\"]).reorder_categories([\"n\", \"m\"])}\r\n )\r\n tm.assert_frame_equal(df, 
expected)\r\n", "\"\"\"\r\ntimedelta support tools\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\nfrom pandas._libs.tslibs import NaT\r\nfrom pandas._libs.tslibs.timedeltas import Timedelta, parse_timedelta_unit\r\n\r\nfrom pandas.core.dtypes.common import is_list_like\r\nfrom pandas.core.dtypes.generic import ABCIndexClass, ABCSeries\r\n\r\nfrom pandas.core.arrays.timedeltas import sequence_to_td64ns\r\n\r\n\r\ndef to_timedelta(arg, unit=None, errors=\"raise\"):\r\n \"\"\"\r\n Convert argument to timedelta.\r\n\r\n Timedeltas are absolute differences in times, expressed in difference\r\n units (e.g. days, hours, minutes, seconds). This method converts\r\n an argument from a recognized timedelta format / value into\r\n a Timedelta type.\r\n\r\n Parameters\r\n ----------\r\n arg : str, timedelta, list-like or Series\r\n The data to be converted to timedelta.\r\n unit : str, optional\r\n Denotes the unit of the arg for numeric `arg`. Defaults to ``\"ns\"``.\r\n\r\n Possible values:\r\n\r\n * 'W'\r\n * 'D' / 'days' / 'day'\r\n * 'hours' / 'hour' / 'hr' / 'h'\r\n * 'm' / 'minute' / 'min' / 'minutes' / 'T'\r\n * 'S' / 'seconds' / 'sec' / 'second'\r\n * 'ms' / 'milliseconds' / 'millisecond' / 'milli' / 'millis' / 'L'\r\n * 'us' / 'microseconds' / 'microsecond' / 'micro' / 'micros' / 'U'\r\n * 'ns' / 'nanoseconds' / 'nano' / 'nanos' / 'nanosecond' / 'N'\r\n\r\n .. versionchanged:: 1.1.0\r\n\r\n Must not be specified when `arg` context strings and\r\n ``errors=\"raise\"``.\r\n\r\n errors : {'ignore', 'raise', 'coerce'}, default 'raise'\r\n - If 'raise', then invalid parsing will raise an exception.\r\n - If 'coerce', then invalid parsing will be set as NaT.\r\n - If 'ignore', then invalid parsing will return the input.\r\n\r\n Returns\r\n -------\r\n timedelta64 or numpy.array of timedelta64\r\n Output type returned if parsing succeeded.\r\n\r\n See Also\r\n --------\r\n DataFrame.astype : Cast argument to a specified dtype.\r\n to_datetime : Convert argument to datetime.\r\n convert_dtypes : Convert dtypes.\r\n\r\n Examples\r\n --------\r\n Parsing a single string to a Timedelta:\r\n\r\n >>> pd.to_timedelta('1 days 06:05:01.00003')\r\n Timedelta('1 days 06:05:01.000030')\r\n >>> pd.to_timedelta('15.5us')\r\n Timedelta('0 days 00:00:00.000015500')\r\n\r\n Parsing a list or array of strings:\r\n\r\n >>> pd.to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan'])\r\n TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015500', NaT],\r\n dtype='timedelta64[ns]', freq=None)\r\n\r\n Converting numbers by specifying the `unit` keyword argument:\r\n\r\n >>> pd.to_timedelta(np.arange(5), unit='s')\r\n TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:01', '0 days 00:00:02',\r\n '0 days 00:00:03', '0 days 00:00:04'],\r\n dtype='timedelta64[ns]', freq=None)\r\n >>> pd.to_timedelta(np.arange(5), unit='d')\r\n TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],\r\n dtype='timedelta64[ns]', freq=None)\r\n \"\"\"\r\n if unit is not None:\r\n unit = parse_timedelta_unit(unit)\r\n\r\n if errors not in (\"ignore\", \"raise\", \"coerce\"):\r\n raise ValueError(\"errors must be one of 'ignore', 'raise', or 'coerce'}\")\r\n\r\n if unit in {\"Y\", \"y\", \"M\"}:\r\n raise ValueError(\r\n \"Units 'M', 'Y', and 'y' are no longer supported, as they do not \"\r\n \"represent unambiguous timedelta values durations.\"\r\n )\r\n\r\n if arg is None:\r\n return arg\r\n elif isinstance(arg, ABCSeries):\r\n values = _convert_listlike(arg._values, unit=unit, errors=errors)\r\n return arg._constructor(values, 
index=arg.index, name=arg.name)\r\n elif isinstance(arg, ABCIndexClass):\r\n return _convert_listlike(arg, unit=unit, errors=errors, name=arg.name)\r\n elif isinstance(arg, np.ndarray) and arg.ndim == 0:\r\n # extract array scalar and process below\r\n arg = arg.item()\r\n elif is_list_like(arg) and getattr(arg, \"ndim\", 1) == 1:\r\n return _convert_listlike(arg, unit=unit, errors=errors)\r\n elif getattr(arg, \"ndim\", 1) > 1:\r\n raise TypeError(\r\n \"arg must be a string, timedelta, list, tuple, 1-d array, or Series\"\r\n )\r\n\r\n if isinstance(arg, str) and unit is not None:\r\n raise ValueError(\"unit must not be specified if the input is/contains a str\")\r\n\r\n # ...so it must be a scalar value. Return scalar.\r\n return _coerce_scalar_to_timedelta_type(arg, unit=unit, errors=errors)\r\n\r\n\r\ndef _coerce_scalar_to_timedelta_type(r, unit=\"ns\", errors=\"raise\"):\r\n \"\"\"Convert string 'r' to a timedelta object.\"\"\"\r\n try:\r\n result = Timedelta(r, unit)\r\n except ValueError:\r\n if errors == \"raise\":\r\n raise\r\n elif errors == \"ignore\":\r\n return r\r\n\r\n # coerce\r\n result = NaT\r\n\r\n return result\r\n\r\n\r\ndef _convert_listlike(arg, unit=None, errors=\"raise\", name=None):\r\n \"\"\"Convert a list of objects to a timedelta index object.\"\"\"\r\n if isinstance(arg, (list, tuple)) or not hasattr(arg, \"dtype\"):\r\n # This is needed only to ensure that in the case where we end up\r\n # returning arg (errors == \"ignore\"), and where the input is a\r\n # generator, we return a useful list-like instead of a\r\n # used-up generator\r\n arg = np.array(list(arg), dtype=object)\r\n\r\n try:\r\n value = sequence_to_td64ns(arg, unit=unit, errors=errors, copy=False)[0]\r\n except ValueError:\r\n if errors == \"ignore\":\r\n return arg\r\n else:\r\n # This else-block accounts for the cases when errors='raise'\r\n # and errors='coerce'. If errors == 'raise', these errors\r\n # should be raised. If errors == 'coerce', we shouldn't\r\n # expect any errors to be raised, since all parsing errors\r\n # cause coercion to pd.NaT. 
However, if an error / bug is\r\n # introduced that causes an Exception to be raised, we would\r\n # like to surface it.\r\n raise\r\n\r\n from pandas import TimedeltaIndex\r\n\r\n value = TimedeltaIndex(value, unit=\"ns\", name=name)\r\n return value\r\n", "import numpy as np\r\nfrom scipy import sparse as sp\r\nfrom scipy import stats\r\n\r\nimport pytest\r\n\r\nfrom sklearn.svm._bounds import l1_min_c\r\nfrom sklearn.svm import LinearSVC\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.svm._newrand import set_seed_wrap, bounded_rand_int_wrap\r\n\r\nfrom sklearn.utils._testing import assert_raise_message\r\n\r\n\r\ndense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]\r\nsparse_X = sp.csr_matrix(dense_X)\r\n\r\nY1 = [0, 1, 1, 1]\r\nY2 = [2, 1, 0, 0]\r\n\r\n\r\[email protected]('loss', ['squared_hinge', 'log'])\r\[email protected]('X_label', ['sparse', 'dense'])\r\[email protected]('Y_label', ['two-classes', 'multi-class'])\r\[email protected]('intercept_label', ['no-intercept', 'fit-intercept'])\r\ndef test_l1_min_c(loss, X_label, Y_label, intercept_label):\r\n Xs = {'sparse': sparse_X, 'dense': dense_X}\r\n Ys = {'two-classes': Y1, 'multi-class': Y2}\r\n intercepts = {'no-intercept': {'fit_intercept': False},\r\n 'fit-intercept': {'fit_intercept': True,\r\n 'intercept_scaling': 10}}\r\n\r\n X = Xs[X_label]\r\n Y = Ys[Y_label]\r\n intercept_params = intercepts[intercept_label]\r\n check_l1_min_c(X, Y, loss, **intercept_params)\r\n\r\n\r\ndef test_l1_min_c_l2_loss():\r\n # loss='l2' should raise ValueError\r\n assert_raise_message(ValueError, \"loss type not in\",\r\n l1_min_c, dense_X, Y1, loss=\"l2\")\r\n\r\n\r\ndef check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):\r\n min_c = l1_min_c(X, y, loss=loss, fit_intercept=fit_intercept,\r\n intercept_scaling=intercept_scaling)\r\n\r\n clf = {\r\n 'log': LogisticRegression(penalty='l1', solver='liblinear'),\r\n 'squared_hinge': LinearSVC(loss='squared_hinge',\r\n penalty='l1', dual=False),\r\n }[loss]\r\n\r\n clf.fit_intercept = fit_intercept\r\n clf.intercept_scaling = intercept_scaling\r\n\r\n clf.C = min_c\r\n clf.fit(X, y)\r\n assert (np.asarray(clf.coef_) == 0).all()\r\n assert (np.asarray(clf.intercept_) == 0).all()\r\n\r\n clf.C = min_c * 1.01\r\n clf.fit(X, y)\r\n assert ((np.asarray(clf.coef_) != 0).any() or\r\n (np.asarray(clf.intercept_) != 0).any())\r\n\r\n\r\ndef test_ill_posed_min_c():\r\n X = [[0, 0], [0, 0]]\r\n y = [0, 1]\r\n with pytest.raises(ValueError):\r\n l1_min_c(X, y)\r\n\r\n\r\ndef test_unsupported_loss():\r\n with pytest.raises(ValueError):\r\n l1_min_c(dense_X, Y1, loss='l1')\r\n\r\n\r\n_MAX_UNSIGNED_INT = 4294967295\r\n\r\n\r\[email protected]('seed, val',\r\n [(None, 81),\r\n (0, 54),\r\n (_MAX_UNSIGNED_INT, 9)])\r\ndef test_newrand_set_seed(seed, val):\r\n \"\"\"Test that `set_seed` produces deterministic results\"\"\"\r\n if seed is not None:\r\n set_seed_wrap(seed)\r\n x = bounded_rand_int_wrap(100)\r\n assert x == val, f'Expected {val} but got {x} instead'\r\n\r\n\r\[email protected]('seed',\r\n [-1, _MAX_UNSIGNED_INT + 1])\r\ndef test_newrand_set_seed_overflow(seed):\r\n \"\"\"Test that `set_seed_wrap` is defined for unsigned 32bits ints\"\"\"\r\n with pytest.raises(OverflowError):\r\n set_seed_wrap(seed)\r\n\r\n\r\[email protected]('range_, n_pts',\r\n [(_MAX_UNSIGNED_INT, 10000), (100, 25)])\r\ndef test_newrand_bounded_rand_int(range_, n_pts):\r\n \"\"\"Test that `bounded_rand_int` follows a uniform distribution\"\"\"\r\n n_iter = 100\r\n ks_pvals = []\r\n uniform_dist = 
stats.uniform(loc=0, scale=range_)\r\n # perform multiple samplings to make chance of outlier sampling negligible\r\n for _ in range(n_iter):\r\n # Deterministic random sampling\r\n sample = [bounded_rand_int_wrap(range_) for _ in range(n_pts)]\r\n res = stats.kstest(sample, uniform_dist.cdf)\r\n ks_pvals.append(res.pvalue)\r\n # Null hypothesis = samples come from an uniform distribution.\r\n # Under the null hypothesis, p-values should be uniformly distributed\r\n # and not concentrated on low values\r\n # (this may seem counter-intuitive but is backed by multiple refs)\r\n # So we can do two checks:\r\n\r\n # (1) check uniformity of p-values\r\n uniform_p_vals_dist = stats.uniform(loc=0, scale=1)\r\n res_pvals = stats.kstest(ks_pvals, uniform_p_vals_dist.cdf)\r\n assert res_pvals.pvalue > 0.05, (\r\n \"Null hypothesis rejected: generated random numbers are not uniform.\"\r\n \" Details: the (meta) p-value of the test of uniform distribution\"\r\n f\" of p-values is {res_pvals.pvalue} which is not > 0.05\")\r\n\r\n # (2) (safety belt) check that 90% of p-values are above 0.05\r\n min_10pct_pval = np.percentile(ks_pvals, q=10)\r\n # lower 10th quantile pvalue <= 0.05 means that the test rejects the\r\n # null hypothesis that the sample came from the uniform distribution\r\n assert min_10pct_pval > 0.05, (\r\n \"Null hypothesis rejected: generated random numbers are not uniform. \"\r\n f\"Details: lower 10th quantile p-value of {min_10pct_pval} not > 0.05.\"\r\n )\r\n\r\n\r\[email protected]('range_',\r\n [-1, _MAX_UNSIGNED_INT + 1])\r\ndef test_newrand_bounded_rand_int_limits(range_):\r\n \"\"\"Test that `bounded_rand_int_wrap` is defined for unsigned 32bits ints\"\"\"\r\n with pytest.raises(OverflowError):\r\n bounded_rand_int_wrap(range_)\r\n", "import numpy as np\r\nfrom scipy import sparse as sp\r\n\r\nfrom . import is_scalar_nan\r\nfrom .fixes import _object_dtype_isnan\r\n\r\n\r\ndef _get_dense_mask(X, value_to_mask):\r\n if is_scalar_nan(value_to_mask):\r\n if X.dtype.kind == \"f\":\r\n Xt = np.isnan(X)\r\n elif X.dtype.kind in (\"i\", \"u\"):\r\n # can't have NaNs in integer array.\r\n Xt = np.zeros(X.shape, dtype=bool)\r\n else:\r\n # np.isnan does not work on object dtypes.\r\n Xt = _object_dtype_isnan(X)\r\n else:\r\n Xt = X == value_to_mask\r\n\r\n return Xt\r\n\r\n\r\ndef _get_mask(X, value_to_mask):\r\n \"\"\"Compute the boolean mask X == value_to_mask.\r\n\r\n Parameters\r\n ----------\r\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\r\n Input data, where ``n_samples`` is the number of samples and\r\n ``n_features`` is the number of features.\r\n\r\n value_to_mask : {int, float}\r\n The value which is to be masked in X.\r\n\r\n Returns\r\n -------\r\n X_mask : {ndarray, sparse matrix} of shape (n_samples, n_features)\r\n Missing mask.\r\n \"\"\"\r\n if not sp.issparse(X):\r\n # For all cases apart of a sparse input where we need to reconstruct\r\n # a sparse output\r\n return _get_dense_mask(X, value_to_mask)\r\n\r\n Xt = _get_dense_mask(X.data, value_to_mask)\r\n\r\n sparse_constructor = (sp.csr_matrix if X.format == 'csr'\r\n else sp.csc_matrix)\r\n Xt_sparse = sparse_constructor(\r\n (Xt, X.indices.copy(), X.indptr.copy()), shape=X.shape, dtype=bool\r\n )\r\n\r\n return Xt_sparse\r\n" ]
[ [ "pandas._testing.assert_almost_equal", "numpy.dot", "pandas.util._test_decorators.skip_if_np_lt", "pandas.Series", "numpy.allclose", "numpy.arange", "numpy.repeat", "numpy.ptp", "pandas.Timedelta", "numpy.random.randn", "pandas.date_range", "pandas._testing.assert_series_equal", "numpy.sum", "numpy.random.randint" ], [ "numpy.dot", "numpy.product", "numpy.minimum", "numpy.sqrt", "numpy.asarray", "numpy.max", "numpy.zeros_like", "numpy.divide", "scipy.sparse.coo_matrix", "scipy.sparse.issparse", "numpy.arange", "numpy.linalg.multi_dot", "numpy.finfo", "numpy.full", "numpy.zeros", "numpy.log", "numpy.multiply", "numpy.atleast_2d", "numpy.sum", "numpy.maximum", "numpy.abs", "numpy.shape", "numpy.empty" ], [ "sklearn.utils._testing.ignore_warnings", "sklearn.tree.DecisionTreeClassifier", "sklearn.utils._testing.raises", "sklearn.utils._testing.create_memmap_backed_data", "sklearn.discriminant_analysis.LinearDiscriminantAnalysis", "sklearn.utils._testing.assert_raises_regex", "numpy.arange", "sklearn.utils._testing.assert_allclose_dense_sparse", "sklearn.utils._testing.assert_raise_message", "sklearn.utils.metaestimators.if_delegate_has_method", "sklearn.utils._testing.check_docstring_parameters", "scipy.sparse.csc_matrix", "sklearn.utils._testing.TempMemmap", "sklearn.utils._testing._convert_container", "sklearn.utils._testing.assert_raises", "sklearn.utils._testing.set_random_state", "sklearn.utils._testing.assert_warns", "numpy.ones", "numpy.testing.assert_array_equal", "sklearn.utils.deprecation.deprecated", "sklearn.utils._testing.assert_no_warnings" ], [ "pandas._testing.assert_numpy_array_equal", "pandas.Series", "numpy.asarray", "pandas.Index", "numpy.array", "pandas._testing.assert_index_equal" ], [ "numpy.ix_" ], [ "pandas.io.formats.format.EngFormatter", "numpy.sqrt", "pandas.DataFrame", "pandas.io.formats.format.set_eng_float_format", "pandas._testing.reset_display_options" ], [ "pandas.Series", "numpy.sqrt", "pandas.offsets.Day", "pandas._testing.getMixedTypeDict", "pandas.DataFrame", "numpy.random.randn", "pandas.isna", "pandas._testing.assert_frame_equal", "numpy.exp", "numpy.where", "pandas._testing.makeTimeSeries", "pandas.Index", "pandas.DatetimeIndex", "pandas._testing.assert_series_equal", "numpy.repeat", "numpy.isclose", "pandas.concat", "pandas.Categorical", "pandas.Timedelta", "pandas.date_range", "numpy.errstate", "pandas._testing.is_number", "pandas.CategoricalIndex", "numpy.abs", "numpy.ones", "pandas._testing.assert_categorical_equal", "pandas.Period", "pandas.to_timedelta", "pandas.Timestamp" ], [ "pandas.timedelta_range", "pandas._testing.equalContents", "pandas.TimedeltaIndex", "numpy.arange", "pandas.offsets.Minute", "pandas._testing.assert_copy", "pandas._testing.assert_attr_equal", "pandas.offsets.Hour", "pandas._testing.assert_index_equal" ], [ "pandas._testing.assert_numpy_array_equal", "pandas._libs.tslibs.parsing.try_parse_dates", "pandas._libs.tslibs.parsing._guess_datetime_format", "pandas._libs.tslibs.parsing.parse_time_string", "pandas._libs.tslibs.parsing._does_string_look_like_datetime", "numpy.array" ], [ "pandas.Timedelta", "pandas.Timestamp", "pandas.Interval" ], [ "numpy.split", "scipy.sparse.issparse", "numpy.linspace", "numpy.min", "numpy.asarray", "numpy.unique", "numpy.issubdtype", "numpy.clip", "numpy.finfo", "numpy.concatenate", "numpy.max", "numpy.all", "numpy.mean", "numpy.zeros_like", "scipy.sparse.vstack", "numpy.array", "numpy.zeros", "numpy.sum" ], [ "pandas.Series", "pandas.Categorical", "pandas.Timedelta", "numpy.datetime64", 
"pandas.DataFrame", "pandas.Period", "pandas._testing.assert_series_equal", "pandas.Timestamp", "numpy.random.randint" ], [ "pandas.core.dtypes.common.is_bool_dtype", "pandas.core.dtypes.common.pandas_dtype", "pandas.core.dtypes.common.is_extension_array_dtype", "pandas.core.dtypes.common.is_scalar", "numpy.dtype", "pandas.core.dtypes.missing.na_value_for_dtype", "pandas.core.dtypes.common.is_object_dtype", "pandas.core.dtypes.missing.isna", "pandas.core.dtypes.common.is_string_dtype", "numpy.find_common_type", "numpy.array" ], [ "pandas._testing.assert_numpy_array_equal", "pandas.Series", "pandas.Timestamp", "pandas.api.types.is_scalar", "pandas.date_range", "numpy.argsort", "numpy.array" ], [ "pandas._testing.assert_produces_warning", "pandas._testing.assert_numpy_array_equal", "pandas.RangeIndex", "numpy.arange", "pandas.Index", "pandas.Float64Index", "numpy.array", "numpy.zeros", "pandas._testing.assert_index_equal" ], [ "sklearn.metrics.roc_auc_score", "sklearn.utils._testing.ignore_warnings", "sklearn.ensemble._iforest._average_path_length", "sklearn.datasets.load_diabetes", "numpy.max", "numpy.arange", "sklearn.utils._testing.assert_warns_message", "sklearn.utils._testing.assert_array_equal", "numpy.log", "numpy.min", "sklearn.datasets.load_iris", "sklearn.model_selection.train_test_split", "numpy.int64", "sklearn.model_selection.ParameterGrid", "numpy.array", "numpy.random.RandomState", "sklearn.ensemble.IsolationForest", "numpy.log2", "numpy.sort", "numpy.ones", "sklearn.utils._testing.assert_array_almost_equal", "sklearn.utils.check_random_state" ], [ "pandas.CategoricalIndex", "pandas.concat", "pandas._testing.assert_produces_warning", "numpy.random.random", "pandas.Series", "pandas.interval_range", "pandas.DataFrame", "numpy.round", "numpy.random.rand", "pandas.date_range", "pandas._testing.assert_frame_equal", "pandas._testing.assert_index_equal" ], [ "numpy.zeros", "numpy.flatnonzero" ], [ "pandas.core.missing.isna", "pandas.core.dtypes.inference.is_array_like", "pandas._libs.lib.is_string_array", "pandas.core.dtypes.common.pandas_dtype", "numpy.asarray", "numpy.empty_like", "pandas._libs.lib.is_scalar", "pandas._libs.lib.ensure_string_array", "pandas.arrays.BooleanArray", "pandas._libs.lib.memory_usage_of_objects", "pandas.core.arrays.IntegerArray", "pandas.value_counts", "pandas.core.indexers.check_array_indexer", "numpy.array", "pandas.compat.set_function_name", "pandas.core.construction.extract_array" ], [ "pandas.MultiIndex.from_product", "pandas.MultiIndex.from_arrays" ], [ "numpy.dot", "numpy.sqrt", "numpy.asarray", "numpy.vstack", "numpy.concatenate", "numpy.fill_diagonal", "numpy.var", "numpy.exp", "sklearn.neighbors.DistanceMetric.get_metric", "scipy.sparse.issparse", "numpy.clip", "numpy.arange", "numpy.diag_indices_from", "numpy.zeros", "scipy.spatial.distance.cdist", "scipy.sparse.csr_matrix", "numpy.atleast_2d", "numpy.cov", "numpy.tanh", "numpy.maximum", "numpy.abs", "scipy.spatial.distance.pdist", "numpy.prod", "numpy.empty" ], [ "numpy.array", "pandas.concat", "pandas.to_datetime", "pandas.Series", "pandas.PeriodIndex", "pandas.Timestamp", "pandas.period_range", "numpy.arange", "pandas.DataFrame", "pandas.DatetimeIndex", "pandas.Timedelta", "numpy.datetime64", "pandas.Period", "pandas.date_range", "pandas._testing.assert_series_equal", "pandas._testing.assert_frame_equal" ], [ "pandas.to_datetime", "pandas._testing.assert_equal", "pandas.DataFrame", "numpy.datetime64", "numpy.array" ], [ "pandas.Series", "pandas.PeriodIndex", 
"pandas.core.dtypes.dtypes.DatetimeTZDtype", "pandas.DataFrame", "pandas.core.arrays.IntervalArray.from_breaks", "pandas.core.arrays.SparseArray", "pandas.core.dtypes.common.is_datetime64_dtype", "pandas._testing.assert_frame_equal", "pandas._testing.assert_numpy_array_equal", "pandas.Index", "pandas.DatetimeIndex", "pandas._testing.assert_extension_array_equal", "pandas.core.arrays.integer_array", "pandas.core.dtypes.common.is_timedelta64_dtype", "pandas.Categorical", "pandas.array", "pandas.Timedelta", "pandas.core.arrays.PandasArray", "pandas.core.arrays.period_array", "pandas.MultiIndex.from_product", "pandas.Interval", "pandas.core.dtypes.dtypes.PeriodDtype", "numpy.array", "pandas._testing.assert_equal", "pandas.TimedeltaIndex", "numpy.shares_memory", "numpy.datetime64", "pandas.IntervalIndex.from_breaks", "pandas.Period", "pandas.Timestamp" ], [ "sklearn.config_context", "sklearn.get_config", "sklearn.utils._testing.assert_raises", "sklearn.set_config" ], [ "pandas.concat", "pandas.io.excel.ExcelWriter", "numpy.linspace", "pandas.io.excel._OpenpyxlWriter", "pandas.io.formats.excel.ExcelCell", "pandas.DataFrame", "pandas._testing.ensure_clean", "pandas.io.excel._OpenpyxlWriter._convert_to_style_kwargs" ], [ "scipy.sparse.coo_matrix", "pandas.core.arrays.sparse.scipy_sparse._coo_to_sparse_series", "pandas.Series", "pandas.core.arrays.sparse.scipy_sparse._sparse_series_to_coo", "pandas._libs.sparse.IntIndex", "pandas.compat._optional.import_optional_dependency", "pandas.core.accessor.delegate_names", "pandas.DataFrame", "pandas.core.dtypes.cast.find_common_type", "numpy.concatenate", "pandas.core.arrays.sparse.dtype.SparseDtype", "pandas.core.indexes.api.ensure_index", "pandas.DataFrame._from_arrays", "pandas.core.arrays.sparse.array.SparseArray._simple_new", "pandas.core.indexes.base.default_index" ], [ "pandas._libs.lib.item_from_zerodim" ], [ "pandas._testing.assert_almost_equal", "pandas.Series", "numpy.linspace", "pandas.tests.plotting.common.TestPlotBase.setup_method", "matplotlib.rcdefaults", "numpy.random.randn", "pandas._testing.RNGContext", "pandas._testing.makeDateIndex", "pandas._testing.makeStringSeries", "numpy.random.randint", "pandas.plotting._matplotlib.style._get_standard_colors", "pandas._testing.assert_numpy_array_equal", "numpy.arange", "pandas._testing.makeTimeSeries", "matplotlib.pyplot.gcf", "matplotlib.pyplot.subplot", "pandas._testing.assert_produces_warning", "numpy.int_", "numpy.delete", "pandas._testing.makePeriodSeries", "pandas._testing.close", "numpy.random.rand", "pandas.date_range", "numpy.array", "pandas.tests.plotting.common._check_plot_works", "pandas.tseries.offsets.CustomBusinessDay", "numpy.random.uniform" ], [ "numpy.dot", "numpy.sqrt", "numpy.linspace", "sklearn.utils._testing.ignore_warnings", "sklearn.decomposition.sparse_encode", "numpy.all", "sklearn.base.clone", "numpy.exp", "sklearn.decomposition.dict_learning", "numpy.flatnonzero", "sklearn.utils._testing.assert_array_equal", "numpy.logical_not", "sklearn.utils._testing.TempMemmap", "sklearn.decomposition.MiniBatchDictionaryLearning", "sklearn.decomposition.SparseCoder", "numpy.random.rand", "numpy.testing.assert_allclose", "sklearn.decomposition.dict_learning_online", "numpy.array", "numpy.random.RandomState", "sklearn.decomposition.DictionaryLearning", "numpy.sum", "sklearn.utils.estimator_checks.check_transformer_general", "sklearn.utils.estimator_checks.check_transformer_data_not_an_array", "sklearn.utils.check_array", "sklearn.utils.estimator_checks.check_transformers_unfitted", 
"sklearn.utils._testing.assert_array_almost_equal", "numpy.empty" ], [ "scipy.io.matlab.loadmat", "numpy.float32" ], [ "pandas.Series", "numpy.linspace", "pandas.DataFrame", "pandas.tests.indexing.common._mklbl", "numpy.random.randn", "pandas.core.indexing._non_reducing_slice", "pandas._testing.assert_frame_equal", "numpy.random.randint", "numpy.arange", "pandas.Index", "pandas._testing.makeCustomDataframe", "pandas.core.arrays.integer_array", "pandas.core.dtypes.common.is_float_dtype", "pandas._testing.assert_series_equal", "numpy.zeros", "pandas._testing.assert_index_equal", "pandas.core.dtypes.common.is_integer_dtype", "pandas._testing.assert_produces_warning", "pandas.Categorical", "pandas.array", "pandas.Float64Index", "pandas.core.indexing._maybe_numeric_slice", "numpy.random.rand", "numpy.array", "numpy.random.random", "numpy.ones", "pandas.Timestamp" ], [ "pandas.core.arrays.timedeltas.sequence_to_td64ns", "pandas.core.dtypes.common.is_list_like", "pandas.TimedeltaIndex", "pandas._libs.tslibs.timedeltas.parse_timedelta_unit", "pandas._libs.tslibs.timedeltas.Timedelta" ], [ "scipy.stats.kstest", "sklearn.linear_model.LogisticRegression", "numpy.asarray", "scipy.sparse.csr_matrix", "numpy.percentile", "scipy.stats.uniform", "sklearn.svm._newrand.bounded_rand_int_wrap", "sklearn.svm.LinearSVC", "sklearn.svm._bounds.l1_min_c", "sklearn.utils._testing.assert_raise_message", "sklearn.svm._newrand.set_seed_wrap" ], [ "numpy.isnan", "numpy.zeros", "scipy.sparse.issparse" ] ]
dkdanielkost/Theano-Style-Transfer
[ "70438d3de51d059ea2129119a8cfcc86d2b403a9" ]
[ "src/vgg19/theano_model/vgg19_model.py" ]
[ "import numpy\nimport os\nimport numpy as np\nimport logging\nfrom theano.tensor.signal import pool\nfrom theano.tensor.nnet.abstract_conv import bilinear_upsampling\nimport joblib\nfrom theano.tensor.nnet import conv2d\nfrom theano.tensor.nnet import relu,softmax\nimport theano\nimport theano.tensor as T\n\nfrom theano.tensor.signal.pool import pool_2d\ncwd = os.path.dirname(os.path.realpath(__file__))\n\n'''\n['convolution2d_4_weights',\n 'dense_1_weights',\n 'dense_2_weights',\n 'convolution2d_8_weights',\n 'convolution2d_5_weights',\n 'convolution2d_13_weights',\n 'convolution2d_7_weights',\n 'convolution2d_15_weights',\n 'convolution2d_59_weights',\n 'convolution2d_14_weights',\n 'convolution2d_16_weights',\n 'dense_1_weights',\n 'convolution2d_6_weights',\n 'convolution2d_3_weights',\n 'convolution2d_10_weights',\n 'convolution2d_1_weights',\n 'convolution2d_10_weights',\n 'convolution2d_60_weights',\n 'convolution2d_2_weights']\n'''\n\n\n# Read in layer weights and save to a dictionary\n# Read in layer weights and save to a dictionary\n\ncwd = os.getcwd()\ndirect = os.path.join(cwd,'theano_model','weights')\n# weights_layer_paths = joblib.load(os.path.join(cwd,'weight_names','layer_names_weights'))\nlayer_weights = {}\nfor layer_weight_path in range(16):\n# head,layer_name = os.path.split(layer_weight_path)\n layer_weights[str(layer_weight_path) + '_w'] = joblib.load(os.path.join(direct,str(layer_weight_path) + '_w'))\n\n\n# Read in bias weights and save to a dictionary\nfor bias_layer_path in range(16):\n layer_weights[str(bias_layer_path) + '_b'] = joblib.load(os.path.join(direct,str(bias_layer_path) + '_b'))\n\n\ndef drop(input, p=0.5):\n \"\"\"\n :type input: numpy.array\n :param input: layer or weight matrix on which dropout is applied\n\n :type p: float or double between 0. 
and 1.\n :param p: p probability of NOT dropping out a unit, therefore (1.-p) is the drop rate.\n\n \"\"\"\n rng = numpy.random.RandomState(1234)\n srng = T.shared_randomstreams.RandomStreams(rng.randint(999999))\n mask = srng.binomial(n=1, p=p, size=input.shape, dtype=theano.config.floatX)\n return input * mask\n\n\nclass DropoutHiddenLayer(object):\n def __init__(self, is_train, input, W=None, b=None,\n activation=relu, p=0.5):\n self.input = input\n\n self.W = W\n self.b = b\n\n lin_output = T.dot(input, self.W) + self.b\n\n self.output = activation(lin_output)\n # train_output = drop(output, p)\n # self.output = T.switch(T.neq(is_train, 0), train_output, p * output)\n self.params = [self.W, self.b]\n\n\nclass VGG19_conv2d_layer(object):\n \"\"\"Pool Layer of a convolutional network \"\"\"\n\n def __init__(self, input, layer_name,image_shape,\n activation=relu, border_mode=(2,2)):\n self.activation = activation\n self.input = input\n\n self.W = theano.shared(value=np.array(layer_weights[layer_name + '_w'],\n dtype=theano.config.floatX),\n borrow=True)\n self.b = theano.shared(value=np.array(layer_weights[layer_name + '_b'],\n dtype=theano.config.floatX\n )\n , borrow=True)\n\n self.conv_out = conv2d(\n input=input,\n input_shape=image_shape,\n filters=self.W,\n filter_shape=layer_weights[layer_name + '_w'].shape,\n border_mode=border_mode\n )\n\n self.output = activation(self.conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))\n self.params = [self.W, self.b]\n self.input = input\n\nclass VGG19(object):\n\n def __init__(self,input_image_shape,pool_method = 'average_exc_pad'):\n IMAGE_H = input_image_shape[2]\n IMAGE_W = input_image_shape[3]\n self.input = theano.tensor.tensor4('input')\n self.conv1_1 = VGG19_conv2d_layer(input=self.input,\n layer_name='0',\n image_shape=input_image_shape,\n border_mode=(1,1))\n\n self.conv1_2 = VGG19_conv2d_layer(input=self.conv1_1.output,\n layer_name='1',\n image_shape=(None, 64, IMAGE_H, IMAGE_W),\n border_mode=(1,1))\n self.pool1 = pool_2d(\n input = self.conv1_2.output,\n ds = (2,2),\n mode = pool_method,\n ignore_border=True,\n st = (2,2))\n\n self.conv2_1 = VGG19_conv2d_layer(input=self.pool1,\n layer_name='2',\n image_shape=(None, 64, IMAGE_H/2, IMAGE_W/2),\n border_mode=(1,1))\n\n self.conv2_2 = VGG19_conv2d_layer(input=self.conv2_1.output,\n layer_name='3',\n image_shape=(None, 128, IMAGE_H/2, IMAGE_W/2),\n border_mode=(1,1))\n\n self.pool2 = pool_2d(\n input=self.conv2_2.output,\n ds=(2, 2),\n mode=pool_method,\n ignore_border=True,\n st=(2, 2))\n\n self.conv3_1 = VGG19_conv2d_layer(input=self.pool2,\n layer_name='4',\n image_shape=(None, 128, IMAGE_H/4, IMAGE_W/4),\n border_mode=(1, 1))\n self.conv3_2= VGG19_conv2d_layer(input=self.conv3_1.output,\n layer_name='5',\n image_shape=(None, 128, IMAGE_H/4, IMAGE_W/4),\n border_mode=(1, 1))\n self.conv3_3 = VGG19_conv2d_layer(input=self.conv3_2.output,\n layer_name='6',\n image_shape=(None, 128, IMAGE_H/4, IMAGE_W/4),\n border_mode=(1, 1))\n\n self.conv3_4 = VGG19_conv2d_layer(input=self.conv3_3.output,\n layer_name='7',\n image_shape=(None, 128, IMAGE_H/4, IMAGE_W/4),\n border_mode=(1, 1))\n\n self.pool3 = pool_2d(\n input=self.conv3_4.output,\n ds=(2, 2),\n mode=pool_method,\n ignore_border=True,\n st=(2, 2))\n\n self.conv4_1 = VGG19_conv2d_layer(input=self.pool3,\n layer_name='8',\n image_shape=(None, 512, IMAGE_H/8, IMAGE_W/8),\n border_mode=(1, 1))\n self.conv4_2 = VGG19_conv2d_layer(input=self.conv4_1.output,\n layer_name='9',\n image_shape=(None, 512, IMAGE_H/8, IMAGE_W/8),\n border_mode=(1, 
1))\n self.conv4_3 = VGG19_conv2d_layer(input=self.conv4_2.output,\n layer_name='10',\n image_shape=(None, 512, IMAGE_H/8, IMAGE_W/8),\n border_mode=(1, 1))\n\n self.conv4_4 = VGG19_conv2d_layer(input=self.conv4_3.output,\n layer_name='11',\n image_shape=(None, 512, IMAGE_H/8, IMAGE_W/8),\n border_mode=(1, 1))\n\n self.pool4 = pool_2d(\n input=self.conv4_4.output,\n ds=(2, 2),\n mode=pool_method,\n ignore_border=True,\n st=(2, 2))\n\n self.conv5_1 = VGG19_conv2d_layer(input=self.pool4,\n layer_name='12',\n image_shape=(None, 512, IMAGE_H/16, IMAGE_W/16),\n border_mode=(1, 1))\n self.conv5_2 = VGG19_conv2d_layer(input=self.conv5_1.output,\n layer_name='13',\n image_shape=(None, 512, IMAGE_H/16, IMAGE_W/16),\n border_mode=(1, 1))\n self.conv5_3 = VGG19_conv2d_layer(input=self.conv5_2.output,\n layer_name='14',\n image_shape=(None, 512, IMAGE_H/16, IMAGE_W/16),\n border_mode=(1, 1))\n\n self.conv5_4 = VGG19_conv2d_layer(input=self.conv5_3.output,\n layer_name='15',\n image_shape=(None, 512, IMAGE_H/16, IMAGE_W/16),\n border_mode=(1, 1))\n\n self.pool5 = pool_2d(\n input=self.conv5_4.output,\n ds=(2, 2),\n mode=pool_method,\n ignore_border=True,\n st=(2, 2))\n\n # self.dense_1_input= self.pool5.flatten(2)\n #\n # self.dense_1 = DropoutHiddenLayer(is_train = numpy.cast['int32'](0),\n # input = self.dense_1_input,\n # W=layer_weights['dense_1_weights'],\n # b=layer_weights['dense_1_bias'],)\n #\n # self.dense_2 = DropoutHiddenLayer(is_train=numpy.cast['int32'](0),\n # input=self.dense_1.output,\n # W=layer_weights['dense_2_weights'],\n # b=layer_weights['dense_2_bias'], )\n #\n # self.dense_3 = DropoutHiddenLayer(is_train=numpy.cast['int32'](0),\n # input=self.dense_2.output,\n # W=layer_weights['dense_3_weights'],\n # b=layer_weights['dense_3_bias'],\n # activation=softmax )\n\n# model.add(Flatten())\n# model.add(Dense(4096, activation='relu'))\n# model.add(Dropout(0.5))\n# model.add(Dense(4096, activation='relu'))\n# model.add(Dropout(0.5))\n# model.add(Dense(1000, activation='softmax'))\n\n\nclass VGG16(object):\n\n def __init__(self,input_image_shape,pool_method = 'max'):\n self.input = theano.tensor.tensor4('input')\n self.conv1_1 = VGG19_conv2d_layer(input=self.input,\n layer_name='convolution2d_1',\n image_shape=input_image_shape,\n border_mode=(1,1))\n\n self.conv1_2 = VGG19_conv2d_layer(input=self.conv1_1.output,\n layer_name='convolution2d_2',\n image_shape=(None, 64, 224, 224),\n border_mode=(1,1))\n self.pool1 = pool_2d(\n input = self.conv1_2.output,\n ds = (2,2),\n mode = pool_method,\n ignore_border=True,\n st = (2,2))\n\n self.conv2_1 = VGG19_conv2d_layer(input=self.pool1,\n layer_name='convolution2d_3',\n image_shape=(None, 64, 112, 112),\n border_mode=(1,1))\n\n self.conv2_2 = VGG19_conv2d_layer(input=self.conv2_1.output,\n layer_name='convolution2d_4',\n image_shape=(None, 128, 112, 112),\n border_mode=(1,1))\n\n self.pool2 = pool_2d(\n input=self.conv2_2.output,\n ds=(2, 2),\n mode=pool_method,\n ignore_border=True,\n st=(2, 2))\n\n self.conv3_1 = VGG19_conv2d_layer(input=self.pool2,\n layer_name='convolution2d_5',\n image_shape=(None, 128, 56, 56),\n border_mode=(1, 1))\n self.conv3_2= VGG19_conv2d_layer(input=self.conv3_1.output,\n layer_name='convolution2d_6',\n image_shape=(None, 128, 56, 56),\n border_mode=(1, 1))\n self.conv3_3 = VGG19_conv2d_layer(input=self.conv3_2.output,\n layer_name='convolution2d_7',\n image_shape=(None, 128, 56, 56),\n border_mode=(1, 1))\n\n self.conv3_4 = VGG19_conv2d_layer(input=self.conv3_3.output,\n layer_name='convolution2d_8',\n 
image_shape=(None, 128, 56, 56),\n border_mode=(1, 1))\n\n self.pool3 = pool_2d(\n input=self.conv3_4.output,\n ds=(2, 2),\n mode=pool_method,\n ignore_border=True,\n st=(2, 2))\n\n self.conv4_1 = VGG19_conv2d_layer(input=self.pool3,\n layer_name='convolution2d_9',\n image_shape=(None, 512, 28, 28),\n border_mode=(1, 1))\n self.conv4_2 = VGG19_conv2d_layer(input=self.conv4_1.output,\n layer_name='convolution2d_10',\n image_shape=(None, 512, 28, 28),\n border_mode=(1, 1))\n self.conv4_3 = VGG19_conv2d_layer(input=self.conv4_2.output,\n layer_name='convolution2d_11',\n image_shape=(None, 512, 28, 28),\n border_mode=(1, 1))\n\n self.conv4_4 = VGG19_conv2d_layer(input=self.conv4_3.output,\n layer_name='convolution2d_12',\n image_shape=(None, 512, 28, 28),\n border_mode=(1, 1))\n\n self.pool4 = pool_2d(\n input=self.conv4_4.output,\n ds=(2, 2),\n mode=pool_method,\n ignore_border=True,\n st=(2, 2))\n\n self.conv5_1 = VGG19_conv2d_layer(input=self.pool4,\n layer_name='convolution2d_13',\n image_shape=(None, 512, 14, 14),\n border_mode=(1, 1))\n self.conv5_2 = VGG19_conv2d_layer(input=self.conv5_1.output,\n layer_name='convolution2d_14',\n image_shape=(None, 512, 14, 14),\n border_mode=(1, 1))\n self.conv5_3 = VGG19_conv2d_layer(input=self.conv5_2.output,\n layer_name='convolution2d_15',\n image_shape=(None, 512, 14, 14),\n border_mode=(1, 1))\n\n self.conv5_4 = VGG19_conv2d_layer(input=self.conv5_3.output,\n layer_name='convolution2d_16',\n image_shape=(None, 512, 14, 14),\n border_mode=(1, 1))\n" ]
[ [ "numpy.array", "numpy.random.RandomState" ] ]
jayantabh/haxball-imitation-learning
[ "fb02203dee6859443ac2bd4334144aacc9f16f89" ]
[ "bots/BasicBot.py" ]
[ "import replay\nimport torch\nimport os\n\nfrom bots import interactive\nfrom models.BasicModel import BasicModel\n\nclass BasicBot(interactive.Interactive):\n\n def __init__(self, channel_id, name):\n super().__init__(channel_id)\n\n # Load pre-trained model and set-up the bot\n self.model = BasicModel()\n path = os.path.join( os.getcwd(), 'saved_models', name )\n self.model.load_state_dict(torch.load(path, map_location=torch.device('cpu')))\n self.model.eval()\n\n def onUpdate(self):\n if self.player and len(self.game.players) == 2:\n # convert game state to tensor\n # tensor must be same format as how network was trained\n\n print(self.player.team)\n\n # forming input only works for two players currently\n state = [self.player.disc.x, self.player.disc.y, self.player.disc.vx, self.player.disc.vy]\n for player in self.game.players:\n if player.id != self.player.id:\n state.extend([player.disc.x, player.disc.y, player.disc.vx, player.disc.vy])\n\n state.extend([self.game.ball.x, self.game.ball.y, self.game.ball.vx, self.game.ball.vy])\n\n state_tensor = torch.tensor(state)\n\n # get output for model\n actions = self.model(state_tensor)\n actions = (actions > 0.5).tolist()\n \n # send input actions\n inputs = [replay.Input(1 << idx) for idx,x in enumerate(actions) if x != 0]\n self.setInput(*inputs)" ]
[ [ "torch.device", "torch.tensor" ] ]
hjmjohnson/MONAI
[ "dc7cd0ec25d4b27f321a31f13e707769922c66b3", "7cd65614da81eeff261a14abdf18bd07a20abfcc" ]
[ "tests/test_squeezedimd.py", "monai/transforms/utils.py" ]
[ "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport numpy as np\nfrom parameterized import parameterized\nfrom monai.transforms import SqueezeDimd\n\n\nTEST_CASE_1 = [\n {\"keys\": [\"img\", \"seg\"], \"dim\": None},\n {\"img\": np.random.rand(1, 2, 1, 3), \"seg\": np.random.randint(0, 2, size=[1, 2, 1, 3])},\n (2, 3),\n]\n\nTEST_CASE_2 = [\n {\"keys\": [\"img\", \"seg\"], \"dim\": 2},\n {\"img\": np.random.rand(1, 2, 1, 8, 16), \"seg\": np.random.randint(0, 2, size=[1, 2, 1, 8, 16])},\n (1, 2, 8, 16),\n]\n\nTEST_CASE_3 = [\n {\"keys\": [\"img\", \"seg\"], \"dim\": -1},\n {\"img\": np.random.rand(1, 1, 16, 8, 1), \"seg\": np.random.randint(0, 2, size=[1, 1, 16, 8, 1])},\n (1, 1, 16, 8),\n]\n\nTEST_CASE_4 = [\n {\"keys\": [\"img\", \"seg\"]},\n {\"img\": np.random.rand(1, 2, 1, 3), \"seg\": np.random.randint(0, 2, size=[1, 2, 1, 3])},\n (2, 3),\n]\n\nTEST_CASE_5 = [\n {\"keys\": [\"img\", \"seg\"], \"dim\": -2},\n {\"img\": np.random.rand(1, 1, 16, 8, 1), \"seg\": np.random.randint(0, 2, size=[1, 1, 16, 8, 1])},\n]\n\nTEST_CASE_6 = [\n {\"keys\": [\"img\", \"seg\"], \"dim\": 0.5},\n {\"img\": np.random.rand(1, 1, 16, 8, 1), \"seg\": np.random.randint(0, 2, size=[1, 1, 16, 8, 1])},\n]\n\n\nclass TestSqueezeDim(unittest.TestCase):\n @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4])\n def test_shape(self, input_param, test_data, expected_shape):\n result = SqueezeDimd(**input_param)(test_data)\n self.assertTupleEqual(result[\"img\"].shape, expected_shape)\n self.assertTupleEqual(result[\"seg\"].shape, expected_shape)\n\n @parameterized.expand([TEST_CASE_5, TEST_CASE_6])\n def test_invalid_inputs(self, input_param, test_data):\n with self.assertRaises(AssertionError):\n result = SqueezeDimd(**input_param)(test_data)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport random\nimport warnings\nfrom typing import Optional, Union, Callable\n\nimport torch\nimport numpy as np\nfrom skimage import measure\n\nfrom monai.utils.misc import ensure_tuple\n\n\ndef rand_choice(prob=0.5):\n \"\"\"Returns True if a randomly chosen number is less than or equal to `prob`, by default this is a 50/50 chance.\"\"\"\n return random.random() <= prob\n\n\ndef img_bounds(img):\n \"\"\"Returns the minimum and maximum indices of non-zero lines in axis 0 of `img`, followed by that for axis 1.\"\"\"\n ax0 = np.any(img, axis=0)\n ax1 = np.any(img, axis=1)\n 
return np.concatenate((np.where(ax0)[0][[0, -1]], np.where(ax1)[0][[0, -1]]))\n\n\ndef in_bounds(x, y, margin, maxx, maxy):\n \"\"\"Returns True if (x,y) is within the rectangle (margin, margin, maxx-margin, maxy-margin).\"\"\"\n return margin <= x < (maxx - margin) and margin <= y < (maxy - margin)\n\n\ndef is_empty(img):\n \"\"\"Returns True if `img` is empty, that is its maximum value is not greater than its minimum.\"\"\"\n return not (img.max() > img.min()) # use > instead of <= so that an image full of NaNs will result in True\n\n\ndef zero_margins(img, margin):\n \"\"\"Returns True if the values within `margin` indices of the edges of `img` in dimensions 1 and 2 are 0.\"\"\"\n if np.any(img[:, :, :margin]) or np.any(img[:, :, -margin:]):\n return False\n\n if np.any(img[:, :margin, :]) or np.any(img[:, -margin:, :]):\n return False\n\n return True\n\n\ndef rescale_array(arr, minv=0.0, maxv=1.0, dtype: Optional[np.dtype] = np.float32):\n \"\"\"Rescale the values of numpy array `arr` to be from `minv` to `maxv`.\"\"\"\n if dtype is not None:\n arr = arr.astype(dtype)\n\n mina = np.min(arr)\n maxa = np.max(arr)\n\n if mina == maxa:\n return arr * minv\n\n norm = (arr - mina) / (maxa - mina) # normalize the array first\n return (norm * (maxv - minv)) + minv # rescale by minv and maxv, which is the normalized array by default\n\n\ndef rescale_instance_array(arr: np.ndarray, minv: float = 0.0, maxv: float = 1.0, dtype: np.dtype = np.float32):\n \"\"\"Rescale each array slice along the first dimension of `arr` independently.\"\"\"\n out: np.ndarray = np.zeros(arr.shape, dtype)\n for i in range(arr.shape[0]):\n out[i] = rescale_array(arr[i], minv, maxv, dtype)\n\n return out\n\n\ndef rescale_array_int_max(arr: np.ndarray, dtype: np.dtype = np.uint16):\n \"\"\"Rescale the array `arr` to be between the minimum and maximum values of the type `dtype`.\"\"\"\n info: np.iinfo = np.iinfo(dtype)\n return rescale_array(arr, info.min, info.max).astype(dtype)\n\n\ndef copypaste_arrays(src, dest, srccenter, destcenter, dims):\n \"\"\"\n Calculate the slices to copy a sliced area of array `src` into array `dest`. The area has dimensions `dims` (use 0\n or None to copy everything in that dimension), the source area is centered at `srccenter` index in `src` and copied\n into area centered at `destcenter` in `dest`. The dimensions of the copied area will be clipped to fit within the\n source and destination arrays so a smaller area may be copied than expected. Return value is the tuples of slice\n objects indexing the copied area in `src`, and those indexing the copy area in `dest`.\n\n Example\n\n .. 
code-block:: python\n\n src = np.random.randint(0,10,(6,6))\n dest = np.zeros_like(src)\n srcslices, destslices = copypaste_arrays(src, dest, (3, 2),(2, 1),(3, 4))\n dest[destslices] = src[srcslices]\n print(src)\n print(dest)\n\n >>> [[9 5 6 6 9 6]\n [4 3 5 6 1 2]\n [0 7 3 2 4 1]\n [3 0 0 1 5 1]\n [9 4 7 1 8 2]\n [6 6 5 8 6 7]]\n [[0 0 0 0 0 0]\n [7 3 2 4 0 0]\n [0 0 1 5 0 0]\n [4 7 1 8 0 0]\n [0 0 0 0 0 0]\n [0 0 0 0 0 0]]\n\n \"\"\"\n srcslices = [slice(None)] * src.ndim\n destslices = [slice(None)] * dest.ndim\n\n for i, ss, ds, sc, dc, dim in zip(range(src.ndim), src.shape, dest.shape, srccenter, destcenter, dims):\n if dim:\n # dimension before midpoint, clip to size fitting in both arrays\n d1 = np.clip(dim // 2, 0, min(sc, dc))\n # dimension after midpoint, clip to size fitting in both arrays\n d2 = np.clip(dim // 2 + 1, 0, min(ss - sc, ds - dc))\n\n srcslices[i] = slice(sc - d1, sc + d2)\n destslices[i] = slice(dc - d1, dc + d2)\n\n return tuple(srcslices), tuple(destslices)\n\n\ndef resize_center(img, *resize_dims, fill_value=0):\n \"\"\"\n Resize `img` by cropping or expanding the image from the center. The `resize_dims` values are the output dimensions\n (or None to use original dimension of `img`). If a dimension is smaller than that of `img` then the result will be\n cropped and if larger padded with zeros, in both cases this is done relative to the center of `img`. The result is\n a new image with the specified dimensions and values from `img` copied into its center.\n \"\"\"\n resize_dims = tuple(resize_dims[i] or img.shape[i] for i in range(len(resize_dims)))\n\n dest = np.full(resize_dims, fill_value, img.dtype)\n half_img_shape = np.asarray(img.shape) // 2\n half_dest_shape = np.asarray(dest.shape) // 2\n\n srcslices, destslices = copypaste_arrays(img, dest, half_img_shape, half_dest_shape, resize_dims)\n dest[destslices] = img[srcslices]\n\n return dest\n\n\ndef one_hot(labels, num_classes):\n \"\"\"\n Converts label image `labels` to a one-hot vector with `num_classes` number of channels as last dimension.\n \"\"\"\n labels = labels % num_classes\n y = np.eye(num_classes)\n onehot = y[labels.flatten()]\n\n return onehot.reshape(tuple(labels.shape) + (num_classes,)).astype(labels.dtype)\n\n\ndef generate_pos_neg_label_crop_centers(\n label: np.ndarray,\n size,\n num_samples: int,\n pos_ratio: float,\n image: Optional[np.ndarray] = None,\n image_threshold: Union[int, float] = 0,\n rand_state: np.random.RandomState = np.random,\n):\n \"\"\"Generate valid sample locations based on image with option for specifying foreground ratio\n Valid: samples sitting entirely within image, expected input shape: [C, H, W, D] or [C, H, W]\n\n Args:\n label (numpy.ndarray): use the label data to get the foreground/background information.\n size (list or tuple): size of the ROIs to be sampled.\n num_samples (int): total sample centers to be generated.\n pos_ratio (float): ratio of total locations generated that have center being foreground.\n image (numpy.ndarray): if image is not None, use ``label = 0 & image > image_threshold``\n to select background. 
so the crop center will only exist on valid image area.\n image_threshold (int or float): if enabled image_key, use ``image > image_threshold`` to\n determine the valid image content area.\n rand_state (random.RandomState): numpy randomState object to align with other modules.\n \"\"\"\n max_size = label.shape[1:]\n assert len(max_size) == len(size), f\"expected size ({len(max_size)}) does not match label dim ({len(size)}).\"\n assert (np.subtract(max_size, size) >= 0).all(), \"proposed roi is larger than image itself.\"\n\n # Select subregion to assure valid roi\n valid_start = np.floor_divide(size, 2)\n valid_end = np.subtract(max_size + np.array(1), size / np.array(2)).astype(np.uint16) # add 1 for random\n # int generation to have full range on upper side, but subtract unfloored size/2 to prevent rounded range\n # from being too high\n for i in range(len(valid_start)): # need this because np.random.randint does not work with same start and end\n if valid_start[i] == valid_end[i]:\n valid_end[i] += 1\n\n # Prepare fg/bg indices\n label_flat = np.any(label, axis=0).ravel() # in case label has multiple dimensions\n fg_indices = np.nonzero(label_flat)[0]\n if image is not None:\n img_flat = np.any(image > image_threshold, axis=0).ravel()\n bg_indices = np.nonzero(np.logical_and(img_flat, ~label_flat))[0]\n else:\n bg_indices = np.nonzero(~label_flat)[0]\n\n if not len(fg_indices) or not len(bg_indices):\n if not len(fg_indices) and not len(bg_indices):\n raise ValueError(\"no sampling location available.\")\n warnings.warn(\n \"N foreground {}, N background {}, unable to generate class balanced samples.\".format(\n len(fg_indices), len(bg_indices)\n )\n )\n pos_ratio = 0 if not len(fg_indices) else 1\n\n centers = []\n for _ in range(num_samples):\n indices_to_use = fg_indices if rand_state.rand() < pos_ratio else bg_indices\n random_int = rand_state.randint(len(indices_to_use))\n center = np.unravel_index(indices_to_use[random_int], label.shape)\n center = center[1:]\n # shift center to range of valid centers\n center_ori = [c for c in center]\n for i, c in enumerate(center):\n center_i = c\n if c < valid_start[i]:\n center_i = valid_start[i]\n if c >= valid_end[i]:\n center_i = valid_end[i] - 1\n center_ori[i] = center_i\n centers.append(center_ori)\n\n return centers\n\n\ndef apply_transform(transform: Callable, data):\n \"\"\"\n Transform `data` with `transform`.\n If `data` is a list or tuple, each item of `data` will be transformed\n and this method returns a list of outcomes.\n otherwise transform will be applied once with `data` as the argument.\n\n Args:\n transform (callable): a callable to be used to transform `data`\n data (object): an object to be transformed.\n \"\"\"\n try:\n if isinstance(data, (list, tuple)):\n return [transform(item) for item in data]\n return transform(data)\n except Exception as e:\n raise Exception(f\"applying transform {transform}.\").with_traceback(e.__traceback__)\n\n\ndef create_grid(spatial_size, spacing=None, homogeneous: bool = True, dtype: np.dtype = float):\n \"\"\"\n compute a `spatial_size` mesh.\n\n Args:\n spatial_size (sequence of ints): spatial size of the grid.\n spacing (sequence of ints): same len as ``spatial_size``, defaults to 1.0 (dense grid).\n homogeneous (bool): whether to make homogeneous coordinates.\n dtype (type): output grid data type.\n \"\"\"\n spacing = spacing or tuple(1.0 for _ in spatial_size)\n ranges = [np.linspace(-(d - 1.0) / 2.0 * s, (d - 1.0) / 2.0 * s, int(d)) for d, s in zip(spatial_size, spacing)]\n coords = 
np.asarray(np.meshgrid(*ranges, indexing=\"ij\"), dtype=dtype)\n if not homogeneous:\n return coords\n return np.concatenate([coords, np.ones_like(coords[:1])])\n\n\ndef create_control_grid(spatial_shape, spacing, homogeneous: bool = True, dtype: Optional[np.dtype] = float):\n \"\"\"\n control grid with two additional point in each direction\n \"\"\"\n grid_shape = []\n for d, s in zip(spatial_shape, spacing):\n d = int(d)\n if d % 2 == 0:\n grid_shape.append(np.ceil((d - 1.0) / (2.0 * s) + 0.5) * 2.0 + 2.0)\n else:\n grid_shape.append(np.ceil((d - 1.0) / (2.0 * s)) * 2.0 + 3.0)\n return create_grid(grid_shape, spacing, homogeneous, dtype)\n\n\ndef create_rotate(spatial_dims: int, radians):\n \"\"\"\n create a 2D or 3D rotation matrix\n\n Args:\n spatial_dims (2|3): spatial rank\n radians (float or a sequence of floats): rotation radians\n when spatial_dims == 3, the `radians` sequence corresponds to\n rotation in the 1st, 2nd, and 3rd dim respectively.\n \"\"\"\n radians = ensure_tuple(radians)\n if spatial_dims == 2:\n if len(radians) >= 1:\n sin_, cos_ = np.sin(radians[0]), np.cos(radians[0])\n return np.array([[cos_, -sin_, 0.0], [sin_, cos_, 0.0], [0.0, 0.0, 1.0]])\n\n if spatial_dims == 3:\n affine = None\n if len(radians) >= 1:\n sin_, cos_ = np.sin(radians[0]), np.cos(radians[0])\n affine = np.array(\n [[1.0, 0.0, 0.0, 0.0], [0.0, cos_, -sin_, 0.0], [0.0, sin_, cos_, 0.0], [0.0, 0.0, 0.0, 1.0]]\n )\n if len(radians) >= 2:\n sin_, cos_ = np.sin(radians[1]), np.cos(radians[1])\n affine = affine @ np.array(\n [[cos_, 0.0, sin_, 0.0], [0.0, 1.0, 0.0, 0.0], [-sin_, 0.0, cos_, 0.0], [0.0, 0.0, 0.0, 1.0]]\n )\n if len(radians) >= 3:\n sin_, cos_ = np.sin(radians[2]), np.cos(radians[2])\n affine = affine @ np.array(\n [[cos_, -sin_, 0.0, 0.0], [sin_, cos_, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]\n )\n return affine\n\n raise ValueError(f\"create_rotate got spatial_dims={spatial_dims}, radians={radians}.\")\n\n\ndef create_shear(spatial_dims: int, coefs):\n \"\"\"\n create a shearing matrix\n Args:\n spatial_dims (int): spatial rank\n coefs (floats): shearing factors, defaults to 0.\n \"\"\"\n coefs = list(ensure_tuple(coefs))\n if spatial_dims == 2:\n while len(coefs) < 2:\n coefs.append(0.0)\n return np.array([[1, coefs[0], 0.0], [coefs[1], 1.0, 0.0], [0.0, 0.0, 1.0]])\n if spatial_dims == 3:\n while len(coefs) < 6:\n coefs.append(0.0)\n return np.array(\n [\n [1.0, coefs[0], coefs[1], 0.0],\n [coefs[2], 1.0, coefs[3], 0.0],\n [coefs[4], coefs[5], 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n ]\n )\n raise NotImplementedError\n\n\ndef create_scale(spatial_dims: int, scaling_factor):\n \"\"\"\n create a scaling matrix\n Args:\n spatial_dims (int): spatial rank\n scaling_factor (floats): scaling factors, defaults to 1.\n \"\"\"\n scaling_factor = list(ensure_tuple(scaling_factor))\n while len(scaling_factor) < spatial_dims:\n scaling_factor.append(1.0)\n return np.diag(scaling_factor[:spatial_dims] + [1.0])\n\n\ndef create_translate(spatial_dims: int, shift):\n \"\"\"\n create a translation matrix\n Args:\n spatial_dims (int): spatial rank\n shift (floats): translate factors, defaults to 0.\n \"\"\"\n shift = ensure_tuple(shift)\n affine = np.eye(spatial_dims + 1)\n for i, a in enumerate(shift[:spatial_dims]):\n affine[i, spatial_dims] = a\n return affine\n\n\ndef generate_spatial_bounding_box(\n img: np.ndarray, select_fn: Callable = lambda x: x > 0, channel_indexes=None, margin: int = 0\n):\n \"\"\"\n generate the spatial bounding box of foreground in the image with start-end 
positions.\n Users can define arbitrary function to select expected foreground from the whole image or specified channels.\n And it can also add margin to every dim of the bounding box.\n\n Args:\n img (ndarrary): source image to generate bounding box from.\n select_fn (Callable): function to select expected foreground, default is to select values > 0.\n channel_indexes (int, tuple or list): if defined, select foreground only on the specified channels\n of image. if None, select foreground on the whole image.\n margin (int): add margin to all dims of the bounding box.\n \"\"\"\n assert isinstance(margin, int), \"margin must be int type.\"\n data = img[[*(ensure_tuple(channel_indexes))]] if channel_indexes is not None else img\n data = np.any(select_fn(data), axis=0)\n nonzero_idx = np.nonzero(data)\n\n box_start = list()\n box_end = list()\n for i in range(data.ndim):\n assert len(nonzero_idx[i]) > 0, f\"did not find nonzero index at spatial dim {i}\"\n box_start.append(max(0, np.min(nonzero_idx[i]) - margin))\n box_end.append(min(data.shape[i], np.max(nonzero_idx[i]) + margin + 1))\n return box_start, box_end\n\n\ndef get_largest_connected_component_mask(img, connectivity: Optional[int] = None):\n \"\"\"\n Gets the largest connected component mask of an image.\n\n Args:\n img: Image to get largest connected component from. Shape is (batch_size, spatial_dim1 [, spatial_dim2, ...])\n connectivity (int): Maximum number of orthogonal hops to consider a pixel/voxel as a neighbor.\n Accepted values are ranging from 1 to input.ndim. If ``None``, a full\n connectivity of ``input.ndim`` is used.\n \"\"\"\n img_arr = img.detach().cpu().numpy()\n largest_cc = np.zeros(shape=img_arr.shape, dtype=img_arr.dtype)\n for i, item in enumerate(img_arr):\n item = measure.label(item, connectivity=connectivity)\n if item.max() != 0:\n largest_cc[i, ...] = item == (np.argmax(np.bincount(item.flat)[1:]) + 1)\n return torch.as_tensor(largest_cc, device=img.device)\n" ]
[ [ "numpy.random.rand", "numpy.random.randint" ], [ "numpy.diag", "numpy.asarray", "numpy.max", "numpy.any", "numpy.iinfo", "numpy.where", "numpy.ones_like", "numpy.eye", "numpy.subtract", "numpy.full", "numpy.sin", "numpy.ceil", "numpy.unravel_index", "numpy.zeros", "numpy.nonzero", "numpy.min", "numpy.floor_divide", "numpy.logical_and", "numpy.array", "numpy.meshgrid", "torch.as_tensor", "numpy.cos", "numpy.bincount" ] ]
massimo-nocentini/cagd
[ "baec0824951ebc17e23e16e71339dd8fd79b11c2" ]
[ "surfaces/trianglesDrawing.py" ]
[ "\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nimport trianglesCore as tc\n\ndef draw(*surfaces, figure_size_tuple=(15,15)):\n\n sizex, sizey = figure_size_tuple\n matplotlib.rcParams['figure.figsize'] = [sizex, sizey]\n\n # necessary adjustment if `draw` is used for only one patch\n if len(surfaces) is 2 and not isinstance(surfaces[0], tuple):\n surface, triangles = surfaces\n surfaces = [(surface, triangles)]\n\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1, projection='3d')\n\n for surface, triangles in surfaces:\n x, y, z = surface[0,:],surface[1,:],surface[2,:]\n ax.plot_trisurf(x, y, z,\n triangles=triangles, cmap=plt.cm.Spectral)#, edgecolor='none')\n\n return fig, ax\n\n\ndef draw_repeated_degree_elevation(\n control_net, snapshots=None, degrees=None, formatting_string=\"Order {}:\"):\n\n order, control_net = control_net\n\n if snapshots:\n def drawer(print_handler):\n nonlocal order, control_net\n runs = 2\n snapshots_list = [int(np.ceil(l)) for l in np.logspace(0,runs,num=snapshots)]\n s = 0\n for i in range(1, (10**runs)+1):\n order, control_net = tc.degree_elevation(order, control_net)\n if i == snapshots_list[s]:\n print_handler(order)\n s += 1\n elif degrees:\n def drawer(print_handler):\n nonlocal order, control_net, degrees\n degrees = sorted(degrees)\n\n for d, degree in enumerate(degrees):\n if degree > order-1: break\n\n for i in range(max(degrees)+1):\n order, control_net = tc.degree_elevation(order, control_net)\n if order-1 is degrees[d]:\n print_handler(order)\n d += 1\n if d == len(degrees): break\n\n\n def print_handler(order):\n\n if formatting_string is not False: print(formatting_string.format(order))\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n\n xs = control_net[0,:]\n ys = control_net[1,:]\n zs = control_net[2,:]\n ax.scatter(xs, ys, zs, c='r', marker='o')\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n\n plt.show()\n\n drawer(print_handler) # finally draw some pictures\n\n return order, control_net\n" ]
[ [ "numpy.logspace", "numpy.ceil", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
mingewang/pytorch_deep_learning_by_example
[ "83c9e12364a359b9ef77f0645ca7815e9e817f58" ]
[ "basic/basic_example1.py" ]
[ "# -*- coding: utf-8 -*-\nimport torch\n\n# N is batch size; D_in is input dimension;\n# H is hidden dimension; D_out is output dimension.\nN, D_in, H, D_out = 64, 1000, 100, 10\n\n# Create random Tensors to hold inputs and outputs\nx = torch.randn(N, D_in)\ny = torch.randn(N, D_out)\n\n# Use the nn package to define our model and loss function.\nmodel = torch.nn.Sequential(\n torch.nn.Linear(D_in, H),\n torch.nn.ReLU(),\n torch.nn.Linear(H, D_out),\n)\nloss_fn = torch.nn.MSELoss(reduction='sum')\n\n# Use the optim package to define an Optimizer that will update the weights of\n# the model for us. Here we will use Adam; the optim package contains many other\n# optimization algoriths. The first argument to the Adam constructor tells the\n# optimizer which Tensors it should update.\nlearning_rate = 1e-4\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\nepochs = 5 \n\nfor t in range(500):\n # Forward pass: compute predicted y by passing x to the model.\n y_pred = model(x)\n\n # Compute and print loss.\n loss = loss_fn(y_pred, y)\n print(t, loss.item())\n\n # Before the backward pass, use the optimizer object to zero all of the\n # gradients for the variables it will update (which are the learnable\n # weights of the model). This is because by default, gradients are\n # accumulated in buffers( i.e, not overwritten) whenever .backward()\n # is called. Checkout docs of torch.autograd.backward for more details.\n optimizer.zero_grad()\n\n # Backward pass: compute gradient of the loss with respect to model\n # parameters\n loss.backward()\n\n # Calling the step function on an Optimizer makes an update to its\n # parameters\n optimizer.step()\n" ]
[ [ "torch.nn.Linear", "torch.randn", "torch.nn.ReLU", "torch.nn.MSELoss" ] ]
lambertsbennett/scikit-multiflow
[ "bc714fd5ee4f0a486adc00ec6ae39eafa64f81cc", "bc714fd5ee4f0a486adc00ec6ae39eafa64f81cc" ]
[ "src/skmultiflow/trees/stacked_single_target_hoeffding_tree_regressor.py", "src/skmultiflow/drift_detection/kswin.py" ]
[ "from operator import attrgetter\n\nimport numpy as np\n\nfrom skmultiflow.core import MultiOutputMixin\nfrom skmultiflow.trees import iSOUPTreeRegressor\nfrom skmultiflow.utils import get_dimensions\nfrom skmultiflow.trees.split_criterion import IntraClusterVarianceReductionSplitCriterion\n\nfrom skmultiflow.trees.nodes import LearningNode\nfrom skmultiflow.trees.nodes import SSTActiveLearningNode\nfrom skmultiflow.trees.nodes import SSTActiveLearningNodeAdaptive\nfrom skmultiflow.trees.nodes import SSTInactiveLearningNode\nfrom skmultiflow.trees.nodes import SSTInactiveLearningNodeAdaptive\n\n\nclass StackedSingleTargetHoeffdingTreeRegressor(iSOUPTreeRegressor, MultiOutputMixin):\n \"\"\"Stacked Single-target Hoeffding Tree regressor.\n\n Implementation of the Stacked Single-target Hoeffding Tree (SST-HT) method\n for multi-target regression as proposed by S. M. Mastelini, S. Barbon Jr.,\n and A. C. P. L. F. de Carvalho [1]_.\n\n Parameters\n ----------\n max_byte_size: int (default=33554432)\n Maximum memory consumed by the tree.\n memory_estimate_period: int (default=1000000)\n Number of instances between memory consumption checks.\n grace_period: int (default=200)\n Number of instances a leaf should observe between split attempts.\n split_confidence: float (default=0.0000001)\n Allowed error in split decision, a value closer to 0 takes longer to\n decide.\n tie_threshold: float (default=0.05)\n Threshold below which a split will be forced to break ties.\n binary_split: boolean (default=False)\n If True, only allow binary splits.\n stop_mem_management: boolean (default=False)\n If True, stop growing as soon as memory limit is hit.\n remove_poor_atts: boolean (default=False)\n If True, disable poor attributes.\n no_preprune: boolean (default=False)\n If True, disable pre-pruning.\n leaf_prediction: string (default='perceptron')\n | Prediction mechanism used at leafs.\n | 'perceptron' - Stacked perceptron\n | 'adaptive' - Adaptively chooses between the best predictor (mean,\n perceptron or stacked perceptron)\n nb_threshold: int (default=0)\n Number of instances a leaf should observe before allowing Naive Bayes.\n nominal_attributes: list, optional\n List of Nominal attributes. If emtpy, then assume that all attributes\n are numerical.\n learning_ratio_perceptron: float\n The learning rate of the perceptron.\n learning_ratio_decay: float\n Decay multiplier for the learning rate of the perceptron\n learning_ratio_const: Bool\n If False the learning ratio will decay with the number of examples seen\n random_state: int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`. Used when leaf_prediction is 'perceptron'.\n\n References\n ----------\n .. [1] Mastelini, S. M., Barbon Jr, S., de Carvalho, A. C. P. L. F. (2019).\n \"Online Multi-target regression trees with stacked leaf models\". 
arXiv\n preprint arXiv:1903.12483.\n\n Examples\n --------\n >>> # Imports\n >>> from skmultiflow.data import RegressionGenerator\n >>> from skmultiflow.trees import StackedSingleTargetHoeffdingTreeRegressor\n >>> import numpy as np\n >>>\n >>> # Setup a data stream\n >>> n_targets = 3\n >>> stream = RegressionGenerator(n_targets=n_targets, random_state=1, n_samples=200)\n >>>\n >>> # Setup the Stacked Single-target Hoeffding Tree Regressor\n >>> sst_ht = StackedSingleTargetHoeffdingTreeRegressor()\n >>>\n >>> # Auxiliary variables to control loop and track performance\n >>> n_samples = 0\n >>> max_samples = 200\n >>> y_pred = np.zeros((max_samples, n_targets))\n >>> y_true = np.zeros((max_samples, n_targets))\n >>>\n >>> # Run test-then-train loop for max_samples and while there is data\n >>> while n_samples < max_samples and stream.has_more_samples():\n >>> X, y = stream.next_sample()\n >>> y_true[n_samples] = y[0]\n >>> y_pred[n_samples] = sst_ht.predict(X)[0]\n >>> sst_ht.partial_fit(X, y)\n >>> n_samples += 1\n >>>\n >>> # Display results\n >>> print('Stacked Single-target Hoeffding Tree regressor example')\n >>> print('{} samples analyzed.'.format(n_samples))\n >>> print('Mean absolute error: {}'.format(np.mean(np.abs(y_true - y_pred))))\n \"\"\"\n\n # =====================================================================\n # == Stacked Single-target Hoeffding Regression Tree implementation ===\n # =====================================================================\n\n def __init__(self,\n max_byte_size=33554432,\n memory_estimate_period=1000000,\n grace_period=200,\n split_confidence=0.0000001,\n tie_threshold=0.05,\n binary_split=False,\n stop_mem_management=False,\n remove_poor_atts=False,\n leaf_prediction='perceptron',\n no_preprune=False,\n nb_threshold=0,\n nominal_attributes=None,\n learning_ratio_perceptron=0.02,\n learning_ratio_decay=0.001,\n learning_ratio_const=True,\n random_state=None):\n super().__init__(max_byte_size=max_byte_size,\n memory_estimate_period=memory_estimate_period,\n grace_period=grace_period,\n split_confidence=split_confidence,\n tie_threshold=tie_threshold,\n binary_split=binary_split,\n stop_mem_management=stop_mem_management,\n remove_poor_atts=remove_poor_atts,\n no_preprune=no_preprune,\n leaf_prediction=leaf_prediction,\n nb_threshold=nb_threshold,\n nominal_attributes=nominal_attributes)\n self.split_criterion = 'icvr' # intra cluster variance reduction\n self.learning_ratio_perceptron = learning_ratio_perceptron\n self.learning_ratio_decay = learning_ratio_decay\n self.learning_ratio_const = learning_ratio_const\n self.random_state = random_state\n\n self._tree_root = None\n self._decision_node_cnt = 0\n self._active_leaf_node_cnt = 0\n self._inactive_leaf_node_cnt = 0\n self._inactive_leaf_byte_size_estimate = 0.0\n self._active_leaf_byte_size_estimate = 0.0\n self._byte_size_estimate_overhead_fraction = 1.0\n self._growth_allowed = True\n self._train_weight_seen_by_model = 0.0\n\n self.examples_seen = 0\n self.sum_of_values = 0.0\n self.sum_of_squares = 0.0\n self.sum_of_attribute_values = 0.0\n self.sum_of_attribute_squares = 0.0\n\n # To add the n_targets property once\n self._n_targets_set = False\n\n @property\n def leaf_prediction(self):\n return self._leaf_prediction\n\n @leaf_prediction.setter\n def leaf_prediction(self, leaf_prediction):\n if leaf_prediction not in {self._PERCEPTRON, self._ADAPTIVE}:\n print(\"Invalid leaf_prediction option {}', will use default '{}'\".\n format(leaf_prediction, self._PERCEPTRON))\n 
self._leaf_prediction = self._PERCEPTRON\n else:\n self._leaf_prediction = leaf_prediction\n\n def _get_predictors_faded_error(self, X):\n \"\"\"Get the faded error of the leaf corresponding to the pased instance.\n\n Parameters\n ----------\n X: numpy.ndarray of length equal to the number of features.\n Instance attributes.\n\n Returns\n -------\n dict (predictor, fmae)\n \"\"\"\n fmaes = {}\n if self._tree_root is not None:\n found_node = self._tree_root.filter_instance_to_leaf(X, None, -1)\n leaf_node = found_node.node\n if leaf_node is None:\n leaf_node = found_node.parent\n if isinstance(leaf_node, LearningNode):\n fmaes['mean'] = leaf_node.fMAE_M\n fmaes['perceptron'] = leaf_node.fMAE_P\n fmaes['stacked_perceptron'] = leaf_node.fMAE_SP\n else:\n # If the found node is not a learning node, give preference to\n # the mean predictor\n fmaes['mean'] = np.zeros(self._n_targets)\n fmaes['perceptron'] = np.full(self._n_targets, np.Inf)\n fmaes['stacked_perceptron'] = np.full(self._n_targets, np.Inf)\n\n return fmaes\n\n def _new_learning_node(self, initial_class_observations=None, parent_node=None,\n is_active_node=True):\n \"\"\"Create a new learning node. The type of learning node depends on\n the tree configuration.\n \"\"\"\n if initial_class_observations is None:\n initial_class_observations = {}\n\n if is_active_node:\n if self.leaf_prediction == self._PERCEPTRON:\n return SSTActiveLearningNode(\n initial_class_observations,\n parent_node,\n random_state=self.random_state\n )\n elif self.leaf_prediction == self._ADAPTIVE:\n new_node = SSTActiveLearningNodeAdaptive(\n initial_class_observations,\n parent_node,\n random_state=self.random_state\n )\n # Resets faded errors\n new_node.fMAE_M = np.zeros(self._n_targets, dtype=np.float64)\n new_node.fMAE_P = np.zeros(self._n_targets, dtype=np.float64)\n new_node.fMAE_SP = np.zeros(self._n_targets, dtype=np.float64)\n return new_node\n else:\n if self.leaf_prediction == self._PERCEPTRON:\n return SSTInactiveLearningNode(\n initial_class_observations,\n parent_node,\n random_state=parent_node.random_state\n )\n elif self.leaf_prediction == self._ADAPTIVE:\n new_node = SSTInactiveLearningNodeAdaptive(\n initial_class_observations,\n parent_node,\n random_state=parent_node.random_state\n )\n new_node.fMAE_M = parent_node.fMAE_M\n new_node.fMAE_P = parent_node.fMAE_P\n new_node.fMAE_SP = parent_node.fMAE_SP\n return new_node\n\n def predict(self, X):\n \"\"\"Predicts the target value using mean class or the perceptron.\n\n Parameters\n ----------\n X: numpy.ndarray of shape (n_samples, n_features)\n Samples for which we want to predict the labels.\n\n Returns\n -------\n list\n Predicted target values.\n \"\"\"\n r, _ = get_dimensions(X)\n\n try:\n predictions = np.zeros((r, self._n_targets), dtype=np.float64)\n except AttributeError:\n return [0.0]\n for i in range(r):\n if self.leaf_prediction == self._PERCEPTRON:\n if self.examples_seen > 1:\n perceptron_weights = self.get_weights_for_instance(X[i])\n if perceptron_weights is None:\n # Instance was sorted to a non-learning node: use\n # mean prediction\n votes = self.get_votes_for_instance(X[i]).copy()\n number_of_examples_seen = votes[0]\n sum_of_values = votes[1]\n predictions[i] = sum_of_values / number_of_examples_seen\n continue\n\n normalized_sample = self.normalize_sample(X[i])\n normalized_base_prediction = np.matmul(\n perceptron_weights[0], normalized_sample\n )\n normalized_meta_prediction = np.matmul(\n perceptron_weights[1],\n np.append(normalized_base_prediction, 1.0)\n )\n mean 
= self.sum_of_values / self.examples_seen\n variance = (self.sum_of_squares -\n (self.sum_of_values *\n self.sum_of_values) /\n self.examples_seen) / (self.examples_seen - 1)\n sd = np.sqrt(variance, out=np.zeros_like(variance),\n where=variance >= 0.0)\n # Samples are normalized using just one sd, as proposed in\n # the iSoup-Tree method\n predictions[i] = normalized_meta_prediction * sd + mean\n elif self.leaf_prediction == self._ADAPTIVE:\n if self.examples_seen > 1:\n # Mean predictor\n votes = self.get_votes_for_instance(X[i]).copy()\n number_of_examples_seen = votes[0]\n sum_of_values = votes[1]\n pred_M = sum_of_values / number_of_examples_seen\n\n # Perceptron variants\n perceptron_weights = self.get_weights_for_instance(X[i])\n if perceptron_weights is None:\n # Instance was sorted to a non-learning node: use\n # mean prediction\n predictions[i] = pred_M\n continue\n else:\n normalized_sample = self.normalize_sample(X[i])\n\n # Standard perceptron\n normalized_base_prediction = np.matmul(\n perceptron_weights[0], normalized_sample\n )\n # Stacked perceptron\n normalized_meta_prediction = np.matmul(\n perceptron_weights[1],\n np.append(normalized_base_prediction, 1.0)\n )\n\n mean = self.sum_of_values / self.examples_seen\n variance = (self.sum_of_squares -\n (self.sum_of_values *\n self.sum_of_values) /\n self.examples_seen) / (self.examples_seen - 1)\n sd = np.sqrt(variance, out=np.zeros_like(variance),\n where=variance >= 0.0)\n\n pred_P = normalized_base_prediction * sd + mean\n pred_SP = normalized_meta_prediction * sd + mean\n\n # Gets faded errors for the related leaf predictors\n fmae = self._get_predictors_faded_error(X[i])\n\n # Selects, for each target, the best current performer\n for j in range(self._n_targets):\n b_pred = np.argmin([fmae['mean'][j],\n fmae['perceptron'][j],\n fmae['stacked_perceptron'][j]]\n )\n\n if b_pred == 0:\n # If all the expected errors are the same,\n # use the standard perceptron\n if fmae['mean'][j] == fmae['perceptron'][j] \\\n == fmae['stacked_perceptron'][j]:\n predictions[i, j] = pred_P[j]\n # Otherwise, use the simplest approach\n else:\n predictions[i, j] = pred_M[j]\n else:\n if b_pred == 1:\n # Use the stacked perceptron if its expected\n # error is the same than the error for the\n # standard perceptron\n if fmae['perceptron'][j] == \\\n fmae['stacked_perceptron'][j]:\n predictions[i, j] = pred_SP[j]\n else:\n predictions[i, j] = pred_P[j]\n else:\n predictions[i, j] = pred_SP[j]\n\n return predictions\n", "import numpy as np\nfrom scipy import stats\nfrom skmultiflow.drift_detection.base_drift_detector import BaseDriftDetector\n\nclass KSWIN(BaseDriftDetector):\n r\"\"\" Kolmogorov-Smirnov Windowing method for concept drift detection.\n\n Parameters\n ----------\n alpha: float (default=0.005)\n Probability for the test statistic of the Kolmogorov-Smirnov-Test\n The alpha parameter is very sensitive, therefore should be set\n below 0.01.\n\n window_size: float (default=100)\n Size of the sliding window\n\n stat_size: float (default=30)\n Size of the statistic window\n\n data: numpy.ndarray of shape (n_samples, 1) (default=None,optional)\n Already collected data to avoid cold start.\n\n Notes\n -----\n KSWIN (Kolmogorov-Smirnov Windowing) [1]_ is a concept change detection method based\n on the Kolmogorov-Smirnov (KS) statistical test. KS-test is a statistical test with\n no assumption of underlying data distribution. KSWIN can monitor data or performance\n distributions. 
Note that the detector accepts one dimensional input as array.\n\n KSWIN maintains a sliding window :math:`\\Psi` of fixed size :math:`n` (window_size). The\n last :math:`r` (stat_size) samples of :math:`\\Psi` are assumed to represent the last\n concept considered as :math:`R`. From the first :math:`n-r` samples of :math:`\\Psi`,\n :math:`r` samples are uniformly drawn, representing an approximated last concept :math:`W`.\n\n The KS-test is performed on the windows :math:`R` and :math:`W` of the same size. KS\n -test compares the distance of the empirical cumulative data distribution :math:`dist(R,W)`.\n\n A concept drift is detected by KSWIN if:\n\n * :math:`dist(R,W) > \\sqrt{-\\frac{ln\\alpha}{r}}`\n\n -> The difference in empirical data distributions between the windows :math:`R` and :math:`W`\n is too large as that R and W come from the same distribution.\n\n References\n ----------\n .. [1] Christoph Raab, Moritz Heusinger, Frank-Michael Schleif, Reactive\n Soft Prototype Computing for Concept Drift Streams, Neurocomputing, 2020,\n\n Examples\n --------\n >>> # Imports\n >>> import numpy as np\n >>> from skmultiflow.data.sea_generator import SEAGenerator\n >>> from skmultiflow.drift_detection import KSWIN\n >>> import numpy as np\n >>> # Initialize KSWIN and a data stream\n >>> kswin = KSWIN(alpha=0.01)\n >>> stream = SEAGenerator(classification_function = 2,\n >>> random_state = 112, balance_classes = False,noise_percentage = 0.28)\n >>> # Store detections\n >>> detections = []\n >>> # Process stream via KSWIN and print detections\n >>> for i in range(1000):\n >>> data = stream.next_sample(10)\n >>> batch = data[0][0][0]\n >>> kswin.add_element(batch)\n >>> if kswin.detected_change():\n >>> print(\"\\rIteration {}\".format(i))\n >>> print(\"\\r KSWINReject Null Hyptheses\")\n >>> detections.append(i)\n >>> print(\"Number of detections: \"+str(len(detections)))\n \"\"\"\n def __init__(self, alpha=0.005, window_size=100, stat_size=30, data=None):\n super().__init__()\n self.window_size = window_size\n self.stat_size = stat_size\n self.alpha = alpha\n self.change_detected = False\n self.p_value = 0\n self.n = 0\n if self.alpha < 0 or self.alpha > 1:\n raise ValueError(\"Alpha must be between 0 and 1\")\n\n if self.window_size < 0:\n raise ValueError(\"window_size must be greater than 0\")\n\n if self.window_size < self.stat_size:\n raise ValueError(\"stat_size must be smaller than window_size\")\n\n if type(data) != np.ndarray or type(data) is None:\n self.window = np.array([])\n else:\n self.window = data\n\n def add_element(self, input_value):\n \"\"\" Add element to sliding window\n\n Adds an element on top of the sliding window and removes\n the oldest one from the window. 
Afterwards, the KS-test\n is performed.\n\n Parameters\n ----------\n input_value: ndarray\n New data sample the sliding window should add.\n \"\"\"\n self.n += 1\n currentLength = self.window.shape[0]\n if currentLength >= self.window_size:\n self.window = np.delete(self.window,0)\n rnd_window = np.random.choice(self.window[:-self.stat_size], self.stat_size)\n\n (st, self.p_value) = stats.ks_2samp(rnd_window, self.window[-self.stat_size:],mode=\"exact\")\n\n if self.p_value <= self.alpha and st > 0.1:\n self.change_detected = True\n self.window = self.window[-self.stat_size:]\n else:\n self.change_detected = False\n else: # Not enough samples in sliding window for a valid test\n self.change_detected = False\n\n self.window = np.concatenate([self.window,[input_value]])\n\n def detected_change(self):\n \"\"\" Get detected change\n\n Returns\n -------\n bool\n Whether or not a drift occurred\n\n \"\"\"\n return self.change_detected\n\n def reset(self):\n \"\"\" reset\n\n Resets the change detector parameters.\n \"\"\"\n self.p_value = 0\n self.window = np.array([])\n self.change_detected = False\n" ]
[ [ "numpy.matmul", "numpy.full", "numpy.append", "numpy.zeros_like", "numpy.argmin", "numpy.zeros" ], [ "scipy.stats.ks_2samp", "numpy.random.choice", "numpy.concatenate", "numpy.delete", "numpy.array" ] ]
mrocklin/pygdf
[ "2de9407427da9497ebdf8951a12857be0fab31bb", "2de9407427da9497ebdf8951a12857be0fab31bb" ]
[ "pygdf/buffer.py", "pygdf/tests/test_multi.py" ]
[ "\nimport numpy as np\nfrom numba import cuda\n\nfrom . import cudautils, utils\nfrom .serialize import register_distributed_serializer\n\n\nclass Buffer(object):\n \"\"\"A 1D gpu buffer.\n \"\"\"\n _cached_ipch = None\n\n @classmethod\n def from_empty(cls, mem):\n \"\"\"From empty device array\n \"\"\"\n return cls(mem, size=0, capacity=mem.size)\n\n @classmethod\n def null(cls, dtype):\n \"\"\"Create a \"null\" buffer with a zero-sized device array.\n \"\"\"\n mem = cuda.device_array(0, dtype=dtype)\n return cls(mem, size=0, capacity=0)\n\n def __init__(self, mem, size=None, capacity=None, categorical=False):\n if size is None:\n if categorical:\n size = len(mem)\n else:\n size = mem.size\n if capacity is None:\n capacity = size\n self.mem = cudautils.to_device(mem)\n _BufferSentry(self.mem).ndim(1)\n self.size = size\n self.capacity = capacity\n self.dtype = self.mem.dtype\n\n def serialize(self, serialize, context=None):\n \"\"\"Called when dask.distributed is performing a serialization on this\n object.\n\n Do not use this directly. It is invoked by dask.distributed.\n\n Parameters\n ----------\n\n serialize : callable\n Used to serialize data that needs serialization .\n context : dict; optional\n If not ``None``, it contains information about the destination.\n\n Returns\n -------\n (header, frames)\n See custom serialization documentation in dask.distributed.\n \"\"\"\n from .serialize import should_use_ipc\n\n # Use destination info to determine if we should do IPC.\n use_ipc = should_use_ipc(context)\n header = {}\n # Should use IPC transfer\n if use_ipc:\n # Reuse IPC handle from previous call?\n if self._cached_ipch is not None:\n ipch = self._cached_ipch\n else:\n # Get new IPC handle\n ipch = self.to_gpu_array().get_ipc_handle()\n header['kind'] = 'ipc'\n header['mem'], frames = serialize(ipch)\n # Keep IPC handle alive\n self._cached_ipch = ipch\n # Not using IPC transfer\n else:\n header['kind'] = 'normal'\n # Serialize the buffer as a numpy array\n header['mem'], frames = serialize(self.to_array())\n return header, frames\n\n @classmethod\n def deserialize(cls, deserialize, header, frames):\n \"\"\"Called when dask.distributed is performing a deserialization for\n data of this class.\n\n Do not use this directly. 
It is invoked by dask.distributed.\n\n Parameters\n ----------\n\n deserialize : callable\n Used to deserialize data that needs further deserialization .\n header, frames : dict\n See custom serialization documentation in dask.distributed.\n\n Returns\n -------\n obj : Buffer\n Returns an instance of Buffer.\n \"\"\"\n # Using IPC?\n if header['kind'] == 'ipc':\n ipch = deserialize(header['mem'], frames)\n # Open IPC handle\n with ipch as data:\n # Copy remote data over\n mem = cuda.device_array_like(data)\n mem.copy_to_device(data)\n # Not using IPC\n else:\n # Deserialize the numpy array\n mem = deserialize(header['mem'], frames)\n mem.flags['WRITEABLE'] = True # XXX: hack for numba to work\n return Buffer(mem)\n\n def __reduce__(self):\n cpumem = self.to_array()\n # Note: pickled Buffer only stores *size* element.\n return type(self), (cpumem,)\n\n def __sizeof__(self):\n return int(self.mem.alloc_size)\n\n def __getitem__(self, arg):\n if isinstance(arg, slice):\n sliced = self.to_gpu_array()[arg]\n buf = Buffer(sliced)\n buf.dtype = self.dtype # for np.datetime64 support\n return buf\n elif isinstance(arg, int):\n arg = utils.normalize_index(arg, self.size)\n # the dtype argument is necessary for datetime64 support\n # because currently we can't pass datetime64 types into\n # cuda dev arrays, so the type of the cuda dev array is\n # an i64, and we view it as the dtype on the buffer\n return self.mem[arg].view(self.dtype)\n else:\n raise NotImplementedError(type(arg))\n\n @property\n def avail_space(self):\n return self.capacity - self.size\n\n def _sentry_capacity(self, size_needed):\n if size_needed > self.avail_space:\n raise MemoryError('insufficient space in buffer')\n\n def append(self, element):\n self._sentry_capacity(1)\n self.extend(np.asarray(element, dtype=self.dtype))\n\n def extend(self, array):\n needed = array.size\n self._sentry_capacity(needed)\n array = cudautils.astype(array, dtype=self.dtype)\n self.mem[self.size:].copy_to_device(array)\n self.size += needed\n\n def astype(self, dtype):\n if self.dtype == dtype:\n return self\n else:\n return Buffer(cudautils.astype(self.mem, dtype=dtype))\n\n def to_array(self):\n return self.to_gpu_array().copy_to_host()\n\n def to_gpu_array(self):\n return self.mem[:self.size]\n\n def copy(self):\n \"\"\"Deep copy the buffer\n \"\"\"\n return Buffer(mem=cudautils.copy_array(self.mem),\n size=self.size, capacity=self.capacity)\n\n def as_contiguous(self):\n out = Buffer(mem=cudautils.as_contiguous(self.mem),\n size=self.size, capacity=self.capacity)\n assert out.is_contiguous()\n return out\n\n def is_contiguous(self):\n return self.mem.is_c_contiguous()\n\n\nclass BufferSentryError(ValueError):\n pass\n\n\nclass _BufferSentry(object):\n def __init__(self, buf):\n self._buf = buf\n\n def dtype(self, dtype):\n if self._buf.dtype != dtype:\n raise BufferSentryError('dtype mismatch')\n return self\n\n def ndim(self, ndim):\n if self._buf.ndim != ndim:\n raise BufferSentryError('ndim mismatch')\n return self\n\n def contig(self):\n if not self._buf.is_c_contiguous():\n raise BufferSentryError('non contiguous')\n\n\nregister_distributed_serializer(Buffer)\n", "# Copyright (c) 2018, NVIDIA CORPORATION.\n\nimport pytest\n\nimport pandas as pd\nimport pygdf as gd\n\n\ndef make_frames(index=None):\n df = pd.DataFrame({'x': range(10),\n 'y': list(map(float, range(10))),\n 'z': list('abcde')*2})\n df.z = df.z.astype('category')\n df2 = pd.DataFrame({'x': range(10, 20),\n 'y': list(map(float, range(10, 20))),\n 'z': list('edcba')*2})\n df2.z 
= df2.z.astype('category')\n gdf = gd.DataFrame.from_pandas(df)\n gdf2 = gd.DataFrame.from_pandas(df2)\n if index:\n df = df.set_index(index)\n df2 = df2.set_index(index)\n gdf = gdf.set_index(index)\n gdf2 = gdf2.set_index(index)\n return df, df2, gdf, gdf2\n\n\[email protected]('index', [False, 'z', 'y'])\ndef test_concat(index):\n df, df2, gdf, gdf2 = make_frames(index)\n # Make empty frame\n gdf_empty1 = gdf2[:0]\n assert len(gdf_empty1) == 0\n df_empty1 = gdf_empty1.to_pandas()\n # DataFrame\n res = gd.concat([gdf, gdf2, gdf, gdf_empty1]).to_pandas()\n sol = pd.concat([df, df2, df, df_empty1])\n pd.util.testing.assert_frame_equal(res, sol, check_names=False)\n\n # Series\n for c in [i for i in ('x', 'y', 'z') if i != index]:\n res = gd.concat([gdf[c], gdf2[c], gdf[c]]).to_pandas()\n sol = pd.concat([df[c], df2[c], df[c]])\n pd.util.testing.assert_series_equal(res, sol, check_names=False)\n\n # Index\n res = gd.concat([gdf.index, gdf2.index]).to_pandas()\n sol = df.index.append(df2.index)\n pd.util.testing.assert_index_equal(res, sol, check_names=False)\n\n\ndef test_concat_errors():\n df, df2, gdf, gdf2 = make_frames()\n\n # No objs\n with pytest.raises(ValueError):\n gd.concat([])\n\n # Mismatched types\n with pytest.raises(ValueError):\n gd.concat([gdf, gdf.x])\n\n # Unknown type\n with pytest.raises(ValueError):\n gd.concat(['bar', 'foo'])\n\n # Mismatched column dtypes\n with pytest.raises(ValueError):\n gd.concat([gdf.x, gdf.y])\n with pytest.raises(ValueError):\n gd.concat([gdf.x, gdf.z])\n\n # Mismatched index dtypes\n gdf3 = gdf2.set_index('z')\n gdf2.drop_column('z')\n with pytest.raises(ValueError):\n gd.concat([gdf2, gdf3])\n\n # Mismatched columns\n with pytest.raises(ValueError):\n gd.concat([gdf, gdf2])\n\n\ndef test_concat_misordered_columns():\n df, df2, gdf, gdf2 = make_frames(False)\n gdf2 = gdf2[['z', 'x', 'y']]\n df2 = df2[['z', 'x', 'y']]\n\n res = gd.concat([gdf, gdf2]).to_pandas()\n sol = pd.concat([df, df2])\n\n pd.util.testing.assert_frame_equal(res, sol, check_names=False)\n" ]
[ [ "numpy.asarray" ], [ "pandas.util.testing.assert_frame_equal", "pandas.concat", "pandas.util.testing.assert_index_equal", "pandas.util.testing.assert_series_equal" ] ]
GuanlinLee/FPD-for-Adversarial-Robustness
[ "76b06cb8a68469f8ed4ed6bb5479ee86719175fb", "76b06cb8a68469f8ed4ed6bb5479ee86719175fb" ]
[ "ResNet-50/SVHN/train.py", "ResNet-101/CALTECH-101/whitebox_and_black.py" ]
[ "import torchvision\nimport torchvision.transforms as transforms\nimport torch\nimport torch.utils.data\nimport resnet\nfrom torch.autograd import Variable\nfrom torch import nn\n\nimport early_stop\nfrom tqdm import tqdm\n\nimport os,sys\nimport numpy as np\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\"\ntrain_globa_step=0\nval_globa_step=0\n\nwd=1e-50\nlearning_rate=1e-4\nepochs=100\nbatch_size=300\ntorch.backends.cudnn.benchmark = True\ntransform=transforms.Compose([\n torchvision.transforms.Resize((64,64)),\n torchvision.transforms.ToTensor(),\n \n ])\n\ntrainset = torchvision.datasets.SVHN(root='./data',split='train', download=True, transform=transform)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,\n shuffle=True, num_workers=16)\n\n\ntransform_test=transforms.Compose([torchvision.transforms.Resize((64,64)),\n transforms.ToTensor(),\n ])\n\ntestset = torchvision.datasets.SVHN(root='./data', split='test', download=True, transform=transform_test)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,\n shuffle=False, num_workers=16)\n\n\nn = resnet.resnet101().cuda()\n\nweight_p, bias_p = [],[]\nfor name, p in n.named_parameters():\n if 'bias' in name:\n bias_p += [p]\n else:\n weight_p += [p]\n\ntrans_params = list(map(id, n.trans_conv.parameters()))\nclass_params = list(map(id, n.group2.parameters()))\n\nbase_params = filter(lambda p: id(p) not in trans_params,\n n.parameters())\nbase_params = filter(lambda p: id(p) not in class_params,\n base_params)\n\n\n\nloss1 =nn.MSELoss()\nloss1.cuda()\nloss2=nn.CrossEntropyLoss()\nloss2.cuda()\noptimizer = torch.optim.Adam([{'params': base_params},\n {'params':n.trans_conv.parameters(),'lr':learning_rate},\n {'params':n.group2.parameters(),'lr':learning_rate}],\n lr=learning_rate,weight_decay=wd)\n\nopt = torch.optim.Adam([{'params': base_params},\n {'params':n.trans_conv.parameters(),'lr':learning_rate}],\n lr=learning_rate,weight_decay=wd)\n\nif os.path.exists('bestmodel_params.pkl'):\n checkpoint = torch.load('bestmodel_params.pkl')\n n.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['opt_state_dict'])\n opt.load_state_dict(checkpoint['opt_state_dict2'])\n\nsch=torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,factor=0.1,patience=10)\n\nes=early_stop.EarlyStopping('max',patience=20)\nfor epoch in range(epochs):\n loadertrain = tqdm(trainloader, desc='{} E{:03d}'.format('train', epoch), ncols=0)\n loadertest = tqdm(testloader, desc='{} E{:03d}'.format('test', epoch), ncols=0)\n epoch_loss = 0.0\n correct=0.0\n total=0.0\n total2=0.0\n correct2=0.0\n for x_train, y_train in loadertrain:\n n.train()\n x_train, y_train = Variable(x_train.cuda()),Variable(y_train.cuda())\n x_noise=torch.FloatTensor(x_train.size(0),3,64,64).uniform_(-0.01,0.01)\n x_noise=torch.clamp(x_noise,-0.01,0.01)\n x_train_noise=x_train+Variable(x_noise.cuda())\n y_pre,c_pre = n(x_train_noise)\n y_pre=y_pre.cuda()\n\n n.zero_grad()\n optimizer.zero_grad()\n loss = loss1(torch.mul(y_pre,1.0), torch.mul( x_train,1.0))\n if loss.item()>3:\n loss.backward(retain_graph=True)\n torch.nn.utils.clip_grad_norm_(n.parameters(), 5.0)\n opt.step()\n epoch_loss += loss.data.item()\n _, predicted = torch.max(c_pre.data, 1)\n total += y_train.size(0)\n correct += predicted.eq(y_train.data).cuda().sum()\n torch.cuda.empty_cache()\n else:\n loss_cl=loss2(c_pre,y_train)\n\n loss_sum=torch.mul(loss,1/1)+loss_cl\n loss_sum.backward(retain_graph=True)\n torch.nn.utils.clip_grad_norm_(n.parameters(), 5.0)\n 
optimizer.step()\n epoch_loss += loss_sum.data.item()\n _, predicted = torch.max(c_pre.data, 1)\n total += y_train.size(0)\n correct += predicted.eq(y_train.data).cuda().sum()\n\n train_globa_step+=1\n torch.cuda.empty_cache()\n if loss.item()<3:\n\n y_pre2, c_pre2 = n(y_pre)\n y_pre2 = y_pre2.cuda()\n\n n.zero_grad()\n optimizer.zero_grad()\n lossreg2 = loss1(torch.mul(y_pre2, 1.0), torch.mul( x_train, 1.0))\n loss_cl2 = loss2(c_pre2, y_train)\n _, predicted2 = torch.max(c_pre2.data, 1)\n total2 += y_train.size(0)\n correct2 += predicted2.eq(y_train.data).cuda().sum()\n loss_sum2 = torch.mul(lossreg2, 1 / 1) + loss_cl2\n loss_sum2.backward()\n torch.nn.utils.clip_grad_norm_(n.parameters(), 5.0)\n optimizer.step()\n torch.cuda.empty_cache()\n if train_globa_step% 20==0:\n\n n.eval()\n checkpoint = {\n 'state_dict': n.state_dict(),\n 'opt_state_dict': optimizer.state_dict(),\n 'opt_state_dict2':opt.state_dict(),\n 'epoch': epoch\n }\n\n torch.save(checkpoint, 'model_params.pkl')\n fmt = '{:.4f}'.format\n loadertrain.set_postfix(loss=fmt(loss.data.item()),\n\n acc=fmt(correct.item() / total * 100))\n\n if (epoch) % 1 ==0:\n test_loss = 0.0\n correct = 0.0\n total = 0.0\n n.eval()\n with torch.no_grad():\n for x_test, y_test in loadertest:\n x_test, y_test = Variable(x_test.cuda()), Variable(y_test.cuda())\n \n y_pre, c_pre = n(x_test)\n\n y_pre = y_pre.cuda()\n\n loss_cl = loss2(c_pre, y_test)\n loss = loss1(torch.mul(y_pre,1.0), torch.mul( x_test,1.0))\n\n loss_sum = torch.mul(loss,1/1) + loss_cl\n test_loss += loss_sum.data.item()\n _, predicted = torch.max(c_pre.data, 1)\n total += y_test.size(0)\n correct += predicted.eq(y_test.data).cuda().sum()\n val_globa_step+=1\n fmt = '{:.4f}'.format\n loadertest.set_postfix(loss=fmt(loss_sum.data.item()),\n\n acc=fmt(correct.item() / total * 100))\n sch.step(test_loss)\n fl=es.step(correct.item()/total*100, n,optimizer,opt,epoch)\n if fl:\n torch.cuda.empty_cache()\n sys.exit(0) \n torch.cuda.empty_cache()", "import torch.nn as nn\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\nfrom art.data_generators import *\nfrom art.utils import *\nfrom art.classifiers import *\nfrom art.attacks import *\nimport numpy as np\nimport resnet as resnet\nimport argparse\nimport models\nfrom tqdm import tqdm\nparser = argparse.ArgumentParser()\nimport warnings\nimport scipy.misc\nfrom torchvision.datasets import ImageFolder\nfrom PIL import Image\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import Dataset\nwarnings.filterwarnings(\"ignore\")\nparser.add_argument('--d', type=str, default='inf', help='attack based on which distance met:inf,l1,l2')\nparser.add_argument('--m', type=str, default='pgd',\n help='attack based on which method:fgsm,pgd,cw,boundary,deepfool,jsma,bim')\nparser.add_argument('--e', type=float, default=8/255.0,\n help='max distance between adv example and the ori:inf--0.3,l2--1.5')\nparser.add_argument('--a', type=str, default='w', help='attack method including whitebox(w) and blackbox(b)')\nparser.add_argument('--at', type=str, default=None,\n help='model under attack with which method to train:None, fgsm ,pgd')\nparser.add_argument('--atw', type=str, default=None,\n help='only in blackbox attack, which method helping model used:None, fgsm, pgd')\nargs = parser.parse_args()\nprint(args)\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"4\"\n# Hyper-parameters\nparam = {\n 'test_batch_size': 24,\n}\nbatch_size=24\n\ntransform= transforms.Compose([\n transforms.RandomResizedCrop(size=256, scale=(0.8, 
1.0)),\n transforms.RandomRotation(degrees=15),\n transforms.ColorJitter(),\n transforms.RandomHorizontalFlip(),\n transforms.CenterCrop(size=224), # Image net standards\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225]) # Imagenet standards\n ])\n\ntransform_test= transforms.Compose([\n transforms.Resize(size=256),\n transforms.CenterCrop(size=224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n\n\nclass MyDataset(Dataset):\n def __init__(self, filenames, labels, transform):\n self.filenames = filenames\n self.labels = labels\n self.transform = transform\n\n def __len__(self):\n return len(self.filenames)\n\n def __getitem__(self, idx):\n image = Image.open(self.filenames[idx]).convert('RGB')\n image = self.transform(image)\n return image, self.labels[idx]\n\n\ndef split_Train_Val_Data(data_dir, ratio):\n \"\"\" the sum of ratio must equal to 1\"\"\"\n dataset = ImageFolder(data_dir) # data_dir is the directory one level above the class folders\n character = [[] for i in range(len(dataset.classes))]\n # print(dataset.class_to_idx)\n for x, y in dataset.samples: # group the samples by class label\n character[y].append(x)\n # print(dataset.samples)\n\n train_inputs, val_inputs = [], []\n train_labels, val_labels = [], []\n for i, data in enumerate(character): # data holds the images of one class\n num_sample_train = int(len(data) * ratio[0])\n # print(num_sample_train)\n num_sample_val = int(len(data) * ratio[1])\n num_val_index = num_sample_train + num_sample_val\n for x in data[:num_sample_train]:\n train_inputs.append(str(x))\n train_labels.append(i)\n for x in data[num_sample_train:num_val_index]:\n val_inputs.append(str(x))\n val_labels.append(i)\n # print(len(train_inputs))\n train_dataloader = DataLoader(MyDataset(train_inputs, train_labels, transform),\n batch_size=batch_size, shuffle=True, num_workers=16)\n val_dataloader = DataLoader(MyDataset(val_inputs, val_labels, transform_test),\n batch_size=batch_size, shuffle=True, num_workers=16)\n\n return train_dataloader, val_dataloader\n\n\ndata_dir = '/101_ObjectCategories'\ntrainloader, loader_test = split_Train_Val_Data(data_dir, [0.9, 0.1])\nprint(len(loader_test))\n\nclass res_m(nn.Module):\n def __init__(self, model1):\n super(res_m, self).__init__()\n self.m1 = model1\n\n def forward(self, input):\n _,y = self.m1(input)\n return y\n\n\nclass res_gen(nn.Module):\n def __init__(self, model1):\n super(res_gen, self).__init__()\n self.m1 = model1\n\n def forward(self, input):\n _,y = self.m1(input)\n return _\n\n\n\npydataloader=PyTorchDataGenerator(loader_test,866,param['test_batch_size'])\n\npylist=[]\nfor i in range(866//param['test_batch_size']):\n (x,y)=pydataloader.get_batch()\n pylist.append((x,y))\n# Setup model to be attacked\nif args.a=='w':\n net = resnet.resnet101().cuda()\n if args.at is None:\n checkpoint = torch.load('bestmodel_params.pkl')\n net.load_state_dict(checkpoint['state_dict'])\n else:\n checkpoint = torch.load('bestmodel_params_adv_train_%s.pkl'%args.at)\n net.load_state_dict(checkpoint['state_dict'])\n \n net.eval()\n res = res_m(net).eval()\n gen=res_gen(net).eval()\n loss = nn.CrossEntropyLoss().cuda()\n optimizer = torch.optim.Adam(res.parameters())\n fmodel = PyTorchClassifier(\n res,loss=loss,optimizer=optimizer,input_shape=(3,224,224),nb_classes=102,clip_values=(0.0, 1.0))\n \n evalmodel = PyTorchClassifier(\n res,loss=loss,optimizer=optimizer,input_shape=(3,224,224),nb_classes=102,clip_values=(0.0, 1.0))\n genmodel=PyTorchClassifier(\n
gen,loss=loss,optimizer=optimizer,input_shape=(3,224,224),nb_classes=102,clip_values=(0.0, 1.0))\n\nelif args.a=='b':\n netblack = resnet.resnet101().cuda()\n net=models.resnext50().cuda()\n if args.atw is None:\n checkpoint = torch.load('bestmodel_params_resnet.pkl')\n net.load_state_dict(checkpoint['state_dict'])\n else:\n checkpoint = torch.load('bestmodel_params_resnet_adv_train_%s.pkl'%args.atw)\n net.load_state_dict(checkpoint['state_dict'])\n \n net.eval()\n \n if args.at is None:\n checkpoint = torch.load('bestmodel_params.pkl')\n netblack.load_state_dict(checkpoint['state_dict'])\n else:\n checkpoint = torch.load('bestmodel_params_adv_train_%s.pkl'%args.at)\n netblack.load_state_dict(checkpoint['state_dict'])\n res_black = res_m(netblack).eval()\n loss1 = nn.CrossEntropyLoss().cuda()\n optimizer1 = torch.optim.Adam(net.parameters())\n loss2 = nn.CrossEntropyLoss().cuda()\n optimizer2 = torch.optim.Adam(res_black.parameters())\n fmodel = PyTorchClassifier(\n net, loss=loss1,optimizer=optimizer1,input_shape=(3,224,224),nb_classes=102,clip_values=(0.0, 1.0))\n \n evalmodel = PyTorchClassifier(\n res_black, loss=loss2,optimizer=optimizer2,input_shape=(3,224,224),nb_classes=102,clip_values=(0.0, 1.0))\nelse:\n print('wrong attack type')\n exit(0)\n \nori_acc=0\nadv_acc=0\nloadertrain = tqdm(pylist, desc='{}'.format('attack'), ncols=0)\ncounter=0\nfor x_train, y_train in loadertrain:\n\n x_train.shape=(param['test_batch_size'],3,224,224)\n\n preds = np.argmax(fmodel.predict(x_train,batch_size=param['test_batch_size']), axis=1)\n preds.shape = (param['test_batch_size'])\n y=y_train.copy()\n y.shape = (param['test_batch_size'])\n y_train=to_categorical(y_train,102)\n acc_o = np.sum(preds == y)\n ori_acc+=acc_o\n \n# Craft adversarial samples with FGSM\n epsilon = args.e # Maximum perturbation\n if args.m=='fgsm':\n if args.d=='inf':\n adv_crafter = FastGradientMethod(fmodel,norm=np.inf,eps=epsilon,batch_size=param['test_batch_size'])\n elif args.d=='l2':\n adv_crafter = FastGradientMethod(fmodel,norm=2,eps=epsilon,batch_size=param['test_batch_size'])\n elif args.d=='l1':\n adv_crafter = FastGradientMethod(fmodel,norm=1,eps=epsilon,batch_size=param['test_batch_size'])\n else:\n print('wrong distance')\n exit(0)\n x_test_adv = adv_crafter.generate(x_train,y_train)\n elif args.m=='pgd':\n if args.d=='inf':\n adv_crafter = ProjectedGradientDescent(fmodel,norm=np.inf,eps=epsilon,eps_step=2/255.0, max_iter=40,batch_size=param['test_batch_size'])\n elif args.d=='l2':\n adv_crafter = ProjectedGradientDescent(fmodel,norm=2,eps=epsilon,batch_size=param['test_batch_size'])\n elif args.d=='l1':\n adv_crafter = ProjectedGradientDescent(fmodel,norm=1,eps=epsilon,batch_size=param['test_batch_size'])\n else:\n print('wrong distance')\n exit(0)\n x_test_adv = adv_crafter.generate(x_train,y_train)\n elif args.m=='boundary':\n if args.d=='inf':\n adv_crafter = HopSkipJump(fmodel,targeted=False,norm=np.inf,max_eval=100)\n elif args.d=='l2':\n adv_crafter = HopSkipJump(fmodel, targeted=False,norm=2,max_eval=100)\n else:\n print('wrong distance')\n exit(0)\n x_test_adv = adv_crafter.generate(x_train)\n elif args.m=='cw':\n if args.d=='l2':\n adv_crafter = CarliniL2Method(fmodel,batch_size=param['test_batch_size'])\n elif args.d=='inf':\n adv_crafter = CarliniLInfMethod(fmodel,eps=epsilon,batch_size=param['test_batch_size'])\n else:\n print('wrong distance')\n exit(0)\n x_test_adv = adv_crafter.generate(x_train,y_train)\n elif args.m=='deepfool':\n adv_crafter = 
DeepFool(fmodel,batch_size=param['test_batch_size'])\n x_test_adv = adv_crafter.generate(x_train,y_train)\n\t\n elif args.m=='jsma':\n adv_crafter = SaliencyMapMethod(fmodel, batch_size=param['test_batch_size'])\n x_test_adv = adv_crafter.generate(x_train, y_train)\n elif args.m=='bim':\n adv_crafter = BasicIterativeMethod(fmodel, batch_size=param['test_batch_size'])\n x_test_adv = adv_crafter.generate(x_train, y_train)\n elif args.m == 'zoo' and args.a == 'w':\n adv_crafter = ZooAttack(fmodel,nb_parallel=1024, batch_size=param['test_batch_size'])\n x_test_adv = adv_crafter.generate(x_train, y_train)\n elif args.m == 'zoo' and args.a == 'b':\n print('zoo used in --a w condition')\n exit(0)\n else:\n print('wrong method')\n exit(0)\n if x_test_adv is not None:\n\n preds = np.argmax(evalmodel.predict(x_test_adv), axis=1)\n preds.shape = (param['test_batch_size'])\n acc_a = np.sum(preds == y)\n adv_acc += acc_a\n else:\n preds = np.argmax(evalmodel.predict(x_train), axis=1)\n preds.shape = (param['test_batch_size'])\n acc_a = np.sum(preds == y)\n adv_acc += acc_a\n loadertrain.set_postfix(oriacc=ori_acc,\n\n advacc=adv_acc)\n\nprint(\"\\nTest accuracy: %.2f%%\" % (ori_acc/866 * 100))\nprint(\"\\nTest accuracy on adversarial sample: %.2f%%\" % (adv_acc/866 * 100))\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.optim.lr_scheduler.ReduceLROnPlateau", "torch.max", "torch.load", "torch.utils.data.DataLoader", "torch.cuda.empty_cache", "torch.mul", "torch.no_grad", "torch.clamp", "torch.nn.MSELoss", "torch.save" ], [ "torch.nn.CrossEntropyLoss", "numpy.sum" ] ]
hirowgit/2B3_python_owl_logic_database_course
[ "81096b287c32a067aa11a9a37ae5a4c6a0d1301e" ]
[ "lec1_step3.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n## Python basics for novice data scientists, supported by Wagatsuma Lab@Kyutech \n#\n# The MIT License (MIT): Copyright (c) 2020 Hiroaki Wagatsuma and Wagatsuma Lab@Kyutech\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */\n#\n# # @Time : 2020-4-20 \n# # @Author : Hiroaki Wagatsuma\n# # @Site : https://github.com/hirowgit/2A_python_basic_course\n# # @IDE : Python 3.7.7 (default, Mar 10 2020, 15:43:27) [Clang 10.0.0 (clang-1000.11.45.5)] on darwin\n# # @File : lec1_step3.py \n\n\n# In[2]:\n\n\n# running without modules on mathematics\npi\n\n\n# In[3]:\n\n\n# module test: if you have an error when you run this code, you need to check the installation status of those modules\n\nimport math \nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\n# In[4]:\n\n\nimport math \npi=math.pi\nprint(pi)\n\n\n# In[5]:\n\n\nx = np.arange(-3.14, 3.14, 0.1)\ny = np.sin(x)\nplt.plot(x, y)\n\n\n# In[6]:\n\n\ns = pd.Series([2, 4, 6, np.nan, 7, 9])\nprint(s)\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.plot", "numpy.arange", "pandas.Series", "numpy.sin" ] ]
nagasudhirpulla/wrldc_scada_mumbai_dashboard
[ "bc107ef47568781b588316f0c5c0c0d2a08adac8" ]
[ "src/services/scadaApiFetcher.py" ]
[ "import requests\nimport json\nimport datetime as dt\nfrom typing import Dict, Union, List, Optional\nfrom src.typeDefs.scadaApiDataSample import IScadaApiDataSample\nimport pandas as pd\nimport random\n\n\nclass ScadaApiFetcher():\n apiHost: str = ''\n apiPort: int = 80\n isDummyFetch: bool = False\n\n def __init__(self, apiHost: str, apiPort: int, isDummyFetch=False):\n self.apiHost = apiHost\n self.apiPort = apiPort\n self.isDummyFetch = isDummyFetch\n\n def fetchPntHistData(self, pnt: str, startTime: dt.datetime, endTime: dt.datetime, fetchStrategy: str = 'snap', sampleFreq: int = 300) -> List[IScadaApiDataSample]:\n if self.isDummyFetch:\n if (startTime > endTime) or (sampleFreq == 0):\n return []\n currTime = startTime\n dataRes: List[IScadaApiDataSample] = []\n while currTime <= endTime:\n dataRes.append(\n {\"timestamp\": currTime, \"dval\": random.randint(1, 100), \"status\": \"GOOD\"})\n currTime = currTime + dt.timedelta(seconds=sampleFreq)\n return dataRes\n startTimeStr = startTime.strftime('%d/%m/%Y/%H:%M:%S')\n endTimeStr = endTime.strftime('%d/%m/%Y/%H:%M:%S')\n # print(req_date_str)\n params: Dict[str, Union[int, str]] = dict(\n pnt=pnt,\n strtime=startTimeStr,\n endtime=endTimeStr,\n secs=sampleFreq,\n type=fetchStrategy\n )\n try:\n # http://host:80/api/values/history?pnt=pntId&strtime=12/12/2019/00:00:00&endtime=13/12/2019/00:00:00&secs=900&type=average\n r = requests.get(\n url=\"http://{0}:{1}/api/values/history\".format(self.apiHost, self.apiPort), params=params)\n resTxt = r.text\n if pd.isna(resTxt) or (resTxt == '') or (resTxt == '[]') or (resTxt == 'null'):\n return []\n data = json.loads(resTxt)\n return data\n except:\n return []\n\n def fetchPntRtData(self, pnt) -> Optional[float]:\n if self.isDummyFetch:\n return random.randrange(1, 100)\n params = dict(\n pnt=pnt,\n )\n try:\n # http://host:80/api/values/real?pnt=pntId&strtime=12/12/2019/00:00:00&endtime=13/12/2019/00:00:00&secs=900&type=average\n r = requests.get(\n url=\"http://{0}:{1}/api/values/real\".format(self.apiHost, self.apiPort), params=params)\n resTxt = r.text\n if pd.isna(resTxt) or (resTxt == 'null') or (resTxt == ''):\n return None\n resData: IScadaApiDataSample = json.loads(resTxt)\n return float(resData['dval'])\n except:\n return None\n" ]
[ [ "pandas.isna" ] ]
ksluck/Coadaptation
[ "aa16f277cd31c324a62c832ef2cef94e28d598b8" ]
[ "RL/soft_actor.py" ]
[ "from rlkit.torch.sac.policies import TanhGaussianPolicy\n# from rlkit.torch.sac.sac import SoftActorCritic\nfrom rlkit.torch.networks import FlattenMlp\nimport numpy as np\nfrom .rl_algorithm import RL_algorithm\nfrom rlkit.torch.sac.sac import SACTrainer as SoftActorCritic_rlkit\nimport rlkit.torch.pytorch_util as ptu\nimport torch\nimport utils\n\n# networks = {individual:, population:}\nclass SoftActorCritic(RL_algorithm):\n\n def __init__(self, config, env, replay, networks):\n \"\"\" Bascally a wrapper class for SAC from rlkit.\n\n Args:\n config: Configuration dictonary\n env: Environment\n replay: Replay buffer\n networks: dict containing two sub-dicts, 'individual' and 'population'\n which contain the networks.\n\n \"\"\"\n super().__init__(config, env, replay, networks)\n\n self._variant_pop = config['rl_algorithm_config']['algo_params_pop']\n self._variant_spec = config['rl_algorithm_config']['algo_params']\n\n self._ind_qf1 = networks['individual']['qf1']\n self._ind_qf2 = networks['individual']['qf2']\n self._ind_qf1_target = networks['individual']['qf1_target']\n self._ind_qf2_target = networks['individual']['qf2_target']\n self._ind_policy = networks['individual']['policy']\n\n self._pop_qf1 = networks['population']['qf1']\n self._pop_qf2 = networks['population']['qf2']\n self._pop_qf1_target = networks['population']['qf1_target']\n self._pop_qf2_target = networks['population']['qf2_target']\n self._pop_policy = networks['population']['policy']\n\n self._batch_size = config['rl_algorithm_config']['batch_size']\n self._nmbr_indiv_updates = config['rl_algorithm_config']['indiv_updates']\n self._nmbr_pop_updates = config['rl_algorithm_config']['pop_updates']\n\n self._algorithm_ind = SoftActorCritic_rlkit(\n env=self._env,\n policy=self._ind_policy,\n qf1=self._ind_qf1,\n qf2=self._ind_qf2,\n target_qf1=self._ind_qf1_target,\n target_qf2=self._ind_qf2_target,\n use_automatic_entropy_tuning = False,\n **self._variant_spec\n )\n\n self._algorithm_pop = SoftActorCritic_rlkit(\n env=self._env,\n policy=self._pop_policy,\n qf1=self._pop_qf1,\n qf2=self._pop_qf2,\n target_qf1=self._pop_qf1_target,\n target_qf2=self._pop_qf2_target,\n use_automatic_entropy_tuning = False,\n **self._variant_pop\n )\n\n # self._algorithm_ind.to(ptu.device)\n # self._algorithm_pop.to(ptu.device)\n\n def episode_init(self):\n \"\"\" Initializations to be done before the first episode.\n\n In this case basically creates a fresh instance of SAC for the\n individual networks and copies the values of the target network.\n \"\"\"\n self._algorithm_ind = SoftActorCritic_rlkit(\n env=self._env,\n policy=self._ind_policy,\n qf1=self._ind_qf1,\n qf2=self._ind_qf2,\n target_qf1=self._ind_qf1_target,\n target_qf2=self._ind_qf2_target,\n use_automatic_entropy_tuning = False,\n # alt_alpha = self._alt_alpha,\n **self._variant_spec\n )\n if self._config['rl_algorithm_config']['copy_from_gobal']:\n utils.copy_pop_to_ind(networks_pop=self._networks['population'], networks_ind=self._networks['individual'])\n # We have only to do this becasue the version of rlkit which we use\n # creates internally a target network\n # vf_dict = self._algorithm_pop.target_vf.state_dict()\n # self._algorithm_ind.target_vf.load_state_dict(vf_dict)\n # self._algorithm_ind.target_vf.eval()\n # self._algorithm_ind.to(ptu.device)\n\n def single_train_step(self, train_ind=True, train_pop=False):\n \"\"\" A single trianing step.\n\n Args:\n train_ind: Boolean. If true the individual networks will be trained.\n train_pop: Boolean. 
If true the population networks will be trained.\n \"\"\"\n if train_ind:\n # Get only samples from the species buffer\n self._replay.set_mode('species')\n # self._algorithm_ind.num_updates_per_train_call = self._variant_spec['num_updates_per_epoch']\n # self._algorithm_ind._try_to_train()\n for _ in range(self._nmbr_indiv_updates):\n batch = self._replay.random_batch(self._batch_size)\n self._algorithm_ind.train(batch)\n\n if train_pop:\n # Get only samples from the population buffer\n self._replay.set_mode('population')\n # self._algorithm_pop.num_updates_per_train_call = self._variant_pop['num_updates_per_epoch']\n # self._algorithm_pop._try_to_train()\n for _ in range(self._nmbr_pop_updates):\n batch = self._replay.random_batch(self._batch_size)\n self._algorithm_pop.train(batch)\n\n @staticmethod\n def create_networks(env, config):\n \"\"\" Creates all networks necessary for SAC.\n\n These networks have to be created before instantiating this class and\n used in the constructor.\n\n Args:\n config: A configuration dictonary containing population and\n individual networks\n\n Returns:\n A dictonary which contains the networks.\n \"\"\"\n network_dict = {\n 'individual' : SoftActorCritic._create_networks(env=env, config=config),\n 'population' : SoftActorCritic._create_networks(env=env, config=config),\n }\n return network_dict\n\n @staticmethod\n def _create_networks(env, config):\n \"\"\" Creates all networks necessary for SAC.\n\n These networks have to be created before instantiating this class and\n used in the constructor.\n\n TODO: Maybe this should be reworked one day...\n\n Args:\n config: A configuration dictonary.\n\n Returns:\n A dictonary which contains the networks.\n \"\"\"\n obs_dim = int(np.prod(env.observation_space.shape))\n action_dim = int(np.prod(env.action_space.shape))\n net_size = config['rl_algorithm_config']['net_size']\n hidden_sizes = [net_size] * config['rl_algorithm_config']['network_depth']\n # hidden_sizes = [net_size, net_size, net_size]\n qf1 = FlattenMlp(\n hidden_sizes=hidden_sizes,\n input_size=obs_dim + action_dim,\n output_size=1,\n ).to(device=ptu.device)\n qf2 = FlattenMlp(\n hidden_sizes=hidden_sizes,\n input_size=obs_dim + action_dim,\n output_size=1,\n ).to(device=ptu.device)\n qf1_target = FlattenMlp(\n hidden_sizes=hidden_sizes,\n input_size=obs_dim + action_dim,\n output_size=1,\n ).to(device=ptu.device)\n qf2_target = FlattenMlp(\n hidden_sizes=hidden_sizes,\n input_size=obs_dim + action_dim,\n output_size=1,\n ).to(device=ptu.device)\n policy = TanhGaussianPolicy(\n hidden_sizes=hidden_sizes,\n obs_dim=obs_dim,\n action_dim=action_dim,\n ).to(device=ptu.device)\n\n clip_value = 1.0\n for p in qf1.parameters():\n p.register_hook(lambda grad: torch.clamp(grad, -clip_value, clip_value))\n for p in qf2.parameters():\n p.register_hook(lambda grad: torch.clamp(grad, -clip_value, clip_value))\n for p in policy.parameters():\n p.register_hook(lambda grad: torch.clamp(grad, -clip_value, clip_value))\n\n return {'qf1' : qf1, 'qf2' : qf2, 'qf1_target' : qf1_target, 'qf2_target' : qf2_target, 'policy' : policy}\n\n @staticmethod\n def get_q_network(networks):\n \"\"\" Returns the q network from a dict of networks.\n\n This method extracts the q-network from the dictonary of networks\n created by the function create_networks.\n\n Args:\n networks: Dict containing the networks.\n\n Returns:\n The q-network as torch object.\n \"\"\"\n return networks['qf1']\n\n @staticmethod\n def get_policy_network(networks):\n \"\"\" Returns the policy network from a 
dict of networks.\n\n This method extracts the policy network from the dictionary of networks\n created by the function create_networks.\n\n Args:\n networks: Dict containing the networks.\n\n Returns:\n The policy network as torch object.\n \"\"\"\n return networks['policy']\n" ]
[ [ "torch.clamp", "numpy.prod" ] ]
MatKie/SGTPy
[ "8e98d92fedd2b07d834e547e5154ec8f70d80728", "8e98d92fedd2b07d834e547e5154ec8f70d80728", "8e98d92fedd2b07d834e547e5154ec8f70d80728" ]
[ "sgtpy/vrmie_mixtures/density_solver.py", "sgtpy/vrmie_pure/a2m_monomer.py", "sgtpy/vrmie_pure/ideal.py" ]
[ "from __future__ import division, print_function, absolute_import\nimport numpy as np\nfrom scipy.optimize import minimize_scalar, brentq\nfrom ..constants import Na\n\n\ndef dPsaft_fun(rho, x, temp_aux, saft):\n rhomolecular = Na * rho\n global Xass\n da, Xass = saft.d2afcn_drho_aux(x, rhomolecular, temp_aux, Xass)\n afcn, dafcn, d2afcn = da\n dPsaft = 2 * rhomolecular * dafcn + rhomolecular**2 * d2afcn\n return dPsaft\n\n\ndef Psaft_obj(rho, x, temp_aux, saft, Pspec):\n rhomolecular = Na * rho\n global Xass\n da, Xass = saft.dafcn_drho_aux(x, rhomolecular, temp_aux, Xass)\n afcn, dafcn = da\n Psaft = rhomolecular**2 * dafcn / Na\n return Psaft - Pspec\n\n\ndef density_newton_lim(rho_a, rho_b, x, temp_aux, P, Xass0, saft):\n rho = (rho_a + rho_b) / 2\n Psaft, dPsaft, Xass = saft.dP_drho_aux(x, rho, temp_aux, Xass0)\n for i in range(15):\n rho_old = rho\n FO = Psaft - P\n dFO = dPsaft\n drho = FO/dFO\n rho_new = rho - drho\n\n if FO > 0:\n rho_b = rho\n else:\n rho_a = rho\n\n if rho_a < rho_new < rho_b:\n rho = rho_new\n else:\n rho = (rho_a + rho_b) / 2\n\n if np.abs(rho - rho_old) < 1e-6:\n break\n Psaft, dPsaft, Xass = saft.dP_drho_aux(x, rho, temp_aux, Xass)\n return rho, Xass\n\n\ndef density_topliss(state, x, temp_aux, P, Xass0, saft):\n\n if state != 'L' and state != 'V':\n raise Warning(\"Not valid state. 'L' for liquid and 'V' for vapor.\")\n\n beta = temp_aux[0]\n # lower boundary a zero density\n rho_lb = 1e-5\n dP_lb = Na / beta\n\n # Upper boundary limit at infinity pressure\n etamax = 0.7405\n rho_lim = (6 * etamax) / np.dot(x, (saft.ms * np.pi * saft.sigma**3)) / Na\n ub_sucess = False\n rho_ub = 0.4 * rho_lim\n it = 0\n P_ub, dP_ub, Xass_ub = saft.dP_drho_aux(x, rho_ub, temp_aux, Xass0)\n while not ub_sucess and it < 5:\n it += 1\n P_ub, dP_ub, Xass_ub = saft.dP_drho_aux(x, rho_ub, temp_aux, Xass_ub)\n rho_ub += 0.15 * rho_lim\n ub_sucess = P_ub > P and dP_ub > 0\n\n # Derivative calculation at zero density\n rho_lb1 = 1e-4 * rho_lim\n P_lb1, dP_lb1, Xass_lb = saft.dP_drho_aux(x, rho_lb1, temp_aux, Xass0)\n d2P_lb1 = (dP_lb1 - dP_lb) / rho_lb1\n if d2P_lb1 > 0:\n flag = 3\n else:\n flag = 1\n\n global Xass\n Xass = Xass0\n\n # Stage 1\n bracket = [rho_lb, rho_ub]\n if flag == 1:\n # Found inflexion point\n sol_inf = minimize_scalar(dPsaft_fun, args=(x, temp_aux, saft),\n bounds=bracket, method='Bounded',\n options={'xatol': 1e-1})\n rho_inf = sol_inf.x\n dP_inf = sol_inf.fun\n if dP_inf > 0:\n flag = 3\n else:\n flag = 2\n\n # Stage 2\n if flag == 2:\n if state == 'L':\n bracket[0] = rho_inf\n elif state == 'V':\n bracket[1] = rho_inf\n rho_ext = brentq(dPsaft_fun, bracket[0], bracket[1],\n args=(x, temp_aux, saft), xtol=1e-2)\n P_ext, dP_ext, Xass = saft.dP_drho_aux(x, rho_ext, temp_aux, Xass)\n if P_ext > P and state == 'V':\n bracket[1] = rho_ext\n elif P_ext < P and state == 'L':\n bracket[0] = rho_ext\n else:\n flag = -1\n\n if flag == -1:\n rho = np.nan\n else:\n rho, Xass = density_newton_lim(bracket[0], bracket[1], x, temp_aux,\n P, Xass, saft)\n # rho = brentq(Psaft_obj, bracket[0], bracket[1],\n # args=(x, temp_aux, saft, P))\n\n return rho, Xass\n\n\ndef density_newton(rho0, x, temp_aux, P, Xass0, saft):\n\n rho = 1.*rho0\n Psaft, dPsaft, Xass = saft.dP_drho_aux(x, rho, temp_aux, Xass0)\n for i in range(15):\n FO = Psaft - P\n dFO = dPsaft\n drho = FO/dFO\n rho -= drho\n if np.abs(drho) < 1e-6:\n break\n Psaft, dPsaft, Xass = saft.dP_drho_aux(x, rho, temp_aux, Xass)\n return rho, Xass\n", "from __future__ import division, print_function, 
absolute_import\nimport numpy as np\n\n\n# Second pertubation Eq 36\ndef a2m(suma_a2, khs, xi, cte_a2m):\n a2 = khs*(1+xi)*suma_a2*cte_a2m\n return a2\n\n\ndef da2m_deta(suma_a2, dKhs, dXi, cte_a2m):\n\n khs, dkhs = dKhs\n xi, dx1 = dXi\n x1 = 1. + xi\n\n #sum1, dsum1 = np.matmul(da1sb, x0lambda)\n sum1, dsum1 = suma_a2\n # a2 = khs*x1*eps*c2*sum1/2.\n a2 = khs*x1*sum1*cte_a2m\n\n da2 = sum1*x1*dkhs + khs * x1 * dsum1 + khs * sum1 * dx1\n da2 *= cte_a2m\n return np.hstack([a2, da2])\n\n\ndef d2a2m_deta(suma_a2, d2Khs, d2Xi, cte_a2m):\n\n khs, dkhs, d2khs = d2Khs\n xi, dx1, d2x1 = d2Xi\n x1 = 1. + xi\n\n sum1, dsum1, d2sum1 = suma_a2\n\n a2 = khs*x1*sum1*cte_a2m\n\n da2 = sum1*x1*dkhs + khs * x1 * dsum1 + khs * sum1 * dx1\n da2 *= cte_a2m\n\n d2a2 = d2khs * sum1 * x1 + d2x1 * sum1 * khs + d2sum1 * khs * x1\n d2a2 += 2 * dkhs * dsum1 * x1\n d2a2 += 2 * sum1 * dkhs * dx1\n d2a2 += 2 * khs * dsum1 * dx1\n d2a2 *= cte_a2m\n return np.hstack([a2, da2, d2a2])\n\n\ndef da2m_new_deta(suma_a2, dKhs, cte_a2m):\n\n khs, dkhs = dKhs\n # sum1, dsum1 = np.matmul(da1sb, x0lambda)\n sum1, dsum1 = suma_a2\n da2 = cte_a2m*(dkhs*sum1 + khs * dsum1)\n\n return da2\n\n\ndef d2a2m_new_deta(suma_a2, d2Khs, cte_a2m):\n\n khs, dkhs, d2khs = d2Khs\n # sum1, dsum1, d2sum1 = np.matmul(d2a1sb, x0lambda)\n sum1, dsum1, d2sum1 = suma_a2\n # aux = cte_a2m\n da2 = cte_a2m*(dkhs*sum1 + khs * dsum1)\n\n d2a2 = 2 * dkhs * dsum1 + sum1 * d2khs + d2sum1 * khs\n d2a2 *= cte_a2m\n\n return np.hstack([da2, d2a2])\n\n\ndef d3a2m_new_deta(suma_a2, d3Khs, cte_a2m):\n\n khs, dkhs, d2khs, d3khs = d3Khs\n # sum1, dsum1, d2sum1, d3sum1 = np.matmul(d3a1sb, x0lambda)\n sum1, dsum1, d2sum1, d3sum1 = suma_a2\n # aux = cte_a2m\n da2 = cte_a2m*(dkhs*sum1 + khs * dsum1)\n\n d2a2 = 2 * dkhs * dsum1 + sum1 * d2khs + d2sum1 * khs\n d2a2 *= cte_a2m\n\n d3a2 = 3 * dsum1 * d2khs + 3 * dkhs * d2sum1\n d3a2 += khs * d3sum1 + d3khs * sum1\n d3a2 *= cte_a2m\n return np.hstack([da2, d2a2, d3a2])\n", "from __future__ import division, print_function, absolute_import\nimport numpy as np\n\n\nh = 6.626070150e-34 # J s\nme = 9.10938291e-31 # 1/Kg\n\n\n# Equation 68\ndef aideal(rho, beta):\n broglie_vol = h / np.sqrt(2*np.pi * me / beta)\n a = np.log(rho * broglie_vol**3) - 1\n return a\n\n\ndef daideal_drho(rho, beta):\n broglie_vol = h / np.sqrt(2*np.pi * me / beta)\n a = np.log(rho * broglie_vol**3) - 1\n da = 1./rho\n return np.hstack([a, da])\n\n\ndef d2aideal_drho(rho, beta):\n broglie_vol = h / np.sqrt(2*np.pi * me / beta)\n a = np.log(rho * broglie_vol**3) - 1\n da = 1./rho\n d2a = -1/rho**2\n return np.hstack([a, da, d2a])\n" ]
[ [ "numpy.dot", "scipy.optimize.brentq", "scipy.optimize.minimize_scalar", "numpy.abs" ], [ "numpy.hstack" ], [ "numpy.hstack", "numpy.log", "numpy.sqrt" ] ]
ml-jku/align-rudder
[ "26cf4b62a713e180063cefc2921981484ebb9165", "26cf4b62a713e180063cefc2921981484ebb9165", "26cf4b62a713e180063cefc2921981484ebb9165" ]
[ "align_rudder/run_eight_alignrudder.py", "align_rudder/run_four_bc.py", "align_rudder/run_eight_dqfd.py" ]
[ "import ray\nfrom ray import tune\nimport gym\nfrom align_rudder.learning.q_learning import Qlearning\nimport numpy as np\nimport random\nimport os\nimport pkg_resources\nimport shutil\n\nconfig = {\n 'env_id': 'align_rudder:EightRooms-v0', # environment for the experiment\n 'exp_name': 'align-rudder', # name of the experiment\n 'gamma': 1.0, # Discount factor for q learning algorithm\n 'total_timesteps': 10000000,\n 'max_episodes': 100000,\n 'learning_rate': 0.1,\n 'epsilon': 0.2, # exploration constant\n 'num_seq_store': 10, # max sequences to use for alignment or storing\n 'num_clusters': 15, # Number of clusters to use in k-means\n 'consensus_thresh': 0.9, # Threshold for consensus\n 'eval': 40,\n 'top_n': 12,\n 'rudder': True, # Use rudder or not\n 'mode': 'log', # 'log' or 'exp'\n 'stop_criteria': '80opt',\n 'enough_seq': 3, # How many sequences are enough for sequence alignment\n 'num_demo_use': tune.grid_search([2, 5, 10, 50, 100]), # number of demonstrations\n 'consensus_type': 'all', # Select between most common or threshold all sequences: all, most_common\n 'cluster_type': 'AP', # Use default clustering, SpectralClustering, AffinityPropogation: default, SC, AP\n 'seed': tune.grid_search([i for i in range(10)]), # Seed for experiment\n 'anneal_eps': 1.0, # annealing rate for exploration\n 'eps_lb': 0.0, # eps anneal lower bound\n 'rr_thresh': 0.005, # Inverse visitation freq below thresh, set rr to zero\n 'log_every': 10, # log every timesteps\n 'normalise_rr_by_max': True, # normalize rr by maximum reward in rr\n 'normalisation_scale': 10, # scale factor compared to original reward\n 'use_succ': True,\n 'use_demo': True,\n 'demo_path': 'demonstrations/eight_rooms.npy',\n 'update_alignment:': False,\n 'max_reward': 1,\n 'use_exp_replay': False,\n 'memory_len': 30000,\n 'init_mean': False,\n 'use_new_form': True\n}\n\n\ndef run(config):\n run_path = os.getcwd()\n env_id = config['env_id']\n env = gym.make(env_id)\n # set seed\n np.random.seed(config['seed'])\n random.seed(config['seed'])\n demo_path = pkg_resources.resource_filename(\"align_rudder\", config[\"demo_path\"])\n\n rl = Qlearning(env=env, eps=config['epsilon'], alpha=config['learning_rate'],\n total_timesteps=config['total_timesteps'],\n num_store_seq=config['num_seq_store'], rudder=config['rudder'], enough_seq=config['enough_seq'],\n num_clusters=config['num_clusters'], top_n=config['top_n'],\n consensus_type=config['consensus_type'],\n consensus_thresh=config['consensus_thresh'], cluster_type=config['cluster_type'],\n run_path=run_path,\n anneal_eps=config['anneal_eps'], eps_lb=config['eps_lb'], rr_thresh=config['rr_thresh'],\n log_every=config['log_every'], normalise_rr_by_max=config['normalise_rr_by_max'],\n normalisation_scale=config['normalisation_scale'], use_succ=config['use_succ'],\n use_demo=config['use_demo'],\n demo_path=demo_path,\n num_demo_use=config['num_demo_use'],\n max_episodes=config['max_episodes'], max_reward=config['max_reward'],\n mode=config['mode'],\n gamma=config['gamma'], stop_criteria=config['stop_criteria'], seed=config['seed'],\n init_mean=config['init_mean'], use_new_form=config['use_new_form'])\n\n rl.learn()\n\n\nif __name__ == \"__main__\":\n # clear output dir\n if os.path.exists(os.path.join(\"results\", \"eight_rooms_alignrudder\")):\n shutil.rmtree(os.path.join(\"results\", \"eight_rooms_alignrudder\"))\n\n ray.init(temp_dir='/tmp/ray-eight-align', log_to_driver=False)\n print(\"Starting Runs...\")\n # run(config)\n tune.run(run, config=config, local_dir=\"results/\", 
name=\"eight_rooms_alignrudder\")\n print(\"Finished!\")\n", "import os\nimport pkg_resources\nimport numpy as np\nimport random\nfrom ray import tune\nimport ray\nimport gym\nfrom align_rudder.learning.q_learning import Qlearning\nimport shutil\n\nconfig = {\n 'env_id': 'align_rudder:FourRooms-v0', # environment for the experiment\n 'exp_name': 'align-rudder-bc', # name of the experiment\n 'gamma': 1.0, # Discount factor for q learning algorithm\n 'total_timesteps': 10000000,\n 'max_episodes': 100000,\n 'learning_rate': 0.01,\n 'epsilon': 0.2, # exploration constant\n 'num_seq_store': 10, # max sequences to use for alignment or storing\n 'num_clusters': 10, # Number of clusters to use in k-means\n 'consensus_thresh': 0.9, # Threshold for consensus\n 'eval': 40,\n 'top_n': 12,\n 'rudder': False, # Use rudder or not\n 'mode': 'log',\n 'stop_criteria': '80opt',\n 'enough_seq': 3, # How many sequences are enough for sequence alignment\n 'num_demo_use': tune.grid_search([2, 5, 10, 50, 100]), # number of demonstrations\n 'consensus_type': 'all', # Select between most common or threshold all sequences: all, most_common\n 'cluster_type': 'AP', # Use default clustering, SpectralClustering, AffinityPropogation: default, SC, AP\n 'seed': tune.grid_search([i for i in range(10)]), # Seed for experiment\n 'anneal_eps': 1.0, # annealing rate for exploration\n 'eps_lb': 0.0, # eps anneal lower bound\n 'rr_thresh': 0.005, # Inverse visitation freq below thresh, set rr to zero\n 'log_every': 10, # log every timesteps\n 'normalise_rr_by_max': True, # normalize rr by maximum reward in rr\n 'normalisation_scale': 10, # scale factor compared to original reward\n 'use_succ': False,\n 'use_demo': True,\n 'demo_path': \"demonstrations/four_rooms.npy\",\n 'update_cluster_every': 500,\n 'update_alignment:': False,\n 'max_reward': 1,\n 'use_exp_replay': False,\n 'memory_len': 30000,\n 'init_mean': False\n}\n\n\ndef run(config):\n run_path = os.getcwd()\n env_id = config['env_id']\n env = gym.make(env_id)\n demo_path = pkg_resources.resource_filename(\"align_rudder\", config[\"demo_path\"])\n # set seed\n np.random.seed(config['seed'])\n random.seed(config['seed'])\n\n rl = Qlearning(env=env, eps=config['epsilon'], alpha=config['learning_rate'],\n total_timesteps=config['total_timesteps'],\n num_store_seq=config['num_seq_store'], rudder=config['rudder'], enough_seq=config['enough_seq'],\n num_clusters=config['num_clusters'], top_n=config['top_n'],\n consensus_type=config['consensus_type'],\n consensus_thresh=config['consensus_thresh'], cluster_type=config['cluster_type'],\n run_path=run_path,\n anneal_eps=config['anneal_eps'], eps_lb=config['eps_lb'], rr_thresh=config['rr_thresh'],\n log_every=config['log_every'], normalise_rr_by_max=config['normalise_rr_by_max'],\n normalisation_scale=config['normalisation_scale'], eval=config['eval'], use_succ=config['use_succ'],\n use_demo=config['use_demo'],\n demo_path=demo_path,\n num_demo_use=config['num_demo_use'],\n max_episodes=config['max_episodes'], max_reward=config['max_reward'],\n mode=config['mode'],\n gamma=config['gamma'], stop_criteria=config['stop_criteria'], seed=config['seed'],\n init_mean=config['init_mean'])\n rl.learn()\n\n\nif __name__ == \"__main__\":\n # clear output dir\n if os.path.exists(os.path.join(\"results\", \"four_rooms_bc\")):\n shutil.rmtree(os.path.join(\"results\", \"four_rooms_bc\"))\n\n ray.init(temp_dir='/tmp/ray-four-bc', log_to_driver=False)\n print(\"Starting Runs...\")\n tune.run(run, config=config, local_dir=\"results/\", 
name=\"four_rooms_bc\", resources_per_trial={'cpu': 1})\n print(\"Finished!\")\n", "import os\nimport pkg_resources\nimport numpy as np\nimport random\nfrom ray import tune\nimport ray\nimport gym\nfrom align_rudder.learning.q_learning_exp_replay import QlearningExpReplay\nimport shutil\n\nconfig = {\n 'env_id': 'align_rudder:EightRooms-v0', # environment for the experiment\n 'exp_name': 'align-rudder-dqfd', # name of the experiment\n 'gamma': 1.0, # Discount factor for q learning algorithm\n 'total_timesteps': 10000000,\n 'max_episodes': 100000,\n 'learning_rate': 0.01,\n 'epsilon': 0.2, # exploration constant\n 'num_seq_store': 10, # max sequences to use for alignment or storing\n 'num_clusters': 10, # Number of clusters to use in k-means\n 'consensus_thresh': 0.9, # Threshold for consensus\n 'eval': 40,\n 'top_n': 12,\n 'rudder': False, # Use rudder or not\n 'mode': 'log',\n 'stop_criteria': '80opt',\n 'enough_seq': 3, # How many sequences are enough for sequence alignment\n 'num_demo_use': tune.grid_search([2, 5, 10, 50, 100]), # number of demonstrations\n 'consensus_type': 'all', # Select between most common or threshold all sequences: all, most_common\n 'cluster_type': 'AP', # Use default clustering, SpectralClustering, AffinityPropogation: default, SC, AP\n 'seed': tune.grid_search([i for i in range(10)]), # Seed for experiment\n 'anneal_eps': 1.0, # annealing rate for exploration\n 'eps_lb': 0.0, # eps anneal lower bound\n 'rr_thresh': 0.005, # Inverse visitation freq below thresh, set rr to zero\n 'log_every': 10, # log every timesteps\n 'normalise_rr_by_max': True, # normalize rr by maximum reward in rr\n 'normalisation_scale': 10, # scale factor compared to original reward\n 'use_succ': False,\n 'use_demo': True,\n 'demo_path': 'demonstrations/eight_rooms.npy',\n 'update_cluster_every': 500,\n 'update_alignment:': False,\n 'max_reward': 1,\n 'use_exp_replay': True,\n 'memory_len': 30000,\n 'batch_exp': 10,\n 'margin': 0.8,\n 'nstep': 10,\n 'a1': 0.01,\n 'a2': 0.01,\n 'a3': 1,\n 'a4': 0.1,\n 'init_mean': False\n}\n\n\ndef run(config):\n run_path = os.getcwd()\n env_id = config['env_id']\n env = gym.make(env_id)\n demo_path = pkg_resources.resource_filename(\"align_rudder\", config[\"demo_path\"])\n # set seed\n np.random.seed(config['seed'])\n random.seed(config['seed'])\n\n rl = QlearningExpReplay(env=env, eps=config['epsilon'], alpha=config['learning_rate'],\n total_timesteps=config['total_timesteps'],\n num_store_seq=config['num_seq_store'], rudder=config['rudder'],\n enough_seq=config['enough_seq'],\n num_clusters=config['num_clusters'], top_n=config['top_n'],\n consensus_type=config['consensus_type'],\n consensus_thresh=config['consensus_thresh'], cluster_type=config['cluster_type'],\n run_path=run_path,\n anneal_eps=config['anneal_eps'], eps_lb=config['eps_lb'], rr_thresh=config['rr_thresh'],\n log_every=config['log_every'], normalise_rr_by_max=config['normalise_rr_by_max'],\n normalisation_scale=config['normalisation_scale'], use_succ=config['use_succ'],\n use_demo=config['use_demo'], eval=config['eval'], demo_path=demo_path,\n num_demo_use=config['num_demo_use'],\n max_episodes=config['max_episodes'], max_reward=config['max_reward'],\n gamma=config['gamma'], memory_len=config['memory_len'],\n batch=config['batch_exp'], margin=config['margin'], nstep=config['nstep'],\n seed=config['seed'],\n a1=config['a1'], a2=config['a2'], a3=config['a3'], a4=config['a4'],\n init_mean=config['init_mean'])\n\n rl.learn()\n\n\nif __name__ == \"__main__\":\n # clear output dir\n if 
os.path.exists(os.path.join(\"results\", \"eight_rooms_dqfd\")):\n shutil.rmtree(os.path.join(\"results\", \"eight_rooms_dqfd\"))\n\n ray.init(temp_dir='/tmp/ray-eight-dqfd', log_to_driver=False)\n print(\"Starting Runs...\")\n tune.run(run, config=config, local_dir=\"results/\", name=\"eight_rooms_dqfd\") # , resources_per_trial={'cpu': 1})\n print(\"Finished!\")\n" ]
[ [ "numpy.random.seed" ], [ "numpy.random.seed" ], [ "numpy.random.seed" ] ]
lisadunlap/explainable-nbdt
[ "e045bfd0b55b21fd87c9a233b73a0ca77672efff" ]
[ "nbdt/utils.py" ]
[ "'''Some helper functions for PyTorch, including:\n - get_mean_and_std: calculate the mean and std value of dataset.\n - msr_init: net parameter initialization.\n - progress_bar: progress bar mimic xlua.progress.\n'''\nimport os\nimport sys\nimport time\nimport math\nimport numpy as np\nfrom numpy import linalg as LA\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torchvision.transforms as transforms\nfrom gensim.models import Word2Vec\n\nfrom pathlib import Path\n\n# tree-generation consntants\nMETHODS = ('prune', 'wordnet', 'random', 'image', 'induced', 'self-induced', 'clustered', 'extra_paths', 'weighted',\n 'replace_node', 'insert_node', 'induced-attributes', 'prettify')\nDATASETS = ('CIFAR10', 'CIFAR10IncludeClasses', 'CIFAR100', 'TinyImagenet200', 'TinyImagenet200IncludeClasses', 'Imagenet1000',\n 'TinyImagenet200CombineClasses', 'MiniPlaces', 'AnimalsWithAttributes2', 'CUB2011', 'MiniImagenet')\n\nDATASET_TO_FOLDER_NAME = {\n 'CIFAR10': 'CIFAR10',\n 'CIFAR10ExcludeLabels': 'CIFAR10-zeroshot',\n 'CIFAR10ExcludeClasses': 'CIFAR10',\n 'CIFAR10IncludeLabels': 'CIFAR10-zeroshot',\n 'CIFAR10IncludeClasses': 'CIFAR10',\n 'CIFAR100': 'CIFAR100',\n 'TinyImagenet200': 'tiny-imagenet-200',\n 'TinyImagenet200IncludeClasses': 'tiny-imagenet-200-custom',\n 'Imagenet1000' : 'imagenet-1000',\n 'TinyImagenet200CombineClasses': 'tiny-imagenet-200-custom-combined',\n 'MiniPlaces': 'miniplaces',\n 'AnimalsWithAttributes2': 'Animals_with_Attributes2',\n 'CUB2011': 'CUB_200_2011',\n 'MiniImagenet': 'mini-imagenet'\n}\n\n# main script constants\nCIFAR10PATHSANITY = 'CIFAR10PathSanity'\n\nDEFAULT_CIFAR10_TREE = './data/CIFAR10/graph-wordnet-single.json'\nDEFAULT_CIFAR10_WNIDS = './data/CIFAR10/wnids.txt'\nDEFAULT_CIFAR100_TREE = './data/CIFAR100/graph-wordnet-single.json'\nDEFAULT_CIFAR100_WNIDS = './data/CIFAR100/wnids.txt'\nDEFAULT_TINYIMAGENET200_TREE = './data/tiny-imagenet-200/graph-wordnet-single.json'\nDEFAULT_TINYIMAGENET200_WNIDS = './data/tiny-imagenet-200/wnids.txt'\nDEFAULT_IMAGENET1000_TREE = './data/imagenet-1000/graph-wordnet-single.json'\nDEFAULT_IMAGENET1000_WNIDS = './data/imagenet-1000/wnids.txt'\nDEFAULT_MINIPLACES_TREE = '/data/miniplaces/graph-default.json'\nDEFAULT_MINIPLACES_WNID = './data/miniplaces/wnids.txt'\nDEFAULT_AWA2_TREE = '/data/Animals_with_Attributes2/graph-default.json'\nDEFAULT_AWA2_WNID = './data/Animals_with_Attributes2/wnids.txt'\nDEFAULT_CUB_TREE = '/data/CUB_200_2011/graph-default.json'\nDEFAULT_CUB_WNID = './data/CUB_200_2011/wnids.txt'\nDEFAULT_MiniImagenet_TREE = './data/mini-imagenet/graph-default.json'\nDEFAULT_MiniImagenet_WNID = './data/mini-imagenet/wnids.txt'\n\n\nDATASET_TO_PATHS = {\n 'CIFAR10': {\n 'path_graph': DEFAULT_CIFAR10_TREE,\n 'path_wnids': DEFAULT_CIFAR10_WNIDS\n },\n 'CIFAR100': {\n 'path_graph': DEFAULT_CIFAR100_TREE,\n 'path_wnids': DEFAULT_CIFAR100_WNIDS\n },\n 'TinyImagenet200': {\n 'path_graph': DEFAULT_TINYIMAGENET200_TREE,\n 'path_wnids': DEFAULT_TINYIMAGENET200_WNIDS\n },\n 'Imagenet1000': {\n 'path_graph': DEFAULT_IMAGENET1000_TREE,\n 'path_wnids': DEFAULT_IMAGENET1000_WNIDS\n },\n 'MiniPlaces': {\n 'path_graph': DEFAULT_MINIPLACES_TREE,\n 'path_wnids': DEFAULT_MINIPLACES_WNID\n },\n 'AnimalsWithAttributes2': {\n 'path_graph': DEFAULT_AWA2_TREE,\n 'path_wnids': DEFAULT_AWA2_WNID\n },\n 'CUB2011': {\n 'path_graph': DEFAULT_CUB_TREE,\n 'path_wnids': DEFAULT_CUB_WNID\n },\n 'MiniImagenet': {\n 'path_graph': DEFAULT_MiniImagenet_TREE,\n 'path_wnids': DEFAULT_MiniImagenet_WNID\n 
}\n}\n\nWORD2VEC_NAMES_TO_MODEL = {\n 'wiki': {\n 'name': 'glove-wiki-gigaword-300',\n 'dim': 300\n },\n 'wiki-300': {\n 'name': 'glove-wiki-gigaword-300',\n 'dim': 300\n },\n 'wiki-200': {\n 'name': 'glove-wiki-gigaword-200',\n 'dim': 200\n },\n 'wiki-100': {\n 'name': 'glove-wiki-gigaword-100',\n 'dim': 100\n },\n 'wiki-50': {\n 'name': 'glove-wiki-gigaword-50',\n 'dim': 50\n },\n\n 'twitter': {\n 'name': 'glove-twitter-200',\n 'dim': 200\n }\n}\n\ndef populate_kwargs(args, kwargs, object, name='Dataset', keys=(), globals={}):\n for key in keys:\n accepts_key = getattr(object, f'accepts_{key}', False)\n if not accepts_key:\n continue\n assert key in args or callable(accepts_key)\n\n value = getattr(args, key, None)\n if callable(accepts_key):\n kwargs[key] = accepts_key(**globals)\n Colors.cyan(f'{key}:\\t(callable)')\n elif accepts_key and value:\n kwargs[key] = value\n Colors.cyan(f'{key}:\\t{value}')\n elif value:\n Colors.red(\n f'Warning: {name} does not support custom '\n f'{key}: {value}')\n\n\ndef get_transform_from_name(dataset_name, dataset, input_size):\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n\n # , 'TinyImagenet200IncludeClasses'\n if dataset_name in ('TinyImagenet200', 'Imagenet1000', 'CUB2011'):\n default_input_size = 64 if 'TinyImagenet200' in dataset_name else 224\n input_size = input_size or default_input_size\n transform_train = dataset.transform_train(input_size)\n transform_test = dataset.transform_val(input_size)\n\n if dataset_name in ('MiniImagenet'):\n default_input_size = 84\n input_size = input_size or default_input_size\n transform_train = dataset.transform_train(input_size)\n transform_test = dataset.transform_val(input_size)\n # transform_train = transforms.Compose([\n # transforms.Resize(84),\n # transforms.RandomCrop(84, padding=8),\n # transforms.RandomHorizontalFlip(),\n # transforms.ToTensor(),\n # transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n # ])\n # transform_test = transforms.Compose([\n # transforms.ToTensor(),\n # transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n # ])\n\n if dataset_name in ('MiniPlaces', 'AnimalsWithAttributes2'):\n transform_train = dataset.transform_train()\n transform_test = dataset.transform_test()\n\n\n return transform_train, transform_test\n\n\nclass Colors:\n RED = '\\x1b[31m'\n GREEN = '\\x1b[32m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n CYAN = '\\x1b[36m'\n\n @classmethod\n def red(cls, *args):\n print(cls.RED + args[0], *args[1:], cls.ENDC)\n\n @classmethod\n def green(cls, *args):\n print(cls.GREEN + args[0], *args[1:], cls.ENDC)\n\n @classmethod\n def cyan(cls, *args):\n print(cls.CYAN + args[0], *args[1:], cls.ENDC)\n\n @classmethod\n def bold(cls, *args):\n print(cls.BOLD + args[0], *args[1:], cls.ENDC)\n\n\ndef get_mean_and_std(dataset):\n '''Compute the mean and std value of dataset.'''\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)\n mean = torch.zeros(3)\n std = torch.zeros(3)\n print('==> Computing mean and std..')\n for inputs, targets in dataloader:\n for i in range(3):\n mean[i] += inputs[:,i,:,:].mean()\n std[i] += inputs[:,i,:,:].std()\n mean.div_(len(dataset))\n std.div_(len(dataset))\n 
return mean, std\n\ndef init_params(net):\n '''Init layer parameters.'''\n for m in net.modules():\n if isinstance(m, nn.Conv2d):\n init.kaiming_normal(m.weight, mode='fan_out')\n if m.bias:\n init.constant(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.constant(m.weight, 1)\n init.constant(m.bias, 0)\n elif isinstance(m, nn.Linear):\n init.normal(m.weight, std=1e-3)\n if m.bias:\n init.constant(m.bias, 0)\n\n\ntry:\n _, term_width = os.popen('stty size', 'r').read().split()\n term_width = int(term_width)\nexcept Exception as e:\n print(e)\n term_width = 50\n\nTOTAL_BAR_LENGTH = 65.\nlast_time = time.time()\nbegin_time = last_time\ndef progress_bar(current, total, msg=None):\n global last_time, begin_time\n if current == 0:\n begin_time = time.time() # Reset for new bar.\n\n cur_len = int(TOTAL_BAR_LENGTH*current/total)\n rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1\n\n sys.stdout.write(' [')\n for i in range(cur_len):\n sys.stdout.write('=')\n sys.stdout.write('>')\n for i in range(rest_len):\n sys.stdout.write('.')\n sys.stdout.write(']')\n\n cur_time = time.time()\n step_time = cur_time - last_time\n last_time = cur_time\n tot_time = cur_time - begin_time\n\n L = []\n L.append(' Step: %s' % format_time(step_time))\n L.append(' | Tot: %s' % format_time(tot_time))\n if msg:\n L.append(' | ' + msg)\n\n msg = ''.join(L)\n sys.stdout.write(msg)\n for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):\n sys.stdout.write(' ')\n\n # Go back to the center of the bar.\n for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):\n sys.stdout.write('\\b')\n sys.stdout.write(' %d/%d ' % (current+1, total))\n\n if current < total-1:\n sys.stdout.write('\\r')\n else:\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\ndef format_time(seconds):\n days = int(seconds / 3600/24)\n seconds = seconds - days*3600*24\n hours = int(seconds / 3600)\n seconds = seconds - hours*3600\n minutes = int(seconds / 60)\n seconds = seconds - minutes*60\n secondsf = int(seconds)\n seconds = seconds - secondsf\n millis = int(seconds*1000)\n\n f = ''\n i = 1\n if days > 0:\n f += str(days) + 'D'\n i += 1\n if hours > 0 and i <= 2:\n f += str(hours) + 'h'\n i += 1\n if minutes > 0 and i <= 2:\n f += str(minutes) + 'm'\n i += 1\n if secondsf > 0 and i <= 2:\n f += str(secondsf) + 's'\n i += 1\n if millis > 0 and i <= 2:\n f += str(millis) + 'ms'\n i += 1\n if f == '':\n f = '0ms'\n return f\n\n\ndef set_np_printoptions():\n np.set_printoptions(formatter={'float': lambda x: \"{0:0.3f}\".format(x)})\n\n\ndef generate_fname(dataset, model, path_graph, wnid=None, name='',\n trainset=None, include_labels=(), exclude_labels=(),\n include_classes=(), num_samples=0, max_leaves_supervised=-1,\n min_leaves_supervised=-1, tree_supervision_weight=0.5,\n weighted_average=False, fine_tune=False,\n loss='CrossEntropyLoss', word2vec=False, **kwargs):\n fname = 'ckpt'\n fname += '-' + dataset\n fname += '-' + model\n if name:\n fname += '-' + name\n if path_graph:\n path = Path(path_graph)\n fname += '-' + path.stem.replace('graph-', '', 1)\n if include_labels:\n labels = \",\".join(map(str, include_labels))\n fname += f'-incl{labels}'\n if exclude_labels:\n labels = \",\".join(map(str, exclude_labels))\n fname += f'-excl{labels}'\n if include_classes:\n labels = \",\".join(map(str, include_classes))\n fname += f'-incc{labels}'\n if num_samples != 0 and num_samples is not None:\n fname += f'-samples{num_samples}'\n if loss != 'CrossEntropyLoss':\n fname += f'-{loss}'\n if max_leaves_supervised > 0:\n fname += 
f'-mxls{max_leaves_supervised}'\n if min_leaves_supervised > 0:\n fname += f'-mnls{min_leaves_supervised}'\n if tree_supervision_weight is not None and tree_supervision_weight != 1:\n fname += f'-tsw{tree_supervision_weight}'\n if weighted_average:\n fname += '-weighted'\n if word2vec:\n fname += '-word2vec'\n return fname\n\ndef get_saved_word2vec(path, dimension, projection_matrix):\n word_vec = np.load(path)\n word_vec = np.asarray(word_vec).reshape(1, dimension)\n word_vec = np.matmul(word_vec, projection_matrix)[0]\n return np.array(word_vec / LA.norm(word_vec), dtype=float)\n\ndef get_word_embedding(cls, trainset, dataset_name='CIFAR10'):\n word2vec_path = os.path.join(os.path.join(trainset.root, DATASET_TO_FOLDER_NAME[dataset_name]), \"word2vec/\")\n word_vec = np.load(word2vec_path + cls + '.npy')\n return word_vec/LA.norm(word_vec)\n\n\ndef word2vec_model(net, trainset, dataset_name='CIFAR10', exclude_classes=None, pretrained=False):\n \"\"\" Sets FC layer weights to word2vec embeddings, freezing them unless\n exclude classes is given, in which case those specific rows are frozen in\n the backward call\"\"\"\n\n print('==> Adding in word2vec embeddings...')\n if isinstance(net, nn.DataParallel):\n module = net.module\n else:\n module = net\n if pretrained:\n layer = module.fc\n else:\n layer = module.linear\n word2vec_path = os.path.join(os.path.join('./data',DATASET_TO_FOLDER_NAME[dataset_name]), \"word2vec/\")\n if not os.path.exists(word2vec_path):\n raise Exception(\"No saved word2vec embeddings, run generate_word2vec.py\")\n fc_weights = []\n\n for i, cls in enumerate(trainset.classes):\n word_vec = np.load(word2vec_path+cls+'.npy')\n word_vec /= LA.norm(word_vec)\n print(word_vec.shape, len(fc_weights))\n fc_weights = np.append(fc_weights, word_vec)\n print(fc_weights.shape)\n print(trainset.classes, fc_weights.shape)\n fc_weights = fc_weights.reshape((len(trainset.classes), int(fc_weights.shape[0]/len(trainset.classes))))\n layer = nn.Linear(fc_weights.shape[1], len(trainset.classes)).to(\"cuda\")\n layer.weight = nn.Parameter(torch.from_numpy(fc_weights).float().to(\"cuda\"))\n # Colors.cyan(\"All word2vec checks passed!\")\n\n # freeze layer\n layer.weight.requires_grad = False\n layer.bias.requires_grad = False\n layer.requires_grad = False\n Colors.cyan(\"Freezing FC weights..\")\n return net\n\ndef test_word2vec(net, trainset, dataset_name='CIFAR10', exclude_classes=None, dimension=300):\n \"\"\" Check that word2vec weights are frozen in ZS rows \"\"\"\n word2vec_path = os.path.join(os.path.join('./data', DATASET_TO_FOLDER_NAME[dataset_name]), \"word2vec/\")\n if not os.path.exists(word2vec_path):\n raise Exception(\"No saved word2vec embeddings, run generate_word2vec.py\")\n\n net.eval()\n\n # get FC weights\n fc_weights = net.module.linear.weight.detach().cpu().numpy()\n\n # if no exclude classes, all FC rows should be word2vec embeddings\n if not exclude_classes:\n for i, cls in enumerate(trainset.classes):\n word_vec = word_vec = np.load(word2vec_path+cls+'.npy')\n assert all(fc_weights[i] == word_vec)\n else:\n for i, cls in enumerate(exclude_classes):\n word_vec = word_vec = np.load(word2vec_path+cls+'.npy')\n assert all(fc_weights[i] == word_vec)\n Colors.cyan(\"Freezing certain FC rows check passed!\")\n\ndef normalize_weights(net, pretrained=True):\n \"\"\" Check that word2vec weights are frozen in ZS rows \"\"\"\n net.eval()\n\n if pretrained:\n layer = net.module.fc\n else:\n layer = net.module.linear\n\n # get FC weights\n fc_weights = 
layer.weight.detach().cpu().numpy()\n for i in range(len(fc_weights)):\n fc_weights[i] -= np.mean(fc_weights[i])\n fc_weights[i] /= LA.norm(fc_weights[i])\n layer.weight = nn.Parameter(torch.from_numpy(fc_weights).float().to(\"cuda\"))\n layer.weight.requires_grad = False\n return net\n\nclass LabelSmoothingLoss(nn.Module):\n def __init__(self, classes, smoothing=0.0, dim=-1, seen_to_zsl_cls={}):\n super(LabelSmoothingLoss, self).__init__()\n self.confidence = 1.0 - smoothing\n self.smoothing = smoothing\n self.cls = classes\n self.dim = dim\n self.seen_to_zsl_cls = seen_to_zsl_cls\n\n def smooth_one_hot(self, labels):\n \"\"\" Create Soft Label \"\"\"\n assert 0 <= self.smoothing < 1\n num_classes = len(self.cls)\n label_shape = torch.Size((labels.size(0), num_classes))\n confidence = 1.0 - self.smoothing\n\n if self.smoothing == 0 or not self.seen_to_zsl_cls:\n return torch.zeros_like(label_shape).scatter_(1, labels.data.unsqueeze(1), confidence)\n\n with torch.no_grad():\n true_dist = torch.zeros(size=label_shape, device=labels.device)\n true_dist.scatter_(1, labels.data.unsqueeze(1), 1)\n for seen, zsl in self.seen_to_zsl_cls.items():\n zsl_idx, seen_idx = self.cls.index(zsl), self.cls.index(seen)\n seen_selector = torch.zeros_like(labels.data.unsqueeze(1))\n seen_selector[true_dist[:, seen_idx] == 1] = seen_idx\n zsl_selector = torch.zeros_like(labels.data.unsqueeze(1))\n zsl_selector[true_dist[:, seen_idx] == 1] = zsl_idx\n true_dist.scatter_(1, seen_selector, confidence)\n true_dist.scatter_(1, zsl_selector, self.smoothing)\n return true_dist\n\n def forward(self, pred, target):\n pred = pred.log_softmax(dim=self.dim)\n with torch.no_grad():\n # true_dist = pred.data.clone()\n soft_label = self.smooth_one_hot(target)\n return torch.mean(torch.sum(-soft_label * pred, dim=self.dim))\n\nclass MaskLoss(nn.Module):\n def __init__(self, size_average=None, reduce=None, reduction='mean'):\n super(MaskLoss, self).__init__()\n self.reduction = reduction\n\n def forward(self, input, target):\n N, W = input.size()\n A = torch.min(input, target)\n values, index = torch.max(target, 0)\n B = 1/(1+torch.exp(-100*(target-.55*values)))\n sums = []\n for n in range(N):\n value = values[n]\n idx = index[n]\n tar = target[n]\n inp = input[n]\n a = torch.min(inp, tar)\n b = 1/(1+torch.exp(-100*(tar-.55*value)))\n sums.append(2*torch.div(torch.dot(a,b), torch.sum(inp+target, axis=-1)))\n sums = torch.stack(sums)\n sums[torch.isnan(sums)] = 0.0\n # return torch.mean(2*torch.div(torch.bmm(A.view(N, 1, W), B.view(N, W, 1)).view(1, N),\n # torch.sum(input+target, axis=-1)), dim=-1)\n return sums.mean()\n\ndef replicate(inputs, labels):\n \"\"\"\n inputs: torch Tensor size Bx(anything)\n labels: torch tensor size Bx(num_classes)\n (multilabel, where labels[i,j] is 1 if image i has class j, 0 otherwise)\n Return:\n rep_inputs size Kx(anything), where K is the number of 1's that appeared in all labels\n rep_labels size Kx1, where rep_labels[i] is a class number that appeared in images[i]\n Example:\n inputs = torch.zeros((2,3))\n labels = torch.Tensor([\n [0,1,1,0],\n [0,1,0,0]\n ])\n rep_inputs, rep_labels = replicate(inputs, labels)\n assert rep_inputs.shape == (3,3)\n assert torch.all(rep_labels == torch.Tensor([1,2,1]))\n \"\"\"\n input_dim = len(inputs.shape)\n rep_inputs, rep_labels = None, None\n for (sample, label) in zip(inputs,labels):\n if rep_inputs is None:\n rep_labels = torch.where(label == 1.)[0]\n rep_inputs = sample.unsqueeze(0).repeat(len(rep_labels),*([1] * (input_dim-1)))\n else:\n 
new_rep_labels = torch.where(label == 1.)[0]\n new_reps = sample.unsqueeze(0).repeat(len(new_rep_labels),*([1] * (input_dim-1)))\n rep_labels = torch.cat((rep_labels, new_rep_labels))\n rep_inputs = torch.cat((rep_inputs, new_reps))\n return rep_inputs, rep_labels\n\ndef replicate_outputs(inputs, num_replicate):\n \"\"\"\n inputs: torch Tensor size Bx(anything)\n labels: torch tensor size Bx(num_classes)\n (multilabel, where labels[i,j] is 1 if image i has class j, 0 otherwise)\n Return:\n rep_inputs size Kx(anything), where K is the number of 1's that appeared in all labels\n rep_labels size Kx1, where rep_labels[i] is a class number that appeared in images[i]\n Example:\n inputs = torch.zeros((2,3))\n labels = torch.Tensor([\n [0,1,1,0],\n [0,1,0,0]\n ])\n rep_inputs, rep_labels = replicate(inputs, labels)\n assert rep_inputs.shape == (3,3)\n assert torch.all(rep_labels == torch.Tensor([1,2,1]))\n \"\"\"\n ret = {i:None for i in range(num_replicate)}\n for i in range(num_replicate):\n ret[i] = inputs.clone()\n return ret" ]
[ [ "torch.max", "torch.zeros", "numpy.asarray", "torch.cat", "torch.utils.data.DataLoader", "torch.sum", "numpy.mean", "torch.no_grad", "torch.where", "numpy.matmul", "torch.from_numpy", "numpy.load", "torch.nn.init.constant", "torch.dot", "torch.nn.init.kaiming_normal", "torch.min", "torch.zeros_like", "torch.exp", "numpy.append", "torch.stack", "torch.isnan", "numpy.linalg.norm", "torch.nn.init.normal" ] ]
antoineMoPa/glumpy
[ "901df7eb37cd728c2fe7e54920392b700b46c0ac", "901df7eb37cd728c2fe7e54920392b700b46c0ac" ]
[ "examples/gloo-arrows.py", "glumpy/app/console.py" ]
[ "# -----------------------------------------------------------------------------\n# Copyright (c) 2009-2016 Nicolas P. Rougier. All rights reserved.\n# Distributed under the (new) BSD License.\n# -----------------------------------------------------------------------------\nimport numpy as np\nfrom glumpy import app, gl, gloo\nfrom glumpy.transforms import Position, OrthographicProjection, PanZoom\n\n# Create window\nwindow = app.Window(width=2*512, height=512, color=(1,1,1,1))\n\n# What to draw when necessary\[email protected]\ndef on_draw(dt):\n window.clear()\n program.draw(gl.GL_POINTS)\n program['orientation'][-1] += np.pi/1024.0\n\n# Setup some markers\nn = 500+1\ndata = np.zeros(n, dtype=[('position', np.float32, 2),\n ('fg_color', np.float32, 4),\n ('bg_color', np.float32, 4),\n ('size', np.float32, 1),\n ('head', np.float32, 1),\n ('orientation', np.float32, 1),\n ('linewidth', np.float32, 1)])\ndata = data.view(gloo.VertexBuffer)\ndata['linewidth'] = 1\ndata['fg_color'] = 0, 0, 0, 1\ndata['bg_color'] = 0, 0, 0, 1\ndata['orientation'] = 0\ndata['head'] = 0.25\nradius, theta, dtheta = 245.0, 0.0, 6.5 / 180.0 * np.pi\nfor i in range(500):\n theta += dtheta\n x = 256 + radius * np.cos(theta)\n y = 256 + radius * np.sin(theta)\n r = 10.1 - i * 0.01\n radius -= 0.4\n data['orientation'][i] = theta + np.pi\n data['position'][i] = x, y\n data['size'][i] = 2 * r\n data['linewidth'][i] = 1.5 - 0.5*i/500.\n\ndata['position'][-1] = 512+256, 256\ndata['size'][-1] = 512/np.sqrt(2)\ndata['linewidth'][-1] = 16.0\ndata['fg_color'][-1] = 0, 0, 0, 1\ndata['bg_color'][-1] = .95, .95, .95, 1\ndata['orientation'][-1] = 0\n\nprogram = gloo.Program(\"arrows/arrow.vert\", \"arrows/arrow.frag\")\nprogram.bind(data)\nprogram['antialias'] = 1.00\nprogram['arrow'] = \"stealth\"\nprogram['paint'] = \"filled\"\ntransform = OrthographicProjection(Position(\"position\"))\nprogram['transform'] = transform\nwindow.attach(transform)\n\napp.run()\n", "# -----------------------------------------------------------------------------\n# Copyright (c) 2009-2016 Nicolas P. Rougier. 
All rights reserved.\n# Distributed under the (new) BSD License.\n# -----------------------------------------------------------------------------\n\"\"\" Fast and failsafe GL console \"\"\"\nimport numpy as np\nfrom glumpy import gl, glm, gloo\n\n\n# Translated from\n# http://www.piclist.com/tecHREF/datafile/charset/extractor/charset_extractor.htm\n__font_6x8__ = np.array([\n (0x00,0x00,0x00,0x00,0x00,0x00), (0x10,0xE3,0x84,0x10,0x01,0x00),\n (0x6D,0xB4,0x80,0x00,0x00,0x00), (0x00,0xA7,0xCA,0x29,0xF2,0x80),\n (0x20,0xE4,0x0C,0x09,0xC1,0x00), (0x65,0x90,0x84,0x21,0x34,0xC0),\n (0x21,0x45,0x08,0x55,0x23,0x40), (0x30,0xC2,0x00,0x00,0x00,0x00),\n (0x10,0x82,0x08,0x20,0x81,0x00), (0x20,0x41,0x04,0x10,0x42,0x00),\n (0x00,0xA3,0x9F,0x38,0xA0,0x00), (0x00,0x41,0x1F,0x10,0x40,0x00),\n (0x00,0x00,0x00,0x00,0xC3,0x08), (0x00,0x00,0x1F,0x00,0x00,0x00),\n (0x00,0x00,0x00,0x00,0xC3,0x00), (0x00,0x10,0x84,0x21,0x00,0x00),\n (0x39,0x14,0xD5,0x65,0x13,0x80), (0x10,0xC1,0x04,0x10,0x43,0x80),\n (0x39,0x10,0x46,0x21,0x07,0xC0), (0x39,0x10,0x4E,0x05,0x13,0x80),\n (0x08,0x62,0x92,0x7C,0x20,0x80), (0x7D,0x04,0x1E,0x05,0x13,0x80),\n (0x18,0x84,0x1E,0x45,0x13,0x80), (0x7C,0x10,0x84,0x20,0x82,0x00),\n (0x39,0x14,0x4E,0x45,0x13,0x80), (0x39,0x14,0x4F,0x04,0x23,0x00),\n (0x00,0x03,0x0C,0x00,0xC3,0x00), (0x00,0x03,0x0C,0x00,0xC3,0x08),\n (0x08,0x42,0x10,0x20,0x40,0x80), (0x00,0x07,0xC0,0x01,0xF0,0x00),\n (0x20,0x40,0x81,0x08,0x42,0x00), (0x39,0x10,0x46,0x10,0x01,0x00),\n (0x39,0x15,0xD5,0x5D,0x03,0x80), (0x39,0x14,0x51,0x7D,0x14,0x40),\n (0x79,0x14,0x5E,0x45,0x17,0x80), (0x39,0x14,0x10,0x41,0x13,0x80),\n (0x79,0x14,0x51,0x45,0x17,0x80), (0x7D,0x04,0x1E,0x41,0x07,0xC0),\n (0x7D,0x04,0x1E,0x41,0x04,0x00), (0x39,0x14,0x17,0x45,0x13,0xC0),\n (0x45,0x14,0x5F,0x45,0x14,0x40), (0x38,0x41,0x04,0x10,0x43,0x80),\n (0x04,0x10,0x41,0x45,0x13,0x80), (0x45,0x25,0x18,0x51,0x24,0x40),\n (0x41,0x04,0x10,0x41,0x07,0xC0), (0x45,0xB5,0x51,0x45,0x14,0x40),\n (0x45,0x95,0x53,0x45,0x14,0x40), (0x39,0x14,0x51,0x45,0x13,0x80),\n (0x79,0x14,0x5E,0x41,0x04,0x00), (0x39,0x14,0x51,0x55,0x23,0x40),\n (0x79,0x14,0x5E,0x49,0x14,0x40), (0x39,0x14,0x0E,0x05,0x13,0x80),\n (0x7C,0x41,0x04,0x10,0x41,0x00), (0x45,0x14,0x51,0x45,0x13,0x80),\n (0x45,0x14,0x51,0x44,0xA1,0x00), (0x45,0x15,0x55,0x55,0x52,0x80),\n (0x45,0x12,0x84,0x29,0x14,0x40), (0x45,0x14,0x4A,0x10,0x41,0x00),\n (0x78,0x21,0x08,0x41,0x07,0x80), (0x38,0x82,0x08,0x20,0x83,0x80),\n (0x01,0x02,0x04,0x08,0x10,0x00), (0x38,0x20,0x82,0x08,0x23,0x80),\n (0x10,0xA4,0x40,0x00,0x00,0x00), (0x00,0x00,0x00,0x00,0x00,0x3F),\n (0x30,0xC1,0x00,0x00,0x00,0x00), (0x00,0x03,0x81,0x3D,0x13,0xC0),\n (0x41,0x07,0x91,0x45,0x17,0x80), (0x00,0x03,0x91,0x41,0x13,0x80),\n (0x04,0x13,0xD1,0x45,0x13,0xC0), (0x00,0x03,0x91,0x79,0x03,0x80),\n (0x18,0x82,0x1E,0x20,0x82,0x00), (0x00,0x03,0xD1,0x44,0xF0,0x4E),\n (0x41,0x07,0x12,0x49,0x24,0x80), (0x10,0x01,0x04,0x10,0x41,0x80),\n (0x08,0x01,0x82,0x08,0x24,0x8C), (0x41,0x04,0x94,0x61,0x44,0x80),\n (0x10,0x41,0x04,0x10,0x41,0x80), (0x00,0x06,0x95,0x55,0x14,0x40),\n (0x00,0x07,0x12,0x49,0x24,0x80), (0x00,0x03,0x91,0x45,0x13,0x80),\n (0x00,0x07,0x91,0x45,0x17,0x90), (0x00,0x03,0xD1,0x45,0x13,0xC1),\n (0x00,0x05,0x89,0x20,0x87,0x00), (0x00,0x03,0x90,0x38,0x13,0x80),\n (0x00,0x87,0x88,0x20,0xA1,0x00), (0x00,0x04,0x92,0x49,0x62,0x80),\n (0x00,0x04,0x51,0x44,0xA1,0x00), (0x00,0x04,0x51,0x55,0xF2,0x80),\n (0x00,0x04,0x92,0x31,0x24,0x80), (0x00,0x04,0x92,0x48,0xE1,0x18),\n (0x00,0x07,0x82,0x31,0x07,0x80), (0x18,0x82,0x18,0x20,0x81,0x80),\n (0x10,0x41,0x00,0x10,0x41,0x00), 
(0x30,0x20,0x83,0x08,0x23,0x00),\n (0x29,0x40,0x00,0x00,0x00,0x00), (0x10,0xE6,0xD1,0x45,0xF0,0x00)], dtype=np.float32)\n\n__vertex__ = \"\"\"\n#version 120\nuniform mat4 projection;\nuniform float scale;\nuniform vec4 color;\nattribute vec2 position;\nattribute vec3 bytes_012, bytes_345;\nvarying vec4 v_color;\nvarying vec3 v_bytes_012, v_bytes_345;\nvoid main (void)\n{\n gl_Position = projection*vec4(position*scale, 0.0, 1.0);\n gl_PointSize = 8.0 * scale;\n\n v_color = color;\n v_bytes_012 = bytes_012;\n v_bytes_345 = bytes_345;\n}\n\"\"\"\n\n__fragment__ = \"\"\"\n#version 120\nfloat segment(float edge0, float edge1, float x)\n{\n return step(edge0,x) * (1.0-step(edge1,x));\n}\nvarying vec4 v_color;\nvarying vec3 v_bytes_012, v_bytes_345;\nvoid main(void)\n{\n vec2 uv = floor(gl_PointCoord.xy * 8.0);\n if(uv.x > 5.0) discard;\n if(uv.y > 7.0) discard;\n float index = floor( (uv.y*6.0+uv.x)/8.0 );\n float offset = floor( mod(uv.y*6.0+uv.x,8.0));\n float byte = segment(0.0,1.0,index) * v_bytes_012.x\n + segment(1.0,2.0,index) * v_bytes_012.y\n + segment(2.0,3.0,index) * v_bytes_012.z\n + segment(3.0,4.0,index) * v_bytes_345.x\n + segment(4.0,5.0,index) * v_bytes_345.y\n + segment(5.0,6.0,index) * v_bytes_345.z;\n if( floor(mod(byte / (128.0/pow(2.0,offset)), 2.0)) > 0.0 )\n gl_FragColor = v_color;\n else\n discard;\n}\n\"\"\"\n\n\nclass Console(object):\n \"\"\" Fast and failsafe GL console \"\"\"\n\n def __init__(self, rows, cols, scale=2, color=(0,0,0,1)):\n\n # Harcoded because of font above and shader program\n self._cwidth = 6\n self._cheight = 8\n self._scale = int(max(scale,1))\n\n dtype = [(\"position\", np.float32, 2),\n (\"glyph\", np.float32, 6)]\n self._program = gloo.Program(__vertex__, __fragment__)\n self._data = np.zeros((rows,cols), dtype).view(gloo.VertexBuffer)\n dtype = [(\"position\", np.float32, 2),\n (\"bytes_012\", np.float32, 3),\n (\"bytes_345\", np.float32, 3)]\n self._program.bind(self._data.view(dtype))\n\n # Initialize glyph position (they won't move)\n C,R = np.meshgrid(np.arange(cols), np.arange(rows))\n self._data['position'][...,0] = 4.0 + self.cwidth*C\n self._data['position'][...,1] = 4.0 + self.cheight*R\n\n self._program['scale'] = self._scale\n self._program['color'] = color\n self._rows, self._cols = rows, cols\n self._row = 0\n\n\n @property\n def scale(self):\n return self._scale\n\n\n @property\n def rows(self):\n return self._rows\n\n\n @property\n def cols(self):\n return self._cols\n\n\n @property\n def cwidth(self):\n return self._cwidth\n\n\n @property\n def cheight(self):\n return self._cheight + 2\n\n\n def on_resize(self, width, height):\n \"\"\" Update console projection \"\"\"\n\n self._program[\"projection\"] = glm.ortho(0, width, height, 0, -1, +1)\n\n\n def draw(self):\n \"\"\" Draw console \"\"\"\n\n self._program.draw(gl.GL_POINTS)\n\n\n def clear(self):\n \"\"\" Clear console \"\"\"\n\n self._data[\"glyph\"] = 0\n self._row = 0\n\n\n def write(self, text=\"\"):\n \"\"\" Write text and scroll \"\"\"\n\n # Clear line\n self._data[\"glyph\"][self._row] = 0\n\n if len(text):\n # Crop text if necessary\n text = text[:self._cols]\n # Write text\n I = np.array([ord(c)-32 for c in text])\n self._data[\"glyph\"][self._row,:len(text)] = __font_6x8__[I]\n\n # Update row and scroll if necessary\n self._row += 1\n if self._row > self._rows-1:\n self._data[\"glyph\"][:-n] = self._data[\"glyph\"][n:]\n self._data[\"glyph\"][-n:] = 0\n self._row = self._rows-1\n" ]
[ [ "numpy.cos", "numpy.zeros", "numpy.sqrt", "numpy.sin" ], [ "numpy.arange", "numpy.array", "numpy.zeros" ] ]
fdeugenio/photutils
[ "33c8b15cbbda85dc11c86a73217422dcb61398b7", "33c8b15cbbda85dc11c86a73217422dcb61398b7", "33c8b15cbbda85dc11c86a73217422dcb61398b7" ]
[ "photutils/aperture/mask.py", "photutils/psf/tests/test_photometry.py", "photutils/psf/tests/test_epsf.py" ]
[ "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport numpy as np\nimport astropy.units as u\n\n\n__all__ = ['ApertureMask']\n\n\nclass ApertureMask:\n \"\"\"\n Class for an aperture mask.\n\n Parameters\n ----------\n data : array_like\n A 2D array representing the fractional overlap of an aperture on\n the pixel grid. This should be the full-sized (i.e. not\n truncated) array that is the direct output of one of the\n low-level `photutils.geometry` functions.\n\n bbox : `photutils.BoundingBox`\n The bounding box object defining the aperture minimal bounding\n box.\n \"\"\"\n\n def __init__(self, data, bbox):\n self.data = np.asanyarray(data)\n if self.data.shape != bbox.shape:\n raise ValueError('mask data and bounding box must have the same '\n 'shape')\n self.bbox = bbox\n self._mask = (self.data == 0)\n\n def __array__(self):\n \"\"\"\n Array representation of the mask data array (e.g., for matplotlib).\n \"\"\"\n\n return self.data\n\n @property\n def shape(self):\n \"\"\"\n The shape of the mask data array.\n \"\"\"\n\n return self.data.shape\n\n def _overlap_slices(self, shape):\n \"\"\"\n Calculate the slices for the overlapping part of the bounding\n box and an array of the given shape.\n\n Parameters\n ----------\n shape : tuple of int\n The ``(ny, nx)`` shape of array where the slices are to be\n applied.\n\n Returns\n -------\n slices_large : tuple of slices\n A tuple of slice objects for each axis of the large array,\n such that ``large_array[slices_large]`` extracts the region\n of the large array that overlaps with the small array.\n\n slices_small : slice\n A tuple of slice objects for each axis of the small array,\n such that ``small_array[slices_small]`` extracts the region\n of the small array that is inside the large array.\n \"\"\"\n\n if len(shape) != 2:\n raise ValueError('input shape must have 2 elements.')\n\n xmin = self.bbox.ixmin\n xmax = self.bbox.ixmax\n ymin = self.bbox.iymin\n ymax = self.bbox.iymax\n\n if xmin >= shape[1] or ymin >= shape[0] or xmax <= 0 or ymax <= 0:\n # no overlap of the aperture with the data\n return None, None\n\n slices_large = (slice(max(ymin, 0), min(ymax, shape[0])),\n slice(max(xmin, 0), min(xmax, shape[1])))\n\n slices_small = (slice(max(-ymin, 0),\n min(ymax - ymin, shape[0] - ymin)),\n slice(max(-xmin, 0),\n min(xmax - xmin, shape[1] - xmin)))\n\n return slices_large, slices_small\n\n def _to_image_partial_overlap(self, image):\n \"\"\"\n Return an image of the mask in a 2D array, where the mask\n is not fully within the image (i.e. 
partial or no overlap).\n \"\"\"\n\n # find the overlap of the mask on the output image shape\n slices_large, slices_small = self._overlap_slices(image.shape)\n\n if slices_small is None:\n return None # no overlap\n\n # insert the mask into the output image\n image[slices_large] = self.data[slices_small]\n\n return image\n\n def to_image(self, shape):\n \"\"\"\n Return an image of the mask in a 2D array of the given shape,\n taking any edge effects into account.\n\n Parameters\n ----------\n shape : tuple of int\n The ``(ny, nx)`` shape of the output array.\n\n Returns\n -------\n result : `~numpy.ndarray`\n A 2D array of the mask.\n \"\"\"\n\n if len(shape) != 2:\n raise ValueError('input shape must have 2 elements.')\n\n image = np.zeros(shape)\n\n if self.bbox.ixmin < 0 or self.bbox.iymin < 0:\n return self._to_image_partial_overlap(image)\n\n try:\n image[self.bbox.slices] = self.data\n except ValueError: # partial or no overlap\n image = self._to_image_partial_overlap(image)\n\n return image\n\n def cutout(self, data, fill_value=0., copy=False):\n \"\"\"\n Create a cutout from the input data over the mask bounding box,\n taking any edge effects into account.\n\n Parameters\n ----------\n data : array_like\n A 2D array on which to apply the aperture mask.\n\n fill_value : float, optional\n The value used to fill pixels where the aperture mask does\n not overlap with the input ``data``. The default is 0.\n\n copy : bool, optional\n If `True` then the returned cutout array will always be hold\n a copy of the input ``data``. If `False` and the mask is\n fully within the input ``data``, then the returned cutout\n array will be a view into the input ``data``. In cases\n where the mask partially overlaps or has no overlap with the\n input ``data``, the returned cutout array will always hold a\n copy of the input ``data`` (i.e. this keyword has no\n effect).\n\n Returns\n -------\n result : `~numpy.ndarray`\n A 2D array cut out from the input ``data`` representing the\n same cutout region as the aperture mask. If there is a\n partial overlap of the aperture mask with the input data,\n pixels outside of the data will be assigned to\n ``fill_value``. 
`None` is returned if there is no overlap\n of the aperture with the input ``data``.\n \"\"\"\n\n data = np.asanyarray(data)\n if (data.ndim != 2) and (data.ndim != 3):\n raise ValueError('data must be a 2D or 3D array.')\n\n partial_overlap = False\n if self.bbox.ixmin < 0 or self.bbox.iymin < 0:\n partial_overlap = True\n\n if not partial_overlap:\n # try this for speed -- the result may still be a partial\n # overlap, in which case the next block will be triggered\n if copy:\n cutout = data[(Ellipsis,)+self.bbox.slices].copy() # preserves Quantity\n else:\n cutout = data[(Ellipsis,)+self.bbox.slices]\n\n if partial_overlap or (cutout.shape[-2:] != self.shape):\n slices_large, slices_small = self._overlap_slices(data.shape[-2:])\n\n if slices_small is None:\n return None # no overlap\n\n # cutout is a copy\n output_shape = self.shape if data.ndim==2 else (data.shape[0],)+self.shape\n cutout = np.zeros(output_shape, dtype=data.dtype)\n cutout[:] = fill_value\n cutout[(Ellipsis,)+slices_small] = data[(Ellipsis,)+slices_large]\n\n if isinstance(data, u.Quantity):\n cutout = u.Quantity(cutout, unit=data.unit)\n\n return cutout\n\n def multiply(self, data, fill_value=0.):\n \"\"\"\n Multiply the aperture mask with the input data, taking any edge\n effects into account.\n\n The result is a mask-weighted cutout from the data.\n\n Parameters\n ----------\n data : array_like or `~astropy.units.Quantity`\n The 2D array to multiply with the aperture mask.\n\n fill_value : float, optional\n The value is used to fill pixels where the aperture mask\n does not overlap with the input ``data``. The default is 0.\n\n Returns\n -------\n result : `~numpy.ndarray` or `None`\n A 2D mask-weighted cutout from the input ``data``. If there\n is a partial overlap of the aperture mask with the input\n data, pixels outside of the data will be assigned to\n ``fill_value`` before being multipled with the mask. 
`None`\n is returned if there is no overlap of the aperture with the\n input ``data``.\n \"\"\"\n\n # make a copy to prevent changing the input data\n cutout = self.cutout(data, fill_value=fill_value, copy=True)\n\n if cutout is None:\n return None\n else:\n # needed to zero out non-finite data values outside of the\n # aperture mask but within the bounding box\n cutout[self._mask] = 0.\n\n return cutout * self.data\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_array_equal, assert_equal\n\nfrom astropy.convolution.utils import discretize_model\nfrom astropy.modeling import Parameter, Fittable2DModel\nfrom astropy.modeling.fitting import LevMarLSQFitter\nfrom astropy.modeling.models import Gaussian2D, Moffat2D\nfrom astropy.stats import gaussian_sigma_to_fwhm, SigmaClip\nfrom astropy.table import Table\nfrom astropy.tests.helper import catch_warnings\nfrom astropy.utils.exceptions import AstropyUserWarning\n\nfrom ..groupstars import DAOGroup\nfrom ..models import IntegratedGaussianPRF\nfrom ..photometry import (DAOPhotPSFPhotometry, BasicPSFPhotometry,\n IterativelySubtractedPSFPhotometry)\nfrom ..sandbox import DiscretePRF\nfrom ..utils import prepare_psf_model\nfrom ...background import StdBackgroundRMS, MMMBackground\nfrom ...datasets import make_gaussian_prf_sources_image, make_noise_image\nfrom ...detection import DAOStarFinder\n\ntry:\n import scipy # noqa\n HAS_SCIPY = True\nexcept ImportError:\n HAS_SCIPY = False\n\n\ndef make_psf_photometry_objs(std=1, sigma_psf=1):\n \"\"\"\n Produces baseline photometry objects which are then\n modified as-needed in specific tests below\n \"\"\"\n\n daofind = DAOStarFinder(threshold=5.0*std,\n fwhm=sigma_psf*gaussian_sigma_to_fwhm)\n daogroup = DAOGroup(1.5*sigma_psf*gaussian_sigma_to_fwhm)\n threshold = 5. 
* std\n fwhm = sigma_psf * gaussian_sigma_to_fwhm\n crit_separation = 1.5 * sigma_psf * gaussian_sigma_to_fwhm\n\n daofind = DAOStarFinder(threshold=threshold, fwhm=fwhm)\n daogroup = DAOGroup(crit_separation)\n mode_bkg = MMMBackground()\n psf_model = IntegratedGaussianPRF(sigma=sigma_psf)\n fitter = LevMarLSQFitter()\n\n basic_phot_obj = BasicPSFPhotometry(finder=daofind,\n group_maker=daogroup,\n bkg_estimator=mode_bkg,\n psf_model=psf_model,\n fitter=fitter,\n fitshape=(11, 11))\n\n iter_phot_obj = IterativelySubtractedPSFPhotometry(finder=daofind,\n group_maker=daogroup,\n bkg_estimator=mode_bkg,\n psf_model=psf_model,\n fitter=fitter, niters=1,\n fitshape=(11, 11))\n\n dao_phot_obj = DAOPhotPSFPhotometry(crit_separation=crit_separation,\n threshold=threshold, fwhm=fwhm,\n psf_model=psf_model, fitshape=(11, 11),\n niters=1)\n\n return (basic_phot_obj, iter_phot_obj, dao_phot_obj)\n\n\nsigma_psfs = []\n\n# A group of two overlapped stars and an isolated one\nsigma_psfs.append(2)\nsources1 = Table()\nsources1['flux'] = [800, 1000, 1200]\nsources1['x_0'] = [13, 18, 25]\nsources1['y_0'] = [16, 16, 25]\nsources1['sigma'] = [sigma_psfs[-1]] * 3\nsources1['theta'] = [0] * 3\nsources1['id'] = [1, 2, 3]\nsources1['group_id'] = [1, 1, 2]\n\n\n# one single group with four stars.\nsigma_psfs.append(2)\nsources2 = Table()\nsources2['flux'] = [700, 800, 700, 800]\nsources2['x_0'] = [12, 17, 12, 17]\nsources2['y_0'] = [15, 15, 20, 20]\nsources2['sigma'] = [sigma_psfs[-1]] * 4\nsources2['theta'] = [0] * 4\nsources2['id'] = [1, 2, 3, 4]\nsources2['group_id'] = [1, 1, 1, 1]\n\n# one faint star and one brither companion\n# although they are in the same group, the detection algorithm\n# is not able to detect the fainter star, hence photometry should\n# be performed with niters > 1 or niters=None\nsigma_psfs.append(2)\nsources3 = Table()\nsources3['flux'] = [10000, 1000]\nsources3['x_0'] = [18, 13]\nsources3['y_0'] = [17, 19]\nsources3['sigma'] = [sigma_psfs[-1]] * 2\nsources3['theta'] = [0] * 2\nsources3['id'] = [1] * 2\nsources3['group_id'] = [1] * 2\nsources3['iter_detected'] = [1, 2]\n\n\[email protected]('not HAS_SCIPY')\[email protected](\"sigma_psf, sources\", [(sigma_psfs[2], sources3)])\ndef test_psf_photometry_niters(sigma_psf, sources):\n img_shape = (32, 32)\n # generate image with read-out noise (Gaussian) and\n # background noise (Poisson)\n image = (make_gaussian_prf_sources_image(img_shape, sources) +\n make_noise_image(img_shape, type='poisson', mean=6.,\n random_state=1) +\n make_noise_image(img_shape, type='gaussian', mean=0.,\n stddev=2., random_state=1))\n cp_image = image.copy()\n sigma_clip = SigmaClip(sigma=3.)\n bkgrms = StdBackgroundRMS(sigma_clip)\n std = bkgrms(image)\n\n phot_obj = make_psf_photometry_objs(std, sigma_psf)[1:3]\n for iter_phot_obj in phot_obj:\n iter_phot_obj.niters = None\n\n result_tab = iter_phot_obj(image)\n residual_image = iter_phot_obj.get_residual_image()\n\n assert (result_tab['x_0_unc'] < 1.96 * sigma_psf /\n np.sqrt(sources['flux'])).all()\n assert (result_tab['y_0_unc'] < 1.96 * sigma_psf /\n np.sqrt(sources['flux'])).all()\n assert (result_tab['flux_unc'] < 1.96 *\n np.sqrt(sources['flux'])).all()\n\n assert_allclose(result_tab['x_fit'], sources['x_0'], rtol=1e-1)\n assert_allclose(result_tab['y_fit'], sources['y_0'], rtol=1e-1)\n assert_allclose(result_tab['flux_fit'], sources['flux'], rtol=1e-1)\n assert_array_equal(result_tab['id'], sources['id'])\n assert_array_equal(result_tab['group_id'], sources['group_id'])\n 
assert_array_equal(result_tab['iter_detected'],\n sources['iter_detected'])\n assert_allclose(np.mean(residual_image), 0.0, atol=1e1)\n\n # make sure image is note overwritten\n assert_array_equal(cp_image, image)\n\n\[email protected]('not HAS_SCIPY')\[email protected](\"sigma_psf, sources\",\n [(sigma_psfs[0], sources1),\n (sigma_psfs[1], sources2),\n # these ensure that the test *fails* if the model\n # PSFs are the wrong shape\n pytest.param(sigma_psfs[0]/1.2, sources1,\n marks=pytest.mark.xfail()),\n pytest.param(sigma_psfs[1]*1.2, sources2,\n marks=pytest.mark.xfail())])\ndef test_psf_photometry_oneiter(sigma_psf, sources):\n \"\"\"\n Tests in an image with a group of two overlapped stars and an\n isolated one.\n \"\"\"\n\n img_shape = (32, 32)\n # generate image with read-out noise (Gaussian) and\n # background noise (Poisson)\n image = (make_gaussian_prf_sources_image(img_shape, sources) +\n make_noise_image(img_shape, type='poisson', mean=6.,\n random_state=1) +\n make_noise_image(img_shape, type='gaussian', mean=0.,\n stddev=2., random_state=1))\n cp_image = image.copy()\n\n sigma_clip = SigmaClip(sigma=3.)\n bkgrms = StdBackgroundRMS(sigma_clip)\n std = bkgrms(image)\n phot_objs = make_psf_photometry_objs(std, sigma_psf)\n\n for phot_proc in phot_objs:\n result_tab = phot_proc(image)\n residual_image = phot_proc.get_residual_image()\n assert (result_tab['x_0_unc'] < 1.96 * sigma_psf /\n np.sqrt(sources['flux'])).all()\n assert (result_tab['y_0_unc'] < 1.96 * sigma_psf /\n np.sqrt(sources['flux'])).all()\n assert (result_tab['flux_unc'] < 1.96 *\n np.sqrt(sources['flux'])).all()\n assert_allclose(result_tab['x_fit'], sources['x_0'], rtol=1e-1)\n assert_allclose(result_tab['y_fit'], sources['y_0'], rtol=1e-1)\n assert_allclose(result_tab['flux_fit'], sources['flux'], rtol=1e-1)\n assert_array_equal(result_tab['id'], sources['id'])\n assert_array_equal(result_tab['group_id'], sources['group_id'])\n assert_allclose(np.mean(residual_image), 0.0, atol=1e1)\n\n # test fixed photometry\n phot_proc.psf_model.x_0.fixed = True\n phot_proc.psf_model.y_0.fixed = True\n\n pos = Table(names=['x_0', 'y_0'], data=[sources['x_0'],\n sources['y_0']])\n cp_pos = pos.copy()\n\n result_tab = phot_proc(image, pos)\n residual_image = phot_proc.get_residual_image()\n assert 'x_0_unc' not in result_tab.colnames\n assert 'y_0_unc' not in result_tab.colnames\n assert (result_tab['flux_unc'] < 1.96 *\n np.sqrt(sources['flux'])).all()\n assert_array_equal(result_tab['x_fit'], sources['x_0'])\n assert_array_equal(result_tab['y_fit'], sources['y_0'])\n assert_allclose(result_tab['flux_fit'], sources['flux'], rtol=1e-1)\n assert_array_equal(result_tab['id'], sources['id'])\n assert_array_equal(result_tab['group_id'], sources['group_id'])\n assert_allclose(np.mean(residual_image), 0.0, atol=1e1)\n\n # make sure image is not overwritten\n assert_array_equal(cp_image, image)\n\n # make sure initial guess table is not modified\n assert_array_equal(cp_pos, pos)\n\n # resets fixed positions\n phot_proc.psf_model.x_0.fixed = False\n phot_proc.psf_model.y_0.fixed = False\n\n\[email protected]('not HAS_SCIPY')\ndef test_niters_errors():\n iter_phot_obj = make_psf_photometry_objs()[1]\n\n # tests that niters is set to an integer even if the user inputs\n # a float\n iter_phot_obj.niters = 1.1\n assert_equal(iter_phot_obj.niters, 1)\n\n # test that a ValueError is raised if niters <= 0\n with pytest.raises(ValueError):\n iter_phot_obj.niters = 0\n\n # test that it's OK to set niters to None\n iter_phot_obj.niters = 
None\n\n\[email protected]('not HAS_SCIPY')\ndef test_fitshape_errors():\n basic_phot_obj = make_psf_photometry_objs()[0]\n\n # first make sure setting to a scalar does the right thing (and makes\n # no errors)\n basic_phot_obj.fitshape = 11\n assert np.all(basic_phot_obj.fitshape == (11, 11))\n\n # test that a ValuError is raised if fitshape has even components\n with pytest.raises(ValueError):\n basic_phot_obj.fitshape = (2, 2)\n with pytest.raises(ValueError):\n basic_phot_obj.fitshape = 2\n\n # test that a ValueError is raised if fitshape has non positive\n # components\n with pytest.raises(ValueError):\n basic_phot_obj.fitshape = (-1, 0)\n\n # test that a ValueError is raised if fitshape has more than two\n # dimensions\n with pytest.raises(ValueError):\n basic_phot_obj.fitshape = (3, 3, 3)\n\n\[email protected]('not HAS_SCIPY')\ndef test_aperture_radius_errors():\n basic_phot_obj = make_psf_photometry_objs()[0]\n\n # test that aperture_radius was set to None by default\n assert_equal(basic_phot_obj.aperture_radius, None)\n\n # test that a ValueError is raised if aperture_radius is non positive\n with pytest.raises(ValueError):\n basic_phot_obj.aperture_radius = -3\n\n\[email protected]('not HAS_SCIPY')\ndef test_finder_errors():\n iter_phot_obj = make_psf_photometry_objs()[1]\n\n with pytest.raises(ValueError):\n iter_phot_obj.finder = None\n\n with pytest.raises(ValueError):\n iter_phot_obj = IterativelySubtractedPSFPhotometry(\n finder=None, group_maker=DAOGroup(1),\n bkg_estimator=MMMBackground(),\n psf_model=IntegratedGaussianPRF(1), fitshape=(11, 11))\n\n\[email protected]('not HAS_SCIPY')\ndef test_finder_positions_warning():\n basic_phot_obj = make_psf_photometry_objs(sigma_psf=2)[0]\n positions = Table()\n positions['x_0'] = [12.8, 18.2, 25.3]\n positions['y_0'] = [15.7, 16.5, 25.1]\n\n image = (make_gaussian_prf_sources_image((32, 32), sources1) +\n make_noise_image((32, 32), type='poisson', mean=6.,\n random_state=1))\n\n with catch_warnings(AstropyUserWarning):\n result_tab = basic_phot_obj(image=image, init_guesses=positions)\n assert_array_equal(result_tab['x_0'], positions['x_0'])\n assert_array_equal(result_tab['y_0'], positions['y_0'])\n assert_allclose(result_tab['x_fit'], positions['x_0'], rtol=1e-1)\n assert_allclose(result_tab['y_fit'], positions['y_0'], rtol=1e-1)\n\n with pytest.raises(ValueError):\n basic_phot_obj.finder = None\n result_tab = basic_phot_obj(image=image)\n\n\[email protected]('not HAS_SCIPY')\ndef test_aperture_radius():\n img_shape = (32, 32)\n\n # generate image with read-out noise (Gaussian) and\n # background noise (Poisson)\n image = (make_gaussian_prf_sources_image(img_shape, sources1) +\n make_noise_image(img_shape, type='poisson', mean=6.,\n random_state=1) +\n make_noise_image(img_shape, type='gaussian', mean=0.,\n stddev=2., random_state=1))\n\n basic_phot_obj = make_psf_photometry_objs()[0]\n\n # test that aperture radius is properly set whenever the PSF model has\n # a `fwhm` attribute\n class PSFModelWithFWHM(Fittable2DModel):\n x_0 = Parameter(default=1)\n y_0 = Parameter(default=1)\n flux = Parameter(default=1)\n fwhm = Parameter(default=5)\n\n def __init__(self, fwhm=fwhm.default):\n super().__init__(fwhm=fwhm)\n\n def evaluate(self, x, y, x_0, y_0, flux, fwhm):\n return flux / (fwhm * (x - x_0)**2 * (y - y_0)**2)\n\n psf_model = PSFModelWithFWHM()\n basic_phot_obj.psf_model = psf_model\n basic_phot_obj(image)\n assert_equal(basic_phot_obj.aperture_radius, psf_model.fwhm.value)\n\n\nPARS_TO_SET_0 = {'x_0': 'x_0', 'y_0': 'y_0', 
'flux_0': 'flux'}\nPARS_TO_OUTPUT_0 = {'x_fit': 'x_0', 'y_fit': 'y_0', 'flux_fit': 'flux'}\nPARS_TO_SET_1 = PARS_TO_SET_0.copy()\nPARS_TO_SET_1['sigma_0'] = 'sigma'\nPARS_TO_OUTPUT_1 = PARS_TO_OUTPUT_0.copy()\nPARS_TO_OUTPUT_1['sigma_fit'] = 'sigma'\n\n\[email protected](\"actual_pars_to_set, actual_pars_to_output,\"\n \"is_sigma_fixed\", [(PARS_TO_SET_0, PARS_TO_OUTPUT_0,\n True),\n (PARS_TO_SET_1, PARS_TO_OUTPUT_1,\n False)])\[email protected]('not HAS_SCIPY')\ndef test_define_fit_param_names(actual_pars_to_set, actual_pars_to_output,\n is_sigma_fixed):\n psf_model = IntegratedGaussianPRF()\n psf_model.sigma.fixed = is_sigma_fixed\n\n basic_phot_obj = make_psf_photometry_objs()[0]\n basic_phot_obj.psf_model = psf_model\n\n basic_phot_obj._define_fit_param_names()\n assert_equal(basic_phot_obj._pars_to_set, actual_pars_to_set)\n assert_equal(basic_phot_obj._pars_to_output, actual_pars_to_output)\n\n\n# tests previously written to psf_photometry\n\nPSF_SIZE = 11\nGAUSSIAN_WIDTH = 1.\nIMAGE_SIZE = 101\n\n# Position and FLUXES of test sources\nINTAB = Table([[50., 23, 12, 86], [50., 83, 80, 84],\n [np.pi * 10, 3.654, 20., 80 / np.sqrt(3)]],\n names=['x_0', 'y_0', 'flux_0'])\n\n# Create test psf\npsf_model = Gaussian2D(1. / (2 * np.pi * GAUSSIAN_WIDTH ** 2), PSF_SIZE // 2,\n PSF_SIZE // 2, GAUSSIAN_WIDTH, GAUSSIAN_WIDTH)\ntest_psf = discretize_model(psf_model, (0, PSF_SIZE), (0, PSF_SIZE),\n mode='oversample')\n\n# Set up grid for test image\nimage = np.zeros((IMAGE_SIZE, IMAGE_SIZE))\n\n# Add sources to test image\nfor x, y, flux in INTAB:\n model = Gaussian2D(flux / (2 * np.pi * GAUSSIAN_WIDTH ** 2),\n x, y, GAUSSIAN_WIDTH, GAUSSIAN_WIDTH)\n image += discretize_model(model, (0, IMAGE_SIZE), (0, IMAGE_SIZE),\n mode='oversample')\n\n# Some tests require an image with wider sources.\nWIDE_GAUSSIAN_WIDTH = 3.\nWIDE_INTAB = Table([[50, 23.2], [50.5, 1], [10, 20]],\n names=['x_0', 'y_0', 'flux_0'])\nwide_image = np.zeros((IMAGE_SIZE, IMAGE_SIZE))\n\n# Add sources to test image\nfor x, y, flux in WIDE_INTAB:\n model = Gaussian2D(flux / (2 * np.pi * WIDE_GAUSSIAN_WIDTH ** 2),\n x, y, WIDE_GAUSSIAN_WIDTH, WIDE_GAUSSIAN_WIDTH)\n wide_image += discretize_model(model, (0, IMAGE_SIZE), (0, IMAGE_SIZE),\n mode='oversample')\n\n\[email protected]('not HAS_SCIPY')\ndef test_psf_photometry_discrete():\n \"\"\" Test psf_photometry with discrete PRF model. 
\"\"\"\n\n prf = DiscretePRF(test_psf, subsampling=1)\n basic_phot = BasicPSFPhotometry(group_maker=DAOGroup(2),\n bkg_estimator=None, psf_model=prf,\n fitshape=7)\n f = basic_phot(image=image, init_guesses=INTAB)\n\n for n in ['x', 'y', 'flux']:\n assert_allclose(f[n + '_0'], f[n + '_fit'], rtol=1e-6)\n\n\[email protected]('not HAS_SCIPY')\ndef test_tune_coordinates():\n \"\"\"\n Test psf_photometry with discrete PRF model and coordinates that need\n to be adjusted in the fit.\n \"\"\"\n\n prf = DiscretePRF(test_psf, subsampling=1)\n prf.x_0.fixed = False\n prf.y_0.fixed = False\n # Shift all sources by 0.3 pixels\n intab = INTAB.copy()\n intab['x_0'] += 0.3\n\n basic_phot = BasicPSFPhotometry(group_maker=DAOGroup(2),\n bkg_estimator=None, psf_model=prf,\n fitshape=7)\n\n f = basic_phot(image=image, init_guesses=intab)\n for n in ['x', 'y', 'flux']:\n assert_allclose(f[n + '_0'], f[n + '_fit'], rtol=1e-3)\n\n\[email protected]('not HAS_SCIPY')\ndef test_psf_boundary():\n \"\"\"\n Test psf_photometry with discrete PRF model at the boundary of the data.\n \"\"\"\n\n prf = DiscretePRF(test_psf, subsampling=1)\n\n basic_phot = BasicPSFPhotometry(group_maker=DAOGroup(2),\n bkg_estimator=None, psf_model=prf,\n fitshape=7, aperture_radius=5.5)\n\n intab = Table(data=[[1], [1]], names=['x_0', 'y_0'])\n f = basic_phot(image=image, init_guesses=intab)\n assert_allclose(f['flux_fit'], 0, atol=1e-8)\n\n\[email protected]('not HAS_SCIPY')\ndef test_aperture_radius_value_error():\n \"\"\"\n Test psf_photometry with discrete PRF model at the boundary of the data.\n \"\"\"\n\n prf = DiscretePRF(test_psf, subsampling=1)\n\n basic_phot = BasicPSFPhotometry(group_maker=DAOGroup(2),\n bkg_estimator=None, psf_model=prf,\n fitshape=7)\n\n intab = Table(data=[[1], [1]], names=['x_0', 'y_0'])\n with pytest.raises(ValueError) as err:\n basic_phot(image=image, init_guesses=intab)\n\n assert 'aperture_radius is None' in str(err.value)\n\n\[email protected]('not HAS_SCIPY')\ndef test_psf_boundary_gaussian():\n \"\"\"\n Test psf_photometry with discrete PRF model at the boundary of the data.\n \"\"\"\n\n psf = IntegratedGaussianPRF(GAUSSIAN_WIDTH)\n\n basic_phot = BasicPSFPhotometry(group_maker=DAOGroup(2),\n bkg_estimator=None, psf_model=psf,\n fitshape=7)\n\n intab = Table(data=[[1], [1]], names=['x_0', 'y_0'])\n f = basic_phot(image=image, init_guesses=intab)\n assert_allclose(f['flux_fit'], 0, atol=1e-8)\n\n\[email protected]('not HAS_SCIPY')\ndef test_psf_photometry_gaussian():\n \"\"\"\n Test psf_photometry with Gaussian PSF model.\n \"\"\"\n\n psf = IntegratedGaussianPRF(sigma=GAUSSIAN_WIDTH)\n\n basic_phot = BasicPSFPhotometry(group_maker=DAOGroup(2),\n bkg_estimator=None, psf_model=psf,\n fitshape=7)\n f = basic_phot(image=image, init_guesses=INTAB)\n for n in ['x', 'y', 'flux']:\n assert_allclose(f[n + '_0'], f[n + '_fit'], rtol=1e-3)\n\n\[email protected]('not HAS_SCIPY')\[email protected](\"renormalize_psf\", (True, False))\ndef test_psf_photometry_gaussian2(renormalize_psf):\n \"\"\"\n Test psf_photometry with Gaussian PSF model from Astropy.\n \"\"\"\n\n psf = Gaussian2D(1. 
/ (2 * np.pi * GAUSSIAN_WIDTH ** 2), PSF_SIZE // 2,\n PSF_SIZE // 2, GAUSSIAN_WIDTH, GAUSSIAN_WIDTH)\n psf = prepare_psf_model(psf, xname='x_mean', yname='y_mean',\n renormalize_psf=renormalize_psf)\n\n basic_phot = BasicPSFPhotometry(group_maker=DAOGroup(2),\n bkg_estimator=None, psf_model=psf,\n fitshape=7)\n f = basic_phot(image=image, init_guesses=INTAB)\n\n for n in ['x', 'y']:\n assert_allclose(f[n + '_0'], f[n + '_fit'], rtol=1e-1)\n assert_allclose(f['flux_0'], f['flux_fit'], rtol=1e-1)\n\n\[email protected]('not HAS_SCIPY')\ndef test_psf_photometry_moffat():\n \"\"\"\n Test psf_photometry with Moffat PSF model from Astropy.\n \"\"\"\n\n psf = Moffat2D(1. / (2 * np.pi * GAUSSIAN_WIDTH ** 2), PSF_SIZE // 2,\n PSF_SIZE // 2, 1, 1)\n psf = prepare_psf_model(psf, xname='x_0', yname='y_0',\n renormalize_psf=False)\n\n basic_phot = BasicPSFPhotometry(group_maker=DAOGroup(2),\n bkg_estimator=None, psf_model=psf,\n fitshape=7)\n f = basic_phot(image=image, init_guesses=INTAB)\n f.pprint(max_width=-1)\n\n for n in ['x', 'y']:\n assert_allclose(f[n + '_0'], f[n + '_fit'], rtol=1e-3)\n # image was created with a gaussian, so flux won't match exactly\n assert_allclose(f['flux_0'], f['flux_fit'], rtol=1e-1)\n\n\[email protected]('not HAS_SCIPY')\ndef test_psf_fitting_data_on_edge():\n \"\"\"\n No mask is input explicitly here, but source 2 is so close to the\n edge that the subarray that's extracted gets a mask internally.\n \"\"\"\n\n psf_guess = IntegratedGaussianPRF(flux=1, sigma=WIDE_GAUSSIAN_WIDTH)\n psf_guess.flux.fixed = psf_guess.x_0.fixed = psf_guess.y_0.fixed = False\n basic_phot = BasicPSFPhotometry(group_maker=DAOGroup(2),\n bkg_estimator=None, psf_model=psf_guess,\n fitshape=7)\n\n outtab = basic_phot(image=wide_image, init_guesses=WIDE_INTAB)\n\n for n in ['x', 'y', 'flux']:\n assert_allclose(outtab[n + '_0'], outtab[n + '_fit'],\n rtol=0.05, atol=0.1)\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\nimport pytest\n\nfrom astropy.modeling.fitting import LevMarLSQFitter\nfrom astropy.nddata import NDData\nfrom astropy.table import Table\n\nfrom ..epsf import EPSFBuilder, EPSFFitter\nfrom ..epsf_stars import extract_stars, EPSFStar, EPSFStars\nfrom ...centroids import gaussian1d_moments\nfrom ...datasets import make_gaussian_sources_image\n\ntry:\n import scipy # noqa\n HAS_SCIPY = True\nexcept ImportError:\n HAS_SCIPY = False\n\n\[email protected]('not HAS_SCIPY')\nclass TestEPSFBuild:\n def setup_class(self):\n \"\"\"\n Create a simulated image for testing.\n \"\"\"\n\n from scipy.spatial import cKDTree\n\n shape = (500, 500)\n\n # define random star positions\n nstars = 50\n from astropy.utils.misc import NumpyRNGContext\n with NumpyRNGContext(12345): # seed for repeatability\n xx = np.random.uniform(low=0, high=shape[1], size=nstars)\n yy = np.random.uniform(low=0, high=shape[0], size=nstars)\n\n # enforce a minimum separation\n min_dist = 25\n coords = [(yy[0], xx[0])]\n for xxi, yyi in zip(xx, yy):\n newcoord = [yyi, xxi]\n dist, distidx = cKDTree([newcoord]).query(coords, 1)\n if np.min(dist) > min_dist:\n coords.append(newcoord)\n yy, xx = np.transpose(coords)\n\n with NumpyRNGContext(12345): # seed for repeatability\n zz = np.random.uniform(low=0, high=200000., size=len(xx))\n\n # define a table of model parameters\n self.stddev = 2.\n sources = Table()\n sources['amplitude'] = zz\n sources['x_mean'] = xx\n sources['y_mean'] = yy\n sources['x_stddev'] = np.zeros(len(xx)) + self.stddev\n 
sources['y_stddev'] = sources['x_stddev']\n sources['theta'] = 0.\n\n self.data = make_gaussian_sources_image(shape, sources)\n self.nddata = NDData(self.data)\n\n init_stars = Table()\n init_stars['x'] = xx.astype(int)\n init_stars['y'] = yy.astype(int)\n self.init_stars = init_stars\n\n def test_extract_stars(self):\n size = 25\n stars = extract_stars(self.nddata, self.init_stars, size=size)\n\n assert len(stars) == 41\n assert isinstance(stars, EPSFStars)\n assert isinstance(stars[0], EPSFStar)\n assert stars[0].data.shape == (size, size)\n\n def test_epsf_build(self):\n \"\"\"\n This is an end-to-end test of EPSFBuilder on a simulated image.\n \"\"\"\n\n size = 25\n oversampling = 4.\n stars = extract_stars(self.nddata, self.init_stars, size=size)\n epsf_builder = EPSFBuilder(oversampling=oversampling, maxiters=20,\n progress_bar=False)\n epsf, fitted_stars = epsf_builder(stars)\n\n ref_size = (size * oversampling) + 1\n assert epsf.data.shape == (ref_size, ref_size)\n\n y0 = int((ref_size - 1) / 2)\n z = epsf.data[y0, :]\n ampl, peak, sigma = gaussian1d_moments(z)\n assert_allclose(ampl, 0.002487, rtol=1e-4)\n assert_allclose(peak, y0, rtol=1e-3)\n assert_allclose(sigma, oversampling * self.stddev, rtol=1e-5)\n\n def test_epsf_build_invalid_fitter(self):\n \"\"\"\n Test that the input fitter is an EPSFFitter instance.\n \"\"\"\n\n with pytest.raises(TypeError):\n EPSFBuilder(fitter=EPSFFitter, maxiters=3)\n\n with pytest.raises(TypeError):\n EPSFBuilder(fitter=LevMarLSQFitter(), maxiters=3)\n\n with pytest.raises(TypeError):\n EPSFBuilder(fitter=LevMarLSQFitter, maxiters=3)\n" ]
[ [ "numpy.asanyarray", "numpy.zeros" ], [ "numpy.testing.assert_equal", "numpy.sqrt", "numpy.all", "numpy.testing.assert_array_equal", "numpy.mean", "numpy.testing.assert_allclose", "numpy.zeros" ], [ "numpy.min", "numpy.transpose", "numpy.testing.assert_allclose", "numpy.random.uniform", "scipy.spatial.cKDTree" ] ]
Jarred-Sumner/im2smpl
[ "cb3a09ee99815939e9f7d55479920a32703be9ce" ]
[ "main.py" ]
[ "# Software License Agreement (BSD License)\n#\n# Copyright (c) 2019, Zerong Zheng ([email protected])\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the <organization> nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nfrom __future__ import print_function, absolute_import, division\n\nimport os\nimport numpy as np\nimport cv2 as cv\nimport argparse\nimport random\nimport string\nimport shutil\nfrom subprocess import call\nimport pynvml\nimport time\n\npynvml.nvmlInit()\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--img_file', type=str, required=True, help='path to image file')\n parser.add_argument('--out_dir', type=str, required=True, help='output directory')\n return parser.parse_args()\n\n\ndef waitgpu(empty_thres_duration=7):\n empty_flag = 0\n while True:\n handle = pynvml.nvmlDeviceGetHandleByIndex(0)\n meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)\n usage_percent = float(meminfo.used)/float(meminfo.total)\n if usage_percent < 0.1:\n if empty_flag >= empty_thres_duration: # empty for 5 second\n break\n empty_flag += 1\n time.sleep(1)\n continue\n empty_flag = 0\n print('GPU is busy right now....waiting....')\n print('meminfo.used/meminfo.total = %f' % usage_percent)\n time.sleep(np.random.randint(5, 15))\n\n\ndef detect_human(fname, out_dir):\n \"\"\" obtains bounding box of the subject in the input image\"\"\"\n waitgpu()\n print('\\n\\nStep 1. 
Human Detection RCNN')\n # generate a temporal script to call RCNN\n shutil.copy('./detect_human.py', './AlphaPose/human-detection/tools/')\n temp_shname = ''.join(random.sample(string.ascii_letters + string.digits, 8)) + '.sh'\n temp_shname = os.path.join('./', temp_shname)\n with open(temp_shname, 'w') as fp:\n fp.write('#!/usr/local/bin/bash\\n')\n fp.write('cd ./AlphaPose/human-detection/tools\\n')\n fp.write('python2 detect_human.py --img_file %s --out_dir %s\\n'\n % (fname, out_dir))\n fp.write('cd ../../../\\n')\n call(['sh', temp_shname])\n os.remove(temp_shname)\n # os.remove('./AlphaPose/human-detection/tools/detect_human.py')\n\n\ndef crop_or_pad_img(fname, out_dir):\n \"\"\" crops or pads the original image to make the subject located at the center\n of the image and occupy 90% of the image\n \"\"\"\n print('\\n\\nStep 2. Image cropping or padding')\n img_dir, img_name = os.path.split(img_fname)\n with open(os.path.join(out_dir, img_name + '.bbox.txt'), 'r') as fp:\n lines = fp.readlines()\n if lines[-1] == '':\n lines = lines[:-1]\n if len(lines) > 1:\n print('Warning: More than 1 bounding boxes are detected. '\n 'Only the first one is used.')\n entries = lines[0].split(' ')\n xmin, ymin = int(entries[0]), int(entries[1])\n xmax, ymax = int(entries[2]), int(entries[3])\n x_center = int((xmin+xmax)/2)\n y_center = int((ymin+ymax)/2)\n edge_len = int(max(xmax-xmin, ymax-ymin) * 1.2)\n edge_len_half = int(edge_len/2)\n\n img = cv.imread(fname)\n cv.imwrite(os.path.join(out_dir, img_name[:-4]+'_orig.png'), img)\n if len(img.shape) == 2:\n img = cv.cvtColor(img, cv.COLOR_GRAY2RGB)\n h, w = img.shape[0], img.shape[1]\n img_pad = np.zeros((3*h, 3*w, 3), dtype=np.uint8)\n img_pad[h:(h*2), w:(w*2), :] = img\n crop_tl = (h+y_center-edge_len_half, w+x_center-edge_len_half)\n crop_dr = (h+y_center+edge_len_half, w+x_center+edge_len_half)\n img_crop = img_pad[crop_tl[0]:crop_dr[0], crop_tl[1]:crop_dr[1], :]\n cv.imwrite(os.path.join(out_dir, img_name), img_crop)\n cv.imwrite(os.path.join(out_dir, img_name), img_crop)\n\n\ndef infer_smpl_and_pose(fname, out_dir):\n waitgpu()\n print('\\n\\nStep 3a Body model estimation using HMR. 
')\n shutil.copy('./infer_smpl.py', './hmr/')\n temp_shname = ''.join(random.sample(string.ascii_letters + string.digits, 8)) + '.sh'\n temp_shname = os.path.join('./', temp_shname)\n with open(temp_shname, 'w') as fp:\n fp.write('#!/usr/local/bin/bash\\n')\n fp.write('cd ./hmr/\\n')\n fp.write('python2 infer_smpl.py --img_path %s --out_dir %s\\n' % (fname, out_dir))\n fp.write('cd ../\\n')\n call(['sh', temp_shname])\n os.remove(temp_shname)\n # os.remove('./hmr/infer_smpl.py')\n\n print('\\n\\nStep 3b Pose estimation using AlphaPose')\n img_dir, img_name = os.path.split(img_fname)\n tmp_folder = ''.join(random.sample(string.ascii_letters + string.digits, 8))\n os.mkdir(os.path.join('./AlphaPose/examples', tmp_folder))\n os.mkdir(os.path.join('./AlphaPose/examples', tmp_folder, 'demo'))\n os.mkdir(os.path.join('./AlphaPose/examples', tmp_folder, 'results'))\n call(['cp', os.path.join(out_dir, img_name),\n os.path.join('./AlphaPose/examples', tmp_folder, 'demo/1.jpg')])\n call(['./AlphaPose/run.sh', '--indir', os.path.join('./examples', tmp_folder, 'demo'),\n '--outdir', os.path.join('./examples', tmp_folder, 'results'), '--vis'])\n call(['mv', os.path.join('./AlphaPose/examples', tmp_folder, 'results/POSE/pred.txt'),\n os.path.join(out_dir, img_name+'.joints.txt')])\n call(['mv', os.path.join('./AlphaPose/examples', tmp_folder, 'results/POSE/scores.txt'),\n os.path.join(out_dir, img_name+'.joint_scores.txt')])\n call(['rm', '-r', os.path.join('./AlphaPose/examples', tmp_folder)])\n\n print('\\n\\nStep 3c Image segmentation')\n shutil.copy('./segment_by_parsing.py', './LIP_JPPNet/')\n temp_shname = ''.join(random.sample(string.ascii_letters + string.digits, 8)) + '.sh'\n temp_shname = os.path.join('./', temp_shname)\n with open(temp_shname, 'w') as fp:\n fp.write('#!/usr/local/bin/bash\\n')\n fp.write('cd ./LIP_JPPNet/\\n')\n fp.write('python2 segment_by_parsing.py --img_file %s --out_dir %s\\n' % (fname, out_dir))\n fp.write('cd ../\\n')\n call(['sh', temp_shname])\n os.remove(temp_shname)\n # os.remove('./LIP_JPPNet/segment_by_parsing.py')\n\n\ndef optimize_smpl(fname, out_dir):\n print('\\n\\nStep 4 SMPL model optimization')\n shutil.copy('./fit_3d_accurate.py', './smplify_public/code/')\n temp_shname = ''.join(random.sample(string.ascii_letters + string.digits, 8)) + '.sh'\n temp_shname = os.path.join('./', temp_shname)\n with open(temp_shname, 'w') as fp:\n fp.write('#!/usr/local/bin/bash\\n')\n fp.write('cd ./smplify_public/code\\n')\n fp.write('python2 fit_3d_accurate.py --img_file %s --out_dir %s\\n' % (fname, out_dir))\n fp.write('cd ../../\\n')\n call(['sh', temp_shname])\n os.remove(temp_shname)\n # os.remove('smplify_public/code/fit_3d_accurate.py')\n\n\ndef main(img_fname, out_dir):\n print('image file: ' + img_fname)\n print('output directory: ' + out_dir)\n if not os.path.isfile(img_fname):\n raise IOError('Image file does not exist!')\n if not os.path.exists(out_dir):\n os.mkdir(out_dir)\n\n detect_human(img_fname, out_dir)\n crop_or_pad_img(img_fname, out_dir)\n infer_smpl_and_pose(img_fname, out_dir)\n optimize_smpl(img_fname, out_dir)\n\n\nif __name__ == '__main__':\n args = parse_args()\n img_fname = args.img_file\n out_dir = args.out_dir\n img_fname = os.path.abspath(img_fname)\n out_dir = os.path.abspath(out_dir)\n main(img_fname, out_dir)\n" ]
[ [ "numpy.zeros", "numpy.random.randint" ] ]
mostafa-mahmoud/HyPRec
[ "f18318f179dd9f9af7cf01a11f13f0aefb42b3bb" ]
[ "tests/collaborative_tests.py" ]
[ "#!/usr/bin/env python\nimport numpy\nimport unittest\nfrom lib.abstract_recommender import AbstractRecommender\nfrom lib.collaborative_filtering import CollaborativeFiltering\nfrom lib.evaluator import Evaluator\nfrom util.data_parser import DataParser\nfrom util.model_initializer import ModelInitializer\n\n\nclass TestcaseBase(unittest.TestCase):\n def setUp(self):\n \"\"\"\n Setup method that is called at the beginning of each test.\n \"\"\"\n self.documents, self.users = 30, 4\n documents_cnt, users_cnt = self.documents, self.users\n self.n_factors = 5\n self.n_iterations = 20\n self.k_folds = 3\n self.hyperparameters = {'n_factors': self.n_factors, '_lambda': 0.01}\n self.options = {'k_folds': self.k_folds, 'n_iterations': self.n_iterations}\n self.initializer = ModelInitializer(self.hyperparameters.copy(), self.n_iterations)\n\n def mock_get_ratings_matrix(self=None):\n return [[int(not bool((article + user) % 3)) for article in range(documents_cnt)]\n for user in range(users_cnt)]\n self.ratings_matrix = numpy.array(mock_get_ratings_matrix())\n self.evaluator = Evaluator(self.ratings_matrix)\n setattr(DataParser, \"get_ratings_matrix\", mock_get_ratings_matrix)\n\n\nclass TestALS(TestcaseBase):\n def runTest(self):\n cf = CollaborativeFiltering(self.initializer, self.evaluator, self.hyperparameters,\n self.options, load_matrices=False)\n self.assertEqual(cf.n_factors, self.n_factors)\n self.assertEqual(cf.n_items, self.documents)\n cf.train()\n self.assertEqual(cf.get_predictions().shape, (self.users, self.documents))\n self.assertTrue(isinstance(cf, AbstractRecommender))\n shape = (self.users, self.documents)\n ratings = cf.get_ratings()\n self.assertLessEqual(numpy.amax(ratings), 1 + 1e-6)\n self.assertGreaterEqual(numpy.amin(ratings), -1e-6)\n self.assertTrue(ratings.shape == shape)\n rounded_predictions = cf.rounded_predictions()\n self.assertLessEqual(numpy.amax(rounded_predictions), 1 + 1e-6)\n self.assertGreaterEqual(numpy.amin(rounded_predictions), -1e-6)\n self.assertTrue(rounded_predictions.shape == shape)\n recall = cf.evaluator.calculate_recall(ratings, cf.get_predictions())\n self.assertTrue(-1e-6 <= recall <= 1 + 1e-6)\n random_user = int(numpy.random.random() * self.users)\n random_item = int(numpy.random.random() * self.documents)\n random_prediction = cf.predict(random_user, random_item)\n self.assertTrue(isinstance(random_prediction, numpy.float64))\n" ]
[ [ "numpy.amin", "numpy.amax", "numpy.random.random" ] ]
dcronbach/pandapipes
[ "312fef81ddd0fb3eb23ec1c5bbc2848d568faa52", "312fef81ddd0fb3eb23ec1c5bbc2848d568faa52" ]
[ "pandapipes/test/io/test_file_io.py", "pandapipes/test/openmodelica_comparison/test_water_openmodelica.py" ]
[ "# Copyright (c) 2020 by Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.\n\nimport os\n\nimport pandapipes\nimport pytest\nfrom pandapower.test.toolbox import tempdir\nfrom pandas.testing import assert_frame_equal\n\n\n# @pytest.fixture()\ndef load_net():\n # create test network\n\n net = pandapipes.create_empty_network(\"test_net\", fluid=\"lgas\")\n j1 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15,\n name=\"Connection to External Grid\", geodata=(0, 0))\n j2 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15, name=\"Junction 2\",\n geodata=(2, 0))\n j3 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15, name=\"Junction 3\",\n geodata=(7, 4))\n j4 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15, name=\"Junction 4\",\n geodata=(7, -4))\n j5 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15, name=\"Junction 5\",\n geodata=(5, 3))\n j6 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15, name=\"Junction 6\",\n geodata=(5, -3))\n\n pandapipes.create_ext_grid(net, junction=j1, p_bar=1.1, t_k=293.15, name=\"Grid Connection\")\n\n pandapipes.create_pipe_from_parameters(net, from_junction=j1, to_junction=j2, length_km=10,\n diameter_m=0.05, name=\"Pipe 1\", geodata=[(0, 0), (2, 0)])\n pandapipes.create_pipe_from_parameters(net, from_junction=j2, to_junction=j3, length_km=2,\n diameter_m=0.05, name=\"Pipe 2\",\n geodata=[(2, 0), (2, 4), (7, 4)])\n pandapipes.create_pipe_from_parameters(net, from_junction=j2, to_junction=j4, length_km=2.5,\n diameter_m=0.05, name=\"Pipe 3\",\n geodata=[(2, 0), (2, -4), (7, -4)])\n pandapipes.create_pipe_from_parameters(net, from_junction=j3, to_junction=j5, length_km=1,\n diameter_m=0.05, name=\"Pipe 4\",\n geodata=[(7, 4), (7, 3), (5, 3)])\n pandapipes.create_pipe_from_parameters(net, from_junction=j4, to_junction=j6, length_km=1,\n diameter_m=0.05, name=\"Pipe 5\",\n geodata=[(7, -4), (7, -3), (5, -3)])\n\n pandapipes.create_valve(net, from_junction=j5, to_junction=j6, diameter_m=0.05,\n opened=True)\n\n pandapipes.create_sink(net, junction=j4, mdot_kg_per_s=5.45e-5, name=\"Sink 1\")\n\n pandapipes.create_source(net, junction=j3, mdot_kg_per_s=3.45e-5)\n\n return net\n\n\ndef test_pickle(tempdir):\n \"\"\"\n Checks if a network saved and reloaded as a pickle file is identical.\n :return:\n :rtype:\n \"\"\"\n\n net = load_net()\n filename = os.path.join(tempdir, \"test_net_1.p\")\n\n # save test network\n pandapipes.to_pickle(net, filename)\n\n # load test network\n net2 = pandapipes.from_pickle(filename)\n\n # check if saved and loaded versions are identical\n assert pandapipes.nets_equal(net, net2), \"Error in comparison after saving to Pickle.\"\n\n\ndef test_json(tempdir):\n \"\"\"\n Checks if a network saved and reloaded as a json file is identical.\n :return:\n :rtype:\n \"\"\"\n net = load_net()\n filename = os.path.join(tempdir, \"test_net_1.json\")\n\n # save test network\n pandapipes.to_json(net, filename)\n\n # load test network\n net2 = pandapipes.from_json(filename)\n\n # check if saved and loaded versions are identical\n assert_frame_equal(net.pipe_geodata, net2.pipe_geodata)\n del net.pipe_geodata\n del net2.pipe_geodata\n\n assert pandapipes.nets_equal(net, net2), \"Error in comparison after saving to JSON.\"\n\n\ndef test_json_string():\n \"\"\"\n Checks if a network saved and reloaded as a json file is 
identical.\n :return:\n :rtype:\n \"\"\"\n net = load_net()\n\n # save test network\n json_string = pandapipes.to_json(net)\n\n # load test network\n net2 = pandapipes.from_json_string(json_string)\n\n # check if saved and loaded versions are identical\n assert_frame_equal(net.pipe_geodata, net2.pipe_geodata)\n del net.pipe_geodata\n del net2.pipe_geodata\n\n assert pandapipes.nets_equal(net, net2),\\\n \"Error in comparison after saving to JSON string.\"\n\n\nif __name__ == '__main__':\n pytest.main([\"test_file_io.py\"])\n", "# Copyright (c) 2020 by Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.\n\nimport os\nimport pytest\nimport numpy as np\nimport pandapipes.networks.simple_water_networks as nw\nfrom pandapipes.pipeflow import logger as pf_logger\nfrom pandapipes.test.openmodelica_comparison.pipeflow_openmodelica_comparison\\\n import pipeflow_openmodelica_comparison\n\ntry:\n import pplog as logging\nexcept ImportError:\n import logging\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\npf_logger.setLevel(logging.WARNING)\n\n\n# ---------- TEST AREA: combined networks ----------\n# mixed_net\ndef test_case_combined_mixed(log_results=False):\n net = nw.water_combined_mixed()\n p_diff, v_diff_abs = pipeflow_openmodelica_comparison(net, log_results)\n assert np.all(p_diff < 0.01)\n assert np.all(v_diff_abs < 0.05)\n\n\n# versatility\ndef test_case_combined_versatility(log_results=False):\n net = nw.water_combined_versatility()\n p_diff, v_diff_abs = pipeflow_openmodelica_comparison(net, log_results)\n assert np.all(p_diff < 0.06) # only in one place the comparison for 0.01 is not correct\n assert np.all(v_diff_abs < 0.05)\n\n\n# ---------- TEST AREA: meshed networks ----------\n# delta\ndef test_case_meshed_delta(log_results=False):\n net = nw.water_meshed_delta()\n p_diff, v_diff_abs = pipeflow_openmodelica_comparison(net, log_results)\n assert np.all(p_diff < 0.01)\n assert np.all(v_diff_abs < 0.05)\n\n\n# two_valves\ndef test_case_meshed_2valves(log_results=False):\n net = nw.water_meshed_2valves()\n p_diff, v_diff_abs = pipeflow_openmodelica_comparison(net, log_results)\n assert np.all(p_diff < 0.01)\n assert np.all(v_diff_abs < 0.05)\n\n\n# pumps\ndef test_case_meshed_pumps(log_results=False):\n net = nw.water_meshed_pumps()\n p_diff, v_diff_abs = pipeflow_openmodelica_comparison(net, log_results)\n assert np.all(p_diff < 0.02) # in two places the comparison for 0.01 is not correct\n assert np.all(v_diff_abs < 0.05)\n\n\n# heights\ndef test_case_meshed_heights(log_results=False):\n net = nw.water_meshed_heights()\n p_diff, v_diff_abs = pipeflow_openmodelica_comparison(net, log_results)\n assert np.all(p_diff < 0.01)\n assert np.all(v_diff_abs < 0.05)\n\n\n# ---------- TEST AREA: one pipe ----------\n# pipe_1\ndef test_case_one_pipe_1(log_results=False):\n net = nw.water_one_pipe1()\n p_diff, v_diff_abs = pipeflow_openmodelica_comparison(net, log_results)\n assert np.all(p_diff < 0.01)\n assert np.all(v_diff_abs < 0.05)\n\n\n# pipe_2\ndef test_case_one_pipe_2(log_results=False):\n net = nw.water_one_pipe2()\n p_diff, v_diff_abs = pipeflow_openmodelica_comparison(net, log_results)\n assert np.all(p_diff < 0.01)\n assert np.all(v_diff_abs < 0.05)\n\n\n# pipe_3\ndef test_case_one_pipe_3(log_results=False):\n net = nw.water_one_pipe3()\n p_diff, v_diff_abs = pipeflow_openmodelica_comparison(net, 
log_results)\n assert np.all(p_diff < 0.01)\n assert np.all(v_diff_abs < 0.05)\n\n\n# ---------- TEST AREA: strand net ----------\n# cross_3ext\ndef test_case_strand_net_cross3ext(log_results=False):\n net = nw.water_strand_cross()\n p_diff, v_diff_abs = pipeflow_openmodelica_comparison(net, log_results)\n assert np.all(p_diff < 0.01)\n assert np.all(v_diff_abs < 0.05)\n\n\n# strand_net\ndef test_case_strand_net(log_results=False):\n net = nw.water_simple_strand_net()\n p_diff, v_diff_abs = pipeflow_openmodelica_comparison(net, log_results)\n assert np.all(p_diff < 0.01)\n assert np.all(v_diff_abs < 0.05)\n\n\n# two_pipes\ndef test_case_strand_net_2pipes(log_results=False):\n net = nw.water_strand_2pipes()\n p_diff, v_diff_abs = pipeflow_openmodelica_comparison(net, log_results)\n assert np.all(p_diff < 0.01)\n assert np.all(v_diff_abs < 0.05)\n\n\n# two_pumps\ndef test_case_strand_net_2pumps(log_results=False):\n net = nw.water_strand_net_2pumps()\n p_diff, v_diff_abs = pipeflow_openmodelica_comparison(net, log_results)\n assert np.all(p_diff < 0.01)\n assert np.all(v_diff_abs < 0.05)\n\n\n# ---------- TEST AREA: t_cross ----------\n# t_cross\ndef test_case_tcross(log_results=False):\n net = nw.water_tcross()\n p_diff, v_diff_abs = pipeflow_openmodelica_comparison(net, log_results)\n assert np.all(p_diff < 0.01)\n assert np.all(v_diff_abs < 0.05)\n\n\n# valves\ndef test_case_tcross_valves(log_results=False):\n net = nw.water_tcross_valves()\n p_diff, v_diff_abs = pipeflow_openmodelica_comparison(net, log_results)\n assert np.all(p_diff < 0.4) # only in one place the comparison for 0.01 is not correct\n assert np.all(v_diff_abs < 0.05)\n\n\n# ---------- TEST AREA: two pressure junctions ----------\n# two_pipes\ndef test_case_2eg_two_pipes(log_results=False):\n net = nw.water_2eg_two_pipes()\n p_diff, v_diff_abs = pipeflow_openmodelica_comparison(net, log_results)\n assert np.all(p_diff < 0.01)\n assert np.all(v_diff_abs < 0.05)\n\n\nif __name__ == \"__main__\":\n pytest.main([os.path.join(os.path.dirname(__file__), \"test_water_openmodelica.py\")])\n" ]
[ [ "pandas.testing.assert_frame_equal" ], [ "numpy.all" ] ]
LiuHaolan/models
[ "1639b3039237c3997c51ff87f0b6113bb2e8d236", "1639b3039237c3997c51ff87f0b6113bb2e8d236" ]
[ "scripts/compare_speed_with_pytorch.py", "Vision/classification/image/poseNet/train_oneflow.py" ]
[ "import numpy as np\n\nimport time\nimport tempfile\nimport os\nimport importlib.util\nimport argparse\nfrom typing import Sequence\nimport subprocess\nimport re\n\n\nimport oneflow as flow\nimport oneflow._oneflow_internal as oneflow_internal\n\n\nDEFAULT_TIMES = 20\ngpu_memory_used_by_oneflow = 0\n\n\ndef import_file(path):\n spec = importlib.util.spec_from_file_location(\"mod\", path)\n mod = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(mod)\n return mod\n\n\ndef sync(x):\n if test_oneflow:\n x.numpy()\n else:\n x.cpu()\n\n\ndef gpu_memory_used():\n output = subprocess.check_output(\n [\n \"nvidia-smi\",\n \"--query-compute-apps=pid,used_gpu_memory\",\n \"--format=csv,noheader\",\n ]\n )\n output = output.decode(\"utf-8\").strip()\n my_pid = os.getpid()\n mem_used_by_me = 0\n for line in output.split(\"\\n\"):\n pid, mem_used = map(int, re.split(\",? \", line)[:2])\n if pid == my_pid:\n mem_used_by_me += mem_used\n return mem_used_by_me\n\n\ndef print_rank_0(*args, **kwargs):\n rank = int(os.getenv(\"RANK\", \"0\"))\n if rank == 0:\n print(*args, **kwargs)\n\n\ndef test(\n model_path: str,\n module_name: str,\n input_shape: Sequence[int],\n disable_backward=False,\n times=DEFAULT_TIMES,\n no_verbose=False,\n ddp=False,\n ddp_broadcast_buffers=False,\n show_memory=True,\n):\n framework_name = \"OneFlow\" if test_oneflow else \"PyTorch\"\n if test_oneflow:\n python_module = import_file(model_path)\n torch = flow\n else:\n with open(model_path) as f:\n buf = f.read()\n\n lines = buf.split(\"\\n\")\n for i, line in enumerate(lines):\n if \"import\" not in line and len(line.strip()) != 0:\n break\n lines = (\n lines[:i]\n + [\n \"import torch as flow\",\n \"import torch.nn as nn\",\n \"from torch import Tensor\",\n \"from torch.nn import Parameter\",\n ]\n + lines[i:]\n )\n buf = \"\\n\".join(lines)\n with tempfile.NamedTemporaryFile(\"w\", suffix=\".py\") as f:\n f.write(buf)\n f.flush()\n python_module = import_file(f.name)\n\n import torch\n\n if ddp:\n import torch.distributed as dist\n\n local_rank_env_var = os.getenv(\"LOCAL_RANK\")\n assert local_rank_env_var is not None\n rank = int(local_rank_env_var)\n torch.cuda.set_device(rank)\n\n dist.init_process_group(backend=\"nccl\", init_method=\"env://\")\n\n Net = getattr(python_module, module_name)\n\n warmup_times = 5\n\n m = Net()\n m = m.to(\"cuda\")\n\n if ddp:\n if test_oneflow:\n m = torch.nn.parallel.DistributedDataParallel(\n m, broadcast_buffers=ddp_broadcast_buffers\n )\n else:\n m = torch.nn.parallel.DistributedDataParallel(\n m, device_ids=[rank], broadcast_buffers=ddp_broadcast_buffers\n )\n\n def run_model(m, x):\n if disable_backward:\n with torch.no_grad():\n return m(x)\n else:\n return m(x)\n\n learning_rate = 0.01\n mom = 0.9\n optimizer = torch.optim.SGD(m.parameters(), lr=learning_rate, momentum=mom)\n\n # input tensor of OneFlow should set requires_grad=False due to a bug\n x = torch.tensor(\n np.ones(input_shape).astype(np.float32), requires_grad=not test_oneflow\n ).to(\"cuda\")\n for i in range(warmup_times + times):\n if i == warmup_times:\n start = time.time()\n y = run_model(m, x)\n if not disable_backward:\n y = y.sum()\n y.backward()\n optimizer.zero_grad()\n optimizer.step()\n sync(y)\n end = time.time()\n total_time_ms = (end - start) * 1000\n time_per_run_ms = total_time_ms / times\n if no_verbose:\n print_rank_0(f\"{framework_name}: {time_per_run_ms:.1f}ms\")\n else:\n print_rank_0(\n f\"{framework_name} {module_name} time: {time_per_run_ms:.1f}ms (= {total_time_ms:.1f}ms / {times}, 
input_shape={input_shape}{', backward is disabled' if disable_backward else ''}{', ddp' if ddp else ''}{', ddp_broadcast_buffers is disabled' if not ddp_broadcast_buffers else ''}{f', world size={flow.env.get_world_size()}' if flow.env.get_world_size() != 1 else ''})\"\n )\n if show_memory:\n global gpu_memory_used_by_oneflow\n if test_oneflow:\n gpu_memory_used_by_oneflow = gpu_memory_used()\n\n print_rank_0(\n f\"{framework_name} GPU used (rank 0): {gpu_memory_used_by_oneflow} MiB\"\n )\n else:\n print_rank_0(\n f\"{framework_name} GPU used (rank 0, estimated): {gpu_memory_used() - gpu_memory_used_by_oneflow} MiB\"\n )\n if ddp and not test_oneflow:\n import torch.distributed as dist\n\n dist.destroy_process_group()\n\n return time_per_run_ms\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"model_path\", type=str)\n parser.add_argument(\"module_name\", type=str)\n parser.add_argument(\"input_shape\", type=str)\n parser.add_argument(\"--times\", type=int, default=DEFAULT_TIMES)\n parser.add_argument(\"--disable-backward\", action=\"store_true\")\n parser.add_argument(\"--no-verbose\", action=\"store_true\")\n parser.add_argument(\"--ddp\", action=\"store_true\")\n parser.add_argument(\"--ddp-no-broadcast-buffers\", action=\"store_true\")\n parser.add_argument(\"--only-oneflow\", action=\"store_true\")\n parser.add_argument(\"--only-pytorch\", action=\"store_true\")\n parser.add_argument(\"--no-show-memory\", action=\"store_true\")\n\n args = parser.parse_args()\n input_shape = list(map(int, args.input_shape.split(\"x\")))\n\n global test_oneflow\n\n if not args.only_pytorch:\n # NOTE: PyTorch must run after OneFlow for correct memory usage\n test_oneflow = True\n oneflow_time = test(\n args.model_path,\n args.module_name,\n input_shape,\n disable_backward=args.disable_backward,\n times=args.times,\n no_verbose=args.no_verbose,\n ddp=args.ddp,\n ddp_broadcast_buffers=not args.ddp_no_broadcast_buffers,\n show_memory=not args.no_show_memory,\n )\n\n if not args.only_oneflow:\n test_oneflow = False\n pytorch_time = test(\n args.model_path,\n args.module_name,\n input_shape,\n disable_backward=args.disable_backward,\n times=args.times,\n no_verbose=args.no_verbose,\n ddp=args.ddp,\n ddp_broadcast_buffers=not args.ddp_no_broadcast_buffers,\n show_memory=not args.no_show_memory,\n )\n\n if not args.only_pytorch and not args.only_oneflow:\n relative_speed = pytorch_time / oneflow_time\n if args.no_verbose:\n print_rank_0(f\"Relative speed: {relative_speed:.2f}\")\n else:\n print_rank_0(\n f\"Relative speed: {relative_speed:.2f} (= {pytorch_time:.1f}ms / {oneflow_time:.1f}ms)\"\n )\n", "import oneflow as flow\nimport argparse\nimport numpy as np\nimport os\nimport time\n\nfrom models.posenet import PoseNet\n\nfrom utils.ofrecord_data_utils import OFRecordDataLoader\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser(\"flags for train posenet\")\n parser.add_argument(\n \"--save_checkpoint_path\",\n type=str,\n default=\"./checkpoints\",\n help=\"save checkpoint root dir\",\n )\n parser.add_argument(\n \"--load_checkpoint\", type=str, default=\"\", help=\"load checkpoint\"\n )\n parser.add_argument(\n \"--ofrecord_path\", type=str, default=\"./ofrecord\", help=\"dataset path\"\n )\n # training hyper-parameters\n parser.add_argument(\n \"--learning_rate\", type=float, default=0.001, help=\"learning rate\"\n )\n parser.add_argument(\"--mom\", type=float, default=0.9, help=\"momentum\")\n parser.add_argument(\"--epochs\", type=int, default=1000, 
help=\"training epochs\")\n parser.add_argument(\n \"--train_batch_size\", type=int, default=32, help=\"train batch size\"\n )\n parser.add_argument(\"--val_batch_size\", type=int, default=32, help=\"val batch size\")\n\n return parser.parse_args()\n\n\ndef main(args):\n flow.enable_eager_execution()\n\n train_data_loader = OFRecordDataLoader(\n ofrecord_root=args.ofrecord_path,\n mode=\"train\",\n # NOTE(Liang Depeng): needs to explictly set the dataset size\n dataset_size=7459,\n batch_size=args.train_batch_size,\n )\n\n val_data_loader = OFRecordDataLoader(\n ofrecord_root=args.ofrecord_path,\n mode=\"val\",\n dataset_size=1990,\n batch_size=args.val_batch_size,\n )\n\n # oneflow init\n start_t = time.time()\n posenet_module = PoseNet()\n\n if args.load_checkpoint != \"\":\n posenet_module.load_state_dict(flow.load(args.load_checkpoint))\n\n end_t = time.time()\n print(\"init time : {}\".format(end_t - start_t))\n\n of_cross_entropy = flow.nn.CrossEntropyLoss()\n\n posenet_module.to(\"cuda\")\n of_cross_entropy.to(\"cuda\")\n\n of_sgd = flow.optim.SGD(\n posenet_module.parameters(), lr=args.learning_rate, momentum=args.mom\n )\n\n of_losses = []\n all_samples = len(val_data_loader) * args.val_batch_size\n print_interval = 100\n\n for epoch in range(args.epochs):\n posenet_module.train()\n\n for b in range(len(train_data_loader)):\n image, label = train_data_loader.get_batch()\n\n # oneflow train\n start_t = time.time()\n image = image.to(\"cuda\")\n label = label.to(\"cuda\")\n logits = posenet_module(image)\n loss = of_cross_entropy(logits, label)\n\n loss.backward()\n of_sgd.step()\n of_sgd.zero_grad()\n end_t = time.time()\n if b % print_interval == 0:\n l = loss.numpy()\n of_losses.append(l)\n print(\n \"epoch {} train iter {} oneflow loss {}, train time : {}\".format(\n epoch, b, l, end_t - start_t\n )\n )\n\n print(\"epoch %d train done, start validation\" % epoch)\n posenet_module.eval()\n correct_of = 0.0\n for b in range(len(val_data_loader)):\n image, label = val_data_loader.get_batch()\n\n start_t = time.time()\n image = image.to(\"cuda\")\n with flow.no_grad():\n logits = posenet_module(image)\n predictions = logits.softmax()\n of_predictions = predictions.numpy()\n clsidxs = np.argmax(of_predictions, axis=1)\n\n label_nd = label.numpy()\n for i in range(args.val_batch_size):\n if clsidxs[i] == label_nd[i]:\n correct_of += 1\n end_t = time.time()\n\n print(\"epoch %d, oneflow top1 val acc: %f\" % (epoch, correct_of / all_samples))\n flow.save(\n posenet_module.state_dict(),\n os.path.join(\n args.save_checkpoint_path,\n \"epoch_%d_val_acc_%f\" % (epoch, correct_of / all_samples),\n ),\n )\n\n writer = open(\"of_losses.txt\", \"w\")\n for o in of_losses:\n writer.write(\"%f\\n\" % o)\n writer.close()\n\n\nif __name__ == \"__main__\":\n args = _parse_args()\n main(args)\n" ]
[ [ "torch.distributed.init_process_group", "torch.cuda.set_device", "numpy.ones", "torch.no_grad", "torch.distributed.destroy_process_group", "torch.nn.parallel.DistributedDataParallel" ], [ "numpy.argmax" ] ]
ksasi/DICTOL_python
[ "d2ea3f2a2fdb07c76e63d75e11edf9c8b11d9e69" ]
[ "dictol/base.py" ]
[ "from __future__ import print_function\nimport numpy as np\n\n\nclass BaseModel(object):\n \"\"\"\n base dictionary learning model for classification\n \"\"\"\n # def __init__(self)\n def predict(self, data):\n raise NotImplementedError\n\n\n def evaluate(self, data, label):\n pred = self.predict(data)\n acc = np.sum(pred == label)/float(len(label))\n print('accuracy = {:.2f} %'.format(100 * acc))\n return acc\n\n" ]
[ [ "numpy.sum" ] ]
sebastianbernasek/growth
[ "6d1cace75b19ad8b6130d0940584c24dd26bbe91" ]
[ "growth/cells/cells.py" ]
[ "from os.path import join\nimport numpy as np\nfrom functools import reduce\nfrom operator import add\n\n\nclass Cell:\n\n def __init__(self, xy=None, chromosomes=None, lineage=''):\n\n # set generation\n self.lineage = lineage\n\n # set chromosomes\n if chromosomes is None:\n chromosomes = np.array([0, 1])\n self.chromosomes = chromosomes\n\n # set position\n if xy is None:\n xy = np.zeros(2, dtype=float)\n self.xy = xy\n\n @property\n def generation(self):\n return len(self.lineage)\n\n @property\n def genotype(self):\n return self.chromosomes.sum()\n\n @property\n def phenotype(self):\n return np.random.normal(loc=self.genotype, scale=1.)\n\n def copy(self):\n \"\"\" Returns copy of cell. \"\"\"\n return self.__class__(self.xy, self.chromosomes, self.lineage)\n\n def set_xy(self, xy):\n self.xy = xy\n\n def recombine(self, rate=0.):\n\n # duplicate chromosomes\n chromosomes = np.tile(self.chromosomes, 2)\n\n # recombination\n if np.random.random() <= rate:\n chromosomes.sort()\n\n return chromosomes\n\n def divide(self, recombination_rate=0., reference_population=1000):\n\n # set average spacing between cells\n spacing = np.sqrt(2/reference_population) / 1e5\n\n # perform recombination\n chromosomes = self.recombine(rate=recombination_rate)\n\n # determine child positions\n jitter = np.random.normal(scale=spacing, size=(2, 2))\n xy_a, xy_b = self.xy+jitter[0], self.xy+jitter[1]\n\n # instantiate children\n daughter_a = self.__class__(xy_a, chromosomes[:2], self.lineage+'0')\n daughter_b = self.__class__(xy_b, chromosomes[2:], self.lineage+'1')\n\n return [daughter_a, daughter_b]\n\n def grow(self, max_generation=3, **kwargs):\n \"\"\"\n Recursive growth.\n \"\"\"\n\n # stopping criterion\n if self.generation >= max_generation:\n return [self]\n\n # divide\n else:\n children = self.divide(**kwargs)\n recurse = lambda x: x.grow(max_generation=max_generation, **kwargs)\n return reduce(add, map(recurse, children))\n" ]
[ [ "numpy.random.random", "numpy.sqrt", "numpy.tile", "numpy.random.normal", "numpy.array", "numpy.zeros" ] ]
Guaguago/CommonGen
[ "0a81b4edb8cd111571eba817eb994420f1070c48" ]
[ "evaluation/Traditional/eval_metrics/rouge/rouge.py" ]
[ "#!/usr/bin/env python\n# \n# File Name : rouge.py\n#\n# Description : Computes ROUGE-L metric as described by Lin and Hovey (2004)\n#\n# Creation Date : 2015-01-07 06:03\n# Author : Ramakrishna Vedantam <[email protected]>\n\nimport numpy as np\nimport pdb\n\ndef my_lcs(string, sub):\n \"\"\"\n Calculates longest common subsequence for a pair of tokenized strings\n :param string : list of str : tokens from a string split using whitespace\n :param sub : list of str : shorter string, also split using whitespace\n :returns: length (list of int): length of the longest common subsequence between the two strings\n\n Note: my_lcs only gives length of the longest common subsequence, not the actual LCS\n \"\"\"\n if(len(string)< len(sub)):\n sub, string = string, sub\n\n lengths = [[0 for i in range(0,len(sub)+1)] for j in range(0,len(string)+1)]\n\n for j in range(1,len(sub)+1):\n for i in range(1,len(string)+1):\n if(string[i-1] == sub[j-1]):\n lengths[i][j] = lengths[i-1][j-1] + 1\n else:\n lengths[i][j] = max(lengths[i-1][j] , lengths[i][j-1])\n\n return lengths[len(string)][len(sub)]\n\nclass Rouge():\n '''\n Class for computing ROUGE-L score for a set of candidate sentences for the MS COCO test set\n\n '''\n def __init__(self):\n # vrama91: updated the value below based on discussion with Hovey\n self.beta = 1.2\n\n def calc_score(self, candidate, refs):\n \"\"\"\n Compute ROUGE-L score given one candidate and references for an image\n :param candidate: str : candidate sentence to be evaluated\n :param refs: list of str : COCO reference sentences for the particular image to be evaluated\n :returns score: int (ROUGE-L score for the candidate evaluated against references)\n \"\"\"\n assert(len(candidate)==1)\t\n assert(len(refs)>0) \n prec = []\n rec = []\n\n # split into tokens\n token_c = candidate[0].split(\" \")\n \t\n for reference in refs:\n # split into tokens\n token_r = reference.split(\" \")\n # compute the longest common subsequence\n lcs = my_lcs(token_r, token_c)\n prec.append(lcs/float(len(token_c)))\n rec.append(lcs/float(len(token_r)))\n\n prec_max = max(prec)\n rec_max = max(rec)\n\n if(prec_max!=0 and rec_max !=0):\n score = ((1 + self.beta**2)*prec_max*rec_max)/float(rec_max + self.beta**2*prec_max)\n else:\n score = 0.0\n return score\n\n def compute_score(self, gts, res):\n \"\"\"\n Computes Rouge-L score given a set of reference and candidate sentences for the dataset\n Invoked by evaluate_captions.py \n :param hypo_for_image: dict : candidate / test sentences with \"image name\" key and \"tokenized sentences\" as values \n :param ref_for_image: dict : reference MS-COCO sentences with \"image name\" key and \"tokenized sentences\" as values\n :returns: average_score: float (mean ROUGE-L score computed by averaging scores for all the images)\n \"\"\"\n assert(gts.keys() == res.keys())\n imgIds = gts.keys()\n\n score = []\n for id in imgIds:\n hypo = res[id]\n ref = gts[id]\n\n score.append(self.calc_score(hypo, ref))\n\n # Sanity check.\n assert(type(hypo) is list)\n assert(len(hypo) == 1)\n assert(type(ref) is list)\n assert(len(ref) > 0)\n\n average_score = np.mean(np.array(score))\n return average_score, np.array(score)\n\n def method(self):\n return \"Rouge\"\n\n# if __name__ == \"__main__\":\n#\n# cand_1 = \"A boy picks an apple tree and places it into bags.\"\n# cand_2 = \"Two girls pick many red apples from trees and place them in a large bag.\"\n# ref = \"A boy picks an apple from a tree and places it into bags.\"\n# concepts = [\"pick\", \"apple\", \"tree\", 
\"place\", \"bag\"]\n#\n#\n# rouge = Rouge()\n# print rouge.calc_score([cand_1], ref)" ]
[ [ "numpy.array" ] ]
dnandha/grraspn
[ "0a660d3f73487ea2f8caabf791809de283e8b806", "0a660d3f73487ea2f8caabf791809de283e8b806" ]
[ "detectron2/modeling/proposal_generator/rrpn.py", "detectron2/evaluation/evaluator.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport logging\nfrom typing import Dict\nimport torch\n\nfrom detectron2.layers import ShapeSpec\n\nfrom ..box_regression import Box2BoxTransformRotated\nfrom .build import PROPOSAL_GENERATOR_REGISTRY\nfrom .rpn import RPN\nfrom .rrpn_outputs import RRPNOutputs, find_top_rrpn_proposals\n\nlogger = logging.getLogger(__name__)\n\n\n@PROPOSAL_GENERATOR_REGISTRY.register()\nclass RRPN(RPN):\n \"\"\"\n Rotated RPN subnetwork.\n Please refer to https://arxiv.org/pdf/1703.01086.pdf for the original RRPN paper:\n Ma, J., Shao, W., Ye, H., Wang, L., Wang, H., Zheng, Y., & Xue, X. (2018).\n Arbitrary-oriented scene text detection via rotation proposals.\n IEEE Transactions on Multimedia, 20(11), 3111-3122.\n \"\"\"\n\n def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):\n super().__init__(cfg, input_shape)\n self.box2box_transform = Box2BoxTransformRotated(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS)\n\n def forward(self, images, features, gt_instances=None):\n \"\"\"\n Args:\n images (ImageList): input images of length `N`\n features (dict[str: Tensor]): input data as a mapping from feature\n map name to tensor. Axis 0 represents the number of images `N` in\n the input data; axes 1-3 are channels, height, and width, which may\n vary between feature maps (e.g., if a feature pyramid is used).\n gt_instances (list[Instances], optional): a length `N` list of `Instances`s.\n Each `Instances` stores ground-truth instances for the corresponding image.\n\n Returns:\n proposals: list[Instances] or None\n loss: dict[Tensor]\n \"\"\"\n gt_boxes = [x.gt_boxes for x in gt_instances] if gt_instances is not None else None\n del gt_instances\n features = [features[f] for f in self.in_features]\n pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features)\n anchors = self.anchor_generator(features)\n\n outputs = RRPNOutputs(\n self.box2box_transform,\n self.anchor_matcher,\n self.batch_size_per_image,\n self.positive_fraction,\n images,\n pred_objectness_logits,\n pred_anchor_deltas,\n anchors,\n self.boundary_threshold,\n gt_boxes,\n self.smooth_l1_beta,\n self.lambda_\n )\n\n if self.training:\n losses = outputs.losses()\n else:\n losses = {}\n\n with torch.no_grad():\n # Find the top proposals by applying NMS and removing boxes that\n # are too small. The proposals are treated as fixed for approximate\n # joint training with roi heads. This approach ignores the derivative\n # w.r.t. the proposal boxes’ coordinates that are also network\n # responses, so is approximate.\n proposals = find_top_rrpn_proposals(\n outputs.predict_proposals(),\n outputs.predict_objectness_logits(),\n images,\n self.nms_thresh,\n self.pre_nms_topk[self.training],\n self.post_nms_topk[self.training],\n self.min_box_side_len,\n self.training,\n )\n\n return proposals, losses\n", "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport datetime\nimport logging\nimport time\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\nimport torch\n\nfrom detectron2.utils.comm import is_main_process\nfrom detectron2.utils.logger import log_every_n_seconds\n\n\nclass DatasetEvaluator:\n \"\"\"\n Base class for a dataset evaluator.\n\n The function :func:`inference_on_dataset` runs the model over\n all samples in the dataset, and have a DatasetEvaluator to process the inputs/outputs.\n\n This class will accumulate information of the inputs/outputs (by :meth:`process`),\n and produce evaluation results in the end (by :meth:`evaluate`).\n \"\"\"\n\n def reset(self):\n \"\"\"\n Preparation for a new round of evaluation.\n Should be called before starting a round of evaluation.\n \"\"\"\n pass\n\n def process(self, input, output):\n \"\"\"\n Process an input/output pair.\n\n Args:\n input: the input that's used to call the model.\n output: the return value of `model(input)`\n \"\"\"\n pass\n\n def evaluate(self):\n \"\"\"\n Evaluate/summarize the performance, after processing all input/output pairs.\n\n Returns:\n dict:\n A new evaluator class can return a dict of arbitrary format\n as long as the user can process the results.\n In our train_net.py, we expect the following format:\n\n * key: the name of the task (e.g., bbox)\n * value: a dict of {metric name: score}, e.g.: {\"AP50\": 80}\n \"\"\"\n pass\n\n\nclass DatasetEvaluators(DatasetEvaluator):\n def __init__(self, evaluators):\n assert len(evaluators)\n super().__init__()\n self._evaluators = evaluators\n\n def reset(self):\n for evaluator in self._evaluators:\n evaluator.reset()\n\n def process(self, input, output):\n for evaluator in self._evaluators:\n evaluator.process(input, output)\n\n def evaluate(self):\n results = OrderedDict()\n for evaluator in self._evaluators:\n result = evaluator.evaluate()\n if is_main_process() and result is not None:\n for k, v in result.items():\n assert (\n k not in results\n ), \"Different evaluators produce results with the same key {}\".format(k)\n results[k] = v\n return results\n\n\ndef inference_on_dataset(model, data_loader, evaluator):\n \"\"\"\n Run model on the data_loader and evaluate the metrics with evaluator.\n The model will be used in eval mode.\n\n Args:\n model (nn.Module): a module which accepts an object from\n `data_loader` and returns some outputs. It will be temporarily set to `eval` mode.\n\n If you wish to evaluate a model in `training` mode instead, you can\n wrap the given model and override its behavior of `.eval()` and `.train()`.\n data_loader: an iterable object with a length.\n The elements it generates will be the inputs to the model.\n evaluator (DatasetEvaluator): the evaluator to run. 
Use\n :class:`DatasetEvaluators([])` if you only want to benchmark, but\n don't want to do any evaluation.\n\n Returns:\n The return value of `evaluator.evaluate()`\n \"\"\"\n num_devices = torch.distributed.get_world_size() if torch.distributed.is_initialized() else 1\n logger = logging.getLogger(__name__)\n logger.info(\"Start inference on {} images\".format(len(data_loader)))\n\n total = len(data_loader) # inference data loader must have a fixed length\n evaluator.reset()\n\n num_warmup = min(5, total - 1)\n start_time = time.perf_counter()\n total_compute_time = 0\n with inference_context(model), torch.no_grad():\n for idx, inputs in enumerate(data_loader):\n if idx == num_warmup:\n start_time = time.perf_counter()\n total_compute_time = 0\n\n start_compute_time = time.perf_counter()\n outputs = model(inputs)\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n total_compute_time += time.perf_counter() - start_compute_time\n evaluator.process(inputs, outputs)\n\n if idx >= num_warmup * 2:\n seconds_per_img = total_compute_time / (idx + 1 - num_warmup)\n total_seconds_per_img = (time.perf_counter() - start_time) / (idx + 1 - num_warmup)\n eta = datetime.timedelta(seconds=int(total_seconds_per_img * (total - idx - 1)))\n log_every_n_seconds(\n logging.INFO,\n \"Inference done {}/{}. {:.4f} s / img. ETA={}\".format(\n idx + 1, total, seconds_per_img, str(eta)\n ),\n n=5,\n )\n\n # Measure the time only for this worker (before the synchronization barrier)\n total_time = time.perf_counter() - start_time\n total_time_str = str(datetime.timedelta(seconds=total_time))\n # NOTE this format is parsed by grep\n logger.info(\n \"Total inference time: {} ({:.6f} s / img per device, on {} devices)\".format(\n total_time_str, total_time / (total - num_warmup), num_devices\n )\n )\n total_compute_time_str = str(datetime.timedelta(seconds=int(total_compute_time)))\n logger.info(\n \"Total inference pure compute time: {} ({:.6f} s / img per device, on {} devices)\".format(\n total_compute_time_str, total_compute_time / (total - num_warmup), num_devices\n )\n )\n\n results = evaluator.evaluate()\n # An evaluator may return None when not in main process.\n # Replace it by an empty dict instead to make it easier for downstream code to handle\n if results is None:\n results = {}\n return results\n\n\n@contextmanager\ndef inference_context(model):\n \"\"\"\n A context where the model is temporarily changed to eval mode,\n and restored to previous mode afterwards.\n\n Args:\n model: a torch Module\n \"\"\"\n training_mode = model.training\n model.eval()\n yield\n model.train(training_mode)\n" ]
[ [ "torch.no_grad" ], [ "torch.cuda.synchronize", "torch.distributed.is_initialized", "torch.no_grad", "torch.cuda.is_available", "torch.distributed.get_world_size" ] ]
data-centric-ai/dcbench
[ "831ab2359d686739d0b0c7a589974ce08448e58d" ]
[ "dcbench/common/modeling.py" ]
[ "from abc import abstractmethod\n\nimport PIL\nimport pytorch_lightning as pl\nimport torch\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nfrom torch.hub import load_state_dict_from_url\nfrom torchvision.models import DenseNet as _DenseNet\nfrom torchvision.models import ResNet as _ResNet\nfrom torchvision.models.densenet import _load_state_dict\nfrom torchvision.models.densenet import model_urls as densenet_model_urls\nfrom torchvision.models.resnet import BasicBlock, Bottleneck\nfrom torchvision.models.resnet import model_urls as resnet_model_urls\n\n\nclass Model(pl.LightningModule):\n\n DEFAULT_CONFIG = {}\n\n def __init__(self, config: dict = None):\n super().__init__()\n self.config = self.DEFAULT_CONFIG.copy()\n if config is not None:\n self.config.update(config)\n\n self._set_model()\n\n @abstractmethod\n def _set_model(self):\n raise NotImplementedError()\n\n\nclass ResNet(_ResNet):\n\n ACTIVATION_DIMS = [64, 128, 256, 512]\n ACTIVATION_WIDTH_HEIGHT = [64, 32, 16, 8]\n RESNET_TO_ARCH = {\"resnet18\": [2, 2, 2, 2], \"resnet50\": [3, 4, 6, 3]}\n\n def __init__(\n self,\n num_classes: int,\n arch: str = \"resnet18\",\n dropout: float = 0.0,\n pretrained: bool = True,\n ):\n if arch not in self.RESNET_TO_ARCH:\n raise ValueError(\n f\"config['classifier'] must be one of: {self.RESNET_TO_ARCH.keys()}\"\n )\n\n block = BasicBlock if arch == \"resnet18\" else Bottleneck\n super().__init__(block, self.RESNET_TO_ARCH[arch])\n if pretrained:\n state_dict = load_state_dict_from_url(\n resnet_model_urls[arch], progress=True\n )\n self.load_state_dict(state_dict)\n\n # self.fc = nn.Linear(512 * block.expansion, num_classes)\n self.fc = nn.Sequential(\n nn.Dropout(dropout), nn.Linear(512 * block.expansion, num_classes)\n )\n\n\ndef default_transform(img: PIL.Image.Image):\n return transforms.Compose(\n [\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ]\n )(img)\n\n\ndef default_train_transform(img: PIL.Image.Image):\n return transforms.Compose(\n [\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ]\n )(img)\n\n\nclass DenseNet(_DenseNet):\n\n DENSENET_TO_ARCH = {\n \"densenet121\": {\n \"growth_rate\": 32,\n \"block_config\": (6, 12, 24, 16),\n \"num_init_features\": 64,\n }\n }\n\n def __init__(\n self, num_classes: int, arch: str = \"densenet121\", pretrained: bool = True\n ):\n if arch not in self.DENSENET_TO_ARCH:\n raise ValueError(\n f\"config['classifier'] must be one of: {self.DENSENET_TO_ARCH.keys()}\"\n )\n\n super().__init__(**self.DENSENET_TO_ARCH[arch])\n if pretrained:\n _load_state_dict(self, densenet_model_urls[arch], progress=True)\n\n self.classifier = nn.Linear(self.classifier.in_features, num_classes)\n\n\nclass VisionClassifier(Model):\n\n DEFAULT_CONFIG = {\n \"lr\": 1e-4,\n \"model_name\": \"resnet\",\n \"arch\": \"resnet18\",\n \"pretrained\": True,\n \"num_classes\": 2,\n \"transform\": default_transform,\n \"train_transform\": default_train_transform,\n }\n\n def _set_model(self):\n if self.config[\"model_name\"] == \"resnet\":\n self.model = ResNet(\n num_classes=self.config[\"num_classes\"],\n arch=self.config[\"arch\"],\n pretrained=self.config[\"pretrained\"],\n )\n elif self.config[\"model_name\"] == \"densenet\":\n self.model = DenseNet(\n num_classes=self.config[\"num_classes\"], 
arch=self.config[\"arch\"]\n )\n else:\n raise ValueError(f\"Model name {self.config['model_name']} not supported.\")\n\n def forward(self, x):\n return self.model(x)\n\n def training_step(self, batch, batch_idx):\n inputs, targets, _ = batch[\"input\"], batch[\"target\"], batch[\"id\"]\n outs = self.forward(inputs)\n\n loss = nn.functional.cross_entropy(outs, targets)\n self.log(\"train_loss\", loss, on_step=True, logger=True)\n return loss\n\n def validation_step(self, batch, batch_idx):\n inputs, targets = batch[\"input\"], batch[\"target\"]\n\n outs = self.forward(inputs)\n loss = nn.functional.cross_entropy(outs, targets)\n self.log(\"valid_loss\", loss)\n\n def validation_epoch_end(self, outputs) -> None:\n for metric_name, metric in self.metrics.items():\n self.log(f\"valid_{metric_name}\", metric.compute())\n metric.reset()\n\n def test_epoch_end(self, outputs) -> None:\n return self.validation_epoch_end(outputs)\n\n def test_step(self, batch, batch_idx):\n return self.validation_step(batch, batch_idx)\n\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=self.config[\"lr\"])\n return optimizer\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.functional.cross_entropy", "torch.hub.load_state_dict_from_url" ] ]
johnnycakes79/SpiceyPy
[ "7b63a1555df0adb7926cf5a6cfff14746a9dc4c1" ]
[ "SpiceyPy/support_types.py" ]
[ "# Collection of supporting functions for wrapper functions\n__author__ = 'AndrewAnnex'\nfrom ctypes import c_char_p, c_bool, c_int, c_double, c_char, c_void_p, sizeof, \\\n POINTER, pointer, Array, create_string_buffer, create_unicode_buffer, cast, Structure, \\\n CFUNCTYPE, string_at\n\nimport numpy\nfrom numpy import ctypeslib as numpc\nimport six\n\nerrorformat = \"\"\"\n================================================================================\n\nToolkit version: {tkvsn}\n\n{short} --\n{explain}\n{long}\n\n{traceback}\n\n================================================================================\\\n\"\"\"\n\nclass SpiceyError(Exception):\n \"\"\"\n SpiceyError wraps CSPICE errors.\n :type value: str\n \"\"\"\n def __init__(self, value):\n self.value = value\n\n def __str__(self):\n return self.value\n\n\ndef toDoubleVector(x):\n return DoubleArray.from_param(param=x)\n\n\ndef toDoubleMatrix(x):\n return DoubleMatrix.from_param(param=x)\n\n\ndef toIntVector(x):\n return IntArray.from_param(param=x)\n\n\ndef toBoolVector(x):\n return BoolArray.from_param(param=x)\n\n\ndef toPythonString(inString):\n if six.PY2:\n if isinstance(inString, c_char_p):\n return toPythonString(inString.value)\n return string_at(inString)\n elif six.PY3:\n if isinstance(inString, c_char_p):\n return toPythonString(inString.value)\n return bytes.decode(string_at(inString))\n\n\ndef listtocharvector(x):\n assert (isinstance(x, list))\n return (c_char_p * len(x))(*[stringToCharP(y) for y in x])\n\n\ndef charvector(ndim=1, lenvals=10):\n return ((c_char * lenvals) * ndim)()\n\n\ndef listtodoublematrix(data, x=3, y=3):\n matrix = ((c_double * x) * y)()\n for i, row in enumerate(data):\n matrix[i] = tuple(row)\n return matrix\n\n\ndef emptyCharArray(xLen=None, yLen=None):\n if not yLen:\n yLen = 1\n if not xLen:\n xLen = 1\n if isinstance(xLen, c_int):\n xLen = xLen.value\n if isinstance(yLen, c_int):\n yLen = yLen.value\n return ((c_char * xLen) * yLen)()\n\n\ndef emptyDoubleMatrix(x=3, y=3):\n return ((c_double * x) * y)()\n\n\ndef emptyDoubleVector(n):\n if isinstance(n, c_int):\n n = n.value\n assert(isinstance(n, int))\n return (c_double * n)()\n\n\ndef emptyIntVector(n):\n if isinstance(n, c_int):\n n = n.value\n assert (isinstance(n, int))\n return (c_int * n)()\n\n\ndef vectorToList(x):\n if isinstance(x[0], bool):\n return numpy.fromiter(x, numpy.bool, count=len(x))\n elif isinstance(x[0], int):\n return numpy.fromiter(x, numpy.int_, count=len(x))\n elif isinstance(x[0], float):\n return numpy.fromiter(x, numpy.float64, count=len(x))\n elif isinstance(x[0].value, bytes):\n return [toPythonString(y) for y in x]\n\n\ndef matrixToList(x):\n return numpc.as_array(x)\n\n\ndef stringToCharP(inobject, inlen=None):\n\n \"\"\"\n :param inobject: input string, int for getting null string of length of int\n :param inlen: optional parameter, length of a given string can be specified\n :return:\n \"\"\"\n if inlen and isinstance(inobject, str):\n return create_string_buffer(inobject.encode(encoding='UTF-8'), inlen)\n if isinstance(inobject, bytes):\n return inobject\n if isinstance(inobject, c_int):\n return stringToCharP(\" \" * inobject.value)\n if isinstance(inobject, int):\n return stringToCharP(\" \" * inobject)\n return c_char_p(inobject.encode(encoding='UTF-8'))\n\n\ndef listToCharArray(inList, xLen=None, yLen=None):\n assert (isinstance(inList, list))\n if not yLen:\n yLen = len(inList)\n if not xLen:\n xLen = max(len(s) for s in inList) + 1\n if isinstance(xLen, c_int):\n xLen = xLen.value\n if 
isinstance(yLen, c_int):\n yLen = yLen.value\n return ((c_char * xLen) * yLen)(*[stringToCharP(l, inlen=xLen) for l in inList])\n\n\ndef listToCharArrayPtr(inList, xLen=None, yLen=None):\n assert (isinstance(inList, list))\n if not yLen:\n yLen = len(inList)\n if not xLen:\n xLen = max(len(s) for s in inList) + 1\n if isinstance(xLen, c_int):\n xLen = xLen.value\n if isinstance(yLen, c_int):\n yLen = yLen.value\n return cast(((c_char * xLen) * yLen)(*[stringToCharP(l, inlen=xLen) for l in inList]), c_char_p)\n\n\nclass DoubleArrayType:\n # Class type that will handle all double vectors, inspiration from python cookbook 3rd edition\n def from_param(self, param):\n typename = type(param).__name__\n if hasattr(self, 'from_' + typename):\n return getattr(self, 'from_' + typename)(param)\n elif isinstance(param, Array):\n return param\n else:\n raise TypeError(\"Can't convert %s\" % typename)\n\n # Cast from lists/tuples\n def from_list(self, param):\n val = ((c_double) * len(param))(*param)\n return val\n\n # Cast from Tuple\n def from_tuple(self, param):\n val = ((c_double) * len(param))(*param)\n return val\n\n # Cast from a numpy array,\n def from_ndarray(self, param):\n # return param.data_as(POINTER(c_double))\n # the above older method does not work with functions which take vectors of known size\n return numpy.ctypeslib.as_ctypes(param)\n\n # Cast from array.array objects\n def from_array(self, param):\n if param.typecode != 'd':\n raise TypeError('must be an array of doubles')\n ptr, _ = param.buffer_info()\n return cast(ptr, POINTER(c_double))\n\n\nclass DoubleMatrixType:\n # Class type that will handle all double matricies, inspiration from python cookbook 3rd edition\n def from_param(self, param):\n typename = type(param).__name__\n if hasattr(self, 'from_' + typename):\n return getattr(self, 'from_' + typename)(param)\n elif isinstance(param, Array):\n return param\n else:\n raise TypeError(\"Can't convert %s\" % typename)\n\n # Cast from lists/tuples\n def from_list(self, param):\n val = ((c_double * len(param[0])) * len(param))(*[DoubleArray.from_param(x) for x in param])\n return val\n\n # Cast from Tuple\n def from_tuple(self, param):\n val = ((c_double * len(param[0])) * len(param))(*[DoubleArray.from_param(x) for x in param])\n return val\n\n # Cast from a numpy array\n def from_ndarray(self, param):\n #return param.data_as(POINTER(c_double))\n return numpy.ctypeslib.as_ctypes(param)\n\n # Cast from a numpy matrix\n def from_matrix(self, param):\n #return param.data_as(POINTER(c_double))\n return numpy.ctypeslib.as_ctypes(param)\n\n\nclass IntArrayType:\n # Class type that will handle all int vectors, inspiration from python cookbook 3rd edition\n def from_param(self, param):\n typename = type(param).__name__\n if hasattr(self, 'from_' + typename):\n return getattr(self, 'from_' + typename)(param)\n elif isinstance(param, Array):\n return param\n else:\n raise TypeError(\"Can't convert %s\" % typename)\n\n # Cast from lists/tuples\n def from_list(self, param):\n val = ((c_int) * len(param))(*param)\n return val\n\n # Cast from Tuple\n def from_tuple(self, param):\n val = ((c_int) * len(param))(*param)\n return val\n\n # Cast from a numpy array\n def from_ndarray(self, param):\n #return param.data_as(POINTER(c_int)) # not sure if long is same as int, it should be..\n #return numpy.ctypeslib.as_ctypes(param)\n return self.from_param(param.tolist())\n\n # Cast from array.array objects\n def from_array(self, param):\n if param.typecode != 'i':\n raise TypeError('must be an array 
of ints')\n ptr, _ = param.buffer_info()\n return cast(ptr, POINTER(c_int))\n\n\nclass BoolArrayType:\n # Class type that will handle all int vectors, inspiration from python cookbook 3rd edition\n def from_param(self, param):\n typename = type(param).__name__\n if hasattr(self, 'from_' + typename):\n return getattr(self, 'from_' + typename)(param)\n elif isinstance(param, Array):\n return param\n else:\n raise TypeError(\"Can't convert %s\" % typename)\n\n # Cast from lists/tuples\n def from_list(self, param):\n val = ((c_bool) * len(param))(*param)\n return val\n\n # Cast from Tuple\n def from_tuple(self, param):\n val = ((c_bool) * len(param))(*param)\n return val\n\n # Cast from a numpy array\n def from_ndarray(self, param):\n #return param.data_as(POINTER(c_int)) # not sure if long is same as int, it should be..\n #return numpy.ctypeslib.as_ctypes(param)\n return self.from_param(param.tolist())\n\n\nDoubleArray = DoubleArrayType()\n\nIntArray = IntArrayType()\n\nBoolArray = BoolArrayType()\n\nDoubleMatrix = DoubleMatrixType()\n\n\nclass Plane(Structure):\n _fields_ = [\n ('_normal', c_double * 3),\n ('_constant', c_double)\n ]\n\n @property\n def normal(self):\n return vectorToList(self._normal)\n\n @property\n def constant(self):\n return self._constant\n\n def __str__(self):\n return '<Plane: normal=%s; constant=%s>' % (', '.join([str(x) for x in self._normal]), self._constant)\n\n\nclass Ellipse(Structure):\n _fields_ = [\n ('_center', c_double * 3),\n ('_semi_major', c_double * 3),\n ('_semi_minor', c_double * 3)\n ]\n\n @property\n def center(self):\n return vectorToList(self._center)\n\n @property\n def semi_major(self):\n return vectorToList(self._semi_major)\n\n @property\n def semi_minor(self):\n return vectorToList(self._semi_minor)\n\n def __str__(self):\n return '<SpiceEllipse: center = %s, semi_major = %s, semi_minor = %s>' % \\\n (self.center, self.semi_major, self.semi_minor)\n\n\nclass DataType(object):\n SPICE_CHR = 0\n SPICE_DP = 1\n SPICE_INT = 2\n SPICE_TIME = 3\n SPICE_BOOL = 4\n CHR = 0\n DP = 1\n INT = 2\n TIME = 3\n BOOL = 4\n\n def __init__(self):\n pass\n\n\nclass SpiceEKDataType(c_int):\n _fields_ = [\n ('SPICE_CHR', c_int(0)),\n ('SPICE_DP', c_int(1)),\n ('SPICE_INT', c_int(2)),\n ('SPICE_TIME', c_int(3)),\n ('SPICE_BOOL', c_int(4)),\n ]\n\n\nclass SpiceEKExprClass(c_int):\n _fields_ = [\n ('SPICE_EK_EXP_COL', c_int(0)),\n ('SPICE_EK_EXP_FUNC', c_int(1)),\n ('SPICE_EK_EXP_EXPR', c_int(2))\n ]\n\n\nclass SpiceEKAttDsc(Structure):\n _fields_ = [\n ('_cclass', c_int),\n ('_dtype', SpiceEKDataType),\n ('_strlen', c_int),\n ('_size', c_int),\n ('_indexd', c_bool),\n ('_nullok', c_bool)\n ]\n\n @property\n def cclass(self):\n return self._cclass\n\n @property\n def dtype(self):\n return self._dtype.value\n\n @property\n def strlen(self):\n return self._strlen\n\n @property\n def size(self):\n return self._size\n\n @property\n def indexd(self):\n return self._indexd\n\n @property\n def nullok(self):\n return self._nullok\n\n def __str__(self):\n return '<SpiceEKAttDsc cclass = %s, dtype = %s, strlen = %s, size = %s, indexd = %s, nullok = %s >' % \\\n (self.cclass, self.dtype, self.strlen, self.size, self.indexd, self.nullok)\n\n\nclass SpiceEKSegSum(Structure):\n _fields_ = [\n ('_tabnam', c_char * 65),\n ('_nrows', c_int),\n ('_ncols', c_int),\n ('_cnames', (c_char * 100) * 33),\n ('_cdescrs', SpiceEKAttDsc * 100)\n ]\n\n @property\n def tabnam(self):\n return toPythonString(self._tabnam)\n\n @property\n def nrows(self):\n return self._nrows\n\n @property\n def 
ncols(self):\n return self._ncols\n\n @property\n def cnames(self):\n return vectorToList(self._cnames)[0:self.ncols]\n\n @property\n def cdescrs(self):\n return self._cdescrs[0:self.ncols]\n\n def __str__(self):\n return '<SpiceEKSegSum tabnam = %s, nrows = %s, ncols = %s, cnames = %s, cdescrs = %s >' % (self.tabnam, self.nrows, self.ncols, self.cnames, self.cdescrs)\n\n\n#SpiceCell implementation below is inpart from github.com/DaRasch/spiceminer/\n# and modified as needed for this author, maybe we should work together?\n\n### helper classes/functions ###\nBITSIZE = {'char': sizeof(c_char), 'int': sizeof(c_int), 'double': sizeof(c_double)}\n\n\ndef _char_getter(data_p, index, length):\n return toPythonString((c_char * length).from_address(data_p + index * length * BITSIZE['char']))\n\n\ndef _double_getter(data_p, index, length):\n return c_double.from_address(data_p + index * BITSIZE['double']).value\n\n\ndef _int_getter(data_p, index, length):\n return c_int.from_address(data_p + index * BITSIZE['int']).value\n\n\ndef SPICEDOUBLE_CELL(size):\n return SpiceCell.double(size)\n\n\ndef SPICEINT_CELL(size):\n return SpiceCell.integer(size)\n\n\ndef SPICECHAR_CELL(size, length):\n return SpiceCell.character(size, length)\n\n\nclass SpiceCell(Structure):\n #Most written by DaRasch\n DATATYPES_ENUM = {'char': 0, 'double': 1, 'int': 2, 'time': 3, 'bool': 4}\n DATATYPES_GET = [_char_getter, _double_getter] + [_int_getter] * 3\n baseSize = 6\n minCharLen = 6\n CTRLBLOCK = 6\n _fields_ = [\n ('dtype', c_int),\n ('length', c_int),\n ('size', c_int),\n ('card', c_int),\n ('isSet', c_int),\n ('adjust', c_int),\n ('init', c_int),\n ('base', c_void_p),\n ('data', c_void_p)\n ]\n\n def __init__(self, dtype=None, length=None, size=None, card=None, isSet=None, base=None, data=None):\n super(SpiceCell, self).__init__()\n self.dtype = dtype\n self.length = length\n self.size = size\n self.card = card\n self.isSet = isSet\n self.adjust = 0 # Always False, because not implemented\n self.init = 0 # Always False, because this is the constructor\n self.base = base # void pointer\n self.data = data\n\n def __str__(self):\n return '<SpiceCell dtype = %s, length = %s, size = %s, card = %s, isSet = %s, adjust = %s, init = %s, base = %s, data = %s>' % (self.dtype, self.length, self.size, self.card, self.isSet, self.adjust, self.init, self.base, self.data)\n\n def is_int(self):\n return self.dtype == 2\n\n def is_double(self):\n return self.dtype == 1\n\n def is_char(self):\n return self.dtype == 0\n\n def is_time(self):\n return self.dtype == 3\n\n def is_bool(self):\n return self.dtype == 4\n\n def is_set(self):\n return self.isSet == 1\n\n @classmethod\n def character(cls, size, length):\n base = (c_char * ((cls.CTRLBLOCK + size) * length))()\n data = (c_char * (size * length)).from_buffer(\n base, cls.CTRLBLOCK * BITSIZE['char'] * length)\n instance = cls(cls.DATATYPES_ENUM['char'], length, size, 0, 1,\n cast(base, c_void_p),\n cast(data, c_void_p))\n return instance\n\n @classmethod\n def integer(cls, size):\n base = (c_int * (cls.CTRLBLOCK + size))()\n data = (c_int * size).from_buffer(\n base, cls.CTRLBLOCK * BITSIZE['int'])\n instance = cls(cls.DATATYPES_ENUM['int'], 0, size, 0, 1,\n cast(base, c_void_p),\n cast(data, c_void_p))\n return instance\n\n @classmethod\n def double(cls, size):\n base = (c_double * (cls.CTRLBLOCK + size))()\n data = (c_double * size).from_buffer(\n base, cls.CTRLBLOCK * BITSIZE['double'])\n instance = cls(cls.DATATYPES_ENUM['double'], 0, size, 0, 1,\n cast(base, c_void_p),\n 
cast(data, c_void_p))\n return instance\n\n def __len__(self):\n return self.card\n\n def __iter__(self):\n getter = SpiceCell.DATATYPES_GET[self.dtype]\n length, card, data = self.length, self.card, self.data\n for i in range(card):\n yield (getter(data, i, length))\n\n def __contains__(self, key):\n return key in self.__iter__()\n\n def __getitem__(self, key):\n getter = SpiceCell.DATATYPES_GET[self.dtype]\n length, card, data = self.length, self.card, self.data\n if isinstance(key, slice):\n start, stop, step = key.start or 0, key.stop or -1, key.step or 1\n #TODO Typechecking\n if card == 0:\n return []\n else:\n return list(getter(data, i, length)\n for i in range(start % card, stop % card + 1, step))\n if key in range(-card, card):\n return getter(data, key, length)\n elif not isinstance(key, int):\n msg = 'SpiceCell inices must be integers, not {}'.format(type(key))\n raise TypeError(msg)\n else:\n raise IndexError('SpiceCell index out of range')\n\n def reset(self):\n self.card = 0\n self.init = 0" ]
[ [ "numpy.ctypeslib.as_array", "numpy.ctypeslib.as_ctypes" ] ]
matthiaskoenig/memote
[ "7c14cd304523dda83eaf4835ee007243e8673f85" ]
[ "memote/experimental/growth.py" ]
[ "# -*- coding: utf-8 -*-\n\n# Copyright 2018 Novo Nordisk Foundation Center for Biosustainability,\n# Technical University of Denmark.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Provide an interface for growth experiments.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport logging\n\nfrom pandas import DataFrame\n\nfrom memote.experimental.experiment import Experiment\n\n__all__ = (\"GrowthExperiment\",)\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass GrowthExperiment(Experiment):\n \"\"\"Represent a growth experiment.\"\"\"\n\n SCHEMA = \"growth.json\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize a growth experiment.\n\n Parameters\n ----------\n kwargs\n\n \"\"\"\n super(GrowthExperiment, self).__init__(**kwargs)\n\n def load(self, dtype_conversion=None):\n \"\"\"\n Load the data table and corresponding validation schema.\n\n Parameters\n ----------\n dtype_conversion : dict\n Column names as keys and corresponding type for loading the data.\n Please take a look at the `pandas documentation\n <https://pandas.pydata.org/pandas-docs/stable/io.html#specifying-column-data-types>`__\n for detailed explanations.\n\n \"\"\"\n if dtype_conversion is None:\n dtype_conversion = {\"growth\": str}\n super(GrowthExperiment, self).load(dtype_conversion=dtype_conversion)\n self.data[\"growth\"] = self.data[\"growth\"].isin(self.TRUTHY)\n\n def evaluate(self, model, threshold=0.1):\n \"\"\"Evaluate in silico growth rates.\"\"\"\n with model:\n if self.medium is not None:\n self.medium.apply(model)\n if self.objective is not None:\n model.objective = self.objective\n model.add_cons_vars(self.constraints)\n threshold *= model.slim_optimize()\n growth = list()\n for row in self.data.itertuples(index=False):\n with model:\n exchange = model.reactions.get_by_id(row.exchange)\n if bool(exchange.reactants):\n exchange.lower_bound = -row.uptake\n else:\n exchange.upper_bound = row.uptake\n growth.append(model.slim_optimize() >= threshold)\n return DataFrame({\n \"exchange\": self.data[\"exchange\"],\n \"growth\": growth\n })\n" ]
[ [ "pandas.DataFrame" ] ]
DaoDaoer/PaddleSeg
[ "7fe2e41de0f192494b8f2088ee500bb55d17708e", "7fe2e41de0f192494b8f2088ee500bb55d17708e", "7fe2e41de0f192494b8f2088ee500bb55d17708e" ]
[ "contrib/DomainAdaptation/train.py", "paddleseg/transforms/transforms.py", "contrib/DomainAdaptation/utils/utils.py" ]
[ "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport random\nimport argparse\nimport numpy as np\nimport paddle\n\nfrom paddleseg.utils import logger, get_sys_env\n\nimport utils\nfrom cvlibs import Config\nfrom script.train import Trainer\nfrom datasets import CityDataset, GTA5Dataset, SYNTHIADataset\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Model training')\n\n # params of training\n parser.add_argument(\n \"--config\", dest=\"cfg\", help=\"The config file.\", default=None, type=str)\n parser.add_argument(\n '--iters',\n dest='iters',\n help='iters for training',\n type=int,\n default=None)\n parser.add_argument(\n '--batch_size',\n dest='batch_size',\n help='Mini batch size of one gpu or cpu',\n type=int,\n default=None)\n parser.add_argument(\n '--learning_rate',\n dest='learning_rate',\n help='Learning rate',\n type=float,\n default=None)\n parser.add_argument(\n '--save_interval',\n dest='save_interval',\n help='How many iters to save a model snapshot once during training.',\n type=int,\n default=1000)\n parser.add_argument(\n '--resume_model',\n dest='resume_model',\n help='The path of resume model',\n type=str,\n default=None)\n parser.add_argument(\n '--save_dir',\n dest='save_dir',\n help='The directory for saving the model snapshot',\n type=str,\n default='./output')\n parser.add_argument(\n '--keep_checkpoint_max',\n dest='keep_checkpoint_max',\n help='Maximum number of checkpoints to save',\n type=int,\n default=5)\n parser.add_argument(\n '--num_workers',\n dest='num_workers',\n help='Num workers for data loader',\n type=int,\n default=0)\n parser.add_argument(\n '--do_eval',\n dest='do_eval',\n help='Eval while training',\n action='store_true')\n parser.add_argument(\n '--log_iters',\n dest='log_iters',\n help='Display logging information at every log_iters',\n default=10,\n type=int)\n parser.add_argument(\n '--use_vdl',\n dest='use_vdl',\n help='Whether to record the data to VisualDL during training',\n action='store_true')\n parser.add_argument(\n '--seed',\n dest='seed',\n help='Set the random seed during training.',\n default=42,\n type=int)\n parser.add_argument(\n '--fp16', dest='fp16', help='Whther to use amp', action='store_true')\n parser.add_argument(\n '--data_format',\n dest='data_format',\n help=\n 'Data format that specifies the layout of input. It can be \"NCHW\" or \"NHWC\". 
Default: \"NCHW\".',\n type=str,\n default='NCHW')\n\n return parser.parse_args()\n\n\ndef main(args):\n if args.seed is not None:\n paddle.seed(args.seed)\n np.random.seed(args.seed)\n random.seed(args.seed)\n logger.info('Set seed to {}'.format(args.seed))\n\n env_info = get_sys_env()\n info = ['{}: {}'.format(k, v) for k, v in env_info.items()]\n info = '\\n'.join(['', format('Environment Information', '-^48s')] + info +\n ['-' * 48])\n logger.info(info)\n\n place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[\n 'GPUs used'] else 'cpu'\n\n paddle.set_device(place)\n\n if not args.cfg:\n raise RuntimeError('No configuration file specified.')\n\n cfg = Config(\n args.cfg,\n learning_rate=args.learning_rate,\n iters=args.iters,\n batch_size=args.batch_size)\n\n if cfg.dic[\"data\"][\"source\"][\"dataset\"] == 'synthia':\n train_dataset_src = SYNTHIADataset(\n split='train', **cfg.dic[\"data\"][\"source\"][\"kwargs\"])\n val_dataset_src = SYNTHIADataset(\n split='val', **cfg.dic[\"data\"][\"source\"][\"kwargs\"])\n elif cfg.dic[\"data\"][\"source\"][\"dataset\"] == 'gta5':\n train_dataset_src = GTA5Dataset(\n split='train', **cfg.dic[\"data\"][\"source\"][\"kwargs\"])\n val_dataset_src = GTA5Dataset(\n split='val', **cfg.dic[\"data\"][\"source\"][\"kwargs\"])\n else:\n raise NotImplementedError()\n if cfg.dic[\"data\"][\"target\"][\"dataset\"] == 'cityscapes':\n train_dataset_tgt = CityDataset(\n split='train', **cfg.dic[\"data\"][\"target\"][\"kwargs\"])\n val_dataset_tgt = CityDataset(\n split='val', **cfg.dic[\"data\"][\"target\"][\"kwargs\"])\n else:\n raise NotImplementedError()\n\n val_dataset_tgt = val_dataset_tgt if args.do_eval else None\n val_dataset_src = val_dataset_src if args.do_eval else None\n\n if train_dataset_src is None:\n raise RuntimeError(\n 'The training dataset is not specified in the configuration file.')\n elif len(train_dataset_src) == 0:\n raise ValueError(\n 'The length of train_dataset is 0. Please check if your dataset is valid'\n )\n\n msg = '\\n---------------Config Information---------------\\n'\n msg += str(cfg)\n msg += '------------------------------------------------'\n logger.info(msg)\n\n trainer = Trainer(model=cfg.model, cfg=cfg.dic)\n trainer.train(\n train_dataset_src,\n train_dataset_tgt,\n val_dataset_tgt=val_dataset_tgt,\n val_dataset_src=val_dataset_src,\n optimizer=cfg.optimizer,\n save_dir=args.save_dir,\n iters=cfg.iters,\n batch_size=cfg.batch_size,\n resume_model=args.resume_model,\n save_interval=args.save_interval,\n log_iters=args.log_iters,\n num_workers=args.num_workers,\n use_vdl=args.use_vdl,\n keep_checkpoint_max=args.keep_checkpoint_max,\n test_config=cfg.test_config)\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n", "# Copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport random\nimport math\n\nimport cv2\nimport numpy as np\nfrom PIL import Image\n\nfrom paddleseg.cvlibs import manager\nfrom paddleseg.transforms import functional\n\n\[email protected]_component\nclass Compose:\n \"\"\"\n Do transformation on input data with corresponding pre-processing and augmentation operations.\n The shape of input data to all operations is [height, width, channels].\n\n Args:\n transforms (list): A list contains data pre-processing or augmentation. Empty list means only reading images, no transformation.\n to_rgb (bool, optional): If converting image to RGB color space. Default: True.\n\n Raises:\n TypeError: When 'transforms' is not a list.\n ValueError: when the length of 'transforms' is less than 1.\n \"\"\"\n\n def __init__(self, transforms, to_rgb=True):\n if not isinstance(transforms, list):\n raise TypeError('The transforms must be a list!')\n self.transforms = transforms\n self.to_rgb = to_rgb\n\n def __call__(self, im, label=None):\n \"\"\"\n Args:\n im (str|np.ndarray): It is either image path or image object.\n label (str|np.ndarray): It is either label path or label ndarray.\n\n Returns:\n (tuple). A tuple including image, image info, and label after transformation.\n \"\"\"\n if isinstance(im, str):\n im = cv2.imread(im).astype('float32')\n if isinstance(label, str):\n label = np.asarray(Image.open(label))\n if im is None:\n raise ValueError('Can\\'t read The image file {}!'.format(im))\n if self.to_rgb:\n im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n\n for op in self.transforms:\n outputs = op(im, label)\n im = outputs[0]\n if len(outputs) == 2:\n label = outputs[1]\n im = np.transpose(im, (2, 0, 1))\n return (im, label)\n\n\[email protected]_component\nclass RandomHorizontalFlip:\n \"\"\"\n Flip an image horizontally with a certain probability.\n\n Args:\n prob (float, optional): A probability of horizontally flipping. Default: 0.5.\n \"\"\"\n\n def __init__(self, prob=0.5):\n self.prob = prob\n\n def __call__(self, im, label=None):\n if random.random() < self.prob:\n im = functional.horizontal_flip(im)\n if label is not None:\n label = functional.horizontal_flip(label)\n if label is None:\n return (im, )\n else:\n return (im, label)\n\n\[email protected]_component\nclass RandomVerticalFlip:\n \"\"\"\n Flip an image vertically with a certain probability.\n\n Args:\n prob (float, optional): A probability of vertical flipping. Default: 0.1.\n \"\"\"\n\n def __init__(self, prob=0.1):\n self.prob = prob\n\n def __call__(self, im, label=None):\n if random.random() < self.prob:\n im = functional.vertical_flip(im)\n if label is not None:\n label = functional.vertical_flip(label)\n if label is None:\n return (im, )\n else:\n return (im, label)\n\n\[email protected]_component\nclass Resize:\n \"\"\"\n Resize an image.\n\n Args:\n target_size (list|tuple, optional): The target size of image. 
Default: (512, 512).\n interp (str, optional): The interpolation mode of resize is consistent with opencv.\n ['NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4', 'RANDOM']. Note that when it is\n 'RANDOM', a random interpolation mode would be specified. Default: \"LINEAR\".\n\n Raises:\n TypeError: When 'target_size' type is neither list nor tuple.\n ValueError: When \"interp\" is out of pre-defined methods ('NEAREST', 'LINEAR', 'CUBIC',\n 'AREA', 'LANCZOS4', 'RANDOM').\n \"\"\"\n\n # The interpolation mode\n interp_dict = {\n 'NEAREST': cv2.INTER_NEAREST,\n 'LINEAR': cv2.INTER_LINEAR,\n 'CUBIC': cv2.INTER_CUBIC,\n 'AREA': cv2.INTER_AREA,\n 'LANCZOS4': cv2.INTER_LANCZOS4\n }\n\n def __init__(self, target_size=(512, 512), interp='LINEAR'):\n self.interp = interp\n if not (interp == \"RANDOM\" or interp in self.interp_dict):\n raise ValueError(\"`interp` should be one of {}\".format(\n self.interp_dict.keys()))\n if isinstance(target_size, list) or isinstance(target_size, tuple):\n if len(target_size) != 2:\n raise ValueError(\n '`target_size` should include 2 elements, but it is {}'.\n format(target_size))\n else:\n raise TypeError(\n \"Type of `target_size` is invalid. It should be list or tuple, but it is {}\"\n .format(type(target_size)))\n\n self.target_size = target_size\n\n def __call__(self, im, label=None):\n \"\"\"\n Args:\n im (np.ndarray): The Image data.\n label (np.ndarray, optional): The label data. Default: None.\n\n Returns:\n (tuple). When label is None, it returns (im, ), otherwise it returns (im, label),\n\n Raises:\n TypeError: When the 'img' type is not numpy.\n ValueError: When the length of \"im\" shape is not 3.\n \"\"\"\n\n if not isinstance(im, np.ndarray):\n raise TypeError(\"Resize: image type is not numpy.\")\n if len(im.shape) != 3:\n raise ValueError('Resize: image is not 3-dimensional.')\n if self.interp == \"RANDOM\":\n interp = random.choice(list(self.interp_dict.keys()))\n else:\n interp = self.interp\n im = functional.resize(im, self.target_size, self.interp_dict[interp])\n if label is not None:\n label = functional.resize(label, self.target_size,\n cv2.INTER_NEAREST)\n\n if label is None:\n return (im, )\n else:\n return (im, label)\n\n\[email protected]_component\nclass ResizeByLong:\n \"\"\"\n Resize the long side of an image to given size, and then scale the other side proportionally.\n\n Args:\n long_size (int): The target size of long side.\n \"\"\"\n\n def __init__(self, long_size):\n self.long_size = long_size\n\n def __call__(self, im, label=None):\n \"\"\"\n Args:\n im (np.ndarray): The Image data.\n label (np.ndarray, optional): The label data. Default: None.\n\n Returns:\n (tuple). When label is None, it returns (im, ), otherwise it returns (im, label).\n \"\"\"\n\n im = functional.resize_long(im, self.long_size)\n if label is not None:\n label = functional.resize_long(label, self.long_size,\n cv2.INTER_NEAREST)\n\n if label is None:\n return (im, )\n else:\n return (im, label)\n\n\[email protected]_component\nclass ResizeByShort:\n \"\"\"\n Resize the short side of an image to given size, and then scale the other side proportionally.\n\n Args:\n short_size (int): The target size of short side.\n \"\"\"\n\n def __init__(self, short_size):\n self.short_size = short_size\n\n def __call__(self, im, label=None):\n \"\"\"\n Args:\n im (np.ndarray): The Image data.\n label (np.ndarray, optional): The label data. Default: None.\n\n Returns:\n (tuple). 
When label is None, it returns (im, ), otherwise it returns (im, label).\n \"\"\"\n\n im = functional.resize_short(im, self.short_size)\n if label is not None:\n label = functional.resize_short(label, self.short_size,\n cv2.INTER_NEAREST)\n\n if label is None:\n return (im, )\n else:\n return (im, label)\n\n\[email protected]_component\nclass LimitLong:\n \"\"\"\n Limit the long edge of image.\n\n If the long edge is larger than max_long, resize the long edge\n to max_long, while scale the short edge proportionally.\n\n If the long edge is smaller than min_long, resize the long edge\n to min_long, while scale the short edge proportionally.\n\n Args:\n max_long (int, optional): If the long edge of image is larger than max_long,\n it will be resize to max_long. Default: None.\n min_long (int, optional): If the long edge of image is smaller than min_long,\n it will be resize to min_long. Default: None.\n \"\"\"\n\n def __init__(self, max_long=None, min_long=None):\n if max_long is not None:\n if not isinstance(max_long, int):\n raise TypeError(\n \"Type of `max_long` is invalid. It should be int, but it is {}\"\n .format(type(max_long)))\n if min_long is not None:\n if not isinstance(min_long, int):\n raise TypeError(\n \"Type of `min_long` is invalid. It should be int, but it is {}\"\n .format(type(min_long)))\n if (max_long is not None) and (min_long is not None):\n if min_long > max_long:\n raise ValueError(\n '`max_long should not smaller than min_long, but they are {} and {}'\n .format(max_long, min_long))\n self.max_long = max_long\n self.min_long = min_long\n\n def __call__(self, im, label=None):\n \"\"\"\n Args:\n im (np.ndarray): The Image data.\n label (np.ndarray, optional): The label data. Default: None.\n\n Returns:\n (tuple). When label is None, it returns (im, ), otherwise it returns (im, label).\n \"\"\"\n h, w = im.shape[0], im.shape[1]\n long_edge = max(h, w)\n target = long_edge\n if (self.max_long is not None) and (long_edge > self.max_long):\n target = self.max_long\n elif (self.min_long is not None) and (long_edge < self.min_long):\n target = self.min_long\n\n if target != long_edge:\n im = functional.resize_long(im, target)\n if label is not None:\n label = functional.resize_long(label, target, cv2.INTER_NEAREST)\n\n if label is None:\n return (im, )\n else:\n return (im, label)\n\n\[email protected]_component\nclass ResizeRangeScaling:\n \"\"\"\n Resize the long side of an image into a range, and then scale the other side proportionally.\n\n Args:\n min_value (int, optional): The minimum value of long side after resize. Default: 400.\n max_value (int, optional): The maximum value of long side after resize. Default: 600.\n \"\"\"\n\n def __init__(self, min_value=400, max_value=600):\n if min_value > max_value:\n raise ValueError('min_value must be less than max_value, '\n 'but they are {} and {}.'.format(\n min_value, max_value))\n self.min_value = min_value\n self.max_value = max_value\n\n def __call__(self, im, label=None):\n \"\"\"\n Args:\n im (np.ndarray): The Image data.\n label (np.ndarray, optional): The label data. Default: None.\n\n Returns:\n (tuple). 
When label is None, it returns (im, ), otherwise it returns (im, label).\n \"\"\"\n\n if self.min_value == self.max_value:\n random_size = self.max_value\n else:\n random_size = int(\n np.random.uniform(self.min_value, self.max_value) + 0.5)\n im = functional.resize_long(im, random_size, cv2.INTER_LINEAR)\n if label is not None:\n label = functional.resize_long(label, random_size,\n cv2.INTER_NEAREST)\n\n if label is None:\n return (im, )\n else:\n return (im, label)\n\n\[email protected]_component\nclass ResizeStepScaling:\n \"\"\"\n Scale an image proportionally within a range.\n\n Args:\n min_scale_factor (float, optional): The minimum scale. Default: 0.75.\n max_scale_factor (float, optional): The maximum scale. Default: 1.25.\n scale_step_size (float, optional): The scale interval. Default: 0.25.\n\n Raises:\n ValueError: When min_scale_factor is smaller than max_scale_factor.\n \"\"\"\n\n def __init__(self,\n min_scale_factor=0.75,\n max_scale_factor=1.25,\n scale_step_size=0.25):\n if min_scale_factor > max_scale_factor:\n raise ValueError(\n 'min_scale_factor must be less than max_scale_factor, '\n 'but they are {} and {}.'.format(min_scale_factor,\n max_scale_factor))\n self.min_scale_factor = min_scale_factor\n self.max_scale_factor = max_scale_factor\n self.scale_step_size = scale_step_size\n\n def __call__(self, im, label=None):\n \"\"\"\n Args:\n im (np.ndarray): The Image data.\n label (np.ndarray, optional): The label data. Default: None.\n\n Returns:\n (tuple). When label is None, it returns (im, ), otherwise it returns (im, label).\n \"\"\"\n\n if self.min_scale_factor == self.max_scale_factor:\n scale_factor = self.min_scale_factor\n\n elif self.scale_step_size == 0:\n scale_factor = np.random.uniform(self.min_scale_factor,\n self.max_scale_factor)\n\n else:\n num_steps = int((self.max_scale_factor - self.min_scale_factor) /\n self.scale_step_size + 1)\n scale_factors = np.linspace(self.min_scale_factor,\n self.max_scale_factor,\n num_steps).tolist()\n np.random.shuffle(scale_factors)\n scale_factor = scale_factors[0]\n w = int(round(scale_factor * im.shape[1]))\n h = int(round(scale_factor * im.shape[0]))\n\n im = functional.resize(im, (w, h), cv2.INTER_LINEAR)\n if label is not None:\n label = functional.resize(label, (w, h), cv2.INTER_NEAREST)\n\n if label is None:\n return (im, )\n else:\n return (im, label)\n\n\[email protected]_component\nclass Normalize:\n \"\"\"\n Normalize an image.\n\n Args:\n mean (list, optional): The mean value of a data set. Default: [0.5, 0.5, 0.5].\n std (list, optional): The standard deviation of a data set. Default: [0.5, 0.5, 0.5].\n\n Raises:\n ValueError: When mean/std is not list or any value in std is 0.\n \"\"\"\n\n def __init__(self, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)):\n self.mean = mean\n self.std = std\n if not (isinstance(self.mean, (list, tuple))\n and isinstance(self.std, (list, tuple))):\n raise ValueError(\n \"{}: input type is invalid. It should be list or tuple\".format(\n self))\n from functools import reduce\n if reduce(lambda x, y: x * y, self.std) == 0:\n raise ValueError('{}: std is invalid!'.format(self))\n\n def __call__(self, im, label=None):\n \"\"\"\n Args:\n im (np.ndarray): The Image data.\n label (np.ndarray, optional): The label data. Default: None.\n\n Returns:\n (tuple). 
When label is None, it returns (im, ), otherwise it returns (im, label).\n \"\"\"\n\n mean = np.array(self.mean)[np.newaxis, np.newaxis, :]\n std = np.array(self.std)[np.newaxis, np.newaxis, :]\n im = functional.normalize(im, mean, std)\n\n if label is None:\n return (im, )\n else:\n return (im, label)\n\n\[email protected]_component\nclass Padding:\n \"\"\"\n Add bottom-right padding to a raw image or annotation image.\n\n Args:\n target_size (list|tuple): The target size after padding.\n im_padding_value (list, optional): The padding value of raw image.\n Default: [127.5, 127.5, 127.5].\n label_padding_value (int, optional): The padding value of annotation image. Default: 255.\n\n Raises:\n TypeError: When target_size is neither list nor tuple.\n ValueError: When the length of target_size is not 2.\n \"\"\"\n\n def __init__(self,\n target_size,\n im_padding_value=(127.5, 127.5, 127.5),\n label_padding_value=255):\n if isinstance(target_size, list) or isinstance(target_size, tuple):\n if len(target_size) != 2:\n raise ValueError(\n '`target_size` should include 2 elements, but it is {}'.\n format(target_size))\n else:\n raise TypeError(\n \"Type of target_size is invalid. It should be list or tuple, now is {}\"\n .format(type(target_size)))\n self.target_size = target_size\n self.im_padding_value = im_padding_value\n self.label_padding_value = label_padding_value\n\n def __call__(self, im, label=None):\n \"\"\"\n Args:\n im (np.ndarray): The Image data.\n label (np.ndarray, optional): The label data. Default: None.\n\n Returns:\n (tuple). When label is None, it returns (im, ), otherwise it returns (im, label).\n \"\"\"\n\n im_height, im_width = im.shape[0], im.shape[1]\n if isinstance(self.target_size, int):\n target_height = self.target_size\n target_width = self.target_size\n else:\n target_height = self.target_size[1]\n target_width = self.target_size[0]\n pad_height = target_height - im_height\n pad_width = target_width - im_width\n if pad_height < 0 or pad_width < 0:\n raise ValueError(\n 'The size of image should be less than `target_size`, but the size of image ({}, {}) is larger than `target_size` ({}, {})'\n .format(im_width, im_height, target_width, target_height))\n else:\n im = cv2.copyMakeBorder(\n im,\n 0,\n pad_height,\n 0,\n pad_width,\n cv2.BORDER_CONSTANT,\n value=self.im_padding_value)\n if label is not None:\n label = cv2.copyMakeBorder(\n label,\n 0,\n pad_height,\n 0,\n pad_width,\n cv2.BORDER_CONSTANT,\n value=self.label_padding_value)\n if label is None:\n return (im, )\n else:\n return (im, label)\n\n\[email protected]_component\nclass PaddingByAspectRatio:\n \"\"\"\n\n Args:\n aspect_ratio (int|float, optional): The aspect ratio = width / height. Default: 1.\n \"\"\"\n\n def __init__(self,\n aspect_ratio=1,\n im_padding_value=(127.5, 127.5, 127.5),\n label_padding_value=255):\n self.aspect_ratio = aspect_ratio\n self.im_padding_value = im_padding_value\n self.label_padding_value = label_padding_value\n\n def __call__(self, im, label=None):\n \"\"\"\n Args:\n im (np.ndarray): The Image data.\n label (np.ndarray, optional): The label data. Default: None.\n\n Returns:\n (tuple). 
When label is None, it returns (im, ), otherwise it returns (im, label).\n \"\"\"\n\n img_height = im.shape[0]\n img_width = im.shape[1]\n ratio = img_width / img_height\n if ratio == self.aspect_ratio:\n if label is None:\n return (im, )\n else:\n return (im, label)\n elif ratio > self.aspect_ratio:\n img_height = int(img_width / self.aspect_ratio)\n else:\n img_width = int(img_height * self.aspect_ratio)\n padding = Padding((img_width, img_height),\n im_padding_value=self.im_padding_value,\n label_padding_value=self.label_padding_value)\n return padding(im, label)\n\n\[email protected]_component\nclass RandomPaddingCrop:\n \"\"\"\n Crop a sub-image from a raw image and annotation image randomly. If the target cropping size\n is larger than original image, then the bottom-right padding will be added.\n\n Args:\n crop_size (tuple, optional): The target cropping size. Default: (512, 512).\n im_padding_value (list, optional): The padding value of raw image.\n Default: [127.5, 127.5, 127.5].\n label_padding_value (int, optional): The padding value of annotation image. Default: 255.\n\n Raises:\n TypeError: When crop_size is neither list nor tuple.\n ValueError: When the length of crop_size is not 2.\n \"\"\"\n\n def __init__(self,\n crop_size=(512, 512),\n im_padding_value=(127.5, 127.5, 127.5),\n label_padding_value=255):\n if isinstance(crop_size, list) or isinstance(crop_size, tuple):\n if len(crop_size) != 2:\n raise ValueError(\n 'Type of `crop_size` is list or tuple. It should include 2 elements, but it is {}'\n .format(crop_size))\n else:\n raise TypeError(\n \"The type of `crop_size` is invalid. It should be list or tuple, but it is {}\"\n .format(type(crop_size)))\n self.crop_size = crop_size\n self.im_padding_value = im_padding_value\n self.label_padding_value = label_padding_value\n\n def __call__(self, im, label=None):\n \"\"\"\n Args:\n im (np.ndarray): The Image data.\n label (np.ndarray, optional): The label data. Default: None.\n\n Returns:\n (tuple). When label is None, it returns (im, ), otherwise it returns (im, label).\n \"\"\"\n\n if isinstance(self.crop_size, int):\n crop_width = self.crop_size\n crop_height = self.crop_size\n else:\n crop_width = self.crop_size[0]\n crop_height = self.crop_size[1]\n\n img_height = im.shape[0]\n img_width = im.shape[1]\n\n if img_height == crop_height and img_width == crop_width:\n if label is None:\n return (im, )\n else:\n return (im, label)\n else:\n pad_height = max(crop_height - img_height, 0)\n pad_width = max(crop_width - img_width, 0)\n if (pad_height > 0 or pad_width > 0):\n im = cv2.copyMakeBorder(\n im,\n 0,\n pad_height,\n 0,\n pad_width,\n cv2.BORDER_CONSTANT,\n value=self.im_padding_value)\n if label is not None:\n label = cv2.copyMakeBorder(\n label,\n 0,\n pad_height,\n 0,\n pad_width,\n cv2.BORDER_CONSTANT,\n value=self.label_padding_value)\n img_height = im.shape[0]\n img_width = im.shape[1]\n\n if crop_height > 0 and crop_width > 0:\n h_off = np.random.randint(img_height - crop_height + 1)\n w_off = np.random.randint(img_width - crop_width + 1)\n\n im = im[h_off:(crop_height + h_off), w_off:(\n w_off + crop_width), :]\n if label is not None:\n label = label[h_off:(crop_height + h_off), w_off:(\n w_off + crop_width)]\n if label is None:\n return (im, )\n else:\n return (im, label)\n\n \[email protected]_component\nclass RandomCenterCrop:\n \"\"\"\n Crops the given the input data at the center.\n Args:\n retain_ratio (tuple or list, optional): The length of the input list or tuple must be 2. 
Default: (0.5, 0.5).\n the first value is used for width and the second is for height.\n In addition, the minimum size of the cropped image is [width * retain_ratio[0], height * retain_ratio[1]].\n Raises:\n TypeError: When retain_ratio is neither list nor tuple. Default: None.\n ValueError: When the value of retain_ratio is not in [0-1].\n \"\"\"\n\n def __init__(self,\n retain_ratio=(0.5, 0.5)):\n if isinstance(retain_ratio, list) or isinstance(retain_ratio, tuple):\n if len(retain_ratio) != 2:\n raise ValueError(\n 'When type of `retain_ratio` is list or tuple, it shoule include 2 elements, but it is {}'.format(\n retain_ratio)\n )\n if retain_ratio[0] > 1 or retain_ratio[1] > 1 or retain_ratio[0] < 0 or retain_ratio[1] < 0:\n raise ValueError(\n 'Value of `retain_ratio` should be in [0, 1], but it is {}'.format(retain_ratio)\n )\n else:\n raise TypeError(\n \"The type of `retain_ratio` is invalid. It should be list or tuple, but it is {}\"\n .format(type(retain_ratio)))\n self.retain_ratio = retain_ratio\n\n def __call__(self, im, label=None):\n \"\"\"\n Args:\n im (np.ndarray): The Image data.\n label (np.ndarray, optional): The label data. Default: None.\n Returns:\n (tuple). When label is None, it returns (im, ), otherwise it returns (im, label).\n \"\"\"\n retain_width = self.retain_ratio[0]\n retain_height = self.retain_ratio[1]\n\n img_height = im.shape[0]\n img_width = im.shape[1]\n\n if retain_width == 1. and retain_height == 1.:\n if label is None:\n return (im,)\n else:\n return (im, label)\n else:\n randw = np.random.randint(img_width * (1 - retain_width))\n randh = np.random.randint(img_height * (1 - retain_height))\n offsetw = 0 if randw == 0 else np.random.randint(randw)\n offseth = 0 if randh == 0 else np.random.randint(randh)\n p0, p1, p2, p3 = offseth, img_height + offseth - randh, offsetw, img_width + offsetw - randw\n im = im[p0:p1, p2:p3, :]\n if label is not None:\n label = label[p0:p1, p2:p3, :]\n\n if label is None:\n return (im,)\n else:\n return (im, label)\n \n \[email protected]_component\nclass ScalePadding:\n \"\"\"\n Add center padding to a raw image or annotation image,then scale the\n image to target size.\n\n Args:\n target_size (list|tuple, optional): The target size of image. Default: (512, 512).\n im_padding_value (list, optional): The padding value of raw image.\n Default: [127.5, 127.5, 127.5].\n label_padding_value (int, optional): The padding value of annotation image. Default: 255.\n\n Raises:\n TypeError: When target_size is neither list nor tuple.\n ValueError: When the length of target_size is not 2.\n \"\"\"\n\n def __init__(self,\n target_size=(512, 512),\n im_padding_value=(127.5, 127.5, 127.5),\n label_padding_value=255):\n if isinstance(target_size, list) or isinstance(target_size, tuple):\n if len(target_size) != 2:\n raise ValueError(\n '`target_size` should include 2 elements, but it is {}'.\n format(target_size))\n else:\n raise TypeError(\n \"Type of `target_size` is invalid. It should be list or tuple, but it is {}\"\n .format(type(target_size)))\n\n self.target_size = target_size\n self.im_padding_value = im_padding_value\n self.label_padding_value = label_padding_value\n\n def __call__(self, im, label=None):\n \"\"\"\n Args:\n im (np.ndarray): The Image data.\n label (np.ndarray, optional): The label data. Default: None.\n\n Returns:\n (tuple). 
When label is None, it returns (im, ), otherwise it returns (im, label).\n \"\"\"\n height = im.shape[0]\n width = im.shape[1]\n\n new_im = np.zeros(\n (max(height, width), max(height, width), 3)) + self.im_padding_value\n if label is not None:\n new_label = np.zeros((max(height, width), max(\n height, width))) + self.label_padding_value\n\n if height > width:\n padding = int((height - width) / 2)\n new_im[:, padding:padding + width, :] = im\n if label is not None:\n new_label[:, padding:padding + width] = label\n else:\n padding = int((width - height) / 2)\n new_im[padding:padding + height, :, :] = im\n if label is not None:\n new_label[padding:padding + height, :] = label\n\n im = np.uint8(new_im)\n im = functional.resize(im, self.target_size, interp=cv2.INTER_CUBIC)\n if label is not None:\n label = np.uint8(new_label)\n label = functional.resize(\n label, self.target_size, interp=cv2.INTER_CUBIC)\n if label is None:\n return (im, )\n else:\n return (im, label)\n\n\[email protected]_component\nclass RandomNoise:\n \"\"\"\n Superimposing noise on an image with a certain probability.\n\n Args:\n prob (float, optional): A probability of blurring an image. Default: 0.5.\n max_sigma(float, optional): The maximum value of standard deviation of the distribution.\n Default: 10.0.\n \"\"\"\n\n def __init__(self, prob=0.5, max_sigma=10.0):\n self.prob = prob\n self.max_sigma = max_sigma\n\n def __call__(self, im, label=None):\n \"\"\"\n Args:\n im (np.ndarray): The Image data.\n label (np.ndarray, optional): The label data. Default: None.\n\n Returns:\n (tuple). When label is None, it returns (im, ), otherwise it returns (im, label).\n \"\"\"\n if random.random() < self.prob:\n mu = 0\n sigma = random.random() * self.max_sigma\n im = np.array(im, dtype=np.float32)\n im += np.random.normal(mu, sigma, im.shape)\n im[im > 255] = 255\n im[im < 0] = 0\n\n if label is None:\n return (im, )\n else:\n return (im, label)\n\n\[email protected]_component\nclass RandomBlur:\n \"\"\"\n Blurring an image by a Gaussian function with a certain probability.\n\n Args:\n prob (float, optional): A probability of blurring an image. Default: 0.1.\n blur_type(str, optional): A type of blurring an image,\n gaussian stands for cv2.GaussianBlur,\n median stands for cv2.medianBlur,\n blur stands for cv2.blur,\n random represents randomly selected from above.\n Default: gaussian.\n \"\"\"\n\n def __init__(self, prob=0.1, blur_type=\"gaussian\"):\n self.prob = prob\n self.blur_type = blur_type\n\n def __call__(self, im, label=None):\n \"\"\"\n Args:\n im (np.ndarray): The Image data.\n label (np.ndarray, optional): The label data. Default: None.\n\n Returns:\n (tuple). 
When label is None, it returns (im, ), otherwise it returns (im, label).\n \"\"\"\n\n if self.prob <= 0:\n n = 0\n elif self.prob >= 1:\n n = 1\n else:\n n = int(1.0 / self.prob)\n if n > 0:\n if np.random.randint(0, n) == 0:\n radius = np.random.randint(3, 10)\n if radius % 2 != 1:\n radius = radius + 1\n if radius > 9:\n radius = 9\n im = np.array(im, dtype='uint8')\n if self.blur_type == \"gaussian\":\n im = cv2.GaussianBlur(im, (radius, radius), 0, 0)\n elif self.blur_type == \"median\":\n im = cv2.medianBlur(im, radius)\n elif self.blur_type == \"blur\":\n im = cv2.blur(im, (radius, radius))\n elif self.blur_type == \"random\":\n select = random.random()\n if select < 0.3:\n im = cv2.GaussianBlur(im, (radius, radius), 0)\n elif select < 0.6:\n im = cv2.medianBlur(im, radius)\n else:\n im = cv2.blur(im, (radius, radius))\n else:\n im = cv2.GaussianBlur(im, (radius, radius), 0, 0)\n im = np.array(im, dtype='float32')\n if label is None:\n return (im, )\n else:\n return (im, label)\n\n\[email protected]_component\nclass RandomRotation:\n \"\"\"\n Rotate an image randomly with padding.\n\n Args:\n max_rotation (float, optional): The maximum rotation degree. Default: 15.\n im_padding_value (list, optional): The padding value of raw image.\n Default: [127.5, 127.5, 127.5].\n label_padding_value (int, optional): The padding value of annotation image. Default: 255.\n \"\"\"\n\n def __init__(self,\n max_rotation=15,\n im_padding_value=(127.5, 127.5, 127.5),\n label_padding_value=255):\n self.max_rotation = max_rotation\n self.im_padding_value = im_padding_value\n self.label_padding_value = label_padding_value\n\n def __call__(self, im, label=None):\n \"\"\"\n Args:\n im (np.ndarray): The Image data.\n label (np.ndarray, optional): The label data. Default: None.\n\n Returns:\n (tuple). When label is None, it returns (im, ), otherwise it returns (im, label).\n \"\"\"\n\n if self.max_rotation > 0:\n (h, w) = im.shape[:2]\n do_rotation = np.random.uniform(-self.max_rotation,\n self.max_rotation)\n pc = (w // 2, h // 2)\n r = cv2.getRotationMatrix2D(pc, do_rotation, 1.0)\n cos = np.abs(r[0, 0])\n sin = np.abs(r[0, 1])\n\n nw = int((h * sin) + (w * cos))\n nh = int((h * cos) + (w * sin))\n\n (cx, cy) = pc\n r[0, 2] += (nw / 2) - cx\n r[1, 2] += (nh / 2) - cy\n dsize = (nw, nh)\n im = cv2.warpAffine(\n im,\n r,\n dsize=dsize,\n flags=cv2.INTER_LINEAR,\n borderMode=cv2.BORDER_CONSTANT,\n borderValue=self.im_padding_value)\n if label is not None:\n label = cv2.warpAffine(\n label,\n r,\n dsize=dsize,\n flags=cv2.INTER_NEAREST,\n borderMode=cv2.BORDER_CONSTANT,\n borderValue=self.label_padding_value)\n\n if label is None:\n return (im, )\n else:\n return (im, label)\n\n\[email protected]_component\nclass RandomScaleAspect:\n \"\"\"\n Crop a sub-image from an original image with a range of area ratio and aspect and\n then scale the sub-image back to the size of the original image.\n\n Args:\n min_scale (float, optional): The minimum area ratio of cropped image to the original image. Default: 0.5.\n aspect_ratio (float, optional): The minimum aspect ratio. Default: 0.33.\n \"\"\"\n\n def __init__(self, min_scale=0.5, aspect_ratio=0.33):\n self.min_scale = min_scale\n self.aspect_ratio = aspect_ratio\n\n def __call__(self, im, label=None):\n \"\"\"\n Args:\n im (np.ndarray): The Image data.\n label (np.ndarray, optional): The label data. Default: None.\n\n Returns:\n (tuple). 
When label is None, it returns (im, ), otherwise it returns (im, label).\n \"\"\"\n\n if self.min_scale != 0 and self.aspect_ratio != 0:\n img_height = im.shape[0]\n img_width = im.shape[1]\n for i in range(0, 10):\n area = img_height * img_width\n target_area = area * np.random.uniform(self.min_scale, 1.0)\n aspectRatio = np.random.uniform(self.aspect_ratio,\n 1.0 / self.aspect_ratio)\n\n dw = int(np.sqrt(target_area * 1.0 * aspectRatio))\n dh = int(np.sqrt(target_area * 1.0 / aspectRatio))\n if (np.random.randint(10) < 5):\n tmp = dw\n dw = dh\n dh = tmp\n\n if (dh < img_height and dw < img_width):\n h1 = np.random.randint(0, img_height - dh)\n w1 = np.random.randint(0, img_width - dw)\n\n im = im[h1:(h1 + dh), w1:(w1 + dw), :]\n im = cv2.resize(\n im, (img_width, img_height),\n interpolation=cv2.INTER_LINEAR)\n if label is not None:\n label = label[h1:(h1 + dh), w1:(w1 + dw)]\n label = cv2.resize(\n label, (img_width, img_height),\n interpolation=cv2.INTER_NEAREST)\n break\n if label is None:\n return (im, )\n else:\n return (im, label)\n\n\[email protected]_component\nclass RandomDistort:\n \"\"\"\n Distort an image with random configurations.\n\n Args:\n brightness_range (float, optional): A range of brightness. Default: 0.5.\n brightness_prob (float, optional): A probability of adjusting brightness. Default: 0.5.\n contrast_range (float, optional): A range of contrast. Default: 0.5.\n contrast_prob (float, optional): A probability of adjusting contrast. Default: 0.5.\n saturation_range (float, optional): A range of saturation. Default: 0.5.\n saturation_prob (float, optional): A probability of adjusting saturation. Default: 0.5.\n hue_range (int, optional): A range of hue. Default: 18.\n hue_prob (float, optional): A probability of adjusting hue. Default: 0.5.\n sharpness_range (float, optional): A range of sharpness. Default: 0.5.\n sharpness_prob (float, optional): A probability of adjusting saturation. Default: 0.\n \"\"\"\n\n def __init__(self,\n brightness_range=0.5,\n brightness_prob=0.5,\n contrast_range=0.5,\n contrast_prob=0.5,\n saturation_range=0.5,\n saturation_prob=0.5,\n hue_range=18,\n hue_prob=0.5,\n sharpness_range=0.5,\n sharpness_prob=0):\n self.brightness_range = brightness_range\n self.brightness_prob = brightness_prob\n self.contrast_range = contrast_range\n self.contrast_prob = contrast_prob\n self.saturation_range = saturation_range\n self.saturation_prob = saturation_prob\n self.hue_range = hue_range\n self.hue_prob = hue_prob\n self.sharpness_range = sharpness_range\n self.sharpness_prob = sharpness_prob\n\n def __call__(self, im, label=None):\n \"\"\"\n Args:\n im (np.ndarray): The Image data.\n label (np.ndarray, optional): The label data. Default: None.\n\n Returns:\n (tuple). 
When label is None, it returns (im, ), otherwise it returns (im, label).\n \"\"\"\n\n brightness_lower = 1 - self.brightness_range\n brightness_upper = 1 + self.brightness_range\n contrast_lower = 1 - self.contrast_range\n contrast_upper = 1 + self.contrast_range\n saturation_lower = 1 - self.saturation_range\n saturation_upper = 1 + self.saturation_range\n hue_lower = -self.hue_range\n hue_upper = self.hue_range\n sharpness_lower = 1 - self.sharpness_range\n sharpness_upper = 1 + self.sharpness_range\n ops = [\n functional.brightness, functional.contrast, functional.saturation,\n functional.hue, functional.sharpness\n ]\n random.shuffle(ops)\n params_dict = {\n 'brightness': {\n 'brightness_lower': brightness_lower,\n 'brightness_upper': brightness_upper\n },\n 'contrast': {\n 'contrast_lower': contrast_lower,\n 'contrast_upper': contrast_upper\n },\n 'saturation': {\n 'saturation_lower': saturation_lower,\n 'saturation_upper': saturation_upper\n },\n 'hue': {\n 'hue_lower': hue_lower,\n 'hue_upper': hue_upper\n },\n 'sharpness': {\n 'sharpness_lower': sharpness_lower,\n 'sharpness_upper': sharpness_upper,\n }\n }\n prob_dict = {\n 'brightness': self.brightness_prob,\n 'contrast': self.contrast_prob,\n 'saturation': self.saturation_prob,\n 'hue': self.hue_prob,\n 'sharpness': self.sharpness_prob\n }\n im = im.astype('uint8')\n im = Image.fromarray(im)\n for id in range(len(ops)):\n params = params_dict[ops[id].__name__]\n prob = prob_dict[ops[id].__name__]\n params['im'] = im\n if np.random.uniform(0, 1) < prob:\n im = ops[id](**params)\n im = np.asarray(im).astype('float32')\n if label is None:\n return (im, )\n else:\n return (im, label)\n\n\[email protected]_component\nclass RandomAffine:\n \"\"\"\n Affine transform an image with random configurations.\n\n Args:\n size (tuple, optional): The target size after affine transformation. Default: (224, 224).\n translation_offset (float, optional): The maximum translation offset. Default: 0.\n max_rotation (float, optional): The maximum rotation degree. Default: 15.\n min_scale_factor (float, optional): The minimum scale. Default: 0.75.\n max_scale_factor (float, optional): The maximum scale. Default: 1.25.\n im_padding_value (float, optional): The padding value of raw image. Default: (128, 128, 128).\n label_padding_value (int, optional): The padding value of annotation image. Default: (255, 255, 255).\n \"\"\"\n\n def __init__(self,\n size=(224, 224),\n translation_offset=0,\n max_rotation=15,\n min_scale_factor=0.75,\n max_scale_factor=1.25,\n im_padding_value=(128, 128, 128),\n label_padding_value=(255, 255, 255)):\n self.size = size\n self.translation_offset = translation_offset\n self.max_rotation = max_rotation\n self.min_scale_factor = min_scale_factor\n self.max_scale_factor = max_scale_factor\n self.im_padding_value = im_padding_value\n self.label_padding_value = label_padding_value\n\n def __call__(self, im, label=None):\n \"\"\"\n Args:\n im (np.ndarray): The Image data.\n label (np.ndarray, optional): The label data. Default: None.\n\n Returns:\n (tuple). 
When label is None, it returns (im, ), otherwise it returns (im, label).\n \"\"\"\n\n w, h = self.size\n bbox = [0, 0, im.shape[1] - 1, im.shape[0] - 1]\n x_offset = (random.random() - 0.5) * 2 * self.translation_offset\n y_offset = (random.random() - 0.5) * 2 * self.translation_offset\n dx = (w - (bbox[2] + bbox[0])) / 2.0\n dy = (h - (bbox[3] + bbox[1])) / 2.0\n\n matrix_trans = np.array([[1.0, 0, dx], [0, 1.0, dy], [0, 0, 1.0]])\n\n angle = random.random() * 2 * self.max_rotation - self.max_rotation\n scale = random.random() * (self.max_scale_factor - self.min_scale_factor\n ) + self.min_scale_factor\n scale *= np.mean(\n [float(w) / (bbox[2] - bbox[0]),\n float(h) / (bbox[3] - bbox[1])])\n alpha = scale * math.cos(angle / 180.0 * math.pi)\n beta = scale * math.sin(angle / 180.0 * math.pi)\n\n centerx = w / 2.0 + x_offset\n centery = h / 2.0 + y_offset\n matrix = np.array(\n [[alpha, beta, (1 - alpha) * centerx - beta * centery],\n [-beta, alpha, beta * centerx + (1 - alpha) * centery],\n [0, 0, 1.0]])\n\n matrix = matrix.dot(matrix_trans)[0:2, :]\n im = cv2.warpAffine(\n np.uint8(im),\n matrix,\n tuple(self.size),\n flags=cv2.INTER_LINEAR,\n borderMode=cv2.BORDER_CONSTANT,\n borderValue=self.im_padding_value)\n if label is not None:\n label = cv2.warpAffine(\n np.uint8(label),\n matrix,\n tuple(self.size),\n flags=cv2.INTER_NEAREST,\n borderMode=cv2.BORDER_CONSTANT)\n if label is None:\n return (im, )\n else:\n return (im, label)\n", "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport paddle\nimport os\nimport cv2\nimport imageio\nimport numpy as np\n\nfrom paddleseg.utils import logger\nimport PIL\n\n\ndef load_ema_model(model, resume_model):\n if resume_model is not None:\n logger.info('Load ema model from {}'.format(resume_model))\n if os.path.exists(resume_model):\n resume_model = os.path.normpath(resume_model)\n ckpt_path = os.path.join(resume_model, 'model.pdparams')\n para_state_dict = paddle.load(ckpt_path)\n model.set_state_dict(para_state_dict)\n else:\n raise ValueError(\n 'Directory of the model needed to resume is not Found: {}'.\n format(resume_model))\n else:\n logger.info('No model needed to resume.')\n\n\ndef save_edge(edges_src, name):\n tmp = edges_src.detach().clone().squeeze().numpy()\n tmp[tmp == 1] == 255\n imageio.imwrite('edge_pics/edge_{}.png'.format(name), tmp)\n\n\ndef get_color_map_list(num_classes):\n colormap = [\n 128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153,\n 153, 153, 153, 250, 170, 30, 220, 220, 0, 107, 142, 35, 152, 251, 152,\n 0, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70, 0, 60, 100, 0,\n 80, 100, 0, 0, 230, 119, 11, 32\n ] + [0, 0, 0] * (256 - num_classes)\n\n return colormap\n\n\ndef get_pseudo_color_map(pred, color_map=None):\n \"\"\"\n Get the pseudo color image.\n\n Args:\n pred (numpy.ndarray): the origin predicted image, H*W .\n color_map (list, optional): the palette color map. 
Default: None,\n use paddleseg's default color map.\n\n Returns:\n (numpy.ndarray): the pseduo image.\n \"\"\"\n if len(pred.shape) > 2:\n pred = np.squeeze(pred)\n\n pred_mask = PIL.Image.fromarray(pred.astype(np.uint8), mode='P')\n color_map = get_color_map_list(19)\n pred_mask.putpalette(color_map)\n return pred_mask\n\n\ndef save_imgs(results, imgs, save_dir='.'):\n for i in range(results.shape[0]):\n result = get_pseudo_color_map(results[i])\n basename = imgs[i] + 'val'\n basename = f'{basename}.png'\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n\n result.save(os.path.join(save_dir, basename))\n" ]
[ [ "numpy.random.seed" ], [ "numpy.abs", "numpy.sqrt", "numpy.linspace", "numpy.asarray", "numpy.uint8", "numpy.random.shuffle", "numpy.random.normal", "numpy.transpose", "numpy.random.uniform", "numpy.array", "numpy.random.randint" ], [ "numpy.squeeze" ] ]
fumiyanll23/AtCoder
[ "362ca9fcacb5415c1458bc8dee5326ba2cc70b65" ]
[ "ABC/186/b_ans.py" ]
[ "import numpy as np\n\ndef main():\n # input\n H, W = map(int, input().split())\n Ass = [[*map(int, input().split())] for _ in range(H)]\n\n # compute\n Ass = np.array(Ass)\n\n # output\n print(np.sum(Ass - np.min(Ass)))\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.array", "numpy.min" ] ]
frozenburst/download_audioset
[ "a4ce2fbdeaf23c155717800bd17a986b5c1f51ad" ]
[ "as_download.py" ]
[ "'''\n================================================\n DOWNLOAD_AUDIOSET REPOSITORY\n================================================\nOriginal:\n repository name: download_audioset\n repository version: 1.0\n repository link: https://github.com/jim-schwoebel/download_audioset\n author: Jim Schwoebel\n author contact: [email protected]\n description: downloads the raw audio files from AudioSet (released by Google).\n license category: opensource\n license: Apache 2.0 license\n organization name: NeuroLex Laboratories, Inc.\n location: Seattle, WA\n website: https://neurolex.ai\n release date: 2018-11-08\n\nEdit:\n repository name: download_audioset\n repository version: 1.1\n repository link: https://github.com/frozenburst/download_audioset\n author: POYU WU\n release date: 2020-11-10\n\nThis code (download_audioset) is hereby released under a Apache 2.0 license license.\n\nFor more information, check out the license terms below.\n\n================================================\n SPECIAL NOTES\n================================================\n\nThis script parses through the entire balanced audioset dataset and downloads\nall the raw audio files. The files are arranged in folders according to their\nrepresentative classes.\n\nPlease ensure that you have roughly 35GB of free space on your computer before\ndownloading the files. Note that it may take up to 2 days to fully download\nall the files.\n\nEnjoy! - :)\n\n#-Jim\n\n================================================\n LICENSE TERMS\n================================================\n\nCopyright 2018 NeuroLex Laboratories, Inc.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n================================================\n SERVICE STATEMENT\n================================================\n\nIf you are using the code written for a larger project, we are\nhappy to consult with you and help you with deployment. 
Our team\nhas >10 world experts in Kafka distributed architectures, microservices\nbuilt on top of Node.js / Python / Docker, and applying machine learning to\nmodel speech and text data.\n\nWe have helped a wide variety of enterprises - small businesses,\nresearchers, enterprises, and/or independent developers.\n\nIf you would like to work with us let us know @ [email protected].\n\nusage: as_download.py [options]\n\noptions:\n --data_pth=<data path>\n --label_pth=<labels.xlsx>\n --segment_file=<xlsx file>\n --partial=<0, 1, 2, ...> # The unbalance csv could split to parts for parallel.\n'''\n\n################################################################################\n## IMPORT STATEMENTS ##\n################################################################################\n\nimport pafy, os, shutil, time, ffmpy\nimport os.path as op\nimport pandas as pd\nimport soundfile as sf\n\nfrom natsort import natsorted\nfrom tqdm import tqdm\nfrom pathlib import Path\nfrom docopt import docopt\n\n################################################################################\n## HELPER FUNCTIONS ##\n################################################################################\n\n#function to clean labels\ndef convertlabels(sortlist,labels,textlabels):\n\n clabels=list()\n # Debug for sortlist data type, split with each label ids.\n sortlist = sortlist.split(',')\n\n for i in range(len(sortlist)):\n #find index in list corresponding\n index=labels.index(sortlist[i])\n clabel=textlabels[index]\n #pull out converted label\n clabels.append(clabel)\n\n return clabels\n\n\ndef download_audio(link):\n listdir=os.listdir()\n cmd = f\"youtube-dl --quiet -f 'bestaudio[ext=m4a]' '{link}'\"\n print(cmd)\n os.system(cmd)\n listdir2=os.listdir()\n filename=''\n for i in range(len(listdir2)):\n if listdir2[i] not in listdir and listdir2[i].endswith('.m4a'):\n filename=listdir2[i]\n break\n\n return filename\n\n################################################################################\n## MAIN SCRIPT ##\n################################################################################\n\nif __name__ == '__main__':\n args = docopt(__doc__)\n print(args)\n data_pth = args['--data_pth']\n label_pth = args['--label_pth']\n segment_file = args['--segment_file']\n partial = args['--partial']\n\n if data_pth is None:\n raise ValueError(\"Please set the path for model's output.\")\n if label_pth is None:\n raise ValueError(\"Please set the path for model's output.\")\n if segment_file is None:\n raise ValueError(\"Please set the path for model's output.\")\n if partial is not None:\n print(\"Partial detected. 
The naming of wav would follow the partial name.\")\n \n defaultdir=os.getcwd()\n os.chdir(defaultdir)\n\n #load labels of the videos\n\n #number, label, words\n loadfile=pd.read_excel(label_pth)\n\n number=loadfile.iloc[:,0].tolist()\n labels=loadfile.iloc[:,1].tolist()\n textlabels=loadfile.iloc[:,2].tolist()\n #remove spaces for folders\n for i in range(len(textlabels)):\n textlabels[i]=textlabels[i].replace(' ','')\n\n #now load data for download\n xlsx_filename = segment_file\n if op.isfile(xlsx_filename) is False:\n raise ValueError(\"Xlsx file of segment is not exits with value:\", xlsx_filename) \n loadfile2=pd.read_excel(xlsx_filename)\n\n # ylabels have to be cleaned to make a good list (CSV --> LIST)\n yid=loadfile2.iloc[:,0].tolist()[2:]\n ystart=loadfile2.iloc[:,1].tolist()[2:]\n yend=loadfile2.iloc[:,2].tolist()[2:]\n ylabels=loadfile2.iloc[:,3].tolist()[2:]\n\n dataset_dir = data_pth\n if op.isdir(dataset_dir) is False:\n raise ValueError(\"Dataset directory is not exits with path:\", dataset_dir)\n\n #make folders\n if partial is not None:\n # segment_folder_name = op.basename(xlsx_filename).split('.')[0]\n # Easy method is the best solution.\n segment_folder_name = 'unbalanced_train_segments'\n else:\n segment_folder_name = op.basename(xlsx_filename).split('.')[0]\n try:\n defaultdir2=op.join(dataset_dir, segment_folder_name)\n os.chdir(defaultdir2)\n except:\n defaultdir2=op.join(dataset_dir, segment_folder_name)\n os.mkdir(defaultdir2)\n os.chdir(defaultdir2)\n\n # Should implement the check of existed file as well.\n # Implemented by frozenburst\n existing_wavfiles=list()\n for dirname in tqdm(sorted(Path(defaultdir2).glob('*'))):\n if partial is not None:\n for filename in sorted(Path(dirname).glob(f'{partial}_*')):\n existing_wavfiles.append(op.basename(filename))\n else:\n for filename in sorted(Path(dirname).glob(f'*')):\n existing_wavfiles.append(op.basename(filename))\n\n # get last file checkpoint to leave off\n existing_wavfiles=natsorted(existing_wavfiles)\n print(existing_wavfiles)\n try:\n lastfile=int(existing_wavfiles[-1].split('.')[0][7:])\n except:\n lastfile=0\n\n #iterate through entire CSV file, look for '--' if found, find index, delete section, then go to next index\n slink='https://www.youtube.com/watch?v='\n\n for i in tqdm(range(len(yid))):\n if i < lastfile:\n # print('Skipping, already downloaded file...')\n continue\n else:\n link=slink+yid[i]\n start=float(ystart[i])\n end=float(yend[i])\n # print(ylabels[i])\n clabels=convertlabels(ylabels[i],labels,textlabels)\n # print(clabels)\n\n if clabels != []:\n #change to the right directory\n for j in range(len(clabels)):\n newdir = op.join(defaultdir2, clabels[j])\n if op.isdir(newdir) is False:\n os.mkdir(newdir)\n os.chdir(newdir)\n #if it is the first download, pursue this path to download video\n lastdir=os.getcwd()\n\n if partial is not None:\n filename_check = f'{partial}_snipped'+str(i)+'.wav'\n else:\n filename_check = 'snipped'+str(i)+'.wav'\n\n if filename_check not in os.listdir():\n try:\n # use YouTube DL to download audio\n filename=download_audio(link)\n extension='.m4a'\n #get file extension and convert to .wav for processing later\n os.rename(filename,'%s%s'%(str(i),extension))\n filename='%s%s'%(str(i),extension)\n if extension not in ['.wav']:\n xindex=filename.find(extension)\n filename=filename[0:xindex]\n ff=ffmpy.FFmpeg(\n inputs={filename+extension:None},\n outputs={filename+'.wav':None}\n )\n ff.run()\n os.remove(filename+extension)\n\n file=filename+'.wav'\n 
data,samplerate=sf.read(file)\n totalframes=len(data)\n totalseconds=totalframes/samplerate\n startsec=start\n startframe=samplerate*startsec\n endsec=end\n endframe=samplerate*endsec\n # print(startframe)\n # print(endframe)\n if partial is not None:\n newname = f'{partial}_snipped'+file\n else:\n newname = 'snipped'+file\n sf.write(newname, data[int(startframe):int(endframe)], samplerate)\n snippedfile=newname\n os.remove(file)\n\n except:\n print('no urls')\n\n #sleep 3 second sleep to prevent IP from getting banned\n time.sleep(2)\n else:\n print('skipping, already downloaded file...')\n\n" ]
[ [ "pandas.read_excel" ] ]
wuwuwuyuanhang/python
[ "eb5ac23cb46c4beeab1638fda963dd154b9db1b7", "eb5ac23cb46c4beeab1638fda963dd154b9db1b7", "eb5ac23cb46c4beeab1638fda963dd154b9db1b7" ]
[ "opencv/q23.py", "tf2.0/Chapter5/5_8.py", "opencv/q11.py" ]
[ "# @Auther : wuwuwu \n# @Time : 2020/4/15 \n# @File : q23.py\n# @Description : 直方图均衡化\n\nimport cv2 as cv\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ndef histogramEqualization(img, Zmax=255):\n \"\"\"\n 直方图均衡化\n :param img:\n :param Zmax: 像素的最大取值\n :return:\n \"\"\"\n H, W, C = img.shape\n S = H * W * C\n\n dst = img.copy()\n\n sum_h = 0\n\n for i in range(1, 255):\n index = np.where(img == i)\n sum_h += len(img[index])\n dst[index] = Zmax / S * sum_h\n\n return np.clip(dst, 0, 255).astype(np.uint8)\n\nif __name__ == '__main__':\n img = cv.imread('lenna.jpg')\n dst = histogramEqualization(img, Zmax=255)\n plt.figure()\n plt.hist(img.flatten(), bins=255, rwidth=0.8, range=(0, 255))\n plt.title('input histogram')\n plt.figure()\n plt.hist(dst.flatten(), bins=255, rwidth=0.8, range=(0, 255))\n plt.title('output histogram')\n plt.show()\n cv.imshow('input', img)\n cv.imshow('output', dst)\n cv.waitKey(0)\n cv.destroyAllWindows()", "# @Auther : wuwuwu \n# @Time : 2020/2/1 \n# @File : 5_8.py\n# @Description : MNIST测试实战\n\nimport tensorflow as tf\nfrom tensorflow import keras\nimport matplotlib\nfrom matplotlib import pyplot as plt\n\n# Default parameters for plots\nmatplotlib.rcParams['font.size'] = 20\nmatplotlib.rcParams['figure.titlesize'] = 20\nmatplotlib.rcParams['figure.figsize'] = [9, 7]\nmatplotlib.rcParams['font.family'] = ['STKaiTi']\nmatplotlib.rcParams['axes.unicode_minus']=False\n\ndef preprocess(x, y): # 自定义的预处理函数\n # 调用此函数时自动传入x, y对象,shape为[b, 28, 28], [b]\n # 标准化到0-1\n x = tf.cast(x, dtype=tf.float32) / 255.\n x = tf.reshape(x, [-1, 28*28]) # 打平\n y = tf.cast(y, dtype=tf.int32)\n y = tf.one_hot(y, depth=10)\n\n # 返回的x, y将替换传入的x, y参数,从而实现数据的预处理\n return x, y\n\n# 加载数据集\n(x, y), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\n# 批训练长度\nbatch_size = 512\ntrain_db = tf.data.Dataset.from_tensor_slices((x, y)) # 转为Dataset对象\ntrain_db = train_db.shuffle(1000) # 随机打散\ntrain_db = train_db.batch(batch_size) # 批训练\ntrain_db = train_db.map(preprocess) # 预处理\ntrain_db = train_db.repeat(20) # 内部循环20次\n\n# 测试数据集处理\ntest_db = tf.data.Dataset.from_tensor_slices((x_test, y_test))\ntest_db = test_db.shuffle(1000).batch(batch_size).map(preprocess)\n\ndef main():\n\n # 学习率\n lr = 1e-2\n accs, losses = [], []\n\n # 神经层参数\n w1, b1 = tf.Variable(tf.random.normal([784, 256], stddev=0.1)), tf.Variable(tf.zeros([256]))\n w2, b2 = tf.Variable(tf.random.normal([256, 128], stddev=0.1)), tf.Variable(tf.zeros([128]))\n w3, b3 = tf.Variable(tf.random.normal([128, 10], stddev=0.1)), tf.Variable(tf.zeros([10]))\n\n for step, (x, y) in enumerate(train_db):\n\n x = tf.reshape(x, (-1, 784))\n\n with tf.GradientTape() as tape:\n # layer1\n h1 = x @ w1 + b1\n h1 = tf.nn.relu(h1)\n # layer2\n h2 = h1 @ w2 + b2\n h2 = tf.nn.relu(h2)\n # output\n out = h2 @ w3 + b3\n\n # 计算损失函数\n # [b, 10] - [b, 10]\n loss = tf.square(y - out)\n # [b, 10] => scalar\n loss = tf.reduce_mean(loss)\n\n grads = tape.gradient(loss, [w1, b1, w2, b2, w3, b3])\n for p, g in zip([w1, b1, w2, b2, w3, b3], grads):\n p.assign_sub(lr * g)\n\n if step % 80 == 0:\n print(step, 'loss: ', float(loss))\n losses.append(float(loss))\n\n if step %80 == 0:\n # evaluate/test\n total, total_correct = 0., 0\n\n for x, y in test_db:\n h1 = tf.nn.relu(x @ w1 + b1)\n h2 = tf.nn.relu(h1 @ w2 + b2)\n out = h2 @ w3 + b3\n # [b, 10] => [b]\n pred = tf.argmax(out, axis=1)\n # convert one_hot y to number y\n y = tf.argmax(y, axis=1)\n # bool type\n correct = tf.equal(pred, y)\n # bool tensor => int tensor => numpy\n total_correct += 
tf.reduce_sum(tf.cast(correct, dtype=tf.int32)).numpy()\n total += x.shape[0]\n\n print(step, 'Evaluate Acc:', total_correct/total)\n\n accs.append(total_correct/total)\n\n plt.figure()\n x = [i * 80 for i in range(len(losses))]\n plt.plot(x, losses, color='r', marker='+', label='训练')\n plt.ylabel('MSE')\n plt.xlabel('Step')\n #plt.show()\n\n plt.figure()\n plt.plot(x, accs, color='b', marker='*', label='测试')\n plt.ylabel('准确率')\n plt.xlabel('Step')\n plt.show()\n\nif __name__ == '__main__':\n main()\n\n", "'''\n@Author: wuwuwu\n@Date: 2019-12-01 16:43:50\n@LastEditors: wuwuwu\n@LastEditTime: 2019-12-01 16:50:46\n@Description: 均值滤波\n'''\n\nimport cv2 as cv\nimport numpy as np\n\ndef meanFilter(img, kernel_size=3):\n \"\"\"\n 均值滤波\n :param img: 输入图片\n :param kernel_size: 卷积和尺寸\n :return: 输出图片\n \"\"\"\n H, W, C = img.shape\n\n padding = kernel_size // 2\n\n # Zero Padding\n dst = np.zeros((H + padding * 2, W + padding * 2, C), dtype=np.float32)\n dst[padding : H + padding, padding : W + padding] = img.copy().astype(np.float32)\n\n tmp = dst.copy()\n\n # Mean Filter\n for y in range(H):\n for x in range(W):\n for c in range(C):\n dst[y + padding, x + padding, c] = np.mean(\n tmp[y : y + kernel_size, x : x + kernel_size, c]\n )\n\n dst = np.clip(dst, 0, 255)\n dst = dst[padding : H + padding, padding : W + padding].copy().astype(np.uint8)\n\n return dst\n\nif __name__ == '__main__':\n\n img = cv.imread(\"lenna.jpg\")\n dst = meanFilter(img, kernel_size=3)\n cv.imshow(\"input\", img)\n cv.imshow(\"output\", dst)\n cv.waitKey(0)\n cv.destroyAllWindows()" ]
[ [ "matplotlib.pyplot.title", "numpy.clip", "matplotlib.pyplot.show", "numpy.where", "matplotlib.pyplot.figure" ], [ "tensorflow.nn.relu", "tensorflow.zeros", "tensorflow.reduce_mean", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.cast", "tensorflow.reshape", "tensorflow.equal", "tensorflow.keras.datasets.mnist.load_data", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "tensorflow.GradientTape", "tensorflow.one_hot", "tensorflow.square", "matplotlib.pyplot.xlabel", "tensorflow.argmax", "matplotlib.pyplot.show", "tensorflow.random.normal", "matplotlib.pyplot.figure" ], [ "numpy.zeros", "numpy.mean", "numpy.clip" ] ]
voodoohop/Creative-Adversarial-Networks
[ "7d8632b7bfe12a698f61c442aa9c1a07d68d21c9" ]
[ "utils.py" ]
[ "\"\"\"\nSome codes from https://github.com/Newmu/dcgan_code\n\"\"\"\nfrom __future__ import division\nimport math\nimport json\nimport random\nimport pprint\nimport scipy.misc\nimport numpy as np\nfrom time import gmtime, strftime\nfrom six.moves import xrange\nfrom glob import glob\nimport cv2\nimport imageio\n\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\n\npp = pprint.PrettyPrinter()\n\nget_stddev = lambda x, k_h, k_w: 1/math.sqrt(k_w*k_h*x.get_shape()[-1])\n\ndef show_all_variables():\n model_vars = tf.trainable_variables()\n slim.model_analyzer.analyze_vars(model_vars, print_info=True)\n\ndef get_image(image_path, input_height, input_width,\n resize_height=64, resize_width=64,\n crop=True, grayscale=False):\n image = imread(image_path, grayscale)\n try:\n return transform(image, input_height, input_width,\n resize_height, resize_width, crop)\n except ValueError :\n print(\"Bad image. filepath: \", image_path)\n except AttributeError:\n print(\"Bad image. filepath: \", image_path)\ndef save_images(images, size, image_path):\n return imsave(inverse_transform(images), size, image_path)\n\ndef imread(path, grayscale = False):\n try:\n if (grayscale):\n img = cv2.imread(path)\n new_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return cv2.imread(new_img, flatten = True).astype(np.float)\n else:\n img = cv2.imread(path)\n new_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return new_img.astype(np.float)\n except(TypeError):\n print(path)\n\n#Do\ndef test_images(path_glob):\n for path in path_glob:\n imread(path)\n\ndef merge_images(images, size):\n return inverse_transform(images)\n\ndef merge(images, size):\n h, w = images.shape[1], images.shape[2]\n if (images.shape[3] in (3,4)):\n c = images.shape[3]\n img = np.zeros((h * size[0], w * size[1], c))\n for idx, image in enumerate(images):\n i = idx % size[1]\n j = idx // size[1]\n img[j * h:j * h + h, i * w:i * w + w, :] = image\n return img\n elif images.shape[3]==1:\n img = np.zeros((h * size[0], w * size[1]))\n for idx, image in enumerate(images):\n i = idx % size[1]\n j = idx // size[1]\n img[j * h:j * h + h, i * w:i * w + w] = image[:,:,0]\n return img\n else:\n raise ValueError('in merge(images,size) images parameter '\n 'must have dimensions: HxW or HxWx3 or HxWx4')\n\ndef imsave(images, size, path):\n image = np.squeeze(merge(images, size))\n return scipy.misc.imsave(path, image)\n\ndef center_crop(x, crop_h, crop_w,\n resize_h=64, resize_w=64):\n if crop_w is None:\n crop_w = crop_h\n h, w = x.shape[:2]\n j = int(round((h - crop_h)/2.))\n i = int(round((w - crop_w)/2.))\n return scipy.misc.imresize(\n x[j:j+crop_h, i:i+crop_w], [resize_h, resize_w])\n\ndef transform(image, input_height, input_width,\n resize_height=64, resize_width=64, crop=True):\n if crop:\n cropped_image = center_crop(\n image, input_height, input_width,\n resize_height, resize_width)\n else:\n cropped_image = cv2.resize(image, (resize_height, resize_width))\n return np.array(cropped_image)/127.5 - 1.\n\ndef inverse_transform(images):\n return (images+1.)/2.\n\ndef make_gif(images, fname, duration=2, true_image=False):\n import moviepy.editor as mpy\n\n def make_frame(t):\n try:\n x = images[int(len(images)/duration*t)]\n except:\n x = images[-1]\n\n if true_image:\n return x.astype(np.uint8)\n else:\n return ((x+1)/2*255).astype(np.uint8)\n\n clip = mpy.VideoClip(make_frame, duration=duration)\n clip.write_gif(fname, fps = len(images) / duration)\n\ndef visualize(sess, dcgan, config, option):\n image_frame_dim = 
int(math.ceil(config.batch_size**.5))\n if option == 0:\n z_sample = np.random.normal(0, 1, size=(config.batch_size, dcgan.z_dim))\n z_sample /= np.linalg.norm(z_sample, axis=0)\n samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})\n save_images(samples, [image_frame_dim, image_frame_dim], '/content/gdrive/My Drive/samples/test_%s.png' % strftime(\"%Y%m%d%H%M%S\", gmtime()))\n elif option == 1:\n values = np.arange(0, 1, 1./config.batch_size)\n for idx in xrange(100):\n print(\" [*] %d\" % idx)\n z_sample = np.zeros([config.batch_size, dcgan.z_dim])\n for kdx, z in enumerate(z_sample):\n z[idx] = values[kdx]\n\n if config.dataset == \"mnist\":\n y = np.random.choice(10, config.batch_size)\n y_one_hot = np.zeros((config.batch_size, 10))\n y_one_hot[np.arange(config.batch_size), y] = 1\n\n samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})\n elif config.dataset == 'wikiart':\n y = np.random.choice(27, config.batch_size)\n y_one_hot = np.zeros((config.batch_size, 27))\n y_one_hot[np.arange(config.batch_size), y] = 1\n\n samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})\n save_images(samples, [image_frame_dim, image_frame_dim], './samples/test_arange_%s.png' % (idx))\n elif option == 2:\n values = np.arange(0, 1, 1./config.batch_size)\n for idx in [random.randint(0, 99) for _ in xrange(100)]:\n print(\" [*] %d\" % idx)\n z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))\n z_sample = np.tile(z, (config.batch_size, 1))\n #z_sample = np.zeros([config.batch_size, dcgan.z_dim])\n for kdx, z in enumerate(z_sample):\n z[idx] = values[kdx]\n\n if config.dataset == \"mnist\":\n y = np.random.choice(10, config.batch_size)\n y_one_hot = np.zeros((config.batch_size, 10))\n y_one_hot[np.arange(config.batch_size), y] = 1\n\n samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})\n elif config.dataset == 'wikiart':\n y = np.random.choice(27, config.batch_size)\n y_one_hot = np.zeros((config.batch_size, 27))\n y_one_hot[np.arange(config.batch_size), y] = 1\n\n samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})\n\n try:\n make_gif(samples, './samples/test_gif_%s.gif' % (idx))\n except:\n save_images(samples, [image_frame_dim, image_frame_dim], './samples/test_%s.png' % strftime(\"%Y%m%d%H%M%S\", gmtime()))\n elif option == 3:\n values = np.arange(0, 1, 1./config.batch_size)\n for idx in xrange(100):\n print(\" [*] %d\" % idx)\n z_sample = np.zeros([config.batch_size, dcgan.z_dim])\n for kdx, z in enumerate(z_sample):\n z[idx] = values[kdx]\n\n samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})\n make_gif(samples, './samples/test_gif_%s.gif' % (idx))\n elif option == 4:\n image_set = []\n values = np.arange(0, 1, 1./config.batch_size)\n\n for idx in xrange(100):\n print(\" [*] %d\" % idx)\n z_sample = np.zeros([config.batch_size, dcgan.z_dim])\n for kdx, z in enumerate(z_sample): z[idx] = values[kdx]\n\n image_set.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))\n make_gif(image_set[-1], './samples/test_gif_%s.gif' % (idx))\n\n new_image_set = [merge(np.array([images[idx] for images in image_set]), [10, 10]) \\\n for idx in range(64) + range(63, -1, -1)]\n make_gif(new_image_set, './samples/test_gif_merged.gif', duration=8)\ndef get_max_end(path_dir, num_len=3, fname_pattern='*.jpg'):\n max_ = 0\n for f in glob(path_dir + fname_pattern):\n curr = int(f[-num_len-4:-4])\n if curr > max_:\n max_ = curr\n return max_\n\ndef 
image_manifold_size(num_images):\n print(num_images)\n manifold_h = int(np.floor(np.sqrt(num_images)))\n manifold_w = int(np.ceil(np.sqrt(num_images)))\n assert manifold_h * manifold_w == num_images\n return manifold_h, manifold_w\n\nif __name__ == '__main__':\n print('Getting image!')\n import time\n start = time.time()\n get_image(\"albert-gleizes_acrobats-1916.jpg\",256,256,256,256)\n end = (time.time() - start)\n print ('Took : {.:%4f}'.format(end))" ]
[ [ "numpy.sqrt", "numpy.random.choice", "numpy.arange", "numpy.linalg.norm", "numpy.tile", "numpy.random.normal", "tensorflow.trainable_variables", "numpy.random.uniform", "numpy.array", "tensorflow.contrib.slim.model_analyzer.analyze_vars", "numpy.zeros" ] ]
Vaden4d/logo-classifier
[ "18c397e52352da8e79868158123c13bf0417130f" ]
[ "train.py" ]
[ "import os\nimport argparse\nimport pandas as pd\nfrom tqdm import tqdm\n\nimport torch\nimport torch.nn as nn\nimport pytorch_lightning as pl\nfrom pytorch_lightning.callbacks import ModelCheckpoint \n\nfrom utils.models import EfficientNetModel, EfficientNetSSL\nfrom utils.transforms import get_transforms\nfrom utils.loaders import get_loaders, get_loader\nfrom utils.losses import LabelSmoothingLoss\nfrom utils.misc import seed_everything, predict_on_loader\nfrom utils.visualization import display_metrics\nfrom utils.dataset import ImageDataset\n\nfrom mixmatch_pytorch import MixMatchLoader, get_mixmatch_loss\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import f1_score, confusion_matrix, precision_score, recall_score\n\nparser = argparse.ArgumentParser(description='PyTorch Lightning Training')\nparser.add_argument('--epochs', default=20, type=int, metavar='N',\n help='Number of total training epochs')\nparser.add_argument('--batch_size', default=32, type=int, metavar='N',\n help='Train and test batch size') \nparser.add_argument('--gpu', default=1, type=int,\n help='0 if CPU mode, 1 if GPU')\nparser.add_argument(\"--ssl\", action=\"store_true\",\n help=\"Use semi-supervised pipeline\")\nparser.add_argument('--csv', default='dataset_with_weak_labels.csv', type=str,\n help='Training .csv file with target column')\nparser.add_argument('--target_column', default='weak_label', type=str,\n help='Name of target column of the .csv file'\n )\nparser.add_argument('--validate', action=\"store_true\",\n help=\"Validate model on labeled dataset\"\n)\nparser.add_argument('--validation_csv', default=\"labeled_part.csv\",\n help=\"Validation .csv file with labeled target\"\n)\nparser.add_argument('--target_validation', default=\"label\",\n help=\"Name of target column in validation dataset\"\n)\nparser.add_argument('--test_size', default=0.2, type=float,\n help='Test dataset size'\n )\nparser.add_argument('--image_size', default=224, type=int,\n help='Desired image size'\n)\nparser.add_argument('--num_workers', default=2, type=int,\n help='Number of processes for PyTorch data loaders'\n )\nparser.add_argument('--random_state', default=42, type=int,\n help='Random seed for all random operations'\n )\nargs = parser.parse_args()\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nargs.gpu = args.gpu if torch.cuda.is_available() else 0\nseed_everything(args.random_state)\n\n# target_column has unique values in set -1, 0, 1\n# -1 corresponds to the unlabeled data\ndf = pd.read_csv(args.csv)\nlabeled = df[df[args.target_column] > -1]\nif args.ssl:\n print(\"Semi-supervised learning model is on...\")\n unlabeled = df[df[args.target_column] == -1]\n\n# weights to initialize bias of FC layer of classifier\nweight = labeled.groupby(args.target_column).count()[\"path\"] / labeled.shape[0]\nweight = torch.Tensor(weight.values).log()\n\ntrain_labeled, test_labeled = train_test_split(labeled, test_size=args.test_size, stratify=labeled[args.target_column], random_state=args.random_state)\n\ntrain_transform, valid_transform = get_transforms(img_size=args.image_size)\ntrain_labeled_loader, valid_labeled_loader = get_loaders(\n train_labeled,\n test_labeled,\n train_transform,\n valid_transform,\n target_column=args.target_column,\n batch_size=args.batch_size,\n num_workers=args.num_workers,\n shuffle=True\n)\n\nif args.ssl:\n dataset_unlabeled = ImageDataset(unlabeled, train_transform, target_column=None)\n\nloss = LabelSmoothingLoss(num_classes=2, smoothing=0.2, 
weight=None)\n\nif args.ssl:\n print(\"Semi-supervised learning model is configured...\")\n model = EfficientNetSSL(loss=loss, num_classes=2, weight=weight)\nelse:\n model = EfficientNetModel(loss=loss, num_classes=2, weight=weight)\n\nmodel_checkpoint = ModelCheckpoint(monitor=\"val_acc_f1\",\n verbose=True,\n dirpath=\"models/\",\n mode=\"max\",\n filename=\"{epoch}_{val_acc_f1:.4f}\")\n\nif args.ssl:\n # SSL approach changes only train dataloader and model class\n train_loader = MixMatchLoader(\n train_labeled_loader,\n dataset_unlabeled,\n model,\n output_transform=nn.Softmax(dim=-1),\n K=2,\n T=0.5,\n alpha=0.75\n )\nelse:\n train_loader = train_labeled_loader\n\ntrainer = pl.Trainer(gpus=args.gpu,\n max_epochs=args.epochs,\n precision=16,\n auto_lr_find=True,\n callbacks=[model_checkpoint])\ntrainer.fit(model, train_loader, valid_labeled_loader)\n\ntest_labeled[\"pred\"] = predict_on_loader(valid_labeled_loader, model, device)\ntest_labeled.to_csv(\"test_labeled_with_preds.csv\", index=False)\n\n# TODO\n# threshold tuning\nprint(\"Metrics results on the test sample with weak labels:\")\ndisplay_metrics(test_labeled[args.target_column], test_labeled[\"pred\"], threshold=0.5)\n\nif args.validate:\n\n validation = pd.read_csv(args.validation_csv)\n validation[args.target_validation] = validation[args.target_validation].apply(lambda x: 1 if x == \"logo\" else 0)\n\n labeled_loader = get_loader(\n validation,\n \"label\",\n valid_transform,\n batch_size=args.batch_size,\n num_workers=args.num_workers,\n shuffle=False\n )\n\n validation[\"pred\"] = predict_on_loader(labeled_loader, model, device)\n validation.to_csv(\"labeled_with_preds.csv\", index=False)\n print(\"Metrics results on the labeled sample with strong labels:\")\n display_metrics(validation[\"label\"], validation[\"pred\"], threshold=0.5)\n\n" ]
[ [ "torch.nn.Softmax", "pandas.read_csv", "torch.Tensor", "sklearn.model_selection.train_test_split", "torch.cuda.is_available" ] ]
rudranshsharma123/jina
[ "cdc66eb44fe1ae5c84ba6ddfe0a6173476f773bb" ]
[ "tests/unit/types/document/test_converters.py" ]
[ "import os\n\nimport numpy as np\nimport pytest\n\nfrom jina import Document, __windows__\n\ncur_dir = os.path.dirname(os.path.abspath(__file__))\n\n\ndef test_uri_to_blob():\n doc = Document(uri=os.path.join(cur_dir, 'test.png'))\n doc.convert_image_uri_to_blob()\n assert isinstance(doc.blob, np.ndarray)\n assert doc.mime_type == 'image/png'\n assert doc.blob.shape == (85, 152, 3) # h,w,c\n\n\ndef test_datauri_to_blob():\n doc = Document(uri=os.path.join(cur_dir, 'test.png'))\n doc.convert_uri_to_datauri()\n doc.convert_image_datauri_to_blob()\n assert isinstance(doc.blob, np.ndarray)\n assert doc.mime_type == 'image/png'\n assert doc.blob.shape == (85, 152, 3) # h,w,c\n\n\ndef test_buffer_to_blob():\n doc = Document(uri=os.path.join(cur_dir, 'test.png'))\n doc.convert_uri_to_buffer()\n doc.convert_image_buffer_to_blob()\n assert isinstance(doc.blob, np.ndarray)\n assert doc.mime_type == 'image/png'\n assert doc.blob.shape == (85, 152, 3) # h,w,c\n\n\ndef test_convert_buffer_to_blob():\n rand_state = np.random.RandomState(0)\n array = rand_state.random([10, 10])\n doc = Document(content=array.tobytes())\n assert doc.content_type == 'buffer'\n intialiazed_buffer = doc.buffer\n\n doc.convert_buffer_to_blob()\n assert doc.content_type == 'blob'\n converted_buffer_in_one_of = doc.buffer\n assert intialiazed_buffer != converted_buffer_in_one_of\n np.testing.assert_almost_equal(doc.content.reshape([10, 10]), array)\n\n\[email protected]('resize_method', ['BILINEAR', 'NEAREST', 'BICUBIC', 'LANCZOS'])\[email protected](\n 'arr_size, channel_axis, height, width',\n [\n ((32 * 28), -1, None, None), # single line\n ([32, 28], -1, None, None), # without channel info\n ([32, 28, 3], -1, None, None), # h, w, c (rgb)\n ([3, 32, 28], 0, None, None), # c, h, w (rgb)\n ([1, 32, 28], 0, None, None), # c, h, w, (greyscale)\n ([32, 28, 1], -1, None, None), # h, w, c, (greyscale)\n ((32 * 28), -1, 896, 1), # single line\n ([32, 28], -1, 32, 28), # without channel info\n ([32, 28, 3], -1, 32, 28), # h, w, c (rgb)\n ([3, 32, 28], 0, 32, 28), # c, h, w (rgb)\n ([1, 32, 28], 0, 32, 28), # c, h, w, (greyscale)\n ([32, 28, 1], -1, 32, 28), # h, w, c, (greyscale)\n ],\n)\ndef test_convert_image_blob_to_uri(arr_size, channel_axis, width, height, resize_method):\n doc = Document(content=np.random.randint(0, 255, arr_size))\n assert doc.blob.any()\n assert not doc.uri\n doc.convert_image_blob_to_uri(\n channel_axis=channel_axis, width=width, height=height, resize_method=resize_method\n )\n assert doc.uri.startswith('data:image/png;base64,')\n assert doc.mime_type == 'image/png'\n\n\[email protected](\n condition=__windows__, reason='x-python is not detected on windows CI'\n)\[email protected](\n 'uri, mimetype',\n [\n (__file__, 'text/x-python'),\n ('http://google.com/index.html', 'text/html'),\n ('https://google.com/index.html', 'text/html'),\n ],\n)\ndef test_convert_uri_to_buffer(uri, mimetype):\n d = Document(uri=uri)\n assert not d.buffer\n d.convert_uri_to_buffer()\n assert d.buffer\n assert d.mime_type == mimetype\n\n\[email protected](\n 'converter', ['convert_buffer_to_uri', 'convert_content_to_uri']\n)\ndef test_convert_buffer_to_uri(converter):\n d = Document(content=open(__file__).read().encode(), mime_type='text/x-python')\n assert d.buffer\n getattr(d, converter)()\n assert d.uri.startswith('data:text/x-python;')\n\n\[email protected]('converter', ['convert_text_to_uri', 'convert_content_to_uri'])\ndef test_convert_text_to_uri(converter):\n d = Document(content=open(__file__).read(), 
mime_type='text/x-python')\n assert d.text\n getattr(d, converter)()\n assert d.uri.startswith('data:text/x-python;')\n\n\[email protected](\n condition=__windows__, reason='x-python is not detected on windows CI'\n)\[email protected](\n 'uri, mimetype',\n [\n pytest.param(\n __file__,\n 'text/x-python',\n marks=pytest.mark.xfail(\n condition=__windows__, reason='x-python is not detected on windows CI'\n ),\n ),\n ('http://google.com/index.html', 'text/html'),\n ('https://google.com/index.html', 'text/html'),\n ],\n)\ndef test_convert_uri_to_text(uri, mimetype):\n doc = Document(uri=uri, mime_type=mimetype)\n doc.convert_uri_to_text()\n if mimetype == 'text/html':\n assert '<!doctype html>' in doc.text\n elif mimetype == 'text/x-python':\n text_from_file = open(__file__).read()\n assert doc.text == text_from_file\n\n\ndef test_convert_text_to_uri_and_back():\n text_from_file = open(__file__).read()\n doc = Document(content=text_from_file, mime_type='text/x-python')\n assert doc.text\n assert doc.mime_type == 'text/x-python'\n doc.convert_text_to_uri()\n doc.convert_uri_to_text()\n assert doc.mime_type == 'text/plain'\n assert doc.text == text_from_file\n\n\ndef test_convert_content_to_uri():\n d = Document(content=np.random.random([10, 10]))\n with pytest.raises(NotImplementedError):\n d.convert_content_to_uri()\n\n\[email protected](\n 'uri, mimetype',\n [\n (__file__, 'text/x-python'),\n ('http://google.com/index.html', 'text/html'),\n ('https://google.com/index.html', 'text/html'),\n ],\n)\ndef test_convert_uri_to_data_uri(uri, mimetype):\n doc = Document(uri=uri, mime_type=mimetype)\n doc.convert_uri_to_datauri()\n assert doc.uri.startswith(f'data:{mimetype}')\n assert doc.mime_type == mimetype\n" ]
[ [ "numpy.random.RandomState", "numpy.random.random", "numpy.random.randint" ] ]
evgeniya-egupova/mmsegmentation
[ "3857f19321ad6af41c8a6af364898ee050225f4c" ]
[ "mmseg/models/scalar_schedulers/step.py" ]
[ "import numpy as np\n\nfrom ..builder import SCALAR_SCHEDULERS\nfrom .base import BaseScalarScheduler\n\n\n@SCALAR_SCHEDULERS.register_module()\nclass StepScalarScheduler(BaseScalarScheduler):\n def __init__(self, scales, num_iters, by_epoch=False):\n super(StepScalarScheduler, self).__init__()\n\n self.by_epoch = by_epoch\n\n assert len(scales) == len(num_iters) + 1\n assert len(scales) > 0\n\n self._scales = list(scales)\n self._iter_ranges = list(num_iters) + [np.iinfo(np.int32).max]\n\n def _get_value(self, step, epoch_size):\n if step is None:\n return float(self._scales[-1])\n\n out_scale_idx = 0\n for iter_range in self._iter_ranges:\n if self.by_epoch:\n iter_threshold = epoch_size * iter_range\n else:\n iter_threshold = iter_range\n\n if step < iter_threshold:\n break\n\n out_scale_idx += 1\n\n return float(self._scales[out_scale_idx])\n" ]
[ [ "numpy.iinfo" ] ]
Raeyi/multipooling-AdaPECT
[ "9632b98ff1612344de798321298f6488f1c303b0" ]
[ "ArbitraryCS_main.py" ]
[ "# coding:utf8\n\nimport torch as t\nimport torchvision as tv\nimport torchnet as tnt\n\nfrom torch.utils import data\nfrom transformer_net import TransformerNet\nimport utils\nfrom PackedVGG import Vgg16\nfrom torch.nn import functional as F\nimport tqdm\nimport os\nimport ipdb\n# from WCT2_train import WCT2\n# import model\n\nfrom LapSobGaus_train import Lap_Sob_Gaus\nimport net\n\nimport Ovodus_Laplace_model\nimport utils_\nfrom WCT2_train import train_transform\nfrom tensorboardX import SummaryWriter\n\nfrom pathlib import Path\n\nfrom torchvision.utils import save_image\n\nmean = [0.485, 0.456, 0.406]\nstd = [0.229, 0.224, 0.225]\n\nIMAGENET_MEAN = [0.485, 0.456, 0.406]\nIMAGENET_STD = [0.229, 0.224, 0.225]\n\n\nclass Config(object):\n # General Args\n use_gpu = True\n model_path = None # pretrain model path (for resume training or test)\n\n # Train Args\n image_size = 448 # image crop_size for training\n batch_size = 2\n data_root = r'F:\\DataSets\\train2017' # 'data/' dataset root:$data_root/coco/a.jpg D:\\CoCo_Dataset\\train2017\n num_workers = 4 # dataloader num of workers\n\n lr = 1e-4\n epoches = 20 # total epoch to train\n content_weight = 1e10 # weight of content_loss\n style_weight = 1e2 # weight of style_loss\n\n style_path = 'style_input' # style image path\n env = 'onlyencodercontent_58_Laps_test_nores_noDynamic_10_2' # visdom env\n plot_every = 1 # visualize in visdom for every 10 batch\n\n debug_file = '/tmp/debugnn' # touch $debug_fie to interrupt and enter ipdb\n\n # Test Args\n content_path = 'input.png' # input file to do style transfer [for test]\n result_path = 'output.png' # style transfer result [for test]\n\n option_unpool = 'sum'\n cpu = False\n transfer_at_encoder = True\n transfer_at_decoder = True\n transfer_at_skip = True\n verbose = True\n save_dir = './onlyencodercontent/nores_noDynamic/58_LapSobGaus_experiments_10_2'\n log_dir = './onlyencodercontent/nores_noDynamic/58_LapSobGaus_logs_10_2'\n\n lr_decay = 5e-5\n\n\ndef adjust_learning_rate(lr ,optimizer, iteration_count, lr_decay):\n \"\"\"Imitating the original implementation\"\"\"\n lr = lr / (1.0 + lr_decay * iteration_count)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\ndef train(**kwargs):\n opt = Config()\n for k_, v_ in kwargs.items():\n setattr(opt, k_, v_)\n\n device = 'cpu' if opt.cpu or not t.cuda.is_available() else 'cuda:0'\n device = t.device(device)\n # device=t.device('cuda') if opt.use_gpu else t.device('cpu')\n vis = utils_.Visualizer(opt.env)\n\n save_dir = Path(opt.save_dir)\n save_dir.mkdir(exist_ok=True, parents=True)\n log_dir = Path(opt.log_dir)\n log_dir.mkdir(exist_ok=True, parents=True)\n writer = SummaryWriter(log_dir=str(log_dir))\n # Data loading\n transfroms = tv.transforms.Compose([\n tv.transforms.Resize(opt.image_size),\n tv.transforms.CenterCrop(opt.image_size),\n tv.transforms.ToTensor(),\n #tv.transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),\n #tv.transforms.Lambda(lambda x: x*255)\n ])\n dataset = tv.datasets.ImageFolder(opt.data_root, transfroms)\n dataloader = data.DataLoader(dataset, opt.batch_size)\n\n # style transformer network\n # transformer = TransformerNet()\n print('come!')\n # visualizer = Visualizer(config) # create a visualizer that display/save images and plots\n # device = 'cpu' if opt.cpu or not t.cuda.is_available() else 'cuda:0'\n # device = t.device(device)\n\n transfer_at = set()\n if opt.transfer_at_encoder:\n transfer_at.add('encoder')\n if opt.transfer_at_decoder:\n transfer_at.add('decoder')\n if 
opt.transfer_at_skip:\n transfer_at.add('skip')\n # save_dir = Path(config.save_dir)\n # save_dir.mkdir(exist_ok=True, parents=True)\n # log_dir = Path(config.log_dir)\n # log_dir.mkdir(exist_ok=True, parents=True)\n # writer = SummaryWriter(log_dir=str(log_dir))\n # vgg = net.vgg\n wct2 = Lap_Sob_Gaus(transfer_at=transfer_at, option_unpool=opt.option_unpool, device=device,\n verbose=False)\n\n encoder = Ovodus_Laplace_model.Lap_Sob_GausEncoder(opt.option_unpool).to(device)\n decoder = Ovodus_Laplace_model.Lap_Sob_GausDecoder(opt.option_unpool).to(device)\n # vgg.load_state_dict(torch.load(config.vgg))\n # vgg = nn.Sequential(*list(vgg.children())[:31])\n laps = Lap_Sob_Gaus(transfer_at=transfer_at, option_unpool='sum', device=device)\n network = net.Net(encoder, decoder)\n network.train()\n network.to(device)\n transformer = network\n if opt.model_path:\n transformer.load_state_dict(t.load(opt.model_path, map_location=lambda _s, _: _s))\n transformer.to(device)\n\n # Vgg16 for Perceptual Loss\n # vgg = Vgg16().eval()\n # vgg.to(device)\n # for param in vgg.parameters():\n # param.requires_grad = False\n\n # Optimizer\n # optimizer = t.optim.Adam(transformer.parameters(), opt.lr)\n enoptimizer = t.optim.Adam(network.encoder.parameters(), lr=opt.lr, betas=(0.9, 0.999))\n deoptimizer = t.optim.Adam(network.decoder.parameters(), lr=opt.lr, betas=(0.9, 0.999))\n\n # # Get style image\n # style_dataloader = utils_.get_style_data(opt.style_path, opt.batch_size)\n # #style_list = list(enumerate(style_dataloader))\n # for ii, (style, _) in tqdm.tqdm(enumerate(style_dataloader)):\n # #a = style\n # style = style.expand(opt.batch_size, 3, 256, 256)\n # vis.img('style', (style.data[0] * 0.225 + 0.45).clamp(min=0, max=1))\n # #style_list.append(style)\n #\n # style = style.to(device)\n # #\n # # #\n # # # # gram matrix for style image\n # with t.no_grad():\n # features_style = vgg(style)\n # gram_style = [utils_.gram_matrix(y) for y in features_style]\n\n # Loss meter\n style_meter = tnt.meter.AverageValueMeter()\n content_meter = tnt.meter.AverageValueMeter()\n\n for epoch in range(opt.epoches):\n # for jj, (style, _) in tqdm.tqdm(enumerate(style_dataloader)):\n # a = style\n # vis.img('style', (style.data[0] * 0.225 + 0.45).clamp(min=0, max=1))\n # style = style.to(device)\n\n #\n\n\n content_meter.reset()\n style_meter.reset()\n for ii, (x, _) in tqdm.tqdm(enumerate(dataloader)):\n if epoch == 0:\n adjust_learning_rate(opt.lr, enoptimizer, iteration_count=ii, lr_decay=opt.lr_decay)\n adjust_learning_rate(opt.lr, deoptimizer, iteration_count=ii, lr_decay=opt.lr_decay)\n print(opt.lr)\n # style = style_list[ii][1][0]\n # # style = style_list[ii]\n # style = style.to(device)\n # # # gram matrix for style image\n # with t.no_grad():\n # features_style = vgg(style)\n # gram_style = [utils_.gram_matrix(y) for y in features_style]\n style_dataloader = utils_.get_style_data(opt.style_path, opt.batch_size)\n # style_list = list(enumerate(style_dataloader))\n for jj, (style, _) in tqdm.tqdm(enumerate(style_dataloader)):\n # a = style\n style = style.expand(opt.batch_size, 3, 256, 256)\n #vis.img('style', (style.data[0] * 0.225 + 0.45).clamp(min=0, max=1))\n vis.img('style', (style.data[0]).clamp(min=0, max=1))\n # style_list.append(style)\n\n style = style.to(device)\n #\n # #\n # # # gram matrix for style image\n # with t.no_grad():\n # features_style = vgg(style)\n # gram_style = [utils_.gram_matrix(y) for y in features_style]\n # Train\n enoptimizer.zero_grad()\n deoptimizer.zero_grad()\n x = 
x.to(device)\n #y = network(x, style, Laps=laps)\n # if (ii + 1) % 10 == 0:\n # print(y)\n # y = y.clamp_(0, 1) * 255\n #y = utils_.normalize_batch(y)\n #x = utils_.normalize_batch(x)\n\n # features_y = vgg(y)\n # features_x = vgg(x)\n\n # # content loss\n # content_loss = opt.content_weight * F.mse_loss(features_y.relu2_2, features_x.relu2_2)\n #\n # # style loss\n # style_loss = 0\n #\n # for ft_y, gm_s in zip(features_y, gram_style):\n # gram_y = utils_.gram_matrix(ft_y)\n # style_loss += F.mse_loss(gram_y, gm_s.expand_as(gram_y))\n y, content_feats, content_loss, style_loss = network(x, style, Laps=laps)\n content_loss *= opt.content_weight\n style_loss *= opt.style_weight\n total_loss = content_loss + style_loss\n total_loss.backward()\n enoptimizer.step()\n deoptimizer.step()\n\n # Loss smooth for visualization\n content_meter.add(content_loss.item())\n style_meter.add(style_loss.item())\n\n if ii % 50 == 1:\n print('\\n')\n print('iters:', ii, 'total_loss:', total_loss, 'loss_c:', content_loss, 'loss_s: ', style_loss)\n if (ii + 1) % opt.plot_every == 0:\n if os.path.exists(opt.debug_file):\n ipdb.set_trace()\n\n # visualization\n vis.plot('content_loss', content_meter.value()[0])\n vis.plot('style_loss', style_meter.value()[0])\n # denorm input/output, since we have applied (utils.normalize_batch)\n vis.img('output1', (y.data.cpu()[0]).clamp(min=0, max=1))\n vis.img('input1', (x.data.cpu()[0]).clamp(min=0, max=1))\n vis.img('decoder_1', (content_feats['decoder'][0][0].data.cpu()[0]).clamp(min=0, max=1))\n vis.img('decoder_2', (content_feats['decoder'][1][0].data.cpu()[0]).clamp(min=0, max=1))\n vis.img('decoder_3', (content_feats['decoder'][2][0].data.cpu()[0]).clamp(min=0, max=1))\n vis.img('decoder_4', (content_feats['decoder'][3][0].data.cpu()[0]).clamp(min=0, max=1))\n #save_image(content_feat.clamp_(0, 1), fname_output + \"decoder{:d}\".format(level), padding=0)\n\n if (ii) % 1000 == 0:\n if not os.path.exists(save_dir /'epoch_{:d}'.format(epoch)):\n os.makedirs(save_dir /'epoch_{:d}'.format(epoch))\n de_state_dict = network.decoder.state_dict()\n en_state_dict = network.encoder.state_dict()\n for key in de_state_dict.keys():\n de_state_dict[key] = de_state_dict[key].to(t.device('cpu'))\n t.save(de_state_dict, save_dir /'epoch_{:d}'.format(epoch)/\n 'decoder_iter_{:d}.pth.tar'.format(ii + 1))\n for key in en_state_dict.keys():\n en_state_dict[key] = en_state_dict[key].to(t.device('cpu'))\n t.save(en_state_dict, save_dir /'epoch_{:d}'.format(epoch)/\n 'encoder_iter_{:d}.pth.tar'.format(ii + 1))\n\n de_state_dict = network.decoder.state_dict()\n en_state_dict = network.encoder.state_dict()\n for key in de_state_dict.keys():\n de_state_dict[key] = de_state_dict[key].to(t.device('cpu'))\n t.save(de_state_dict, save_dir /\n 'epoch_decoder_iter_{:d}.pth.tar'.format(epoch + 1))\n for key in en_state_dict.keys():\n en_state_dict[key] = en_state_dict[key].to(t.device('cpu'))\n t.save(en_state_dict, save_dir /\n 'epoch_encoder_iter_{:d}.pth.tar'.format(epoch + 1))\n # save checkpoints\n vis.save([opt.env])\n t.save(network.state_dict(), 'checkpoints/epoch_%s_style.pth' % epoch)\n writer.close()\n\n\[email protected]_grad()\ndef stylize(**kwargs):\n \"\"\"\n perform style transfer\n \"\"\"\n opt = Config()\n\n for k_, v_ in kwargs.items():\n setattr(opt, k_, v_)\n device = t.device('cuda') if opt.use_gpu else t.device('cpu')\n\n # input image preprocess\n content_image = tv.datasets.folder.default_loader(opt.content_path)\n content_transform = tv.transforms.Compose([\n 
tv.transforms.ToTensor(),\n        tv.transforms.Lambda(lambda x: x.mul(255))\n    ])\n    content_image = content_transform(content_image)\n    content_image = content_image.unsqueeze(0).to(device).detach()\n\n    # model setup\n    style_model = TransformerNet().eval()\n    style_model.load_state_dict(t.load(opt.model_path, map_location=lambda _s, _: _s))\n    style_model.to(device)\n\n    # style transfer and save output\n    output = style_model(content_image)\n    output_data = output.cpu().data[0]\n    tv.utils.save_image((output_data / 255).clamp(min=0, max=1), opt.result_path)\n\n\nif __name__ == '__main__':\n    import fire\n\n    # expose train()/stylize() as CLI commands; fire.Fire() dispatches the requested one\n    fire.Fire()\n" ]
[ [ "torch.load", "torch.utils.data.DataLoader", "torch.no_grad", "torch.cuda.is_available", "torch.device" ] ]
ypxie/keras-1
[ "f1ed8d63faa26ce6180faa685839aa32217211c6" ]
[ "keras/backend/common.py" ]
[ "import numpy as np\n\nfrom collections import defaultdict\n\n# the type of float to use throughout the session.\n_FLOATX = 'float32'\n_EPSILON = 10e-8\n_UID_PREFIXES = defaultdict(int)\n_IMAGE_DIM_ORDERING = 'tf'\n_LEGACY_WEIGHT_ORDERING = False\n\n\ndef epsilon():\n '''Returns the value of the fuzz\n factor used in numeric expressions.\n\n # Returns\n A float.\n\n # Example\n ```python\n >>> keras.backend.epsilon()\n 1e-08\n ```\n '''\n return _EPSILON\n\n\ndef set_epsilon(e):\n '''Sets the value of the fuzz\n factor used in numeric expressions.\n\n # Arguments\n e: float. New value of epsilon.\n\n # Example\n ```python\n >>> from keras import backend as K\n >>> K.epsilon()\n 1e-08\n >>> K.set_epsilon(1e-05)\n >>> K.epsilon()\n 1e-05\n ```\n '''\n global _EPSILON\n _EPSILON = e\n\n\ndef floatx():\n '''Returns the default float type, as a string\n (e.g. 'float16', 'float32', 'float64').\n\n # Returns\n String, the current default float type.\n\n # Example\n ```python\n >>> keras.backend.floatx()\n 'float32'\n ```\n '''\n return _FLOATX\n\n\ndef set_floatx(floatx):\n '''Sets the default float type.\n\n # Arguments\n String: 'float16', 'float32', or 'float64'.\n\n # Example\n ```python\n >>> from keras import backend as K\n >>> K.floatx()\n 'float32'\n >>> K.set_floatx('float16')\n >>> K.floatx()\n 'float16'\n ```\n '''\n global _FLOATX\n if floatx not in {'float16', 'float32', 'float64'}:\n raise ValueError('Unknown floatx type: ' + str(floatx))\n _FLOATX = str(floatx)\n\n\ndef cast_to_floatx(x):\n '''Cast a Numpy array to the default Keras float type.\n\n # Arguments\n x: Numpy array.\n\n # Returns\n The same Numpy array, cast to its new type.\n\n # Example\n ```python\n >>> from keras import backend as K\n >>> K.floatx()\n 'float32'\n >>> arr = numpy.array([1.0, 2.0], dtype='float64')\n >>> arr.dtype\n dtype('float64')\n >>> new_arr = K.cast_to_floatx(arr)\n >>> new_arr\n array([ 1., 2.], dtype=float32)\n >>> new_arr.dtype\n dtype('float32')\n ```\n '''\n return np.asarray(x, dtype=_FLOATX)\n\n\ndef image_dim_ordering():\n '''Returns the default image dimension ordering\n convention ('th' or 'tf').\n\n # Returns\n A string, either `'th'` or `'tf'`\n\n # Example\n ```python\n >>> keras.backend.image_dim_ordering()\n 'th'\n ```\n '''\n return _IMAGE_DIM_ORDERING\n\n\ndef set_image_dim_ordering(dim_ordering):\n '''Sets the value of the image dimension\n ordering convention ('th' or 'tf').\n\n # Arguments\n dim_ordering: string. 
`'th'` or `'tf'`.\n\n # Example\n ```python\n >>> from keras import backend as K\n >>> K.image_dim_ordering()\n 'th'\n >>> K.set_image_dim_ordering('tf')\n >>> K.image_dim_ordering()\n 'tf'\n ```\n '''\n global _IMAGE_DIM_ORDERING\n if dim_ordering not in {'tf', 'th'}:\n raise ValueError('Unknown dim_ordering:', dim_ordering)\n _IMAGE_DIM_ORDERING = str(dim_ordering)\n\n\ndef get_uid(prefix=''):\n '''Provides a unique UID given a string prefix.\n\n # Arguments\n prefix: string.\n\n # Returns\n An integer.\n\n # Example\n ```\n >>> keras.backend.get_uid('dense')\n >>> 1\n >>> keras.backend.get_uid('dense')\n >>> 2\n ```\n\n '''\n _UID_PREFIXES[prefix] += 1\n return _UID_PREFIXES[prefix]\n\n\ndef reset_uids():\n global _UID_PREFIXES\n _UID_PREFIXES = defaultdict(int)\n\n\ndef is_keras_tensor(x):\n '''Returns whether `x` is a Keras tensor.\n\n # Arguments\n x: a potential tensor.\n\n # Returns\n A boolean: whether the argument is a Keras tensor.\n\n # Examples\n ```python\n >>> from keras import backend as K\n >>> np_var = numpy.array([1, 2])\n >>> K.is_keras_tensor(np_var)\n False\n >>> keras_var = K.variable(np_var)\n >>> K.is_keras_tensor(keras_var) # A variable is not a Tensor.\n False\n >>> keras_placeholder = K.placeholder(shape=(2, 4, 5))\n >>> K.is_keras_tensor(keras_placeholder) # A placeholder is a Tensor.\n True\n ```\n '''\n if hasattr(x, '_keras_shape'):\n return True\n else:\n return False\n\n\ndef set_legacy_weight_ordering(value):\n global _LEGACY_WEIGHT_ORDERING\n assert value in {True, False}\n _LEGACY_WEIGHT_ORDERING = value\n\n\ndef legacy_weight_ordering():\n return _LEGACY_WEIGHT_ORDERING\n" ]
[ [ "numpy.asarray" ] ]
hrichstein/phys_50733
[ "a333bfa4dd5b0ca464bd861336bc2f32d8e72a2b" ]
[ "rh_project/rk4_two_body.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n# from scipy.constants import G\n\n# Setting plotting parameters\nfrom matplotlib import rc,rcParams\nrc('text', usetex=True)\nrc('axes', linewidth=2)\nrc('font', weight='bold')\nrc('font', **{'family': 'serif', 'serif':['Computer Modern']})\n\ndef find_vel_init(M1, M2, A):\n\tperiod = np.sqrt(4 * np.pi**2 * A**3 / G / (M1 + M2)) # period in days\n\n\tv = 2 * np.pi * A / period # AU/day\n\n\treturn v\n\ndef accel(Mass, pos1, pos2):\n\t\"\"\"\n\tMass: float-like?\n\t\tsum of the mass of both stars\n\tpos1: array-like\n\t\t[x,y] position of first star\n\tpos2: array-like\n\t\t[x,y] position of second star\n\t\"\"\"\n\n\tr_sep = np.sqrt((pos1[0] - pos1[1])**2 + (pos2[0] - pos2[1])**2)\n\n\ta_x = -G * Mass / r_sep**3 * abs(pos1[0] - pos2[0])\n\ta_y = -G * Mass / r_sep**3 * abs(pos1[1] - pos2[1])\n\n\taccel_arr = np.array([a_x, a_y])\n\n\treturn accel_arr\n\n# Gm(x1-x2)/r^3; r is the distance separating everything (both the stars)\n\n# initial velocity only in one direction\n\ndef rk4(r1, r2, v1, v2, h):\n\t\"\"\"\n\tr: array-like\n\t\thas x,y components\n\tv: array-like\n\t\thas vx, vy components\n\th: float-like\n\t\ttime step\n\t\"\"\"\n\tx0_s1 = r1[0]\n\ty0_s1 = r1[1]\n\n\tx0_s2 = r2[0]\n\ty0_s2 = r2[1]\n\n\td_x0_s1 = v1[0]\n\td_y0_s1 = v1[1]\n\n\td_x0_s2 = v2[0]\n\td_y0_s2 = v2[1]\n\n\td_v0 = accel(Mass, r1, r2) # Same for both stars (velocity is what differs)\n\n\t# First set of RK4\n\tx1_s1 = x0_s1 + 0.5*(d_x0_s1)*h\n\ty1_s1 = y0_s1 + 0.5*(d_y0_s1)*h\n\n\tx1_s2 = x0_s2 + 0.5*(d_x0_s2)*h\n\ty1_s2 = y0_s2 + 0.5*(d_y0_s2)*h\n\n\td_x1_s1 = d_x0_s1 + 0.5*(d_v0[0])*h\n\td_y1_s1 = d_y0_s1 + 0.5*(d_v0[1])*h\n\n\td_x1_s2 = d_x0_s2 + 0.5*(d_v0[0])*h\n\td_y1_s2 = d_y0_s2 + 0.5*(d_v0[1])*h\n\n\tr1_new = np.array([x1_s1,y1_s1])\n\tr2_new = np.array([x1_s2,y1_s2])\n\n\td_v1 = accel(Mass, r1_new, r2_new)\n\n\t# Second\n\n\tx2_s1 = x0_s1 + 0.5*(d_x1_s1)*h\n\ty2_s1 = y0_s1 + 0.5*(d_y1_s1)*h\n\n\tx2_s2 = x0_s2 + 0.5*(d_x1_s2)*h\n\ty2_s2 = y0_s2 + 0.5*(d_y1_s2)*h\n\n\td_x2_s1 = d_x0_s1 + 0.5*(d_v1[0])*h\n\td_y2_s1 = d_y0_s1 + 0.5*(d_v1[1])*h\n\n\td_x2_s2 = d_x0_s2 + 0.5*(d_v1[0])*h\n\td_y2_s2 = d_y0_s2 + 0.5*(d_v1[1])*h\n\n\tr1_new = np.array([x2_s1,y2_s1])\n\tr2_new = np.array([x2_s2,y2_s2])\n\n\td_v2 = accel(Mass, r1_new, r2_new)\n\n\t# Third\n\n\tx3_s1 = x0_s1 + (d_x2_s1)*h\n\ty3_s1 = y0_s1 + (d_y2_s1)*h\n\n\tx3_s2 = x0_s2 + (d_x2_s2)*h\n\ty3_s2 = y0_s2 + (d_y2_s2)*h\n\n\td_x3_s1 = d_x0_s1 + (d_v2[0])*h\n\td_y3_s1 = d_y0_s1 + (d_v2[1])*h\n\n\td_x3_s2 = d_x0_s2 + (d_v2[0])*h\n\td_y3_s2 = d_y0_s2 + (d_v2[1])*h\n\n\tr1_new = np.array([x3_s1,y3_s1])\n\tr2_new = np.array([x3_s2,y3_s2])\n\n\td_v3 = accel(1, r1_new, r2_new)\n\n\t# Combining\n\n\txf_s1 = x0_s1 + h*(d_x0_s1 + 2*d_x1_s1 + 2*d_x2_s1 + d_x3_s1)/6\n\tyf_s1 = y0_s1 + h*(d_y0_s1 + 2*d_y1_s1 + 2*d_y2_s1 + d_y3_s1)/6\n\n\trf_s1 = np.array([xf_s1,yf_s1])\n\n\txf_s2 = x0_s2 + h*(d_x0_s2 + 2*d_x1_s2 + 2*d_x2_s2 + d_x3_s2)/6\n\tyf_s2 = y0_s2 + h*(d_y0_s2 + 2*d_y1_s2 + 2*d_y2_s2 + d_y3_s2)/6\n\n\trf_s2 = np.array([xf_s2,yf_s2])\n\n\td_xf_s1 = d_x0_s1 + h*(d_v0[0] + 2*d_v1[0] + 2*d_v2[0] + d_v3[0])/6\n\td_yf_s1 = d_y0_s1 + h*(d_v0[1] + 2*d_v1[1] + 2*d_v2[1] + d_v3[1])/6\n\n\tvf_s1 = np.array([d_xf_s1,d_yf_s1])\n\n\td_xf_s2 = d_x0_s2 + h*(d_v0[0] + 2*d_v1[0] + 2*d_v2[0] + d_v3[0])/6\n\td_yf_s2 = d_y0_s2 + h*(d_v0[1] + 2*d_v1[1] + 2*d_v2[1] + d_v3[1])/6\n\n\tvf_s2 = np.array([d_xf_s2,d_yf_s2])\n\n\tresults_arr = np.array([rf_s1, rf_s2, vf_s1, vf_s2])\n\n\treturn results_arr\n\nG = 4 * np.pi**2 # AU^3 yr^-2 
M_sun^-1\n\nMass = 2 # Solar masses\nA = 0.2 # AU (sep dist)\n\na = 0\nb = 0.06 # years\nN = 100000\nh = (b-a) / N\n\ntpoints = np.arange(a,b,h)\t\n\n# Setting up arrays\n\nxpts_s1 = [[] for xx in range(len(tpoints))]\nypts_s1 = [[] for xx in range(len(tpoints))]\n\nxpts_s2 = [[] for xx in range(len(tpoints))]\nypts_s2 = [[] for xx in range(len(tpoints))]\n\n# Initial conditions\nr0_s1 = np.array([0,0.1])\nr0_s2 = np.array([0,-0.1])\n\nvx0 = find_vel_init(1, 1, A)\nvy0 = 0\n\nv0_s1 = np.array([vx0,0])\nv0_s2 = np.array([-vx0,0])\n\nparam_arr = np.array([r0_s1, r0_s2, v0_s1, v0_s2])\n\nfor tt in range(len(tpoints)):\n\txpts_s1[tt] = param_arr[0][0]\n\typts_s1[tt] = param_arr[0][1]\n\n\txpts_s2[tt] = param_arr[1][0]\n\typts_s2[tt] = param_arr[1][1]\n\n\tparam_arr = rk4(param_arr[0], param_arr[1], param_arr[2], param_arr[3], h)\n\nplt.plot(xpts_s1, ypts_s1)\nplt.plot(xpts_s2, ypts_s2)\n\nplt.show()\n" ]
[ [ "numpy.sqrt", "numpy.arange", "matplotlib.rc", "matplotlib.pyplot.plot", "numpy.array", "matplotlib.pyplot.show" ] ]
xsun28/CloudMerge
[ "c4211bac841b103c77d6f9c4af633102742298ac" ]
[ "cloudmerge-hpc/mpi_merge.py" ]
[ "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 27 16:54:42 2017\n\n@author: Xiaobo\n\"\"\"\nimport numpy as np\nfrom mpi4py import MPI\nimport commands\nimport os\nimport sys\npath = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(path)\n#sys.path.append('/Users/Xiaobo/git/CloudMerge/CloudMerge/cloudmerge-hpc')\n#sys.path.append('/home/ubuntu/cloudmerge/cloudmerge-hpc/')\nimport multiway_merge as mm\nimport argparse\n###############################################################################\ndef get_source(rank,size,rounds):\n divisor = np.power(2,rounds)\n new_rank = int(rank/divisor)\n new_size = int(size/divisor)\n if (new_rank%2 != 0) or (new_rank+1>=new_size):\n return []\n elif (new_rank+2+1>=new_size) and (new_rank+2<new_size):\n return [divisor*(new_rank+1),divisor*(new_rank+2)]\n else:\n return [divisor*(new_rank+1),]\n\n#------------------------------------------------------------------------------ \ndef get_dest(rank,size,rounds):\n SELF = -1\n divisor = np.power(2,rounds)\n new_rank = int(rank/divisor)\n new_size = int(size/divisor)\n if new_rank % 2 !=0:\n dest = divisor*(new_rank-1)\n return dest\n elif (new_rank + 1) >= new_size:\n dest = divisor*(new_rank-2)\n return dest if dest >=0 else 0 \n else:\n return SELF\n#------------------------------------------------------------------------------\ndef splits(filenum,size):\n assigned = 0\n sendcounts = np.zeros(size) \n disp = np.zeros(size)\n for i in range(size): \n nxt_sz = int((filenum-assigned)/(size-i))\n disp[i] = assigned\n sendcounts[i] = nxt_sz\n assigned = assigned + nxt_sz\n return tuple(sendcounts),tuple(disp)\n\n#------------------------------------------------------------------------------\ndef get_output_name(localname,rcvname):\n if rcvname is None:\n return localname\n start = localname.split('_')[0]\n end = rcvname[-1].split('_')[-1]\n return start+'_'+end\n\n\n###############################################################################\n\n\n \nparser = argparse.ArgumentParser(description='cloudmerge-hpc')\nparser.add_argument('-i',required=True,help='input file directory path',dest='input',metavar='/home/ubuntu/cloudmerge/input/')\nparser.add_argument('-o',required=True,help='output file directory path',dest='output',metavar='/home/ubuntu/cloudmerge/output/')\nparser.add_argument('-n',required=True,help='input file number',dest='filenum',metavar='10',type=int)\nparser.add_argument('-l',required=False,default='1',help='lower boundary of chrmosomes',dest='lower_chr',metavar='1')\nparser.add_argument('-u',required=False,default='M',help='upper boundary of chromosomes',dest='upper_chr',metavar='M')\nparser.add_argument('-g',required=False,default=9,help='genotype column number',dest='gtype_col',metavar='9',type=int)\nparser.add_argument('-f',required=False,default='PASS',help='filter value',dest='filter',metavar='PASS')\nargs = parser.parse_args()\n#args = parser.parse_args('-i abc -o def -n 10 -l 1 -u 26 -g 9 -f PASS'.split())\n#input_path = '/home/ubuntu/cloudmerge/input/'\n#output_path = '/home/ubuntu/cloudmerge/output/'\n#input_path = '/Users/Xiaobo/Desktop/input/'\n#output_path = '/Users/Xiaobo/Desktop/output/'\ninput_path = args.input\noutput_path = args.output\nfilenum = args.filenum\nlower_chr = args.lower_chr\nupper_chr = args.upper_chr\nqfilter = args.filter\ngenotype_col = args.gtype_col\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nsize = comm.Get_size()\nhost = commands.getoutput(\"hostname\")\nrounds = 0\nsendcounts,disp = 
splits(filenum,size)\nif rank == 0:\n sendbuff = np.linspace(1,filenum,filenum)\n\nelse: \n sendbuff = None\n\nrcvbuff = np.zeros(int(sendcounts[rank])) \n \ncomm.Scatterv([sendbuff,sendcounts,disp,MPI.DOUBLE],rcvbuff,root=0)\n\n#local_input_files = map(lambda x: input_path+str(int(x))+'.bz2',rcvbuff)\n#for file in local_input_files:\n# print('unzipping files %s in rank %d' % (str(local_input_files),rank))\n# os.system('bunzip2 '+file)\n\nlocal_input_files = map(lambda x: input_path+str(int(x))+'.bz2',rcvbuff)\n#local_merged_files = \"_\".join(map(lambda x: str(int(x)),rcvbuff))\nlocal_merged_files = str(int(rcvbuff[0]))+'_'+str(int(rcvbuff[-1]))\nmerger = mm.multiway_merger(local_input_files, output_path+local_merged_files,lower_chr,upper_chr,qfilter,genotype_col,merge_type='vcf')\nmerger.start()\nprint('merged_files %s'%local_merged_files)\n\nwhile True:\n src = get_source(rank,size,rounds)\n# if len(src) == 0: #only when no source, we need a destination\n dest = get_dest(rank,size,rounds)\n rounds = rounds+1 \n if len(src) == 0:\n if rank > 0: \n comm.send(local_merged_files,dest=dest,tag=0)\n print('i am rank %d, host is %s, sent merged files is %s, source is %s, dest is %d' %(rank,host,local_merged_files,str(src),dest))\n break ## send the filename to dest process and quit\n elif len(src) == 1:\n local_files = [output_path+local_merged_files]\n rcv_merged_file = comm.recv(source=src[0],tag=0)\n local_files.extend([output_path+rcv_merged_file])\n# local_merged_files = '_'.join([local_merged_files,rcv_merged_file])\n local_merged_files = get_output_name(local_merged_files,[rcv_merged_file])\n print('i am rank %d, host is %s, local merged file is %s, src is %s, dest is %d' %(rank,host,local_merged_files,str(src),dest))\n else:\n local_files = [output_path+local_merged_files]\n rcv_merged_files = []\n for i,s in enumerate(src):\n print('i am rank %d, host is %s, src is %s, dest is %d' %(rank,host,s,dest))\n rcv_file = comm.recv(source=s,tag=0)\n local_files.extend([output_path+rcv_file])\n rcv_merged_files.extend([rcv_file])\n# local_merged_files = '_'.join([local_merged_files]+rcv_merged_files)\n local_merged_files = get_output_name(local_merged_files,rcv_merged_files)\n\n \n if rank == 0:\n src = get_source(rank,size,rounds)\n if len(src) == 0: #### the last merging step\n merger = mm.multiway_merger(local_files,output_path+local_merged_files,lower_chr,upper_chr,qfilter,genotype_col,False,merge_type='tped')\n merger.start()\n break;\n merger = mm.multiway_merger(local_files,output_path+local_merged_files,lower_chr,upper_chr,qfilter,genotype_col,merge_type='tped')\n merger.start()\n\n\n\n\n\n################################################################################ \n# if rank >0:\n# comm.send(local_merged_files,dest=dest,tag=0)\n# print('i am rank %d, host is %s, send local merged files is %s, source is %s, dest is %d' %(rank,host,local_merged_files,str(src),dest))\n \n \n \n#print('rank is %d, host is %s, data is %s' %(rank,host,str(rcvbuff))) \n# create numpy arrays to reduce\n\n#src = (np.arange(8) + rank*8).reshape(4,2)\n#dst = np.zeros_like(src)\n#\n#def myadd(xmem, ymem, dt):\n# x = np.frombuffer(xmem, dtype=src.dtype)\n# y = np.frombuffer(ymem, dtype=src.dtype)\n#\n# z = x + y\n#\n# print(\"Rank %d on host %s reducing %s (%s) and %s (%s), yielding %s\" % (rank, host, x, type(x), y, type(y), z))\n#\n# y[:] = z\n#\n#op = MPI.Op.Create(myadd)\n#\n#MPI.COMM_WORLD.Reduce(src, dst, op)\n#\n#if MPI.COMM_WORLD.rank == 0:\n# print(\"ANSWER: %s\" % dst)" ]
[ [ "numpy.zeros", "numpy.linspace", "numpy.power" ] ]