repo_name (string, 6-130 chars) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list)
---|---|---|---|---|---|
SimonsRoad/UnDeepVO | [
"956598958e0dba4729a8af70ee7a4cdcc10f09ec",
"956598958e0dba4729a8af70ee7a4cdcc10f09ec"
] | [
"demo_odometry.py",
"generators_test.py"
] | [
"\n\"\"\"Example of pykitti.odometry usage.\"\"\"\nimport itertools\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\n\nimport pykitti\n\n__author__ = \"Lee Clement\"\n__email__ = \"[email protected]\"\n\n# Change this to the directory where you store KITTI data\nbasedir = './data/dataset'\n\n# Specify the dataset to load\nsequence = '01'\n\n# Load the data. Optionally, specify the frame range to load.\n# Passing imformat='cv2' will convert images to uint8 and BGR for\n# easy use with OpenCV.\n# dataset = pykitti.odometry(basedir, sequence)\ndataset = pykitti.odometry(basedir, sequence, frames=range(0, 20, 5))\n\n# dataset.calib: Calibration data are accessible as a named tuple\n# dataset.timestamps: Timestamps are parsed into a list of timedelta objects\n# dataset.poses: Generator to load ground truth poses T_w_cam0\n# dataset.camN: Generator to load individual images from camera N\n# dataset.gray: Generator to load monochrome stereo pairs (cam0, cam1)\n# dataset.rgb: Generator to load RGB stereo pairs (cam2, cam3)\n# dataset.velo: Generator to load velodyne scans as [x,y,z,reflectance]\n\n# Grab some data\nsecond_pose = next(iter(itertools.islice(dataset.poses, 1, None)))\nfirst_gray = next(iter(dataset.gray))\nfirst_cam1 = next(iter(dataset.cam1))\nfirst_rgb = next(iter(dataset.rgb))\nfirst_cam2 = next(iter(dataset.cam2))\nthird_velo = next(iter(itertools.islice(dataset.velo, 2, None)))\n\n# Display some of the data\nnp.set_printoptions(precision=4, suppress=True)\nprint('\\nSequence: ' + str(dataset.sequence))\nprint('\\nFrame range: ' + str(dataset.frames))\n\n# print('\\nGray stereo pair baseline [m]: ' + str(dataset.calib.b_gray))\nprint('\\nRGB stereo pair baseline [m]: ' + str(dataset.calib.b_rgb))\n\nprint('\\nFirst timestamp: ' + str(dataset.timestamps[0]))\nprint('\\nSecond ground truth pose:\\n' + str(second_pose))\n\nf, ax = plt.subplots(2, 2, figsize=(15, 5))\nax[0, 0].imshow(first_gray[0], cmap='gray')\nax[0, 0].set_title('Left Gray Image (cam0)')\n\nax[0, 1].imshow(first_cam1, cmap='gray')\nax[0, 1].set_title('Right Gray Image (cam1)')\n\nax[1, 0].imshow(first_cam2)\nax[1, 0].set_title('Left RGB Image (cam2)')\n\nax[1, 1].imshow(first_rgb[1])\nax[1, 1].set_title('Right RGB Image (cam3)')\n\nf2 = plt.figure()\nax2 = f2.add_subplot(111, projection='3d')\n# Plot every 100th point so things don't get too bogged down\nvelo_range = range(0, third_velo.shape[0], 10)\nax2.scatter(third_velo[velo_range, 0],\n third_velo[velo_range, 1],\n third_velo[velo_range, 2],\n c=third_velo[velo_range, 3],\n cmap='gray',\n s=0.1)\nax2.axis('equal')\nax2.set_title('Third Velodyne scan (subsampled)')\n\nplt.show()\n\n\n\n",
"from image_loader import get_stereo_image_generators\nimport matplotlib.pyplot as plt\n\nimage_generator = get_stereo_image_generators('data/dataset/sequences/02/', batch_size=1, shuffle=True)\n\nimg = image_generator.__next__()\nplt.imshow(img[0][0, :, :, :])\nplt.show()\n\nimg = image_generator.__next__()\nplt.imshow(img[0][0, :, :, :])\nplt.show()\n\nimg = image_generator.__next__()\nplt.imshow(img[0][0, :, :, :])\nplt.show()\n\n"
] | [
[
"numpy.set_printoptions",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
thouska/SALib | [
"5deeaf316ef58ea0a26295c8ad2ca57cdc739d45",
"fd64136192f00a9e3e65a8c5c05e30d93ed5e750"
] | [
"tests/test_test_functions.py",
"tests/test_regression.py"
] | [
"from nose.tools import assert_almost_equal, assert_equal, raises\n\nfrom numpy.testing import assert_allclose\nimport numpy as np\n\nfrom SALib.test_functions.Sobol_G import evaluate, total_variance, \\\n partial_first_order_variance, \\\n sensitivity_index, \\\n total_sensitivity_index\n\ndef test_Sobol_G():\n '''\n '''\n parameter_values = np.zeros((1, 8))\n actual = evaluate(parameter_values)\n expected = np.array([4.0583])\n assert_allclose(actual, expected, atol=1e-4, rtol=1e-4)\n\n\n@raises(ValueError)\ndef test_Sobol_G_raises_error_if_values_wrong_size():\n \"\"\"\n Tests that a value error is raised if the Sobol G function is called with\n the wrong number of variables\n \"\"\"\n a = [1, 2, 3, 4, 5, 6, 7, 8]\n evaluate(np.array([1, 2, 3, 4, 5, 6, 7]), a)\n\n\n@raises(ValueError)\ndef test_Sobol_G_raises_error_if_values_gt_one():\n \"\"\"\n Tests that a value error is raised if the Sobol G function is called with\n values greater than one\n \"\"\"\n evaluate(np.array([0, 1, .02, 0.23, 1.234, 0.02848848, 0, 0.78]))\n\n\n@raises(ValueError)\ndef test_Sobol_G_raises_error_if_values_lt_zero():\n \"\"\"\n Tests that a value error is raised if the Sobol G function is called with\n values less than zero.\n \"\"\"\n evaluate(np.array([0, -1, -.02, 1, 1, -0.1, -0, -12]))\n\n\n@raises(TypeError)\ndef test_Sobol_G_raises_error_if_values_not_numpy_array():\n \"\"\"\n Tests that a type error is raised if the Sobol G function is called with\n values argument not as a numpy array.\n \"\"\"\n fixture = [list(range(8)), str(12345678)]\n for x in fixture:\n evaluate(x)\n\n\ndef test_total_variance():\n\n a = np.array([78, 12, 0.5, 2, 97, 33])\n actual = total_variance(a)\n expected = 0.19347\n\n assert_allclose(actual, expected, rtol=1e-4)\n\n\ndef test_partial_first_order_variance():\n\n a = np.array([78, 12, 0.5, 2, 97, 33])\n actual = partial_first_order_variance(a)\n expected = (len(a),)\n\n assert_equal(a.shape, expected)\n\n expected = np.array([0.000053, 0.001972, 0.148148, 0.037037, 0.000035, 0.000288])\n\n assert_allclose(actual, expected, atol=1e-4, rtol=1e-4)\n\n\ndef test_sensitivity_index():\n a = np.array([78, 12, 0.5, 2, 97, 33])\n actual = sensitivity_index(a)\n expected = np.array([0.000276, 0.010195, 0.765743,\n 0.191436, 0.000179, 0.001490])\n assert_allclose(actual, expected, atol=1e-2, rtol=1e-6)\n\n\ndef test_total_sensitivity_index():\n a = np.array([78, 12, 0.5, 2, 97, 33])\n\n actual = total_sensitivity_index(a)\n\n expected = np.array([0.030956547, 0.040875287, 0.796423551,\n 0.222116249, 0.030859879, 0.032170899])\n\n assert_allclose(actual, expected, atol=1e-2, rtol=1e-6)\n",
"from __future__ import division\n\nfrom numpy.testing import assert_allclose\n\nfrom SALib.analyze import delta\nfrom SALib.analyze import dgsm\nfrom SALib.analyze import fast\nfrom SALib.analyze import rbd_fast\nfrom SALib.analyze import sobol\nfrom SALib.sample import fast_sampler\nfrom SALib.sample import finite_diff\nfrom SALib.sample import latin\nfrom SALib.sample import saltelli\nimport numpy as np\n\nfrom SALib.analyze import morris\nfrom SALib.sample.morris import sample\nfrom SALib.test_functions import Ishigami\nfrom SALib.util import read_param_file\n\nfrom pytest import fixture\n\n\n@fixture(scope='function')\ndef set_seed():\n \"\"\"Sets seeds for random generators so that tests can be repeated\n\n It is necessary to set seeds for both the numpy.random, and\n the stdlib.random libraries.\n \"\"\"\n seed = 123456\n np.random.seed(seed)\n\n\nclass TestMorris:\n\n def test_regression_morris_vanilla(self, set_seed):\n set_seed\n param_file = 'SALib/test_functions/params/Ishigami.txt'\n problem = read_param_file(param_file)\n param_values = sample(problem=problem, N=10000,\n num_levels=4, grid_jump=2,\n optimal_trajectories=None)\n\n Y = Ishigami.evaluate(param_values)\n\n Si = morris.analyze(problem, param_values, Y,\n conf_level=0.95, print_to_console=False,\n num_levels=4, grid_jump=2)\n\n assert_allclose(Si['mu_star'], [7.701555, 7.875, 6.288788],\n atol=0, rtol=1e-5)\n\n def test_regression_morris_groups(self, set_seed):\n set_seed\n param_file = 'SALib/test_functions/params/Ishigami_groups.txt'\n problem = read_param_file(param_file)\n\n param_values = sample(problem=problem, N=10000,\n num_levels=4, grid_jump=2,\n optimal_trajectories=None)\n\n Y = Ishigami.evaluate(param_values)\n\n Si = morris.analyze(problem, param_values, Y,\n conf_level=0.95, print_to_console=False,\n num_levels=4, grid_jump=2)\n\n assert_allclose(Si['mu_star'], [7.610322, 10.197014],\n atol=0, rtol=1e-5)\n\n def test_regression_morris_groups_brute_optim(self, set_seed):\n\n set_seed\n param_file = 'SALib/test_functions/params/Ishigami_groups.txt'\n problem = read_param_file(param_file)\n\n param_values = sample(problem=problem, N=50,\n num_levels=4, grid_jump=2,\n optimal_trajectories=6,\n local_optimization=False)\n\n Y = Ishigami.evaluate(param_values)\n\n Si = morris.analyze(problem, param_values, Y,\n conf_level=0.95, print_to_console=False,\n num_levels=4, grid_jump=2)\n\n assert_allclose(Si['mu'], [9.786986, np.NaN],\n atol=0, rtol=1e-5)\n\n assert_allclose(Si['sigma'], [6.453729, np.NaN],\n atol=0, rtol=1e-5)\n\n assert_allclose(Si['mu_star'], [9.786986, 7.875],\n atol=0, rtol=1e-5)\n\n def test_regression_morris_groups_local_optim(self, set_seed):\n set_seed\n param_file = 'SALib/test_functions/params/Ishigami_groups.txt'\n problem = read_param_file(param_file)\n\n param_values = sample(problem=problem, N=500,\n num_levels=4, grid_jump=2,\n optimal_trajectories=20,\n local_optimization=True)\n\n Y = Ishigami.evaluate(param_values)\n\n Si = morris.analyze(problem, param_values, Y,\n conf_level=0.95, print_to_console=False,\n num_levels=4, grid_jump=2)\n\n assert_allclose(Si['mu_star'],\n [13.95285, 7.875],\n rtol=1e-5)\n\n def test_regression_morris_optimal(self, set_seed):\n '''\n Tests the use of optimal trajectories with Morris.\n\n Uses brute force approach\n\n Note that the relative tolerance is set to a very high value\n (default is 1e-05) due to the coarse nature of the num_levels\n and grid_jump.\n '''\n set_seed\n param_file = 'SALib/test_functions/params/Ishigami.txt'\n problem = 
read_param_file(param_file)\n param_values = sample(problem=problem, N=20,\n num_levels=4, grid_jump=2,\n optimal_trajectories=9,\n local_optimization=False)\n\n Y = Ishigami.evaluate(param_values)\n\n Si = morris.analyze(problem, param_values, Y,\n conf_level=0.95, print_to_console=False,\n num_levels=4, grid_jump=2)\n\n assert_allclose(Si['mu_star'],\n [9.786986e+00, 7.875000e+00, 2.984617e-12],\n atol=0,\n rtol=1e-5)\n\n\ndef test_regression_sobol():\n param_file = 'SALib/test_functions/params/Ishigami.txt'\n problem = read_param_file(param_file)\n param_values = saltelli.sample(problem, 10000, calc_second_order=True)\n\n Y = Ishigami.evaluate(param_values)\n\n Si = sobol.analyze(problem, Y,\n calc_second_order=True, conf_level=0.95,\n print_to_console=False)\n\n assert_allclose(Si['S1'], [0.31, 0.44, 0.00], atol=5e-2, rtol=1e-1)\n assert_allclose(Si['ST'], [0.55, 0.44, 0.24], atol=5e-2, rtol=1e-1)\n assert_allclose([Si['S2'][0][1], Si['S2'][0][2], Si['S2'][1][2]], [\n 0.00, 0.25, 0.00], atol=5e-2, rtol=1e-1)\n\n\ndef test_regression_sobol_parallel():\n param_file = 'SALib/test_functions/params/Ishigami.txt'\n problem = read_param_file(param_file)\n param_values = saltelli.sample(problem, 10000, calc_second_order=True)\n\n Y = Ishigami.evaluate(param_values)\n\n Si = sobol.analyze(problem, Y,\n calc_second_order=True, parallel=True,\n conf_level=0.95, print_to_console=False)\n\n assert_allclose(Si['S1'], [0.31, 0.44, 0.00], atol=5e-2, rtol=1e-1)\n assert_allclose(Si['ST'], [0.55, 0.44, 0.24], atol=5e-2, rtol=1e-1)\n assert_allclose([Si['S2'][0][1], Si['S2'][0][2], Si['S2'][1][2]], [\n 0.00, 0.25, 0.00], atol=5e-2, rtol=1e-1)\n\n\ndef test_regression_sobol_groups():\n problem = {\n 'num_vars': 3,\n 'names': ['x1', 'x2', 'x3'],\n 'bounds': [[-np.pi, np.pi]] * 3,\n 'groups': ['G1', 'G2', 'G1']\n }\n param_values = saltelli.sample(problem, 10000, calc_second_order=True)\n\n Y = Ishigami.evaluate(param_values)\n Si = sobol.analyze(problem, Y,\n calc_second_order=True, parallel=True,\n conf_level=0.95, print_to_console=False)\n\n assert_allclose(Si['S1'], [0.55, 0.44], atol=5e-2, rtol=1e-1)\n assert_allclose(Si['ST'], [0.55, 0.44], atol=5e-2, rtol=1e-1)\n assert_allclose(Si['S2'][0][1], [0.00], atol=5e-2, rtol=1e-1)\n\n\ndef test_regression_sobol_groups_dists():\n problem = {\n 'num_vars': 3,\n 'names': ['x1', 'x2', 'x3'],\n 'bounds': [[-np.pi, np.pi], [1.0, 0.2], [3, 0.5]],\n 'groups': ['G1', 'G2', 'G1'],\n 'dists': ['unif', 'lognorm', 'triang']\n }\n param_values = saltelli.sample(problem, 10000, calc_second_order=True)\n\n Y = Ishigami.evaluate(param_values)\n Si = sobol.analyze(problem, Y,\n calc_second_order=True, parallel=True,\n conf_level=0.95, print_to_console=False)\n\n assert_allclose(Si['S1'], [0.427, 0.573], atol=5e-2, rtol=1e-1)\n assert_allclose(Si['ST'], [0.428, 0.573], atol=5e-2, rtol=1e-1)\n assert_allclose(Si['S2'][0][1], [0.001], atol=5e-2, rtol=1e-1)\n\n\ndef test_regression_fast():\n param_file = 'SALib/test_functions/params/Ishigami.txt'\n problem = read_param_file(param_file)\n param_values = fast_sampler.sample(problem, 10000)\n\n Y = Ishigami.evaluate(param_values)\n\n Si = fast.analyze(problem, Y, print_to_console=False)\n assert_allclose(Si['S1'], [0.31, 0.44, 0.00], atol=5e-2, rtol=1e-1)\n assert_allclose(Si['ST'], [0.55, 0.44, 0.24], atol=5e-2, rtol=1e-1)\n\n\ndef test_regression_rbd_fast():\n param_file = 'SALib/test_functions/params/Ishigami.txt'\n problem = read_param_file(param_file)\n param_values = latin.sample(problem, 10000)\n\n Y = 
Ishigami.evaluate(param_values)\n\n Si = rbd_fast.analyze(problem, Y, param_values, print_to_console=False)\n assert_allclose(Si['S1'], [0.31, 0.44, 0.00], atol=5e-2, rtol=1e-1)\n\n\ndef test_regression_dgsm():\n param_file = 'SALib/test_functions/params/Ishigami.txt'\n problem = read_param_file(param_file)\n param_values = finite_diff.sample(problem, 10000, delta=0.001)\n\n Y = Ishigami.evaluate(param_values)\n\n Si = dgsm.analyze(problem, param_values, Y,\n conf_level=0.95, print_to_console=False)\n\n assert_allclose(Si['dgsm'], [2.229, 7.066, 3.180], atol=5e-2, rtol=1e-1)\n\n\ndef test_regression_delta():\n param_file = 'SALib/test_functions/params/Ishigami.txt'\n problem = read_param_file(param_file)\n param_values = latin.sample(problem, 10000)\n\n Y = Ishigami.evaluate(param_values)\n\n Si = delta.analyze(problem, param_values, Y, num_resamples=10,\n conf_level=0.95, print_to_console=True)\n\n assert_allclose(Si['delta'], [0.210, 0.358, 0.155], atol=5e-2, rtol=1e-1)\n assert_allclose(Si['S1'], [0.31, 0.44, 0.00], atol=5e-2, rtol=1e-1)\n"
] | [
[
"numpy.array",
"numpy.zeros",
"numpy.testing.assert_allclose"
],
[
"numpy.random.seed",
"numpy.testing.assert_allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dineshpinto/nft_analytics | [
"99fd4adbfe786f4de6fa2a6a0c5e8a58eaaf338a"
] | [
"src/nft_analytics.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nMIT License\n\nCopyright (c) 2021 Dinesh Pinto\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport json\nimport logging\nimport os\nimport sys\nfrom json import JSONDecodeError\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom .infura_api import InfuraAPI\nfrom .opensea_api import OpenSeaAPI\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n stream=sys.stdout, level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass NFTAnalytics(OpenSeaAPI):\n def __init__(self, asset_contract_address: str):\n super().__init__(asset_contract_address)\n self.eth_api = InfuraAPI()\n\n @staticmethod\n def make_directories(folder_name: str):\n \"\"\" Set up directories for data and results if they don't exist. \"\"\"\n data_folder = os.path.join(\"data\", folder_name)\n result_folder = os.path.join(\"results\", folder_name)\n\n if not os.path.isdir(data_folder):\n logger.info(f\"Making directoy {data_folder}\")\n os.makedirs(data_folder)\n\n if not os.path.isdir(result_folder):\n logger.info(f\"Making directoy {result_folder}\")\n os.makedirs(result_folder)\n\n return data_folder, result_folder\n\n def fetch_data(self, max_offset: int = 10000, collection: str = None) -> list:\n \"\"\"\n Query OpenSea API for collection data, offset is shifted until max\n offset is reached (i.e. number of items in a collection).\n \"\"\"\n local_assets = []\n\n pbar = tqdm(range(0, max_offset + 1, 50))\n for offset in pbar:\n pbar.set_description(f\"{offset}\")\n try:\n asset_data = self.get_asset_data(offset=offset, limit=50, collection=collection)\n except JSONDecodeError:\n logger.error(f\"Only fetched data till offset={offset - 1}. \"\n f\"Warning={self.get_asset_data(offset=offset, limit=50)}\")\n return local_assets\n\n if \"assets\" not in asset_data:\n logger.error(f\"Only fetched data till offset={offset - 1}. Warning={asset_data}\")\n return local_assets\n\n for asset in asset_data[\"assets\"]:\n local_assets.append(asset)\n\n return local_assets\n\n def fetch_events(self, max_offset: int = 10000) -> list:\n \"\"\"\n Query OpenSea API for event data, offset is shifted until max\n offset is reached (i.e. 
number of items in a collection).\n \"\"\"\n local_events = []\n\n pbar = tqdm(range(0, max_offset + 1, 300))\n for offset in pbar:\n pbar.set_description(f\"{offset}\")\n try:\n event_data = self.get_event_data(offset=offset, limit=300)\n except JSONDecodeError:\n logger.error(f\"Only fetched data till offset={offset - 1}. \"\n f\"Warning={self.get_asset_data(offset=offset, limit=50)}\")\n return local_events\n\n if \"asset_events\" not in event_data:\n logger.error(f\"Only fetched data till offset={offset - 1}. Warning={event_data}\")\n return local_events\n\n for event in event_data[\"asset_events\"]:\n local_events.append(event)\n\n return local_events\n\n @staticmethod\n def save_json(asset_data: list, filename: str = \"data.json\"):\n with open(filename, 'w', encoding='utf-8') as f:\n json.dump(asset_data, f, ensure_ascii=False, indent=4)\n logger.info(f\"Saved asset data to {filename}\")\n\n @staticmethod\n def load_json(filename: str = \"data.json\") -> list:\n with open(filename) as f:\n asset_data = json.load(f)\n\n return asset_data\n\n @staticmethod\n def get_trait_values_for_type(asset_data: list, trait_type: str) -> list:\n \"\"\" Get all possible values of traits for a specific type of trait. \"\"\"\n trait_values = []\n for asset in asset_data:\n for traits in asset[\"traits\"]:\n if traits[\"trait_type\"] == trait_type and traits[\"value\"] not in trait_values:\n trait_values.append(traits[\"value\"])\n\n return trait_values\n\n def get_trait_type_median_price(self, asset_data: list, trait_type: str) -> dict:\n \"\"\" Get the median price of a specific trait type. \"\"\"\n trait_value_prices = {}\n for value in self.get_trait_values_for_type(asset_data, trait_type):\n listing_prices_trait = []\n\n for asset in asset_data:\n if asset[\"sell_orders\"]:\n for traits in asset[\"traits\"]:\n if traits[\"trait_type\"] == trait_type and traits[\"value\"] == value:\n listing_prices_trait.append(float(asset[\"sell_orders\"][0][\"base_price\"]) / 1e18)\n\n trait_value_prices[value] = np.median(np.array(listing_prices_trait))\n\n return dict(sorted(trait_value_prices.items(), key=lambda item: item[1], reverse=True))\n\n def get_median_prices(self, asset_data: list, traits_dict: dict) -> np.ndarray:\n \"\"\" Get median prices of all trait types. \"\"\"\n median_prices = []\n for trait_type, trait_value in traits_dict.items():\n median_prices.append(self.get_trait_type_median_price(asset_data, trait_type)[trait_value])\n\n return np.array(median_prices)\n\n def get_traits_with_median_prices(self, asset_data: list, asset: dict) -> dict:\n \"\"\" Get median prices of trait types for specific asset. \"\"\"\n traits = {}\n for trait in asset[\"traits\"]:\n traits[trait[\"trait_type\"]] = trait[\"value\"]\n\n trait_prices = {}\n\n for trait_type, trait_value in traits.items():\n price = self.get_trait_type_median_price(asset_data, trait_type)[trait_value]\n trait_prices[trait_value + \" \" + trait_type] = price\n\n return trait_prices\n\n def get_nft_holdings(self, asset_data: list, asset_name: str, eth_balances: bool = True) \\\n -> pd.DataFrame:\n \"\"\" Query the number of NFTs held and/or the ETH balances of addresses in a collection. 
\"\"\"\n nfts_held = {}\n\n for asset in asset_data:\n nfts_held[asset[\"owner\"][\"address\"]] = 0\n\n for asset in asset_data:\n nfts_held[asset[\"owner\"][\"address\"]] += 1\n\n logger.info(f\"Total NFTs in collection = {sum(nfts_held.values())}\")\n\n if eth_balances:\n logger.info(f\"Getting NFT holdings and ETH balances...\")\n df = pd.DataFrame(columns=[\"Address\", asset_name, \"ETH_balance\"])\n\n pbar = tqdm(nfts_held.items())\n\n for idx, (address, num_nfts) in enumerate(pbar):\n pbar.set_description(f\"{idx}\")\n df.loc[idx] = [address, num_nfts, self.eth_api.get_eth_balance(address)]\n else:\n logger.info(f\"Getting NFT holdings...\")\n df = pd.DataFrame(columns=[\"Address\", asset_name])\n\n pbar = tqdm(nfts_held.items())\n\n for idx, (address, num_nfts) in enumerate(pbar):\n pbar.set_description(f\"{idx}\")\n df.loc[idx] = [address, num_nfts]\n\n etherscan_links = []\n for address in df[\"Address\"]:\n etherscan_links.append(f\"https://etherscan.io/address/{address}\")\n df[\"Etherscan_link\"] = etherscan_links\n\n opensea_links = []\n for address in df[\"Address\"]:\n opensea_links.append(f\"https://opensea.io/{address}\")\n df[\"OpenSea_link\"] = opensea_links\n\n return df\n\n @staticmethod\n def calculate_rarity_df(asset_data: list, items_in_collection: int) -> pd.DataFrame:\n \"\"\"\n Calculate rarity of a particular trait.\n\n Uses the formula from rarity tools, full article at:\n raritytools.medium.com/ranking-rarity-understanding-rarity-calculation-methods-86ceaeb9b98c\n\n Formula:\n [Rarity Score for a Trait Value] =\n 1 / ([Number of Items with that Trait Value] / [Total Number of Items in Collection])\n\n The total Rarity Score for an NFT is the sum of the Rarity Score of all of its trait values.\n \"\"\"\n df = pd.DataFrame(columns=[\"Name\", \"Price\", \"Rarity\", \"RarityPriceRatio\"])\n\n for idx, asset in enumerate(asset_data):\n if asset[\"sell_orders\"]:\n if asset[\"sell_orders\"][0][\"payment_token_contract\"][\"symbol\"] == \"ETH\":\n price = float(asset[\"sell_orders\"][0][\"current_price\"]) / 1e18\n if price != 0:\n rarity = 0\n for trait in asset[\"traits\"]:\n trait_count = int(trait[\"trait_count\"])\n if trait_count != 0:\n rarity += 1 / (trait_count / items_in_collection)\n name = asset[\"name\"]\n df.loc[idx] = [name, price, rarity, rarity / price]\n\n return df\n"
] | [
[
"numpy.array",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
smsaladi/fixthejet | [
"b3089e6ee8cf2afbf24251de47702e0b1446eb73"
] | [
"colors_from_mpl.py"
] | [
"\"\"\"\nWrites out hex colors from color scales provided in matplotlib\ninto JS file\n\npython colors_from_mpl.py >> js/colorscales.js\n\"\"\"\n\nimport itertools\nimport json\n\nimport numpy as np\nimport matplotlib.colors\nimport matplotlib.cm\n\n# Have colormaps separated into categories:\n# http://matplotlib.org/examples/color/colormaps_reference.html\ncmap_names = [\n ('Perceptually Uniform Sequential', [\n 'viridis', 'plasma', 'inferno', 'magma']),\n ('Sequential', [\n 'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',\n 'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',\n 'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn']),\n ('Sequential (2)', [\n 'binary', 'gist_yarg', 'gist_gray', 'gray', 'bone', 'pink',\n 'spring', 'summer', 'autumn', 'winter', 'cool', 'Wistia',\n 'hot', 'afmhot', 'gist_heat', 'copper']),\n ('Diverging', [\n 'PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu',\n 'RdYlBu', 'RdYlGn', 'Spectral', 'coolwarm', 'bwr', 'seismic']),\n ('Qualitative', [\n 'Pastel1', 'Pastel2', 'Paired', 'Accent',\n 'Dark2', 'Set1', 'Set2', 'Set3',\n 'tab10', 'tab20', 'tab20b', 'tab20c']),\n ('Miscellaneous', [\n 'flag', 'prism', 'ocean', 'gist_earth', 'terrain', 'gist_stern',\n 'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'brg', 'hsv',\n 'gist_rainbow', 'rainbow', 'jet', 'nipy_spectral', 'gist_ncar'])\n ]\n\ncm_names = [cat[1] for cat in cmap_names]\n\nprint(\"var mpl_scales = {\")\n\nfor name in itertools.chain.from_iterable(cm_names):\n cmap = matplotlib.cm.get_cmap(name)\n values = np.linspace(0, 1, cmap.N)\n rgba = cmap(values)\n hex = np.apply_along_axis(matplotlib.colors.rgb2hex, axis=1, arr=rgba)\n print(' \"{}\": {},\\n'.format(name, json.dumps(hex.tolist())))\n\nprint(\"};\")\n"
] | [
[
"numpy.apply_along_axis",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Wesley-Tse/Road-Detection | [
"c3b444287d9b41ccc4234e737e4421b5d1b3c3da"
] | [
"train.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @author: Wesley\n# @time: 2020-12-11 10:47\n\nimport os\nimport time\nimport torch\nfrom torch import nn\nfrom models.dinknet34 import DinkNet34\nfrom loss import dice_bce_loss\nfrom models.unet import UNet\nfrom dataset import MyDataset\nfrom torch.utils.data import DataLoader\n\nimg_path = r'E:\\PyCharmProject\\datasets\\5k\\train_set\\JPEGImages'\nmask_path = r'E:\\PyCharmProject\\datasets\\5k\\train_set\\SegmentationClass'\nval_img_path = r'E:\\PyCharmProject\\datasets\\5k\\validate_set\\JPEGImages'\nval_mask_path = r'E:\\PyCharmProject\\datasets\\5k\\validate_set\\SegmentationClass'\nlog = './dinknet.txt'\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nbatch_size_per = 16\nbatch_size = batch_size_per * torch.cuda.device_count()\nepoch_limit = 10\nnet = DinkNet34().to(device)\nnet = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))\n\nweight = r'E:\\PyCharmProject\\Road-Detection\\weights\\dinknet34.pt'\n# if os.path.exists(weight):\n # net.load_state_dict(torch.load(weight))\n\ntrain_dataset = MyDataset(img_path, mask_path)\nval_dataset = MyDataset(val_img_path, val_mask_path)\ntrain_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\nval_dataloader = DataLoader(val_dataset, batch_size=batch_size)\n\nadam = torch.optim.Adam(net.parameters(), lr=2e-4)\nsgd = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.9)\n\nloss_fun = dice_bce_loss()\n\nif __name__ == '__main__':\n\n epoch = 1\n log = open(log, 'w', encoding='utf-8')\n log.write('epoch' + '\\t' + 'loss' + '\\t' + 'pa' + '\\t' + 'iou' + '\\t' + 'precision' + '\\n')\n log.flush()\n while epoch < 300:\n s_time = time.time()\n print('epoch - {} - training'.format(epoch))\n net.train()\n TP = FP = TN = FN = 0\n pa = 0\n iou = 0\n stop = 0\n flag = 0\n train_loss = 0\n batch = len(train_dataloader)\n for i, (img, mask) in enumerate(train_dataloader):\n img = img.to(device)\n mask = mask.to(device)\n out = net(img)\n loss = loss_fun(mask, out)\n\n adam.zero_grad()\n loss.backward()\n adam.step()\n\n if i % 10 == 0:\n print('{}: {}/{} - loss: {}'.format(epoch, i, batch, loss.item()))\n # torch.save(net.state_dict(), weight)\n # print('save success')\n train_loss += loss.item()\n epoch_loss = train_loss / len(train_dataloader)\n\n e_time = time.time()\n print('epoch - {} - epoch_loss: {}'.format(epoch, epoch_loss))\n print('total-time: ', e_time - s_time)\n print('epoch - {} - evaluating'.format(epoch))\n\n net.eval()\n for img, mask in val_dataloader:\n img = img.to(device)\n mask = mask.to(device)\n with torch.no_grad():\n pred = net(img)\n pred[pred >= 0.5] = 1\n pred[pred < 0.5] = 0\n\n TP += ((pred == 1) & (mask == 1)).cpu().sum().item()\n TN += ((pred == 0) & (mask == 0)).cpu().sum().item()\n FN += ((pred == 0) & (mask == 1)).cpu().sum().item()\n FP += ((pred == 1) & (mask == 0)).cpu().sum().item()\n\n pa = (TP + TN) / (TP + TN + FP + FN)\n precision = TP / (TP + FN)\n iou = TP / (TP + FP + FN)\n\n print('pa: ', pa)\n print('iou: ', iou)\n print('precision', precision)\n log.write(\n str(epoch) + '\\t' + str(epoch_loss) + '\\t' + str(pa) + '\\t' + str(iou) + '\\t' + str(precision) + '\\n')\n log.flush()\n\n if iou > stop:\n stop = iou\n torch.save(net.state_dict(), weight)\n print(\"save success,iou updated to: {}\".format(iou))\n flag = 0\n else:\n flag += 1\n print(\"pa为{},没有提升,参数未更新,iou为{},第{}次未更新\".format(iou, stop, flag))\n if flag >= epoch_limit:\n print(\"early stop at epoch {}, finally iou: 
{}\".format(epoch, stop))\n break\n epoch += 1\n log.close()\n"
] | [
[
"torch.no_grad",
"torch.cuda.device_count",
"torch.utils.data.DataLoader",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
p5a0u9l/clamm | [
"a41ce2526e9792ce08263bf27eb9c417608d1f5d"
] | [
"clamm/streams/plot_big_stft.py"
] | [
"\"\"\"\nConvert a large audio wav file (album length, i.e. > 30 minutes typically)\ninto a series of videos consisting of the audio synchronized with images of the\nspectrogram.\n\"\"\"\nimport os\nimport sys\nimport multiprocessing as mp\nimport subprocess\n\nimport tqdm\nimport numpy as np\nimport librosa.core\nimport librosa.display\nimport librosa.feature\nimport matplotlib.pyplot as plt\nplt.switch_backend(\"agg\")\n\nSAMPLERATE = 44.1e3 # samples/sec\nWAVPATH = sys.argv[1]\nBASENAME = os.path.basename(WAVPATH).replace(\".wav\", \"\")\nROOT = \"/mnt/nfs-share/music/data\"\nFRAMEROOT = ROOT + \"/frames/\" + BASENAME\nDURATION = 20 #\nNUMPROC = 8\nFFTFREQ = librosa.fft_frequencies(sr=SAMPLERATE)\nF_MAX = np.max(FFTFREQ)\nN_FFT = 2048\nN_HOP = int(1.0 / 4 * N_FFT)\nFILETIME = librosa.core.get_duration(filename=WAVPATH)\nNFRAME = int(FILETIME) / DURATION # allow truncation\nDUMPFILE = \"data.npy\"\nFPS = 5\n\n\ndef single_image(argtuple):\n y, i_frame, i_second = argtuple\n fractional_second = float(i_second) / FPS\n abs_index = i_frame * DURATION * FPS + i_second\n time = DURATION*i_frame + fractional_second\n titlestr = \"%s - file time %0.2f seconds\" % (BASENAME, time)\n\n # display the spectrogram\n plt.figure(figsize=(18, 8))\n librosa.display.specshow(\n y, x_axis='time', y_axis='mel', sr=SAMPLERATE, hop_length=N_HOP)\n\n plt.vlines(\n fractional_second, 0, F_MAX,\n linestyles='dashed', colors='w', alpha=0.6)\n\n plt.title(titlestr)\n plt.savefig(FRAMEROOT + \"/%05d.png\" % (abs_index))\n plt.tight_layout()\n plt.close()\n\n\ndef main():\n \"\"\" main\n \"\"\"\n\n pbar = tqdm.tqdm(total=NFRAME)\n pool = mp.Pool(NUMPROC)\n init = False\n if not os.path.exists(FRAMEROOT):\n os.makedirs(FRAMEROOT)\n\n for i_frame in range(10, NFRAME):\n # load the audio\n x, sr = librosa.core.load(\n WAVPATH, sr=SAMPLERATE,\n offset=DURATION * i_frame, duration=DURATION)\n\n # compute the spectrogram\n x = librosa.power_to_db(\n librosa.feature.melspectrogram(\n y=x, hop_length=N_HOP, n_fft=N_FFT, sr=SAMPLERATE),\n ref=np.max)\n\n if not init:\n f_mean = np.sum(x, axis=1)\n init = True\n else:\n f_mean += np.sum(x, axis=1)\n\n # loop updates\n pbar.update(1)\n pool.map(\n single_image,\n [(x, i_frame, i_second) for i_second in range(FPS*DURATION)])\n\n np.save(BASENAME + 'f_mean.npy', f_mean)\n pbar.close()\n\n subprocess.call([\n \"ffmpeg\", '-r', '5', '-i', FRAMEROOT + '%05d.png', '-i', WAVPATH,\n '-shortest', '-c:v', 'libx264', '-c:a', 'aac', '-strict', '-2',\n '-pix_fmt', 'yuv420p', '-crf', '23', '-r', '5', '-y',\n ROOT + \"/videos/\" + BASENAME + '.mp4'])\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.switch_backend",
"matplotlib.pyplot.savefig",
"numpy.save",
"numpy.max",
"matplotlib.pyplot.close",
"matplotlib.pyplot.vlines",
"numpy.sum",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mbooali/introduction-to-machine | [
"3f75f9897f1f63f07bb6eace312fa35e16786623"
] | [
"mglearn/plot_animal_tree.py"
] | [
"from imageio import imread\nimport matplotlib.pyplot as plt\n\n\ndef plot_animal_tree(ax=None):\n import graphviz\n if ax is None:\n ax = plt.gca()\n mygraph = graphviz.Digraph(node_attr={'shape': 'box'},\n edge_attr={'labeldistance': \"10.5\"},\n format=\"png\")\n mygraph.node(\"0\", \"Has feathers?\")\n mygraph.node(\"1\", \"Can fly?\")\n mygraph.node(\"2\", \"Has fins?\")\n mygraph.node(\"3\", \"Hawk\")\n mygraph.node(\"4\", \"Penguin\")\n mygraph.node(\"5\", \"Dolphin\")\n mygraph.node(\"6\", \"Bear\")\n mygraph.edge(\"0\", \"1\", label=\"True\")\n mygraph.edge(\"0\", \"2\", label=\"False\")\n mygraph.edge(\"1\", \"3\", label=\"True\")\n mygraph.edge(\"1\", \"4\", label=\"False\")\n mygraph.edge(\"2\", \"5\", label=\"True\")\n mygraph.edge(\"2\", \"6\", label=\"False\")\n mygraph.render(\"tmp\")\n ax.imshow(imread(\"tmp.png\"))\n ax.set_axis_off()\n"
] | [
[
"matplotlib.pyplot.gca"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Shihao-Feng-98/RRP_Hopper_simulation | [
"444dbcce90d5ffb6bf577ed03adc9717183e21ae"
] | [
"controller.py"
] | [
"'''\nonly for RRP Hopper\nShihao Feng\n2021.10.28\n'''\n\nimport numpy as np\nimport pybullet as p\nfrom leg_kinematics import LegKinematicsRRP\nimport pinocchio as pin\n\nclass JointPDController(object):\n def __init__ (self):\n self.kp = np.array([70, 70, 1500])\n self.kd = np.array([2, 2, 10])\n\n def solve(self, q_d, dq_d, q_state, dq_state):\n q = q_state[7:10]\n dq = dq_state[6:9]\n ddq_d = np.zeros(3) # 期望加速度计算量大,简单地设为0\n tau_d = ddq_d + self.kd*(dq_d - dq) + self.kp*(q_d - q) # (3,)\n return tau_d \n\n \nclass SLIPController(object):\n def __init__(self):\n self.q_d = np.array([0., 0., 0.]) # q_d[2] always 0\n self.dq_d = np.array([0., 0., 0.]) # always 0\n # 关节增益 \n self.kp = np.array([70., 70., 3000.]) \n self.kd = np.array([2., 2., 10.]) # 阻尼模拟能量损失, 同时防止腿抖动\n # 身体姿态增益\n self.kp_pose = 5. * np.ones(2) \n self.kd_pose = 1. * np.ones(2)\n # 水平速度增益\n self.kp_vel = 0.1 * np.ones(2) \n \n self.leg_length_normal = 0.55\n self.RRP = LegKinematicsRRP(L=self.leg_length_normal)\n\n # private methods\n def __w_to_drpy(self, rpy, w):\n '''\n rpy -> (3,), w -> (3,),drpy -> (3,)\n '''\n H = np.array([[np.cos(rpy[2])/np.cos(rpy[1]), np.sin(rpy[2])/np.cos(rpy[1]), 0.],\n [-np.sin(rpy[2]), np.cos(rpy[2]), 0.],\n [np.cos(rpy[2])*np.tan(rpy[1]), np.sin(rpy[2])*np.tan(rpy[1]), 0.]])\n drpy = (H @ w.reshape(-1,1)).ravel()\n return drpy\n\n def solve(self, q_state, dq_state, robot_state_machine, T_s, vel, dir, F_thrust):\n tau_d = np.zeros(3) # 初始化\n orn_body = q_state[3:7] # 身体姿态 四元数\n rpy = np.array(p.getEulerFromQuaternion(orn_body))\n w_body = dq_state[3:6] # 身体角速度 w\n drpy = self.__w_to_drpy(rpy, w_body)\n q = q_state[7:10] # 关节位置\n dq = dq_state[6:9] # 关节速度\n\n # 控制虚拟弹簧力\n tau_d[2] = self.kd[2]*(self.dq_d[2] - dq[2]) \\\n + self.kp[2]*(self.q_d[2] - q[2]) \n\n # 弹簧伸长时,施加推力抵消能量损耗\n if robot_state_machine == 'THRUST':\n tau_d[2] += F_thrust\n\n # 触地或者离地时,关节扭矩为0\n if (robot_state_machine == 'LOADING' or robot_state_machine == 'UNLOADING'):\n tau_d[0:2] = np.zeros(2)\n\n # 弹簧压缩或者伸长时,施加关节扭矩控制身体姿态 \n if (robot_state_machine == 'COMPRESSION' or robot_state_machine == 'THRUST'): \n # 姿态线性伺服控制\n tau_d[0:2] = - (self.kd_pose*(np.zeros(2) - drpy[0:2]) \\\n + self.kp_pose*(np.zeros(2) - rpy[0:2])) # (2,) \n\n # 飞行时,控制足端移动到落地点 \n if robot_state_machine == 'FLIGHT':\n vel_xy_d = np.array([vel*np.cos(dir), vel*np.sin(dir)])\n v_body = dq_state[0:2] # 当前水平速度\n # 相对于H系:坐标系原点与身体坐标系重合,方向与世界坐标系平行\n xy_d = v_body*T_s/2 - self.kp_vel*(vel_xy_d - v_body) # 计算落脚点\n r = q[2] + self.leg_length_normal \n z_d = - (r**2 - xy_d[0]**2 - xy_d[1]**2)**0.5\n # 转换到B系:身体坐标系\n R_HB = pin.rpy.rpyToMatrix(rpy)\n R_BH = R_HB.T \n p_H = np.array([xy_d[0], xy_d[1], z_d])\n p_B = (R_BH @ p_H.reshape(-1,1)).ravel() # (3,)\n q_d = self.RRP.IK(p_B)\n self.q_d[0:2] = q_d[0:2]\n # 关节PD控制\n tau_d[0:2] = self.kd[0:2]*(self.dq_d[0:2] - dq[0:2]) \\\n + self.kp[0:2]*(self.q_d[0:2] - q[0:2]) # (2,)\n \n print('tau_d: ', tau_d)\n return tau_d\n"
] | [
[
"numpy.cos",
"numpy.sin",
"numpy.ones",
"numpy.tan",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
XinlongSBU/pynucastro | [
"4f1547e99208ad03d8f79d748601219591a157b5",
"4f1547e99208ad03d8f79d748601219591a157b5"
] | [
"pynucastro/networks/rate_collection.py",
"pynucastro/templates/fortran-vode/weak.py"
] | [
"\"\"\"A collection of classes and methods to deal with collections of\nrates that together make up a network.\"\"\"\n\n# Common Imports\nfrom __future__ import print_function\n\nimport functools\nimport math\nfrom operator import mul\nimport os\nfrom collections import OrderedDict\n\nfrom ipywidgets import interact\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n#from mpl_toolkits.axes_grid1 import make_axes_locatable\nimport networkx as nx\n\n# Import Rate\nfrom pynucastro.rates import Rate, Nucleus, Library\n\nmpl.rcParams['figure.dpi'] = 100\n\nclass Composition(object):\n \"\"\"a composition holds the mass fractions of the nuclei in a network\n -- useful for evaluating the rates\n\n \"\"\"\n def __init__(self, nuclei, small=1.e-16):\n \"\"\"nuclei is an iterable of the nuclei (Nucleus objects) in the network\"\"\"\n if not isinstance(nuclei[0], Nucleus):\n raise ValueError(\"must supply an iterable of Nucleus objects\")\n else:\n self.X = {k: small for k in nuclei}\n\n def set_solar_like(self, Z=0.02):\n \"\"\" approximate a solar abundance, setting p to 0.7, He4 to 0.3 - Z and\n the remainder evenly distributed with Z \"\"\"\n num = len(self.X)\n rem = Z/(num-2)\n for k in self.X:\n if k == Nucleus(\"p\"):\n self.X[k] = 0.7\n elif k.raw == \"he4\":\n self.X[k] = 0.3 - Z\n else:\n self.X[k] = rem\n\n self.normalize()\n\n def set_all(self, xval):\n \"\"\" set all species to a particular value \"\"\"\n for k in self.X:\n self.X[k] = xval\n\n def set_nuc(self, name, xval):\n \"\"\" set nuclei name to the mass fraction xval \"\"\"\n for k in self.X:\n if k.raw == name:\n self.X[k] = xval\n break\n\n def normalize(self):\n \"\"\" normalize the mass fractions to sum to 1 \"\"\"\n X_sum = sum([self.X[k] for k in self.X])\n\n for k in self.X:\n self.X[k] /= X_sum\n\n def get_molar(self):\n \"\"\" return a dictionary of molar fractions\"\"\"\n molar_frac = {k: v/k.A for k, v in self.X.items()}\n return molar_frac\n\n def __str__(self):\n ostr = \"\"\n for k in self.X:\n ostr += \" X({}) : {}\\n\".format(k, self.X[k])\n return ostr\n\nclass RateCollection(object):\n \"\"\" a collection of rates that together define a network \"\"\"\n\n pynucastro_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n\n def __init__(self, rate_files=None, libraries=None, rates=None):\n \"\"\"\n rate_files are the files that together define the network. 
This\n can be any iterable or single string.\n\n This can include Reaclib library files storing multiple rates.\n\n If libraries is supplied, initialize a RateCollection using the rates\n in the Library object(s) in list 'libraries'.\n\n If rates is supplied, initialize a RateCollection using the\n Rate objects in the list 'rates'.\n\n Any combination of these options may be combined.\n \"\"\"\n\n self.files = []\n self.rates = []\n self.library = None\n\n if rate_files:\n if isinstance(rate_files, str):\n rate_files = [rate_files]\n self._read_rate_files(rate_files)\n\n if rates:\n if isinstance(rates, Rate):\n rates = [rates]\n try:\n for r in rates:\n assert(isinstance(r, Rate))\n except:\n print('Expected Rate object or list of Rate objects passed as the rates argument.')\n raise\n else:\n rlib = Library(rates=rates)\n if not self.library:\n self.library = rlib\n else:\n self.library = self.library + rlib\n\n if libraries:\n if isinstance(libraries, Library):\n libraries = [libraries]\n try:\n for lib in libraries:\n assert(isinstance(lib, Library))\n except:\n print('Expected Library object or list of Library objects passed as the libraries argument.')\n raise\n else:\n if not self.library:\n self.library = libraries.pop(0)\n for lib in libraries:\n self.library = self.library + lib\n\n if self.library:\n self.rates = self.rates + self.library.get_rates()\n\n # get the unique nuclei\n u = []\n for r in self.rates:\n t = set(r.reactants + r.products)\n u = set(list(u) + list(t))\n\n self.unique_nuclei = sorted(u)\n\n # now make a list of each rate that touches each nucleus\n # we'll store this in a dictionary keyed on the nucleus\n self.nuclei_consumed = OrderedDict()\n self.nuclei_produced = OrderedDict()\n\n for n in self.unique_nuclei:\n self.nuclei_consumed[n] = [r for r in self.rates if n in r.reactants]\n self.nuclei_produced[n] = [r for r in self.rates if n in r.products]\n\n # Re-order self.rates so Reaclib rates come first,\n # followed by Tabular rates. 
This is needed if\n # reaclib coefficients are targets of a pointer array\n # in the Fortran network.\n # It is desired to avoid wasting array size\n # storing meaningless Tabular coefficient pointers.\n self.rates = sorted(self.rates,\n key=lambda r: r.chapter == 't')\n\n self.tabular_rates = []\n self.reaclib_rates = []\n for n, r in enumerate(self.rates):\n if r.chapter == 't':\n self.tabular_rates.append(n)\n elif isinstance(r.chapter, int):\n self.reaclib_rates.append(n)\n else:\n print('ERROR: Chapter type unknown for rate chapter {}'.format(\n str(r.chapter)))\n exit()\n\n def _read_rate_files(self, rate_files):\n # get the rates\n self.files = rate_files\n for rf in self.files:\n try:\n rflib = Library(rf)\n except:\n print(\"Error reading library from file: {}\".format(rf))\n raise\n else:\n if not self.library:\n self.library = rflib\n else:\n self.library = self.library + rflib\n\n def get_nuclei(self):\n \"\"\" get all the nuclei that are part of the network \"\"\"\n return self.unique_nuclei\n\n def evaluate_rates(self, rho, T, composition):\n \"\"\"evaluate the rates for a specific density, temperature, and\n composition\"\"\"\n rvals = OrderedDict()\n ys = composition.get_molar()\n\n for r in self.rates:\n val = r.prefactor * rho**r.dens_exp * r.eval(T)\n yfac = functools.reduce(mul, [ys[q] for q in r.reactants])\n rvals[r] = yfac * val\n\n return rvals\n\n def network_overview(self):\n \"\"\" return a verbose network overview \"\"\"\n ostr = \"\"\n for n in self.unique_nuclei:\n ostr += \"{}\\n\".format(n)\n ostr += \" consumed by:\\n\"\n for r in self.nuclei_consumed[n]:\n ostr += \" {}\\n\".format(r.string)\n\n ostr += \" produced by:\\n\"\n for r in self.nuclei_produced[n]:\n ostr += \" {}\\n\".format(r.string)\n\n ostr += \"\\n\"\n return ostr\n\n def write_network(self, *args, **kwargs):\n \"\"\"Before writing the network, check to make sure the rates\n are distinguishable by name.\"\"\"\n assert self._distinguishable_rates(), \"ERROR: Rates not uniquely identified by Rate.fname\"\n self._write_network(*args, **kwargs)\n\n def _distinguishable_rates(self):\n \"\"\"Every Rate in this RateCollection should have a unique Rate.fname,\n as the network writers distinguish the rates on this basis.\"\"\"\n names = [r.fname for r in self.rates]\n return len(set(names)) == len(self.rates)\n\n def _write_network(self, *args, **kwargs):\n \"\"\"A stub for function to output the network -- this is implementation\n dependent.\"\"\"\n print('To create network integration source code, use a class that implements a specific network type.')\n return\n\n def plot(self, outfile=None, rho=None, T=None, comp=None, size=(800, 600), dpi=100):\n \"\"\"Make a plot of the network structure showing the links between nuclei\"\"\"\n\n G = nx.MultiDiGraph()\n G.position = {}\n G.labels = {}\n\n fig, ax = plt.subplots()\n #divider = make_axes_locatable(ax)\n #cax = divider.append_axes('right', size='15%', pad=0.05)\n\n ax.plot([0, 0], [8, 8], 'b-')\n\n # nodes -- the node nuclei will be all of the heavies, but not\n # p, n, alpha, unless we have p + p, 3-a, etc.\n node_nuclei = []\n for n in self.unique_nuclei:\n if n.raw not in [\"p\", \"n\", \"he4\"]:\n node_nuclei.append(n)\n else:\n for r in self.rates:\n if r.reactants.count(n) > 1:\n node_nuclei.append(n)\n break\n\n for n in node_nuclei:\n G.add_node(n)\n G.position[n] = (n.N, n.Z)\n G.labels[n] = r\"${}$\".format(n.pretty)\n\n if rho is not None and T is not None and comp is not None:\n ydots = self.evaluate_rates(rho, T, comp)\n else:\n 
ydots = None\n\n #for rr in ydots:\n # print(\"{}: {}\".format(rr, ydots[rr]))\n\n # edges\n for n in node_nuclei:\n for r in self.nuclei_consumed[n]:\n for p in r.products:\n if p in node_nuclei:\n # networkx doesn't seem to keep the edges in\n # any particular order, so we associate data\n # to the edges here directly, in this case,\n # the reaction rate, which will be used to\n # color it\n if ydots is None:\n G.add_edges_from([(n, p)], weight=0.5)\n else:\n try:\n rate_weight = math.log10(ydots[r])\n except ValueError:\n # if ydots[r] is zero, then set the weight\n # to roughly the minimum exponent possible\n # for python floats\n rate_weight = -308\n except:\n raise\n G.add_edges_from([(n, p)], weight=rate_weight)\n\n nx.draw_networkx_nodes(G, G.position,\n node_color=\"#A0CBE2\", alpha=1.0,\n node_shape=\"o\", node_size=1000, linewidth=2.0, zorder=10, ax=ax)\n\n nx.draw_networkx_labels(G, G.position, G.labels,\n font_size=13, font_color=\"w\", zorder=100, ax=ax)\n\n # get the edges and weights coupled in the same order\n edges, weights = zip(*nx.get_edge_attributes(G, 'weight').items())\n\n edges_lc = nx.draw_networkx_edges(G, G.position, width=3,\n edgelist=edges, edge_color=weights,\n node_size=1000,\n edge_cmap=plt.cm.viridis, zorder=1, ax=ax)\n\n # for networkx <= 2.0 draw_networkx_edges returns a\n # LineCollection matplotlib type which we can use for the\n # colorbar directly. For networkx >= 2.1, it is a collection\n # of FancyArrowPatch-s, which we need to run through a\n # PatchCollection. See: \n # https://stackoverflow.com/questions/18658047/adding-a-matplotlib-colorbar-from-a-patchcollection\n\n if ydots is not None:\n pc = mpl.collections.PatchCollection(edges_lc, cmap=plt.cm.viridis)\n pc.set_array(weights)\n plt.colorbar(pc, label=\"log10(rate)\")\n\n Ns = [n.N for n in node_nuclei]\n Zs = [n.Z for n in node_nuclei]\n\n plt.xlim(min(Ns)-1, max(Ns)+1)\n #plt.ylim(min(Zs)-1, max(Zs)+1)\n plt.xlabel(r\"$N$\", fontsize=\"large\")\n plt.ylabel(r\"$Z$\", fontsize=\"large\")\n\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n ax.set_aspect(\"equal\", \"datalim\")\n\n fig.set_size_inches(size[0]/dpi, size[1]/dpi)\n\n if outfile is None:\n plt.show()\n else:\n plt.tight_layout()\n plt.savefig(outfile, dpi=dpi)\n\n def __repr__(self):\n string = \"\"\n for r in self.rates:\n string += \"{}\\n\".format(r.string)\n return string\n\n\nclass Explorer(object):\n \"\"\" interactively explore a rate collection \"\"\"\n def __init__(self, rc, comp, size=(800, 600)):\n \"\"\" take a RateCollection and a composition \"\"\"\n self.rc = rc\n self.comp = comp\n self.size = size\n\n def _make_plot(self, logrho, logT):\n self.rc.plot(rho=10.0**logrho, T=10.0**logT, comp=self.comp, size=self.size)\n\n def explore(self, logrho=(2, 6, 0.1), logT=(7, 9, 0.1)):\n \"\"\"Perform interactive exploration of the network structure.\"\"\"\n interact(self._make_plot, logrho=logrho, logT=logT)\n",
"'''\nFigure out what the rho-T grid looks like for data tables\nfrom Toki, et al. 2015.\n\nDonald Willcox\n'''\n\nimport numpy as np\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('infile',type=str, help='The input file to process.')\nargs = parser.parse_args()\n\ntry: ifile = open(args.infile, 'r')\nexcept: raise\n\n# Eat header\nfor n in xrange(0,6):\n ifile.readline()\n\ndens = []\ntemp = []\n\nfor l in ifile:\n if l.strip() != '':\n ls = l.split()\n dens.append(ls[0])\n temp.append(ls[1])\nifile.close()\n\ndens = list(set(dens))\ntemp = list(set(temp))\n\ndens_f = [float(s) for s in dens]\ntemp_f = [float(s) for s in temp]\n\ndens = np.array(dens_f)\ntemp = np.array(temp_f)\n\nprint(args.infile)\nprint('')\nprint('dens, # : ' + str(len(dens)))\nprint('dens, min: ' + str(np.amin(dens)))\nprint('dens, max: ' + str(np.amax(dens)))\nprint('')\nprint('temp, # : ' + str(len(temp)))\nprint('temp, min: ' + str(np.amin(temp)))\nprint('temp, max: ' + str(np.amax(temp)))\n"
] | [
[
"matplotlib.collections.PatchCollection",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"numpy.amin",
"numpy.amax",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MGIMM/dynamic_balancing | [
"74482a970996ec75f5fb3f433b8285420787ccd7"
] | [
"notebooks/static_simulation.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport torch\nimport numpy as np\nimport matplotlib.pyplot as plt\n# from MMDBalancing import MMDBalancing as MMDB\n# from OptimalTransportBalancing import OptimalTransportBalancing as OTB\n# from NeuralAdversarialBalancing import NeuralAdversarialBalancing as NAB\n#get_ipython().run_line_magic('matplotlib', 'inline')\nimport pandas as pd\n\n\n# utils\nfrom utils_balancing import *\n\n\n# In[2]:\n\n\ndef static_simulation():\n n = 5000\n m = 5000\n d = 1\n r = lambda x:(x-3).square() + (x>-2)*(x+3).square() +x.abs()\n #r = lambda x:x.square()\n def get_data(n = 500,m = 500, r = r, d = d):\n def pi(x):\n return torch.sin(x)+ 2*torch.rand(x.shape)-1\n def pi_ring(x):\n return torch.sin(x)+ 1*torch.rand(x.shape)-0.5\n \n \n xi = torch.normal(mean = -1, std = 2, size = (n,d))\n xi_ring = torch.zeros(size = (m,d))\n for i in range(m):\n if torch.rand(1).item()>0.3:\n xi_ring[i,0] = torch.normal(mean = -4, std = 2, size = (1,)).item()\n else:\n xi_ring[i,0] = torch.normal(mean = 3, std = 0.2, size = (1,)).item()\n w = torch.ones(n)\n w_ring = torch.ones(m)\n \n \n \n \n xi_natural = torch.cat((xi, pi(xi)),axis = 1)\n xi_ring_natural = torch.cat((xi_ring, pi_ring(xi_ring)), axis = 1)\n Z =xi_natural[:,0]+xi_natural[:,1] + torch.rand((n,)) \n Z_ring =xi_ring_natural[:,0]+xi_ring_natural[:,1]+torch.rand((m,))\n R = r(Z)\n return xi_natural,xi_ring_natural,R,Z,Z_ring\n \n # ## Reference value\n \n # In[7]:\n \n \n xi_natural, xi_ring_natural,R,Z,Z_ring = get_data(n = 50000, m = 50000)\n ref = r(Z_ring).mean()\n \n \n # ### Re-generate data set with $n=m=500$.\n \n # In[8]:\n \n \n n = 500\n m = 500\n xi_natural, xi_ring_natural,R,Z,Z_ring = get_data(n = n, m = m, r = r)\n \n \n # # GIPWE: DE and DRE\n # \n # 1. Data splitting (K-folds with K = 3)\n \n # In[9]:\n \n \n def get_split_ind(n,K = 3):\n I_n = torch.arange(n, dtype = float)\n \n rand_ind_n = torch.multinomial(I_n,len(I_n),replacement = False)\n num_folds_n = int(n/K)\n Ind = []\n for i in range(K):\n if (i+1)*num_folds_n <= n:\n Ind.append(list(rand_ind_n[i*num_folds_n:(i+1)*num_folds_n].detach().numpy()))\n else:\n Ind.append(list(rand_ind_n[i*num_folds_n:].detach().numpy()))\n \n Ind_split = []\n for i in range(K):\n list_n = []\n for j in range(n):\n if j >= i*num_folds_n and j < (i+1)*num_folds_n:\n pass\n else:\n list_n.append(rand_ind_n[j].item())\n \n Ind_split.append(list_n)\n return Ind_split,Ind\n \n \n # In[10]:\n \n \n K = 3\n Ind_out, Ind_in = get_split_ind(n,K)\n \n \n # 2. 
Get GIPW weights\n \n # In[11]:\n \n \n from sklearn.ensemble import RandomForestRegressor\n import xgboost as xgb\n from sklearn.linear_model import LogisticRegression\n \n \n # In[12]:\n \n \n XGB = xgb.XGBRegressor(gamma = 5e0)\n RF = RandomForestRegressor(n_estimators = 20, min_samples_split = 20)\n LR = LogisticRegression()\n def get_GIPW_weights(model):\n eta = np.zeros(n)\n for k in range(K):\n SGIPW = Shallow_GIPW(xi_natural[Ind_out[k],:], xi_ring_natural)\n \n SGIPW.train(model,xi = np.array(xi_natural[Ind_in[k],:]),log=False)\n eta[Ind_in[k]] = SGIPW.weights*(SGIPW.weights>0)\n return eta\n \n eta_XGB = get_GIPW_weights(XGB)\n eta_RF = get_GIPW_weights(RF)\n eta_LR = get_GIPW_weights(LR)\n \n \n # In[13]:\n \n # OT\n OTB = OptimalTransportBalancing()\n eta_OT = OTB.get_weights(xi_natural,xi_ring_natural)\n eta_OT = eta_OT.detach().numpy()\n \n \n # In[17]:\n \n \n # MMD weights\n lambda_RKHS = 1e2\n lambda_l2 = 1e-3\n MMDB = MMDBalancing(xi_natural,xi_ring_natural,sigma = 5e-1,D = 2000)\n eta_MMD = MMDB.get_weights(lambda_RKHS = lambda_RKHS, lambda_l2 = lambda_l2)\n eta_MMD = eta_MMD.to(\"cpu\").detach().numpy()\n \n \n # In[18]:\n \n \n \n \n \n # In[20]:\n \n \n # Neural Adversarial Balancing\n class NeuralNetwork(nn.Module):\n def __init__(self,input_dim = 1, num_nodes = 32):\n super(NeuralNetwork, self).__init__()\n self.flatten = nn.Flatten()\n self.linear_relu_stack = nn.Sequential(\n nn.Linear(input_dim, num_nodes),\n nn.ReLU(),\n #nn.Dropout(0.3),\n #nn.BatchNorm1d(num_nodes), \n \n nn.Linear(num_nodes, num_nodes),\n nn.ReLU(),\n nn.Linear(num_nodes, num_nodes),\n nn.ReLU(),\n #nn.Dropout(0.3),\n #nn.BatchNorm1d(num_nodes), \n \n #nn.Linear(num_nodes, num_nodes),\n #nn.ReLU(),\n # # #nn.Dropout(0.3),\n # # nn.BatchNorm1d(num_nodes), \n \n nn.Linear(num_nodes, 1),\n )\n \n def forward(self, x):\n x = self.flatten(x)\n target = self.linear_relu_stack(x)\n return target\n \n \n # In[21]:\n \n \n AB = Adversarial_Balancing(xi_natural,xi_ring_natural)\n num_nodes_IPM = 24\n model_IPM = NeuralNetwork(input_dim = d*2,num_nodes = 2*num_nodes_IPM).to(AB.dev)\n model_reweighting = NeuralNetwork(input_dim = d*2, num_nodes = num_nodes_IPM).to(AB.dev)\n learning_rate = 1e-3\n optimizer_IPM = torch.optim.Adam(model_IPM.parameters(), lr=learning_rate, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=True)\n optimizer_reweighting = torch.optim.Adam(model_reweighting.parameters(), lr=learning_rate, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=True)\n \n \n # In[22]:\n \n \n epochs = 50\n loss_trace = []\n for t in range(epochs):\n #print(f\"Epoch {t+1}\\n-------------------------------\")\n current_test_loss = AB.train_loop(model_IPM = model_IPM,\n model_reweighting = model_reweighting,\n optimizer_IPM = optimizer_IPM,\n optimizer_reweighting = optimizer_reweighting,\n IPM_steps = 3,\n reweight_steps = 3,\n lambda_l2_weight = 5e-2,\n lambda_l2_IPM = 1e-2,\n lambda_l1_IPM = 1e-2,\n )\n loss_trace.append(current_test_loss.to(\"cpu\").detach().numpy())\n \n \n \n \n weights = model_reweighting(xi_natural.to(\"cuda:0\"))\n #weights /=weights.mean()\n eta_NAB = weights.to(\"cpu\").detach().numpy()\n \n \n \n \n \n # 4. 
Get $r^{\\natural}$ estimation with the same K-fold splitting\n \n # In[26]:\n \n \n from sklearn.linear_model import LinearRegression\n RF_R = RandomForestRegressor(n_estimators = 20, min_samples_split = 5)\n #model_r = RF_R\n model_r = LinearRegression()\n \n \n # In[27]:\n \n \n def get_r_estimation(model, K = 3):\n r_hat = np.zeros(n)\n r_hat_ring = np.zeros(m)\n for k in range(K):\n SGIPW = Shallow_GIPW(xi_natural[Ind_out[k],:], xi_ring_natural)\n model_k = model\n model_k.fit(xi_natural[Ind_out[k],:].detach().numpy(), R[Ind_out[k]].detach().numpy())\n \n r_hat[Ind_in[k]] = model_k.predict(xi_natural[Ind_in[k]].detach().numpy())\n r_hat_ring += model_k.predict(xi_ring_natural.detach().numpy())\n r_hat_ring /= K\n \n return r_hat, r_hat_ring\n \n \n # In[28]:\n \n \n r_hat,r_hat_ring = get_r_estimation(model_r)\n \n \n # In[29]:\n \n \n \n \n # ## Estimators\n \n # In[30]:\n \n \n def get_DE(eta, R = R, ref= ref):\n try:\n eta = torch.from_numpy(eta)\n except:\n pass\n pred = (eta*R).mean().item()\n error = torch.abs(pred - ref).item()\n return pred, error \n def get_DRE(eta,r_hat, r_hat_ring, R = R, ref = ref):\n try:\n eta = torch.from_numpy(eta)\n r_hat = torch.from_numpy(r_hat)\n except:\n pass\n pred = (eta*(R -r_hat)).mean() + r_hat_ring.mean()\n error = torch.abs(pred - ref).item()\n return pred.item(), error \n \n \n \n \n \n \n # In[31]:\n \n \n #pd.set_option(\"display.precision\", 2)\n #pd.set_option('display.float_format', lambda x: '%.2f' % x)\n table_bad_reg = pd.DataFrame([[get_DE(eta_OT)[1],get_DRE(eta_OT,r_hat,r_hat_ring)[1]],[get_DE(eta_MMD)[1],get_DRE(eta_MMD,r_hat,r_hat_ring)[1]], [get_DE(eta_NAB)[1],get_DRE(eta_NAB,r_hat,r_hat_ring)[1]], [get_DE(eta_RF)[1],get_DRE(eta_RF,r_hat,r_hat_ring)[1]],[get_DE(eta_XGB)[1],get_DRE(eta_XGB,r_hat,r_hat_ring)[1]], [get_DE(eta_LR)[1],get_DRE(eta_LR,r_hat,r_hat_ring)[1]],[None, torch.abs(r_hat_ring.mean()-ref).item()]], columns = (\"DE\",\"DRE\"), index = (\"OT\", \"MMD\",\"NAB\", \"GIPW-RF\",\"GIPW-XGB\",\"GIPW-LR\",\"G-computation\"))\n \n \n # ## Bad regression model: Linear regression\n \n # In[32]:\n \n \n \n \n # In[ ]:\n \n \n \n \n \n # ## Good regression model: XGBoosting\n \n # In[33]:\n \n \n XGB_R = xgb.XGBRegressor(n_estimators = 20, gamma = 1e-0)\n model_r = XGB_R\n r_hat,r_hat_ring = get_r_estimation(model_r)\n \n \n # In[34]:\n \n \n pd.set_option(\"display.precision\", 2)\n table_good_reg = pd.DataFrame([[get_DE(eta_OT)[1],get_DRE(eta_OT,r_hat,r_hat_ring)[1]],[get_DE(eta_MMD)[1],get_DRE(eta_MMD,r_hat,r_hat_ring)[1]], [get_DE(eta_NAB)[1],get_DRE(eta_NAB,r_hat,r_hat_ring)[1]], [get_DE(eta_RF)[1],get_DRE(eta_RF,r_hat,r_hat_ring)[1]],[get_DE(eta_XGB)[1],get_DRE(eta_XGB,r_hat,r_hat_ring)[1]], [get_DE(eta_LR)[1],get_DRE(eta_LR,r_hat,r_hat_ring)[1]],[None, torch.abs(r_hat_ring.mean()-ref).item()]], columns = (\"DE\",\"DRE\"), index = (\"OT\", \"MMD\",\"NAB\", \"GIPW-RF\",\"GIPW-XGB\",\"GIPW-LR\",\"G-computation\"))\n \n \n # In[35]:\n \n \n return table_bad_reg, table_good_reg\n\n\n"
] | [
[
"sklearn.ensemble.RandomForestRegressor",
"torch.normal",
"torch.abs",
"torch.ones",
"sklearn.linear_model.LogisticRegression",
"torch.zeros",
"torch.sin",
"torch.from_numpy",
"sklearn.linear_model.LinearRegression",
"torch.rand",
"torch.arange",
"pandas.set_option",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Xuerui-Yang/xuerui-stat | [
"08b9dfedac810cbad5ee5969ca554212eb989db0"
] | [
"xuerui_stat/analysis/random_forest/plot_tree.py"
] | [
"import matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\n\r\n\r\nclass PlotTree():\r\n def __init__(self,tree_class):\r\n self._tree_class=tree_class\r\n self._decision_node = dict(boxstyle=\"sawtooth\", fc=\"0.8\")\r\n self._leaf_node = dict(boxstyle=\"round4\", fc=\"0.8\")\r\n self._arrow_args = dict(arrowstyle=\"<-\")\r\n\r\n def __get_tree_depth(self,tree):\r\n \"\"\"获取树的深度\"\"\"\r\n depth = 0\r\n # 定义的dict中首位储存的是节点信息,不计入计数\r\n for key in ('Left', 'Right'):\r\n # 记录各子节点的深度\r\n sub_tree = tree[key]\r\n if type(sub_tree).__name__ == \"dict\":\r\n # 如果该节点有分支,迭代计算该节点的深度\r\n thisdepth = self.__get_tree_depth(sub_tree)\r\n else:\r\n # 否则深度为一\r\n thisdepth = 1\r\n # 比较各分支深度,保留最深记录\r\n if thisdepth > depth:\r\n depth = thisdepth\r\n # 分支深度加一即为当前节点深度\r\n return depth + 1\r\n\r\n\r\n def __plot_node(self,node_txt, cntr_pt, prnt_pt, node_type):\r\n self._ax1.annotate(node_txt, xy=prnt_pt, xycoords='axes fraction',\r\n xytext=cntr_pt, textcoords='axes fraction',\r\n va=\"center\", ha=\"center\", bbox=node_type, arrowprops=self._arrow_args)\r\n\r\n\r\n def __plot_mid_text(self,cntr_pt, prnt_pt, txt_string):\r\n xMid = (prnt_pt[0] - cntr_pt[0]) / 2.0 + cntr_pt[0]\r\n yMid = (prnt_pt[1] - cntr_pt[1]) / 2.0 + cntr_pt[1]\r\n self._ax1.text(xMid, yMid, txt_string, va=\"center\",\r\n ha=\"center\", rotation=30)\r\n\r\n def __plot_tree(self,tree, prnt_pt, node_txt, branch=None):\r\n self._layer += 1\r\n diff = 1 / 2**(self._layer)\r\n keys = list(tree.keys())\r\n text = tree[keys[0]]\r\n if branch == 'Left':\r\n self._xOff -= diff\r\n elif branch == 'Right':\r\n self._xOff += diff\r\n else:\r\n pass\r\n cntr_pt = (self._xOff, self._yOff)\r\n self.__plot_mid_text(cntr_pt, prnt_pt, node_txt)\r\n self.__plot_node(text, cntr_pt, prnt_pt, self._decision_node)\r\n self._yOff = self._yOff - 1.0 / self._totalD\r\n for key in keys[1:]:\r\n sub_tree = tree[key]\r\n if type(sub_tree).__name__ == 'dict':\r\n self.__plot_tree(sub_tree, cntr_pt, str(key), key)\r\n else:\r\n if key == 'Left':\r\n x = self._xOff - diff / 2\r\n elif key == 'Right':\r\n x = self._xOff + diff / 2\r\n else:\r\n pass\r\n self.__plot_node(sub_tree, (x, self._yOff), cntr_pt, self._leaf_node)\r\n self.__plot_mid_text((x, self._yOff), cntr_pt, str(key))\r\n if branch == 'Left':\r\n self._xOff += diff\r\n elif branch == 'Right':\r\n self._xOff -= diff\r\n else:\r\n pass\r\n self._layer -= 1\r\n self._yOff = self._yOff + 1.0 / self._totalD\r\n\r\n def tree_structure_plot(self):\r\n fig = plt.figure(1, facecolor='white')\r\n fig.clf()\r\n axprops = dict(xticks=[], yticks=[])\r\n self._ax1 = plt.subplot(111, frameon=False, **axprops)\r\n self._totalD = float(self.__get_tree_depth(self._tree_class.tree))\r\n self._xOff = 0.5\r\n self._yOff = 1.0\r\n self._layer = 0\r\n self.__plot_tree(self._tree_class.tree, (0.5, 1.0), '')\r\n plt.show()\r\n\r\n def confusion_matrix_plot(self):\r\n mat=self._tree_class.confusion_matrix\r\n if mat is None:\r\n print(\"The confusion matrix is not computed. Please use 'test()' in 'DecisionTree' class to get it.\")\r\n else:\r\n fig, ax = plt.subplots(figsize=(6, 6))\r\n sns.heatmap(mat,xticklabels=mat.columns,yticklabels=mat.index,\r\n cbar_kws={\"shrink\": .5}, ax=ax)\r\n plt.tight_layout()\r\n plt.show()\r\n"
] | [
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cvisb/cvisb_data | [
"81ebf22782f2c44f8aa8ab9437cc4fb54248c3ed",
"81ebf22782f2c44f8aa8ab9437cc4fb54248c3ed",
"81ebf22782f2c44f8aa8ab9437cc4fb54248c3ed",
"81ebf22782f2c44f8aa8ab9437cc4fb54248c3ed"
] | [
"sample-viewer-api/src/static/data/exploratory_scripts/2019-06-03_bonnie_plasmasamples.py",
"sample-viewer-api/src/static/data/exploratory_scripts/merge_public_ids_20190227.py",
"sample-viewer-api/src/static/data/compile_cvisb_data/clean_serology/clean_sero_covid_32783920.py",
"sample-viewer-api/src/static/data/compile_cvisb_data/clean_serology/generate_serology_datadownload.py"
] | [
"# Goal: get ebola/Lassa for Bonnie's plasma samples.\n# Simple clean and merge\n\nimport pandas as pd\n\nimport os\nos.chdir(\"/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/\")\n\nimport helpers\n\ndf = pd.read_excel(\"/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/input_data/sample_rosters/one_offs/CViSB Plasma Samples_Bonnie_2019-06-03.xlsx\")\n\ndf.shape\n\n\ndf['privatePatientID'] = df[\"Sample ID\"].apply(helpers.interpretID)\n\n# id dictionary\nids = pd.read_json(\"/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/output_data/patients/patients_2019-06-03_PRIVATE_dict.json\")\nids.reset_index(inplace=True)\nids.head()\nmerged = pd.merge(df, ids, how=\"left\", left_on=\"privatePatientID\", right_on=\"index\", indicator=True)\n\nmerged._merge.value_counts()\n\nmerged[merged._merge == \"left_only\"]\nmerged = merged[['Sample ID', \"Date of collection\", \"Sample type\", \"cohort\", \"elisa\", \"sID\", \"gID\", \"patientID\"]]\nmerged.to_csv(\"/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/input_data/sample_rosters/one_offs/2019-06-03_CViSBplasma_Bonnie.csv\", index = False)\n",
"import pandas as pd\nimport re\n\nroster_file = \"/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/2019-01-31_patients_PRIVATE.csv\"\nnew_id_file = \"/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/input_data/patient_rosters/survivor_IDdict_v1_2012-02-27_PRIVATE.xlsx\"\n\n# import new IDs from John\n# read in new IDs as string to ensure preserve leading 0s\nids = pd.read_excel(new_id_file, converters={'Study Specific #': str})\n\n# --- Grab the household ID information ---\n# Double check all IDs are 7-digits\nids['id_length'] = ids['Study Specific #'].map(len)\nids.id_length.value_counts()\n\nids['hhID'] = ids['Study Specific #'].apply(lambda x: x[:-1])\nids['hhNumber'] = ids['Study Specific #'].apply(lambda x: x[-1])\n\n# Huh. More contacts than I would have thought; roughly divided between primary contacts, #1, #2, #3\nids.hhNumber.value_counts()\n\nids[ids.Type == \"s-\"]\n\n# Fix case of type; some S/s ambiguities\n# Majority of samples are contacts.\nids.Type.value_counts()\nids[\"ID_type\"] = ids.Type.apply(lambda x: x.upper())\n\n# n = G: 137, S: 358, C: 1293\n# ids[ids.ID_type==\"S-\"][\"G-Number\"].notnull().sum()\n# --- Append ID_type to study number to create new id ---\n\n\ndef makePublicID(row):\n return(row.ID_type + row['Study Specific #'])\n\n\nids['publicPatientID'] = ids.apply(makePublicID, axis=1)\n\n# Double check public PatientIDs are unique.\nlen(ids[\"Study Specific #\"].unique()) == len(ids)\nlen(ids) - len(ids[\"Study Specific #\"].unique())\n# Duplicate rows: checking if duplicated in entire array. One value totally duplicated...\nids[ids.duplicated()]\n\n# ... and another where the same new ID is assigned to two different contacts. :(\nids[ids.duplicated(subset=[\"Study Specific #\"])]\nids[ids[\"Study Specific #\"] == \"8177251\"]\n\nids[ids[\"hhID\"] == \"817725\"]\n\n\n# --- Create dictionary of known IDs ---\n# split G-numbers into multiple values\ndef getGID(id):\n if(id == id):\n if(re.search(\"^[0-9][0-9][0-9]\", id)):\n return(\"G-\" + str(id))\n else:\n # For EM110\n return(str(id))\n else:\n # Ignore NaNs\n return(pd.np.nan)\n\n\ndef splitGID(id):\n if(id == id):\n return(str(id).split(\"/\"))\n else:\n # Ignore NaNs\n return(pd.np.nan)\n\n\ndef getSID(row):\n return(row.ID_type + str(row.ID).zfill(3))\n\n# Extract only the S-ids from S and C-ids\n\n\ndef findSID(id):\n if(re.search(\"^S\\-[0-9][0-9][0-9]\", id)):\n return(id)\n else:\n return(pd.np.nan)\n\n# def grabIDs(row):\n# # split G-numbers into multiple values\n# if(row[\"G-Number\"]==row[\"G-Number\"]):\n# gID = str(row[\"G-Number\"]).split(\"/\")\n# # Append G- to G numbers\n# gID = [\"G-\" + s for s in gID]\n# else:\n# # Ignore NaNs\n# gID = []\n\n # # Append S- or C- to IDs\n # sID = row.ID_type + str(row.ID).zfill(3)\n # # Merge together and return\n # gID.append(sID)\n # return(gID)\n\n\nids['gID'], ids['gID2'] = ids['G-Number'].apply(splitGID).str\nids['gID'] = ids.gID.apply(getGID)\nids['gID2'] = ids.gID2.apply(getGID)\n# Taking the first G-number to be its G-number\nids['G_number'] = ids.gID\nids['alternateIdentifier'] = ids.gID2\n\n# ids['id_dict'] = ids.apply(grabIDs, axis = 1)\n# ids['id_count'] = ids.id_dict.map(len)\n\nids['sID'] = ids.apply(getSID, axis=1)\nids['S_number'] = ids.sID.apply(findSID)\n\n# Hand check anything with a dictionary value length > 2\n# Expect 137 IDs to have more than 1 ID.\n# Seems to check out.\nids[\"G-Number\"].notnull().sum()\n\n# ids.id_count.value_counts()\n\n\n# Check S-ids are unique. 
They are not.\nids[ids.duplicated(subset=[\"sID\"])].sID\n# G-ids seem to be unique.\nids.dropna(subset=[\"G-Number\"]\n )[ids.dropna(subset=[\"G-Number\"]).duplicated(subset=[\"gID\"])]\n\n\n# --- Group by household IDs to generate relatedTo, hhCount ---\nids[\"hhCount\"] = ids.groupby(\"hhID\").publicPatientID.transform(\"count\")\n# ids[\"relatedTo\"] = ids.groupby(\"hhID\")['publicPatientID'].apply(listicle, axis = 1)\n\nids['gID'] = ids.gID.apply(lambda x: [\"G-\" + str(x) for id in x])\n\n\n# convert wide to long, to create a many:many ID --> publicID dictionary.\nid_df = pd.melt(ids, value_vars=[\"gID\", \"gID2\", \"sID\"], id_vars=[\"publicPatientID\", \"hhID\", 'hhNumber', \"hhCount\",\n \"ID_type\", \"G_number\", \"S_number\", \"alternateIdentifier\"], var_name=\"type_discard\", value_name='ID')\n\nid_df = id_df.dropna(subset=[\"ID\"]).drop([\"type_discard\"], axis=1)\n\n\nids.sample(3)\n# id_dict =\nid_dict = id_df.set_index(\"ID\").to_dict(\"index\")\n\n# -------------------------------- MERGE --------------------------------\ncvisb = pd.read_csv(roster_file)\n\ncvisb.head()\n\n# Categorize the ID type\n\n\ndef findType(id):\n if(re.search(\"^G\\-[0-9][0-9][0-9]\", id)):\n return(\"G-id\")\n elif(re.search(\"^C.*[0-9][0-9][0-9]\", id)):\n return(\"C-id\")\n elif(re.search(\"^S.*\\-[0-9][0-9][0-9]\", id)):\n return(\"S-id\")\n elif(re.search(\"^S[0-9][0-9][0-9]\", id)):\n return(\"S-id\")\n else:\n return(\"unknown\")\n\n\ncvisb['id_type'] = cvisb.ID.apply(findType)\n\n# Very different distribution of data between John's roster and the samples we've pulled!\ncvisb.id_type.value_counts()\n\n# Initial test: merging based on G-ID or S/C-ID. Mostly fails.... 75 match exactly w/ G-ids, 146 with S or C-ids.\n# Problems:\ndfG = pd.merge(cvisb, ids, how='left', indicator=True,\n left_on=\"ID\", right_on=\"gID\")\ndfS = pd.merge(cvisb, ids, how='left', indicator=True,\n left_on=\"ID\", right_on=\"sID\")\n\ndfG._merge.value_counts()\ndfS.groupby(\"id_type\")._merge.value_counts()\n\n\ng_pdmerge = dfG[(dfG.id_type == \"G-id\") & (dfG._merge == \"both\")].ID_x\ns_pdmerge = dfS[(dfS.id_type != \"G-id\") & (dfS._merge == \"both\")].ID_x\n\ndfS._merge.value_counts()\ndfS.groupby(\"id_type\")._merge.value_counts()\n\n\n# -------------------------------- MERGE #2: manual loop function --------------------------------\n# Re-importing data, so the allIDs comes in as an array\nimport os\nos.chdir(\"/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/\")\nfrom cvisb_patient_prep import patients\n\n# Remove Nigerian patients\npatients = patients[patients.country != 'Nigeria'].drop('country', axis=1)\n\npatients['id_type'] = patients.ID.apply(findType)\n\n# from ast import literal_eval\n# # Convert strings to array\n# def string2array(val):\n# if(re.search(\"^\\[.+\\]$\", val)):\n# print('converting')\n# return(literal_eval(val))\n# else:\n# return((val))\n\n# cvisb['allIDs2'] = cvisb.allIDs.apply(string2array)\n\n\ndef findID(row, includeAssumptions=False):\n # for all: check no whitespace on any side\n public_ids = []\n # Check the main ID\n # print(row.ID)\n main_id = row.ID.strip()\n if(main_id in id_dict):\n # print('found')\n public_ids.append(id_dict[main_id]['publicPatientID'])\n\n # If include assumed IDs, check those as well.\n if(includeAssumptions):\n assumed_id = str(row.ID_assumed).strip()\n if(assumed_id in id_dict):\n public_ids.append(id_dict[assumed_id]['publicPatientID'])\n\n# Array of Alternate IDs\n if(isinstance(row.allIDs, list)):\n for id in row.allIDs:\n # 
print(id)\n # Check if it exists in the dictionary\n if(id.strip() in id_dict):\n # print('found')\n public_ids.append(id_dict[id.strip()]['publicPatientID'])\n else:\n # Single ID string\n # print(row.allIDs)\n alt_id = row.allIDs.strip()\n if(alt_id in id_dict):\n public_ids.append(id_dict[alt_id]['publicPatientID'])\n print(public_ids)\n # remove duplicates\n return(list(set(public_ids)))\n\n\ndef interpretID(id):\n id = str(id)\n # Interpret ID based on regex patterns\n # Assuming C1-xxx-2 == C[visit number]-[id number]-[household number]\n # Therefore deleting the visit code\n weirdC = re.match(\"^(C)([0-9])(\\-[0-9][0-9][0-9]\\-[0-9])$\", id)\n if weirdC:\n return(weirdC[1] + weirdC[3])\n # S-timepoint: assuming S-xxx-3 == S-xxx at visit 3.\n sTimepoint = re.match(\"^(S)(\\-[0-9][0-9][0-9])\\-([0-9])$\", id)\n if sTimepoint:\n return(sTimepoint[1] + sTimepoint[2])\n # 3-digit SID: S-xxx-3 --> S-xxx at visit 3\n sTimepoint2 = re.match(\"^(S\\-)0([0-9][0-9][0-9])\\-([0-9])$\", id)\n if sTimepoint2:\n return(sTimepoint2[1] + sTimepoint2[2])\n # S-timepoint in diff format: assuming S2-xxx == S-xxx at visit 2\n weirdS = re.match(\"^(S)([0-9])(\\-[0-9][0-9][0-9])$\", id)\n if weirdS:\n return(weirdS[1] + weirdS[3])\n nohyphen = re.match(\"^(S)([0-9][0-9][0-9])$\", id)\n if nohyphen:\n return(nohyphen[1] + \"-\" + nohyphen[2])\n\n return(pd.np.nan)\n\n\npatients['ID_assumed'] = patients.ID.apply(interpretID)\n\n# Only look for exact matches between IDs in our roster and John's.\n# Only interpretation I did: Gxxx or Gxxxx = G-xxx / G-xxxx\npatients['publicID_exact'] = patients.apply(findID, axis=1)\npatients['exactLength'] = patients.publicID_exact.apply(len)\n\npatients.exactLength.value_counts()\npatients.groupby(\"id_type\").exactLength.value_counts()\n\n# Look for exact + inexact matches.\n# Only interpretation I did: Gxxx or Gxxxx = G-xxx / G-xxxx\npatients['publicID_inexact'] = patients.apply(\n lambda x: findID(row=x, includeAssumptions=True), axis=1)\npatients['inexactLength'] = patients.publicID_inexact.apply(len)\npatients['inexactMatch'] = patients.publicID_inexact.apply(\n lambda x: len(x) > 0)\n\npatients.inexactLength.value_counts()\npatients.groupby(\"id_type\").inexactLength.value_counts()\n\npatients.sample(10)\n\n# A couple of entries seem to be transcribed improperly, leading to multiple identifiers:\n# G-3284\t\"This should be G-3283\" -- Brian's rosters\n# S-492 \t\"This should be S-452\" -- Brian's rosters\n# G-4009\tS-009 -- Brian's roster\npatients[patients.inexactLength > 1]\n\n\n# Check difference\ng_mymerge = patients[(patients.id_type == \"G-id\")\n & (patients.exactLength > 0)].ID\ns_mymerge = patients[(patients.id_type != \"G-id\")\n & (patients.exactLength > 0)].ID\nset(g_pdmerge) - set(g_mymerge)\nset(g_mymerge) - set(g_pdmerge)\nset(s_pdmerge) - set(s_mymerge)\nset(s_mymerge) - set(s_pdmerge)\n\n# Bit of cleanup before exporting\n\n\ndef cleanID(id):\n if(len(id) == 0):\n return pd.np.nan\n elif(len(id) == 1):\n return(id[0])\n else:\n return(id)\n\n id_df.head()\n\n\n# Final check for merges: are there unused IDs from the matches that I have that John doesn't have?\n# -- and therefore might be contradictory.\nmatches = patients[patients.inexactLength == 1][[\n 'ID', 'allIDs', 'ID_assumed', 'publicID_inexact']]\nmatches['publicPatientID'] = matches.publicID_inexact.apply(cleanID)\nmatches.head()\n\nmerged = pd.merge(matches, id_df, how=\"left\",\n on=\"publicPatientID\", indicator=True)\nmerged._merge.value_counts() # YAY! 
all merge.\n\n\ndef compareRosters(row):\n # Pull out all identifiers from CViSB\n cvisb = row.allIDs\n if(isinstance(cvisb, list)):\n cvisb.append(row.ID_x)\n if(row.ID_assumed == row.ID_assumed):\n cvisb.append(row.ID_assumed)\n else:\n cvisb = [row.ID_x, row.ID_assumed, row.allIDs]\n\n # Use the same assumptions to get rid of timepoint IDs\n cvisb = [convert(id) for id in cvisb]\n\n # Pull out Tulane identifiers\n john = set([row.G_number, row.S_number, row.alternateIdentifier, row.ID_y])\n\n return(list(set(cvisb) - john))\n\n\ndef convert(id):\n if(id == id):\n id = id.strip()\n # Use interpretID function to clean up S and C IDs\n first_pass = interpretID(id)\n if(first_pass == first_pass): # not null\n return(first_pass)\n\n sid = re.match(\"^(S\\-)([0-9][0-9])(\\-[0-9])$\", id)\n if sid:\n return(sid[1] + \"0\" + sid[2])\n\n gid = re.match(\"^(G)([0-9][0-9][0-9][0-9])$\", id)\n if gid:\n return(gid[1] + \"-\" + gid[2])\n\n gidTime = re.match(\"^(G)([0-9][0-9][0-9][0-9])\\-\\d$\", id)\n if gidTime:\n return(gidTime[1] + \"-\" + gidTime[2])\n if id == \"nan\":\n return(pd.np.nan)\n\n return(id)\n\n\n[convert(id) for id in [\"S3-107\", \"G5578\"]]\n\nmerged.head()\nmerged['leftover_IDs'] = merged.apply(compareRosters, axis=1)\nmerged['leftover_length'] = merged.leftover_IDs.apply(len)\n\nmerged.leftover_length.value_counts()\n\nmerged[merged.leftover_length > 0][[\"ID_x\", \"ID_assumed\", \"allIDs\", \"ID_y\", \"G_number\", \"S_number\",\n \"alternateIdentifier\", \"leftover_IDs\", \"leftover_length\"]].to_csv(\"2019-02-28_weirdIDjoins.csv\")\n\n\n# ----- Export G-numbers without matches -------\n# reorder rows and columns\npatients.sort_values([\"inexactLength\", \"id_type\", \"ID\"], inplace=True)\n\n\nmissing = patients[patients.exactLength != 1]\nmissing = missing[[\"ID\", \"ID_assumed\", \"allIDs\", \"id_type\",\n \"cohort\", \"outcome\", \"publicID_inexact\", \"inexactMatch\", \"source\"]]\nmissing[\"publicID_inexact\"] = missing.publicID_inexact.apply(cleanID)\n\nmissing.to_csv(\n \"2019-02-28_CViSBmissingPublicIDs_PRIVATE.csv\", index=False)\n",
"# Code to cleanup serology data from \"Distinct Early Serological Signatures Track with SARS-CoV-2 Survival\", Immunity 2020\n# Note: code is certainly not the most efficient or elegant. Apologies\n# Laura Hughes, [email protected]\n# October 2020\n\nimport pandas as pd\n# import helpers\nfrom datetime import datetime\nimport re\nfrom math import ceil\n\nimport os\nos.chdir(\"/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/compile_cvisb_data\")\n# Helper functions for cleanup...\nimport helpers\n\n# constants, statics.\nsero_file=\"/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/input_data/expt_summary_data/systems_serology/systems-serology-covid19-01.xlsx\"\nversion = 0.1\nupdatedBy = \"Tomer Zohar\"\ndateUpdated = \"2020-10-19\"\noutput_dir = \"/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/output_data\"\n# output_dir = \"../output_data\"\ndef clean_sero_covid(sero_file, version, updatedBy, dateUpdated, output_dir):\n patientCols = [\"patientID\", \"alternateIdentifier\",\n \"gender\", \"ethnicity\", \"age\",\n \"outcome\", \"cohort\", \"species\", \"hasPatientData\",\n \"dateModified\", \"updatedBy\", \"dataStatus\", \"sourceFiles\",\n \"country\", \"location\", \"locationPrivate\", \"daysOnset\",\"medications\", \"symptoms\",\n \"infectionYear\", \"publisher\", \"citation\"]\n today = datetime.today().strftime(\"%Y-%m-%d\")\n # --- Load data ---\n df = pd.read_excel(sero_file)\n # Remove blank rows\n df.dropna(subset=[\"SampleID\"], inplace=True)\n\n # --- provenance ---\n df[\"dateModified\"] = dateUpdated\n df[\"updatedBy\"] = updatedBy\n df[\"sourceFiles\"] = df.SampleID.apply(lambda x: [sero_file])\n df[\"dataStatus\"] = \"final\"\n df['creator'] = df.SampleID.apply(lambda x: [helpers.getLabAuthor(\"Galit\")])\n df['publisher'] = df.SampleID.apply(lambda x: helpers.cvisb)\n citation = helpers.getSource(df.iloc[0],[\"32783920\"])\n df['citation'] = df.SampleID.apply(lambda x: citation)\n\n # --- patient attributes ---\n df[\"patientID\"] = df.SampleID.apply(lambda x: f\"serology32783920_{str(x).zfill(2)}\")\n df[\"alternateIdentifier\"] = df.patientID.apply(lambda x: [x])\n df[\"gender\"] = df.Sex.apply(convertSex)\n df[\"ethnicity\"] = df.Race.apply(convertEthnicity)\n # df[\"age\"] = df.Age.apply(convertAge)\n df[\"age\"] = None\n df[\"outcome\"] = df.apply(convertOutcome, axis=1)\n df[\"daysOnset\"] = df.Admit_From_Onset\n df[\"medications\"] = df.apply(getMeds, axis=1)\n\n # constants\n df[\"cohort\"] = \"COVID-19\"\n df[\"species\"] = \"Homo sapiens\"\n df[\"country\"] = df.SampleID.apply(lambda x: helpers.getCountry(\"USA\"))\n df[\"countryName\"] = \"United States\"\n df[\"location\"] = df.SampleID.apply(lambda x: [{\"name\": \"Seattle\", \"locationType\": \"unknown\", \"administrativeType\": \"city\", \"@type\": \"AdministrativeArea\"}, {\"name\": \"Washington\", \"locationType\": \"unknown\", \"administrativeType\": \"state\", \"administrativeUnit\": 1, \"identifier\": \"US-WA\", \"@type\": \"AdministrativeArea\"}, {\"name\": \"United States\", \"locationType\": \"unknown\", \"administrativeType\": \"country\", \"administrativeUnit\": 0, \"identifier\": \"US\", \"@type\": \"Country\"}])\n df[\"locationPrivate\"] = df.location\n df[\"hasPatientData\"] = True\n df[\"infectionYear\"] = 2020\n df['version'] = version\n\n df[\"symptoms\"] = df.Ards.apply(getSymptoms)\n\n # --- Check uniqueness ---\n if(sum(df.duplicated(subset=[\"patientID\"]))):\n print(\"ERROR! 
Duplicate patient IDs detected\")\n\n df[patientCols].to_json(f\"{output_dir}/patients/{today}_patient_sero_serology32783920.json\", orient=\"records\")\n\n\n # --- Experiments cleanup ---\n # --- SEROLOGY ---\n sero = df[[\"patientID\", \"updatedBy\", \"version\", \"sourceFiles\", \"dataStatus\", \"citation\", \"creator\", \"publisher\", \"dateModified\", 'S IgG1','RBD IgG1','N IgG1','S IgG2','RBD IgG2','N IgG2','S IgG3','RBD IgG3','N IgG3','S IgG4','RBD IgG4','N IgG4','S IgA1','RBD IgA1','N IgA1','S IgA2','RBD IgA2','N IgA2','S IgM','RBD IgM','N IgM','S FcRg2A','RBD FcRg2A','N FcRg2A','S FcRg2b','RBD FcRg2b','N FcRg2b','S FcRg3A','RBD FcRg3A','N FcRg3A','S SNA','RBD SNA','N SNA','S RCA','RBD RCA','N RCA','S ADCP','RBD ADCP','N ADCP','S ADNP','RBD ADNP','N ADNP','S ADCD','RBD ADCD','N ADCD','S NKD-CD107a','RBD NKD-CD107a','N NKD-CD107a','S NKD-MIP1b','RBD NKD-MIP1b','N NKD-MIP1b', 'NT50']]\n\n # --- Compile data object ---\n sero = pd.melt(sero, id_vars=[\"patientID\", \"updatedBy\", \"sourceFiles\", \"dataStatus\", \"citation\", \"creator\", \"publisher\", \"dateModified\", \"version\"])\n sero[\"antigen\"] = sero.variable.apply(getAntigen)\n sero[\"assay\"] = sero.variable.apply(getAssay)\n sero[\"data\"] = sero.apply(getSeroData, axis=1)\n\n sero[\"experimentID\"] = sero.apply(lambda x: f\"{x.patientID}_{x.assay}_{x.antigen}\", axis=1)\n sero[\"privatePatientID\"] = sero.patientID\n sero[\"releaseDate\"] = \"2020-10-01\"\n\n # --- experiment classifications ---\n sero[\"variableMeasured\"] = sero.assay\n sero[\"measurementTechnique\"] = \"Serology\"\n sero[\"measurementCategory\"] = \"Systems Serology\"\n sero[\"includedInDataset\"] = \"systems-serology-32783920\"\n sero['isControl'] = sero.experimentID.apply(getControl)\n sero.drop([\"variable\", \"value\", \"antigen\", \"assay\"], axis=1, inplace=True)\n # --- Check uniqueness ---\n if(sum(sero.duplicated(subset=[\"experimentID\"]))):\n print(\"ERROR! Duplicate experiment IDs detected in serology data\")\n\n chunk_size = 500\n for i in range(0, ceil(len(sero)/chunk_size)):\n sero.iloc[i*chunk_size:(i+1)*chunk_size].to_json(f\"{output_dir}/experiments/{today}_experiment_sero_serology32783920_{i}.json\", orient=\"records\")\n\n # --- RT-PCR ---\n pcr = df[df.Viral_Load == df.Viral_Load][[\"patientID\", \"updatedBy\", \"version\", \"sourceFiles\", \"dataStatus\", \"citation\", \"creator\", \"publisher\", \"dateModified\", \"Viral_Load\"]]\n pcr[\"data\"] = pcr.apply(getPCRData, axis=1)\n\n pcr[\"experimentID\"] = pcr.patientID.apply(lambda x: f\"{x}_RT-PCR\")\n pcr[\"privatePatientID\"] = pcr.patientID\n pcr[\"releaseDate\"] = \"2020-10-01\"\n\n # --- experiment classifications ---\n pcr[\"variableMeasured\"] = \"virus level\"\n pcr[\"measurementTechnique\"] = \"Reverse Transcriptase-Polymerase Chain Reaction\"\n pcr[\"measurementCategory\"] = \"clinical measurements\"\n pcr[\"includedInDataset\"] = \"rtpcr-32783920\"\n if(sum(pcr.duplicated(subset=[\"experimentID\"]))):\n print(\"ERROR! 
Duplicate experiment IDs detected in PCR data\")\n pcr.to_json(f\"{output_dir}/experiments/{today}_experiment_rtpcr_serology32783920.json\", orient=\"records\")\n\n return({\"sero\":sero, \"pcr\": pcr})\n\n\ndef getSeroData(row):\n obj = {}\n obj['@type'] = \"SystemsSerology\"\n obj['assayType'] = row.assay\n obj['value'] = row.value\n antigen = None\n if(row.antigen == \"S\"):\n obj['antigen'] = \"SARS-CoV-2 Spike Antigen\"\n elif(row.antigen == \"RBD\"):\n obj['antigen'] = \"SARS-CoV-2 Receptor Binding Domain Antigen\"\n if(row.antigen == \"N\"):\n obj['antigen'] = \"SARS-CoV-2 Nucleocapsid Antigen\"\n obj['antigenVirus'] = \"SARS-CoV-2\"\n # Add in a pseudo-binary if the sample is a control experiment\n if(re.search(\"control\", row.patientID.lower())):\n obj['controlType'] = row.patientID\n return([obj])\n\ndef getPCRData(row):\n if(row.Viral_Load == row.Viral_Load):\n obj = {}\n obj['@type'] = \"RTPCR\"\n obj[\"virus\"] = \"SARS-CoV-2\"\n obj[\"RTPCRresult\"] = \"positive\"\n obj[\"RTPCRvalue\"] = row.Viral_Load\n return([obj])\n\ndef clean_immune_effector_funcs(filename, sero_cols, updatedBy, dateModified, version, verbose, output_dir):\n # --- checks ---\n # Experiment ID is unique\n # Check if data is null\n null_data = df[df['value'].isnull()]\n if(len(null_data) > 0):\n helpers.log_msg(f\"{'-'*50}\", verbose)\n helpers.log_msg(f\"\\tDATA WARNING: {len(null_data)} experiments have null data values\", verbose)\n helpers.log_msg(null_data[['sampleID', 'experimentID', 'batchID']], verbose)\n helpers.log_msg(f\"{'-'*50}\", verbose)\n\n return(df[sero_cols])\n\ndef getAntigen(variable):\n terms = variable.split(\" \")\n if(len(terms) == 2):\n return terms[0]\n\ndef getAssay(variable):\n terms = variable.split(\" \")\n if(len(terms) == 2):\n return terms[1]\n else:\n return(terms[0])\n\n# --- Patient helper functions ---\n# 0, Male | 1, Female\ndef convertSex(sex):\n if(sex == 0):\n return(\"Male\")\n elif(sex == 1):\n return(\"Female\")\n# 0, Convalescent | 1, Deceased\ndef convertOutcome(row):\n if(row.Outcome == 0):\n return(\"survivor\")\n elif(row.Outcome == 1):\n return(\"dead\")\n elif(\"control\" in row.SampleID.lower()):\n return(\"control\")\n\n# 1, American Indian/Alaska Native (AIAN) | 2, Asian | 3, Black | 4, White | 5, Other | 6, Multiracial\ndef convertEthnicity(race):\n if(race == 1):\n return(\"American Indian/Alaska Native\")\n elif(race == 2):\n return(\"Asian\")\n elif(race == 3):\n return(\"Black\")\n elif(race == 4):\n return(\"White\")\n elif(race == 5):\n return(\"Other\")\n elif(race == 6):\n return(\"Multiracial\")\n\n\n# 0, younger than 49 | 1, 50-59 | 2, 60-69 | 3, 70-79 | 4, 80 and older\ndef convertAge(age):\n if(age == 0):\n return(\"<= 49\")\n elif(age == 1):\n return(\"50-59\")\n elif(age == 2):\n return(\"60-69\")\n elif(age == 3):\n return(\"70-79\")\n elif(age == 4):\n return(\">= 80\")\n\ndef getMeds(row):\n return([\n {\"@type\": \"Medication\", \"name\":'Remdesivir', \"isDrugAdministered\": bool(row.Remdesivir)},\n {\"@type\": \"Medication\", \"name\":'Antibiotics', \"isDrugAdministered\": bool(row.Antibiotics)},\n {\"@type\": \"Medication\", \"name\":'Chloroquines', \"isDrugAdministered\": bool(row.Chloroquines)},\n {\"@type\": \"Medication\", \"name\":'Tocilizumab', \"isDrugAdministered\": bool(row.Tocilizumab)}\n ])\n\ndef getSymptoms(ards):\n if(ards==ards):\n return([{\n \"@type\": \"AcuteSymptom\", \"acuteRespiratoryDistressSyndrome\": bool(ards)\n }])\n\n# --- Experiment helper functions ---\ndef getControl(id):\n return(\"control\" in 
id.lower())\n\nexperiments = clean_sero_covid(sero_file, version, updatedBy, dateUpdated, output_dir)\n\n\n# EXPORT DATASETS\n# EXPORT DATADOWNLOAD\ntoday = datetime.today().strftime(\"%Y-%m-%d\")\nos.chdir(\"/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/compile_cvisb_data/clean_serology/\")\nfrom generate_serology_covid1_datadownload import get_serology_downloads\nfrom generate_serology_covid1_dataset import get_serology_dataset\n\ndwnld = get_serology_downloads(dateUpdated, experiments[\"sero\"], updatedBy, version, \"systems-serology-32783920\")\ndwnld.to_json(f\"{output_dir}/datadownloads/{today}_download_sero_serology32783920.json\", orient=\"records\")\nds = get_serology_dataset(dateUpdated, dwnld, experiments[\"sero\"], version, \"systems-serology-32783920\")\nds.to_json(f\"{output_dir}/datasets/{today}_dataset_sero_serology32783920.json\", orient=\"records\")\n\ndwnld = get_serology_downloads(dateUpdated, experiments[\"pcr\"], updatedBy, version, \"rtpcr-32783920\")\ndwnld.to_json(f\"{output_dir}/datadownloads/{today}_download_rtpcr_serology32783920.json\", orient=\"records\")\nds = get_serology_dataset(dateUpdated, dwnld, experiments[\"pcr\"], version, \"rtpcr-32783920\")\nds.to_json(f\"{output_dir}/datasets/{today}_dataset_rtpcr_serology32783920.json\", orient=\"records\")\n",
"import pandas as pd\nimport os\nimport json\n\n# [Import helper functions] ----------------------------------------------------------------------------------------------------\n# Helper functions for cleanup...\nimport helpers\n\n\ndef get_serology_downloads(dateModified, experiments, updatedBy, version, datasetID):\n ds = {}\n\n # --- static variables ---\n # identifiers\n ds['@context'] = \"http://schema.org/\"\n ds[\"@type\"] = \"DataDownload\"\n ds[\"includedInDataset\"] = datasetID\n ds[\"name\"] = \"CViSB-SystemsSerology.csv\"\n ds[\"description\"] = \"Summary of systems serology measurements\"\n ds[\"identifier\"] = \"CViSB_SystemsSerology.csv\"\n\n # properties\n ds[\"measurementCategory\"] = \"Systems Serology\"\n ds[\"additionalType\"] = \"summary data\"\n ds[\"encodingFormat\"] = \"text/csv\"\n ds[\"contentUrl\"] = f\"https://data.cvisb.org/dataset/{ds['name']}\"\n\n # credit\n ds['creator'] = [helpers.getLabAuthor(\"Galit\")]\n ds['publisher'] = [helpers.cvisb]\n\n # --- possibly variable, each time ---\n ds[\"version\"] = version\n ds[\"dateModified\"] = dateModified\n ds[\"updatedBy\"] = updatedBy\n\n # pulled from experiments\n ds[\"measurementTechnique\"] = helpers.getUnique(experiments, \"measurementTechnique\")\n ds[\"variableMeasured\"] = helpers.getUnique(experiments, \"variableMeasured\")\n ds[\"citation\"] = helpers.getUnique(experiments, \"citation\")\n ds[\"experimentIDs\"] = experiments.experimentID\n\n # with open(export_file, 'w') as outfile:\n # json.dump([ds], outfile)\n\n return(pd.DataFrame([ds]))\n"
] | [
[
"pandas.merge",
"pandas.read_excel",
"pandas.read_json"
],
[
"pandas.read_csv",
"pandas.read_excel",
"pandas.melt",
"pandas.merge"
],
[
"pandas.read_excel",
"pandas.melt"
],
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
plutasnyy/mgr | [
"4ca5686ba7d62d0e2b8c172f17eb90bd822fdc21"
] | [
"src/models/conv_block.py"
] | [
"from torch import nn\n\n\nclass ConvolutionalBlock(nn.Module):\n\n def __init__(self, in_channels=128, out_channels=256, kernel_size=3, padding=1, stride=1, padding_mode='zeros'):\n super().__init__()\n self.conv1 = nn.Conv1d(in_channels, out_channels, kernel_size=kernel_size, padding=padding, stride=stride,\n padding_mode=padding_mode)\n self.bn1 = nn.BatchNorm1d(out_channels)\n self.relu1 = nn.ReLU()\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu1(out)\n return out\n"
] | [
[
"torch.nn.BatchNorm1d",
"torch.nn.ReLU",
"torch.nn.Conv1d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
peterchanw/utils | [
"26133c52ba5b0407d38371100b7b56fe2cf68149"
] | [
"LinReg/utilsLinReg.py"
] | [
"import sklearn.metrics as metrics\nimport pandas as pd\nimport numpy as np\n\ndef repair_chrdata(df, tCol):\n ### Parameters:\n # df: input dataframe\n # tCol: targeted column label with NaN\n ### Output\n # df: repaired dataframe\n # word: string of related dataframe column with some records have NaN in targeted column\n # count: number of records fixed in the targeted column with NaN\n\n # work out number of NaN records need to fix\n dFrm = df[df[tCol].isnull()]\n count = len(dFrm)\n # work out the fill up string (most appearance) at targeted column for NULL\n tword = df[tCol].unique().tolist()\n # print(tword)\n wordLT = df[tCol].value_counts(dropna=False)\n word = ''\n wordCnt = 0\n for index, value in wordLT.items():\n print(f'[COUNT] Index: {index}, Value: {value}')\n if wordCnt < value:\n word = index\n wordCnt = value\n # print(word)\n # print(wordLT)\n # update the targeted NaN with the most frequent string\n mask = df[tCol].isnull()\n df.loc[mask, tCol] = word\n print(f'[REPAIR] \"{tCol}\" with string: {word}, Count: {count}')\n return df, word, count\n\n# Repair a single number data column contained NaN with median value\ndef repair_numdata(df, tCol):\n ### Parameters:\n # df: input dataframe\n # tCol: targeted column label with NaN\n ### Output\n # df: repaired dataframe\n # medianVal: median value of related dataframe column with some records have NaN in targeted column\n # count: number of records fixed in the targeted column with NaN\n\n # work out number of NaN records need to fix\n dFrm = df[df[tCol].isnull()]\n count = len(dFrm)\n # work out the median value of the records from targeted column\n medianVal = df[tCol].median()\n # update the targeted NaN with the median value\n mask = df[tCol].isnull()\n df.loc[mask, tCol] = medianVal\n print(f'[REPAIR] \"{tCol}\" Median: {medianVal}, Count: {count}')\n return df, medianVal, count\n\n### Work out the educated guess targets to repair dataframe with NaN in 'repair_rdata' function\ndef repair_target(df, tCol, rCol):\n ### Parameters:\n # df: input dataframe\n # tCol: targeted column label with NaN\n # rCol: related column label without NaN for educated guess\n ### Output\n # target: column value of related column that have NaN in targeted column\n repair = df[df[tCol].isnull()]\n # print(repair[[rCol, tCol]])\n target = sorted(repair[rCol].unique().tolist())\n print(f'[TARGET] {tCol} NaN target: {target}')\n return target\n\n### Educated guess to repair dataframe column contained NaN with mean value of related\n### dataframe column\ndef repair_rcdata(df, tCol, rCol, target):\n ### Parameters:\n # df: input dataframe\n # tCol: targeted column label with NaN\n # rCol: related column label without NaN for educated guess\n # target: column value of related column that have NaN in targeted column\n ### Output\n # df: repaired dataframe\n # meanVal: mean value of related dataframe column with some records have NaN in targeted column\n # count: number of records fixed in the targeted column with NaN\n\n ### Main coding\n # work out number of NaN records need to fix\n dFrm = df[df[tCol].isnull()]\n dFrm = dFrm[dFrm[rCol] == target]\n count = len(dFrm)\n # work out the mean value of the records from related column\n repair = df.loc[df[rCol] == target]\n meanVal = round(repair[tCol].mean(), 3)\n if np.isnan(meanVal):\n meanVal = np.float64(0)\n # update the targeted NaN with the calculated mean value of related records\n df[tCol] = df.apply(\n lambda row: meanVal if np.isnan(row[tCol]) & (row[rCol] == target)\n else row[tCol], axis=1\n 
)\n print(f'[REPAIR] {tCol}({target}) Mean: {meanVal}, Count: {count}')\n return df, meanVal, count\n\ndef regression_results(y_true, y_pred):\n\n # Regression metrics\n explained_variance=metrics.explained_variance_score(y_true, y_pred)\n mean_absolute_error=metrics.mean_absolute_error(y_true, y_pred)\n mse=metrics.mean_squared_error(y_true, y_pred)\n # mean_squared_log_error=metrics.mean_squared_log_error(y_true, y_pred)\n # median_absolute_error=metrics.median_absolute_error(y_true, y_pred)\n r2=metrics.r2_score(y_true, y_pred)\n\n print('explained_variance: ', round(explained_variance,4))\n # print('mean_squared_log_error: ', round(mean_squared_log_error,4))\n print('r-squared (r2): ', round(r2,4))\n print('mean_absolute_error (MAE): ', round(mean_absolute_error,4))\n print('mean_squared_error (MSE): ', round(mse,4))\n print('root_mean_squared_error (RMSE): ', round(np.sqrt(mse),4))\n"
] | [
[
"sklearn.metrics.explained_variance_score",
"sklearn.metrics.r2_score",
"numpy.sqrt",
"numpy.isnan",
"sklearn.metrics.mean_absolute_error",
"sklearn.metrics.mean_squared_error",
"numpy.float64"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hansonmcoombs/flopy | [
"49398983c36d381992621d5bf698ea7f78fc0014",
"49398983c36d381992621d5bf698ea7f78fc0014"
] | [
"autotest/t024_test.py",
"flopy/modpath/mp6sim.py"
] | [
"import os\n\nimport numpy as np\nimport pytest\nfrom ci_framework import FlopyTestSetup, base_test_dir\n\nimport flopy\n\nbase_dir = base_test_dir(__file__, rel_path=\"temp\", verbose=True)\n\nex_pth = os.path.join(\"..\", \"examples\", \"data\", \"mf2005_test\")\ntestmodels = [\n os.path.join(ex_pth, f) for f in os.listdir(ex_pth) if f.endswith(\".nam\")\n]\n\n\[email protected](\n \"namfile\",\n testmodels,\n)\ndef test_checker_on_load(namfile):\n # load all of the models in the mf2005_test folder\n # model level checks are performed by default on load()\n checker_on_load(namfile)\n\n\ndef checker_on_load(mfnam):\n f = os.path.basename(mfnam)\n d = os.path.dirname(mfnam)\n m = flopy.modflow.Modflow.load(f, model_ws=d)\n assert isinstance(\n m, flopy.modflow.Modflow\n ), \"Not a flopy.modflow.Modflow instance\"\n\n\ndef test_bcs_check():\n model_ws = f\"{base_dir}_test_bcs_check\"\n test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws)\n\n mf = flopy.modflow.Modflow(version=\"mf2005\", model_ws=model_ws)\n\n # test check for isolated cells\n dis = flopy.modflow.ModflowDis(\n mf, nlay=2, nrow=3, ncol=3, top=100, botm=95\n )\n bas = flopy.modflow.ModflowBas(mf, ibound=np.ones((2, 3, 3), dtype=int))\n chk = bas.check()\n\n dis = flopy.modflow.ModflowDis(\n mf, nlay=3, nrow=5, ncol=5, top=100, botm=95\n )\n ibound = np.zeros((3, 5, 5), dtype=int)\n ibound[1, 1, 1] = 1 # fully isolated cell\n ibound[0:2, 4, 4] = 1 # cell connected vertically to one other cell\n bas = flopy.modflow.ModflowBas(mf, ibound=ibound)\n mf._mg_resync = True\n chk = bas.check()\n assert chk.summary_array[\"desc\"][0] == \"isolated cells in ibound array\"\n assert (\n chk.summary_array.i[0] == 1\n and chk.summary_array.i[0] == 1\n and chk.summary_array.j[0] == 1\n )\n assert len(chk.summary_array) == 1\n\n ghb = flopy.modflow.ModflowGhb(\n mf, stress_period_data={0: [0, 0, 0, 100, 1]}\n )\n riv = flopy.modflow.ModflowRiv(\n mf,\n stress_period_data={\n 0: [[0, 0, 0, 101, 10, 100], [0, 0, 1, 80, 10, 90]]\n },\n )\n chk = ghb.check()\n assert chk.summary_array[\"desc\"][0] == \"BC in inactive cell\"\n chk = riv.check()\n assert chk.summary_array[\"desc\"][4] == \"RIV stage below rbots\"\n assert np.array_equal(chk.summary_array[\"j\"], np.array([0, 1, 1, 1, 1]))\n\n\ndef test_properties_check():\n # test that storage values ignored for steady state\n model_ws = f\"{base_dir}_test_properties_check\"\n test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws)\n\n mf = flopy.modflow.Modflow(\n version=\"mf2005\",\n model_ws=model_ws,\n )\n dis = flopy.modflow.ModflowDis(\n mf,\n nrow=2,\n ncol=2,\n top=np.array([[100, np.nan], [100, 100]]),\n nper=3,\n steady=True,\n )\n chk = dis.check()\n assert len(chk.summary_array) == 1\n kij = (\n chk.summary_array[\"k\"][0],\n chk.summary_array[\"i\"][0],\n chk.summary_array[\"j\"][0],\n )\n assert kij == (0, 0, 1)\n lpf = flopy.modflow.ModflowLpf(mf, sy=np.ones((2, 2)), ss=np.ones((2, 2)))\n chk = lpf.check()\n assert len(chk.summary_array) == 0\n\n # test k values check\n lpf = flopy.modflow.ModflowLpf(\n mf,\n hk=np.array([[1, 1e10], [1, -1]]),\n hani=np.array([[1, 1], [1, -1]]),\n vka=np.array([[1e10, 0], [1, 1e-20]]),\n )\n chk = lpf.check()\n ind1 = np.array(\n [\n True if list(inds) == [0, 1, 1] else False\n for inds in chk.view_summary_array_fields([\"k\", \"i\", \"j\"])\n ]\n )\n ind1_errors = chk.summary_array[ind1][\"desc\"]\n ind2 = np.array(\n [\n True if list(inds) == [0, 0, 1] else False\n for inds in chk.view_summary_array_fields([\"k\", \"i\", \"j\"])\n 
]\n )\n ind2_errors = chk.summary_array[ind2][\"desc\"]\n ind3 = np.array(\n [\n True if list(inds) == [0, 0, 0] else False\n for inds in chk.view_summary_array_fields([\"k\", \"i\", \"j\"])\n ]\n )\n ind3_errors = chk.summary_array[ind3][\"desc\"]\n\n assert (\n \"zero or negative horizontal hydraulic conductivity values\"\n in ind1_errors\n )\n assert (\n \"horizontal hydraulic conductivity values below checker threshold of 1e-11\"\n in ind1_errors\n )\n assert \"negative horizontal anisotropy values\" in ind1_errors\n assert (\n \"vertical hydraulic conductivity values below checker threshold of 1e-11\"\n in ind1_errors\n )\n assert (\n \"horizontal hydraulic conductivity values above checker threshold of 100000.0\"\n in ind2_errors\n )\n assert (\n \"zero or negative vertical hydraulic conductivity values\"\n in ind2_errors\n )\n assert (\n \"vertical hydraulic conductivity values above checker threshold of 100000.0\"\n in ind3_errors\n )\n\n\ndef test_oc_check():\n m = flopy.modflow.Modflow()\n oc = flopy.modflow.mfoc.ModflowOc(m)\n chk = oc.check()\n assert len(chk.summary_array) == 1, len(chk.summary_array)\n assert \"DIS package not available\" in chk.summary_array[0][\"desc\"]\n\n flopy.modflow.ModflowDis(m)\n oc.stress_period_data = {(0, 0): [\"save head\", \"save budget\"]}\n chk = oc.check() # check passsed\n assert len(chk.summary_array) == 0, len(chk.summary_array)\n\n oc.stress_period_data = {(0, 0): [\"save\"]}\n chk = oc.check()\n assert len(chk.summary_array) == 1, len(chk.summary_array)\n assert \"too few words\" in chk.summary_array[0][\"desc\"]\n\n oc.stress_period_data = {(0, 0): [\"save it\"]}\n chk = oc.check()\n assert len(chk.summary_array) == 1, len(chk.summary_array)\n assert \"action 'save it' ignored\" in chk.summary_array[0][\"desc\"]\n\n oc.stress_period_data = {(1, 1): [\"save head\", \"save budget\"]}\n chk = oc.check()\n assert len(chk.summary_array) == 1, len(chk.summary_array)\n assert \"OC stress_period_data ignored\" in chk.summary_array[0][\"desc\"]\n\n\nif __name__ == \"__main__\":\n print(f\"numpy version: {np.__version__}\")\n for mfnam in testmodels:\n checker_on_load(mfnam)\n test_bcs_check()\n test_properties_check()\n test_oc_check()\n",
"\"\"\"\nmpsim module. Contains the ModpathSim class. Note that the user can access\nthe ModpathSim class as `flopy.modpath.ModpathSim`.\n\nAdditional information for this MODFLOW/MODPATH package can be found at the `Online\nMODFLOW Guide\n<http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/index.html?dis.htm>`_.\n\n\"\"\"\nimport numpy as np\n\nfrom ..pakbase import Package\nfrom ..utils import Util3d, import_optional_dependency\n\npd = import_optional_dependency(\n \"pandas\",\n error_message=\"writing particles is more effcient with pandas\",\n errors=\"ignore\",\n)\n\n\nclass Modpath6Sim(Package):\n \"\"\"\n MODPATH Simulation File Package Class.\n\n Parameters\n ----------\n model : model object\n The model object (of type :class:`flopy.modpath.mp.Modpath`) to which\n this package will be added.\n extension : string\n Filename extension (default is 'mpsim')\n\n\n Attributes\n ----------\n heading : str\n Text string written to top of package input file.\n\n Methods\n -------\n\n See Also\n --------\n\n Notes\n -----\n\n Examples\n --------\n\n >>> import flopy\n >>> m = flopy.modpath.Modpath6()\n >>> dis = flopy.modpath.Modpath6Sim(m)\n\n \"\"\"\n\n def __init__(\n self,\n model,\n mp_name_file=\"mp.nam\",\n mp_list_file=\"mp.list\",\n option_flags=[1, 2, 1, 1, 1, 2, 2, 1, 2, 1, 1, 1],\n ref_time=0,\n ref_time_per_stp=[0, 0, 1.0],\n stop_time=None,\n group_name=[\"group_1\"],\n group_placement=[[1, 1, 1, 0, 1, 1]],\n release_times=[[1, 1]],\n group_region=[[1, 1, 1, 1, 1, 1]],\n mask_nlay=[1],\n mask_layer=[1],\n mask_1lay=[1],\n face_ct=[1],\n ifaces=[[6, 1, 1]],\n part_ct=[[1, 1, 1]],\n time_ct=1,\n release_time_incr=1,\n time_pts=[1],\n particle_cell_cnt=[[2, 2, 2]],\n cell_bd_ct=1,\n bud_loc=[[1, 1, 1, 1]],\n trace_id=1,\n stop_zone=1,\n zone=1,\n retard_fac=1.0,\n retard_fcCB=1.0,\n strt_file=None,\n extension=\"mpsim\",\n ):\n\n # call base package constructor\n super().__init__(model, extension, \"MPSIM\", 32)\n nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper\n\n self.heading1 = \"# MPSIM for Modpath, generated by Flopy.\"\n self.heading2 = \"#\"\n self.mp_name_file = f\"{model.name}.mpnam\"\n self.mp_list_file = f\"{model.name}.mplst\"\n options_list = [\n \"SimulationType\",\n \"TrackingDirection\",\n \"WeakSinkOption\",\n \"WeakSourceOption\",\n \"ReferenceTimeOption\",\n \"StopOption\",\n \"ParticleGenerationOption\",\n \"TimePointOption\",\n \"BudgetOutputOption\",\n \"ZoneArrayOption\",\n \"RetardationOption\",\n \"AdvectiveObservationsOption\",\n ]\n self.option_flags = option_flags\n options_dict = dict(list(zip(options_list, option_flags)))\n self.options_dict = options_dict\n self.endpoint_file = f\"{model.name}.mpend\"\n self.pathline_file = f\"{model.name}.mppth\"\n self.time_ser_file = f\"{model.name}.mp.tim_ser\"\n self.advobs_file = f\"{model.name}.mp.advobs\"\n self.ref_time = ref_time\n self.ref_time_per_stp = ref_time_per_stp\n self.stop_time = stop_time\n self.group_ct = len(group_name)\n self.group_name = group_name\n self.group_placement = group_placement\n self.release_times = release_times\n self.group_region = group_region\n self.mask_nlay = mask_nlay\n self.mask_layer = mask_layer\n self.mask_1lay = mask_1lay\n self.face_ct = face_ct\n self.ifaces = ifaces\n self.part_ct = part_ct\n self.strt_file = f\"{model.name}.loc\"\n if strt_file is not None:\n self.strt_file = strt_file\n self.time_ct = time_ct\n self.release_time_incr = release_time_incr\n self.time_pts = time_pts\n self.particle_cell_cnt = particle_cell_cnt\n self.cell_bd_ct = 
cell_bd_ct\n self.bud_loc = bud_loc\n self.trace_file = f\"{model.name}.trace_file.txt\"\n self.trace_id = trace_id\n self.stop_zone = stop_zone\n self.zone = Util3d(\n model,\n (nlay, nrow, ncol),\n np.int32,\n zone,\n name=\"zone\",\n locat=self.unit_number[0],\n )\n self.retard_fac = retard_fac\n self.retard_fcCB = retard_fcCB\n\n # self.mask_nlay = Util3d(model,(nlay,nrow,ncol),np.int32,\\\n # mask_nlay,name='mask_nlay',locat=self.unit_number[0])\n # self.mask_1lay = Util3d(model,(nlay,nrow,ncol),np.int32,\\\n # mask_1lay,name='mask_1lay',locat=self.unit_number[0])\n # self.stop_zone = Util3d(model,(nlay,nrow,ncol),np.int32,\\\n # stop_zone,name='stop_zone',locat=self.unit_number[0])\n # self.retard_fac = Util3d(model,(nlay,nrow,ncol),np.float32,\\\n # retard_fac,name='retard_fac',locat=self.unit_number[0])\n # self.retard_fcCB = Util3d(model,(nlay,nrow,ncol),np.float32,\\\n # retard_fcCB,name='retard_fcCB',locat=self.unit_number[0])\n\n self.parent.add_package(self)\n\n def check(self, f=None, verbose=True, level=1, checktype=None):\n \"\"\"\n Check package data for common errors.\n\n Parameters\n ----------\n f : str or file handle\n String defining file name or file handle for summary file\n of check method output. If a sting is passed a file handle\n is created. If f is None, check method does not write\n results to a summary file. (default is None)\n verbose : bool\n Boolean flag used to determine if check method results are\n written to the screen\n level : int\n Check method analysis level. If level=0, summary checks are\n performed. If level=1, full checks are performed.\n\n Returns\n -------\n None\n\n Examples\n --------\n \"\"\"\n chk = self._get_check(f, verbose, level, checktype)\n\n # MODPATH apparently produces no output if stoptime > last timepoint\n if (\n self.options_dict[\"StopOption\"] == 3\n and self.options_dict[\"TimePointOption\"] == 3\n ):\n if self.time_pts[-1] < self.stop_time:\n chk._add_to_summary(\n type=\"Error\",\n value=self.stop_time,\n desc=\"Stop time greater than last TimePoint\",\n )\n else:\n chk.append_passed(\"Valid stop time\")\n chk.summarize()\n return chk\n\n def write_file(self):\n \"\"\"\n Write the package file\n\n Returns\n -------\n None\n\n \"\"\"\n # item numbers and CamelCase variable names correspond to Modpath 6 documentation\n nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper\n\n f_sim = open(self.fn_path, \"w\")\n # item 0\n f_sim.write(f\"#{self.heading1}\\n#{self.heading2}\\n\")\n # item 1\n f_sim.write(f\"{self.mp_name_file}\\n\")\n # item 2\n f_sim.write(f\"{self.mp_list_file}\\n\")\n # item 3\n for i in range(12):\n f_sim.write(f\"{self.option_flags[i]:4d}\")\n f_sim.write(\"\\n\")\n\n # item 4\n f_sim.write(f\"{self.endpoint_file}\\n\")\n # item 5\n if self.options_dict[\"SimulationType\"] == 2:\n f_sim.write(f\"{self.pathline_file}\\n\")\n # item 6\n if self.options_dict[\"SimulationType\"] == 3:\n f_sim.write(f\"{self.time_ser_file}\\n\")\n # item 7\n if (\n self.options_dict[\"AdvectiveObservationsOption\"] == 2\n and self.option_dict[\"SimulationType\"] == 3\n ):\n f_sim.write(f\"{self.advobs_file}\\n\")\n\n # item 8\n if self.options_dict[\"ReferenceTimeOption\"] == 1:\n f_sim.write(f\"{self.ref_time:f}\\n\")\n # item 9\n if self.options_dict[\"ReferenceTimeOption\"] == 2:\n Period, Step, TimeFraction = self.ref_time_per_stp\n f_sim.write(f\"{Period + 1} {Step + 1} {TimeFraction:f}\\n\")\n\n # item 10\n if self.options_dict[\"StopOption\"] == 3:\n f_sim.write(f\"{self.stop_time:f}\\n\")\n\n if 
self.options_dict[\"ParticleGenerationOption\"] == 1:\n # item 11\n f_sim.write(f\"{self.group_ct}\\n\")\n for i in range(self.group_ct):\n # item 12\n f_sim.write(f\"{self.group_name[i]}\\n\")\n # item 13\n (\n Grid,\n GridCellRegionOption,\n PlacementOption,\n ReleaseStartTime,\n ReleaseOption,\n CHeadOption,\n ) = self.group_placement[i]\n f_sim.write(\n \"{0:d} {1:d} {2:d} {3:f} {4:d} {5:d}\\n\".format(\n Grid,\n GridCellRegionOption,\n PlacementOption,\n ReleaseStartTime,\n ReleaseOption,\n CHeadOption,\n )\n )\n # item 14\n if ReleaseOption == 2:\n (\n ReleasePeriodLength,\n ReleaseEventCount,\n ) = self.release_times[i]\n f_sim.write(\n f\"{ReleasePeriodLength:f} {ReleaseEventCount}\\n\"\n )\n # item 15\n if GridCellRegionOption == 1:\n (\n MinLayer,\n MinRow,\n MinColumn,\n MaxLayer,\n MaxRow,\n MaxColumn,\n ) = self.group_region[i]\n f_sim.write(\n \"{0:d} {1:d} {2:d} {3:d} {4:d} {5:d}\\n\".format(\n MinLayer + 1,\n MinRow + 1,\n MinColumn + 1,\n MaxLayer + 1,\n MaxRow + 1,\n MaxColumn + 1,\n )\n )\n # item 16\n if GridCellRegionOption == 2:\n f_sim.write(self.mask_nlay[i].get_file_entry())\n # item 17\n if GridCellRegionOption == 3:\n f_sim.write(f\"{self.mask_layer[i]}\\n\")\n # item 18\n f_sim.write(self.mask_1lay[i].get_file_entry())\n # item 19 and 20\n if PlacementOption == 1:\n f_sim.write(f\"{self.face_ct[i]}\\n\")\n # item 20\n for j in range(self.face_ct[i]):\n (\n IFace,\n ParticleRowCount,\n ParticleColumnCount,\n ) = self.ifaces[i][j]\n f_sim.write(\n f\"{IFace} {ParticleRowCount} {ParticleColumnCount}\\n\"\n )\n # item 21\n elif PlacementOption == 2:\n (\n ParticleLayerCount,\n ParticleRowCount,\n ParticleColumnCount,\n ) = self.particle_cell_cnt[i]\n f_sim.write(\n \"{0:d} {1:d} {2:d} \\n\".format(\n ParticleLayerCount,\n ParticleRowCount,\n ParticleColumnCount,\n )\n )\n\n # item 22\n if self.options_dict[\"ParticleGenerationOption\"] == 2:\n f_sim.write(f\"{self.strt_file}\\n\")\n\n if self.options_dict[\"TimePointOption\"] != 1:\n # item 23\n if (\n self.options_dict[\"TimePointOption\"] == 2\n or self.options_dict[\"TimePointOption\"] == 3\n ):\n f_sim.write(f\"{self.time_ct}\\n\")\n # item 24\n if self.options_dict[\"TimePointOption\"] == 2:\n f_sim.write(f\"{self.release_time_incr:f}\\n\")\n # item 25\n if self.options_dict[\"TimePointOption\"] == 3:\n for r in range(self.time_ct):\n f_sim.write(f\"{self.time_pts[r]:f}\\n\")\n\n if (\n self.options_dict[\"BudgetOutputOption\"] != 1\n or self.options_dict[\"BudgetOutputOption\"] != 2\n ):\n # item 26\n if self.options_dict[\"BudgetOutputOption\"] == 3:\n f_sim.write(f\"{self.cell_bd_ct}\\n\")\n # item 27\n for k in range(self.cell_bd_ct):\n Grid, Layer, Row, Column = self.bud_loc[k]\n f_sim.write(\n f\"{Grid} {Layer + 1} {Row + 1} {Column + 1} \\n\"\n )\n if self.options_dict[\"BudgetOutputOption\"] == 4:\n # item 28\n f_sim.write(f\"{self.trace_file}\\n\")\n # item 29\n f_sim.write(f\"{self.trace_id}\\n\")\n\n if self.options_dict[\"ZoneArrayOption\"] != 1:\n # item 30\n f_sim.write(f\"{self.stop_zone}\\n\")\n # item 31\n f_sim.write(self.zone.get_file_entry())\n\n if self.options_dict[\"RetardationOption\"] != 1:\n # item 32\n f_sim.write(self.retard_fac.get_file_entry())\n # item 33\n f_sim.write(self.retard_fcCB.get_file_entry())\n\n f_sim.close()\n\n\nclass StartingLocationsFile(Package):\n \"\"\"\n Class for working with MODPATH Starting Locations file for particles.\n\n Parameters\n ----------\n model : Modpath object\n The model object (of type :class:`flopy.modpath.mp.Modpath`) to which\n this package 
will be added.\n inputstyle : 1\n Input style described in MODPATH6 manual (currently only input style 1 is supported)\n extension : string\n Filename extension (default is 'loc')\n\n use_pandas: bool, if True and pandas is available use pandas to write the particle locations >2x speed\n \"\"\"\n\n def __init__(\n self,\n model,\n inputstyle=1,\n extension=\"loc\",\n verbose=False,\n use_pandas=True,\n ):\n\n super().__init__(model, extension, \"LOC\", 33)\n\n self.model = model\n self.use_pandas = use_pandas\n self.heading = (\n \"# Starting locations file for Modpath, generated by Flopy.\"\n )\n self.input_style = inputstyle\n if inputstyle != 1:\n raise NotImplementedError\n self.data = self.get_empty_starting_locations_data(0)\n self.extension = extension\n\n # add to package list so location are written with other ModPath files\n self.parent.add_package(self)\n\n @staticmethod\n def get_dtypes():\n \"\"\"\n Build numpy dtype for the MODPATH 6 starting locations file.\n \"\"\"\n dtype = np.dtype(\n [\n (\"particleid\", int),\n (\"particlegroup\", int),\n (\"initialgrid\", int),\n (\"k0\", int),\n (\"i0\", int),\n (\"j0\", int),\n (\"xloc0\", np.float32),\n (\"yloc0\", np.float32),\n (\"zloc0\", np.float32),\n (\"initialtime\", np.float32),\n (\"label\", \"|S40\"),\n (\"groupname\", \"|S16\"),\n ]\n )\n return dtype\n\n @staticmethod\n def get_empty_starting_locations_data(\n npt=0, default_xloc0=0.5, default_yloc0=0.5, default_zloc0=0.0\n ):\n \"\"\"get an empty recarray for particle starting location info.\n\n Parameters\n ----------\n npt : int\n Number of particles. Particles in array will be numbered consecutively from 1 to npt.\n\n \"\"\"\n dtype = StartingLocationsFile.get_dtypes()\n d = np.zeros(npt, dtype=dtype)\n d = d.view(np.recarray)\n d[\"particleid\"] = np.arange(1, npt + 1)\n d[\"particlegroup\"] = 1\n d[\"initialgrid\"] = 1\n d[\"xloc0\"] = default_xloc0\n d[\"yloc0\"] = default_yloc0\n d[\"zloc0\"] = default_zloc0\n d[\"groupname\"] = \"group1\"\n return d\n\n def write_file(self, data=None, float_format=\"{:.8f}\"):\n\n if data is None:\n data = self.data\n if len(data) == 0:\n print(\"No data to write!\")\n return\n data = data.copy()\n data[\"k0\"] += 1\n data[\"i0\"] += 1\n data[\"j0\"] += 1\n if pd is not None and self.use_pandas and len(data) > 0:\n self._write_particle_data_with_pandas(data, float_format)\n else:\n self._write_wo_pandas(data, float_format)\n\n def _write_particle_data_with_pandas(self, data, float_format):\n \"\"\"\n write particle data with pandas, more than twice as efficient\n :param data: particle data, pd.Dataframe or numpy record array with keys:\n ['k0', 'i0', 'j0', 'groupname', 'particlegroup', 'xloc0', 'yloc0', 'zloc0',\n 'initialtime', 'label']\n :param save_group_mapper bool, if true, save a groupnumber to group name mapper as well.\n :return:\n \"\"\"\n # convert float format string to pandas float format\n float_format = (\n float_format.replace(\"{\", \"\").replace(\"}\", \"\").replace(\":\", \"%\")\n )\n data = pd.DataFrame(data)\n if len(data) == 0:\n return\n # check if byte strings and decode\n if isinstance(data.label.iloc[0], (bytes, bytearray)):\n data.loc[:, \"label\"] = data.label.str.decode(\"UTF-8\")\n if isinstance(data.groupname.iloc[0], (bytes, bytearray)):\n data.loc[:, \"groupname\"] = data.groupname.str.decode(\"UTF-8\")\n\n # write loc file with pandas to save time\n # simple speed test writing particles with flopy and running model took 30 min, writing with pandas took __min\n loc_path = self.fn_path\n # write 
groups\n group_dict = dict(\n data[[\"particlegroup\", \"groupname\"]].itertuples(False, None)\n )\n\n # writing group loc data\n groups = (\n data[[\"particlegroup\", \"groupname\"]]\n .groupby(\"particlegroup\")\n .count()\n .reset_index()\n .rename(columns={\"groupname\": \"count\"})\n )\n groups.loc[:, \"groupname\"] = groups.loc[:, \"particlegroup\"].replace(\n group_dict\n )\n group_count = len(groups.index)\n groups = pd.Series(\n groups[[\"groupname\", \"count\"]].astype(str).values.flatten()\n )\n with open(loc_path, \"w\") as f:\n f.write(\"{}\\n\".format(self.heading))\n f.write(\"{:d}\\n\".format(self.input_style))\n f.write(\"{}\\n\".format(group_count))\n\n groups.to_csv(loc_path, sep=\" \", index=False, header=False, mode=\"a\")\n\n # write particle data\n print(\"writing loc particle data\")\n data.drop(\"groupname\", 1, inplace=True)\n data.to_csv(\n loc_path,\n sep=\" \",\n header=False,\n index=False,\n mode=\"a\",\n float_format=float_format,\n )\n\n def _write_wo_pandas(self, data, float_format):\n with open(self.fn_path, \"w\") as output:\n output.write(f\"{self.heading}\\n\")\n output.write(f\"{self.input_style}\\n\")\n groups = np.unique(data.groupname)\n ngroups = len(groups)\n output.write(f\"{ngroups}\\n\")\n for g in groups:\n npt = len(data[data.groupname == g])\n output.write(f\"{g.decode()}\\n{npt}\\n\")\n txt = \"\"\n for p in data:\n txt += \"{:d} {:d} {:d} {:d} {:d} {:d}\".format(*list(p)[:6])\n fmtstr = \" {0} {0} {0} {0} \".format(float_format)\n txt += fmtstr.format(*list(p)[6:10])\n txt += f\"{p[10].decode()}\\n\"\n output.write(txt)\n"
] | [
[
"numpy.array",
"numpy.zeros",
"numpy.ones"
],
[
"numpy.arange",
"numpy.zeros",
"numpy.dtype",
"numpy.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
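A minimal, numpy-only sketch of the particle recarray that get_empty_starting_locations_data() in the StartingLocationsFile code above constructs; the dtype is copied from get_dtypes(), and the three-particle count plus the printed field names are purely illustrative (no flopy model is required for this fragment).

import numpy as np

# dtype mirrors StartingLocationsFile.get_dtypes()
dtype = np.dtype([
    ("particleid", int), ("particlegroup", int), ("initialgrid", int),
    ("k0", int), ("i0", int), ("j0", int),
    ("xloc0", np.float32), ("yloc0", np.float32), ("zloc0", np.float32),
    ("initialtime", np.float32), ("label", "|S40"), ("groupname", "|S16"),
])

npt = 3  # illustrative particle count
d = np.zeros(npt, dtype=dtype).view(np.recarray)
d["particleid"] = np.arange(1, npt + 1)  # particles numbered consecutively from 1
d["particlegroup"] = 1
d["initialgrid"] = 1
d["xloc0"] = 0.5
d["yloc0"] = 0.5
d["groupname"] = "group1"
print(d.dtype.names)
print(d)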
ggzhang0071/nni | [
"f4145e62d89c3ca383cf00f2de5dfd2d1025ad92",
"eaad98528c7aa714c9848800d607d6aa3bdd531d",
"eaad98528c7aa714c9848800d607d6aa3bdd531d"
] | [
"nni/retiarii/nn/pytorch/api.py",
"nni/compression/pytorch/utils/mask_conflict.py",
"nni/retiarii/oneshot/pytorch/random.py"
] | [
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport math\nimport operator\nimport warnings\nfrom typing import Any, List, Union, Dict, Optional, Callable, Iterable, NoReturn, TypeVar\n\nimport torch\nimport torch.nn as nn\n\nfrom nni.common.serializer import Translatable\nfrom nni.retiarii.serializer import basic_unit\nfrom nni.retiarii.utils import STATE_DICT_PY_MAPPING_PARTIAL\nfrom .utils import Mutable, generate_new_label, get_fixed_value\n\n\n__all__ = ['LayerChoice', 'InputChoice', 'ValueChoice', 'Placeholder', 'ChosenInputs']\n\n\nclass LayerChoice(Mutable):\n \"\"\"\n Layer choice selects one of the ``candidates``, then apply it on inputs and return results.\n\n Layer choice does not allow itself to be nested.\n\n Parameters\n ----------\n candidates : list of nn.Module or OrderedDict\n A module list to be selected from.\n prior : list of float\n Prior distribution used in random sampling.\n label : str\n Identifier of the layer choice.\n\n Attributes\n ----------\n length : int\n Deprecated. Number of ops to choose from. ``len(layer_choice)`` is recommended.\n names : list of str\n Names of candidates.\n choices : list of Module\n Deprecated. A list of all candidate modules in the layer choice module.\n ``list(layer_choice)`` is recommended, which will serve the same purpose.\n\n Notes\n -----\n ``candidates`` can be a list of modules or a ordered dict of named modules, for example,\n\n .. code-block:: python\n\n self.op_choice = LayerChoice(OrderedDict([\n (\"conv3x3\", nn.Conv2d(3, 16, 128)),\n (\"conv5x5\", nn.Conv2d(5, 16, 128)),\n (\"conv7x7\", nn.Conv2d(7, 16, 128))\n ]))\n\n Elements in layer choice can be modified or deleted. Use ``del self.op_choice[\"conv5x5\"]`` or\n ``self.op_choice[1] = nn.Conv3d(...)``. Adding more choices is not supported yet.\n \"\"\"\n\n # FIXME: prior is designed but not supported yet\n\n @classmethod\n def create_fixed_module(cls, candidates: Union[Dict[str, nn.Module], List[nn.Module]], *,\n label: Optional[str] = None, **kwargs):\n chosen = get_fixed_value(label)\n if isinstance(candidates, list):\n result = candidates[int(chosen)]\n else:\n result = candidates[chosen]\n\n # map the named hierarchies to support weight inheritance for python engine\n if hasattr(result, STATE_DICT_PY_MAPPING_PARTIAL):\n # handle cases where layer choices are nested\n # already has a mapping, will merge with it\n prev_mapping = getattr(result, STATE_DICT_PY_MAPPING_PARTIAL)\n setattr(result, STATE_DICT_PY_MAPPING_PARTIAL, {k: f'{chosen}.{v}' for k, v in prev_mapping.items()})\n else:\n # \"result\" needs to know where to map itself.\n # Ideally, we should put a _mapping_ in the module where \"result\" is located,\n # but it's impossible to put mapping into parent module here.\n setattr(result, STATE_DICT_PY_MAPPING_PARTIAL, {'__self__': str(chosen)})\n return result\n\n def __init__(self, candidates: Union[Dict[str, nn.Module], List[nn.Module]], *,\n prior: Optional[List[float]] = None, label: Optional[str] = None, **kwargs):\n super(LayerChoice, self).__init__()\n if 'key' in kwargs:\n warnings.warn(f'\"key\" is deprecated. Assuming label.')\n label = kwargs['key']\n if 'return_mask' in kwargs:\n warnings.warn(f'\"return_mask\" is deprecated. Ignoring...')\n if 'reduction' in kwargs:\n warnings.warn(f'\"reduction\" is deprecated. 
Ignoring...')\n self.candidates = candidates\n self.prior = prior or [1 / len(candidates) for _ in range(len(candidates))]\n assert abs(sum(self.prior) - 1) < 1e-5, 'Sum of prior distribution is not 1.'\n self._label = generate_new_label(label)\n\n self.names = []\n if isinstance(candidates, dict):\n for name, module in candidates.items():\n assert name not in [\"length\", \"reduction\", \"return_mask\", \"_key\", \"key\", \"names\"], \\\n \"Please don't use a reserved name '{}' for your module.\".format(name)\n self.add_module(name, module)\n self.names.append(name)\n elif isinstance(candidates, list):\n for i, module in enumerate(candidates):\n self.add_module(str(i), module)\n self.names.append(str(i))\n else:\n raise TypeError(\"Unsupported candidates type: {}\".format(type(candidates)))\n self._first_module = self._modules[self.names[0]] # to make the dummy forward meaningful\n\n @property\n def key(self):\n return self._key()\n\n @torch.jit.ignore\n def _key(self):\n warnings.warn('Using key to access the identifier of LayerChoice is deprecated. Please use label instead.',\n category=DeprecationWarning)\n return self._label\n\n @property\n def label(self):\n return self._label\n\n def __getitem__(self, idx):\n if isinstance(idx, str):\n return self._modules[idx]\n return list(self)[idx]\n\n def __setitem__(self, idx, module):\n key = idx if isinstance(idx, str) else self.names[idx]\n return setattr(self, key, module)\n\n def __delitem__(self, idx):\n if isinstance(idx, slice):\n for key in self.names[idx]:\n delattr(self, key)\n else:\n if isinstance(idx, str):\n key, idx = idx, self.names.index(idx)\n else:\n key = self.names[idx]\n delattr(self, key)\n del self.names[idx]\n\n def __len__(self):\n return len(self.names)\n\n def __iter__(self):\n return map(lambda name: self._modules[name], self.names)\n\n @property\n def choices(self):\n return self._choices()\n\n @torch.jit.ignore\n def _choices(self):\n warnings.warn(\"layer_choice.choices is deprecated. Use `list(layer_choice)` instead.\", category=DeprecationWarning)\n return list(self)\n\n def forward(self, x):\n warnings.warn('You should not run forward of this module directly.')\n return self._first_module(x)\n\n def __repr__(self):\n return f'LayerChoice({self.candidates}, label={repr(self.label)})'\n\n\ntry:\n from typing import Literal\nexcept ImportError:\n from typing_extensions import Literal\n\nReductionType = Literal['mean', 'concat', 'sum', 'none']\n\n\nclass InputChoice(Mutable):\n \"\"\"\n Input choice selects ``n_chosen`` inputs from ``choose_from`` (contains ``n_candidates`` keys).\n Use ``reduction`` to specify how chosen inputs are reduced into one output. A few options are:\n\n * ``none``: do nothing and return the list directly.\n * ``sum``: summing all the chosen inputs.\n * ``mean``: taking the average of all chosen inputs.\n * ``concat``: concatenate all chosen inputs at dimension 1.\n\n We don't support customizing reduction yet.\n\n Parameters\n ----------\n n_candidates : int\n Number of inputs to choose from. It is required.\n n_chosen : int\n Recommended inputs to choose. 
If None, mutator is instructed to select any.\n reduction : str\n ``mean``, ``concat``, ``sum`` or ``none``.\n prior : list of float\n Prior distribution used in random sampling.\n label : str\n Identifier of the input choice.\n \"\"\"\n\n @classmethod\n def create_fixed_module(cls, n_candidates: int, n_chosen: Optional[int] = 1,\n reduction: ReductionType = 'sum', *,\n prior: Optional[List[float]] = None, label: Optional[str] = None, **kwargs):\n return ChosenInputs(get_fixed_value(label), reduction=reduction)\n\n def __init__(self, n_candidates: int, n_chosen: Optional[int] = 1,\n reduction: str = 'sum', *,\n prior: Optional[List[float]] = None, label: Optional[str] = None, **kwargs):\n super(InputChoice, self).__init__()\n if 'key' in kwargs:\n warnings.warn(f'\"key\" is deprecated. Assuming label.')\n label = kwargs['key']\n if 'return_mask' in kwargs:\n warnings.warn(f'\"return_mask\" is deprecated. Ignoring...')\n if 'choose_from' in kwargs:\n warnings.warn(f'\"reduction\" is deprecated. Ignoring...')\n self.n_candidates = n_candidates\n self.n_chosen = n_chosen\n self.reduction = reduction\n self.prior = prior or [1 / n_candidates for _ in range(n_candidates)]\n assert self.reduction in ['mean', 'concat', 'sum', 'none']\n self._label = generate_new_label(label)\n\n @property\n def key(self):\n return self._key()\n\n @torch.jit.ignore\n def _key(self):\n warnings.warn('Using key to access the identifier of InputChoice is deprecated. Please use label instead.',\n category=DeprecationWarning)\n return self._label\n\n @property\n def label(self):\n return self._label\n\n def forward(self, candidate_inputs: List[torch.Tensor]) -> torch.Tensor:\n warnings.warn('You should not run forward of this module directly.')\n return candidate_inputs[0]\n\n def __repr__(self):\n return f'InputChoice(n_candidates={self.n_candidates}, n_chosen={self.n_chosen}, ' \\\n f'reduction={repr(self.reduction)}, label={repr(self.label)})'\n\n\nclass ChosenInputs(nn.Module):\n \"\"\"\n A module that chooses from a tensor list and outputs a reduced tensor.\n The already-chosen version of InputChoice.\n\n When forward, ``chosen`` will be used to select inputs from ``candidate_inputs``,\n and ``reduction`` will be used to choose from those inputs to form a tensor.\n\n Attributes\n ----------\n chosen : list of int\n Indices of chosen inputs.\n reduction : ``mean`` | ``concat`` | ``sum`` | ``none``\n How to reduce the inputs when multiple are selected.\n \"\"\"\n\n def __init__(self, chosen: Union[List[int], int], reduction: ReductionType):\n super().__init__()\n self.chosen = chosen if isinstance(chosen, list) else [chosen]\n self.reduction = reduction\n\n def forward(self, candidate_inputs):\n return self._tensor_reduction(self.reduction, [candidate_inputs[i] for i in self.chosen])\n\n def _tensor_reduction(self, reduction_type, tensor_list):\n if reduction_type == 'none':\n return tensor_list\n if not tensor_list:\n return None # empty. 
return None for now\n if len(tensor_list) == 1:\n return tensor_list[0]\n if reduction_type == 'sum':\n return sum(tensor_list)\n if reduction_type == 'mean':\n return sum(tensor_list) / len(tensor_list)\n if reduction_type == 'concat':\n return torch.cat(tensor_list, dim=1)\n raise ValueError(f'Unrecognized reduction policy: \"{reduction_type}\"')\n\n\n# the code in ValueChoice can be generated with this codegen\n# this is not done online because I want to have type-hint supports\n# $ python -c \"from nni.retiarii.nn.pytorch.api import _valuechoice_codegen; _valuechoice_codegen(_internal=True)\"\ndef _valuechoice_codegen(*, _internal: bool = False):\n if not _internal:\n raise RuntimeError(\"This method is set to be internal. Please don't use it directly.\")\n MAPPING = {\n # unary\n 'neg': '-', 'pos': '+', 'invert': '~',\n # binary\n 'add': '+', 'sub': '-', 'mul': '*', 'matmul': '@',\n 'truediv': '//', 'floordiv': '/', 'mod': '%',\n 'lshift': '<<', 'rshift': '>>',\n 'and': '&', 'xor': '^', 'or': '|',\n # no reflection\n 'lt': '<', 'le': '<=', 'eq': '==',\n 'ne': '!=', 'ge': '>=', 'gt': '>',\n # NOTE\n # Currently we don't support operators like __contains__ (b in a),\n # Might support them in future when we actually need them.\n }\n\n binary_template = \"\"\" def __{op}__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.{opt}, '{{}} {sym} {{}}', [self, other])\"\"\"\n\n binary_r_template = \"\"\" def __r{op}__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.{opt}, '{{}} {sym} {{}}', [other, self])\"\"\"\n\n unary_template = \"\"\" def __{op}__(self) -> 'ValueChoiceX':\n return ValueChoiceX(operator.{op}, '{sym}{{}}', [self])\"\"\"\n\n for op, sym in MAPPING.items():\n if op in ['neg', 'pos', 'invert']:\n print(unary_template.format(op=op, sym=sym) + '\\n')\n else:\n opt = op + '_' if op in ['and', 'or'] else op\n print(binary_template.format(op=op, opt=opt, sym=sym) + '\\n')\n if op not in ['lt', 'le', 'eq', 'ne', 'ge', 'gt']:\n print(binary_r_template.format(op=op, opt=opt, sym=sym) + '\\n')\n\n\ndef _valuechoice_staticmethod_helper(orig_func):\n orig_func.__doc__ += \"\"\"\n Notes\n -----\n This function performs lazy evaluation.\n Only the expression will be recorded when the function is called.\n The real evaluation happens when the inner value choice has determined its final decision.\n If no value choice is contained in the parameter list, the evaluation will be intermediate.\"\"\"\n return orig_func\n\n\nclass ValueChoiceX(Translatable):\n \"\"\"Internal API. Implementation note:\n\n The transformed (X) version of value choice.\n It can be the result of composition (transformation) of one or several value choices. For example,\n\n .. code-block:: python\n\n nn.ValueChoice([1, 2]) + nn.ValueChoice([3, 4]) + 5\n\n The instance of base class cannot be created directly. Instead, they should be only the result of transformation of value choice.\n Therefore, there is no need to implement ``create_fixed_module`` in this class, because,\n 1. For python-engine, value choice itself has create fixed module. Consequently, the transformation is born to be fixed.\n 2. For graph-engine, it uses evaluate to calculate the result.\n\n Potentially, we have to implement the evaluation logic in oneshot algorithms. 
I believe we can postpone the discussion till then.\n \"\"\"\n\n def __init__(self, function: Callable[..., Any], repr_template: str, arguments: List[Any], dry_run: bool = True):\n super().__init__()\n\n if function is None:\n # this case is a hack for ValueChoice subclass\n # it will reach here only because ``__init__`` in ``nn.Module`` is useful.\n return\n\n self.function = function\n self.repr_template = repr_template\n self.arguments = arguments\n\n assert any(isinstance(arg, ValueChoiceX) for arg in self.arguments)\n\n if dry_run:\n # for sanity check\n self.dry_run()\n\n def inner_choices(self) -> Iterable['ValueChoice']:\n \"\"\"\n Return an iterable of all leaf value choices.\n Useful for composition of value choices.\n No deduplication on labels. Mutators should take care.\n \"\"\"\n for arg in self.arguments:\n if isinstance(arg, ValueChoiceX):\n yield from arg.inner_choices()\n\n def dry_run(self) -> Any:\n \"\"\"\n Dry run the value choice to get one of its possible evaluation results.\n \"\"\"\n # values are not used\n return self._evaluate(iter([]), True)\n\n def evaluate(self, values: Iterable[Any]) -> Any:\n \"\"\"\n Evaluate the result of this group.\n ``values`` should in the same order of ``inner_choices()``.\n \"\"\"\n return self._evaluate(iter(values), False)\n\n def _evaluate(self, values: Iterable[Any], dry_run: bool = False) -> Any:\n # \"values\" iterates in the recursion\n eval_args = []\n for arg in self.arguments:\n if isinstance(arg, ValueChoiceX):\n # recursive evaluation\n eval_args.append(arg._evaluate(values, dry_run))\n # the recursion will stop when it hits a leaf node (value choice)\n # the implementation is in `ValueChoice`\n else:\n # constant value\n eval_args.append(arg)\n return self.function(*eval_args)\n\n def _translate(self):\n \"\"\"\n Try to behave like one of its candidates when used in ``basic_unit``.\n \"\"\"\n return self.dry_run()\n\n def __repr__(self):\n reprs = []\n for arg in self.arguments:\n if isinstance(arg, ValueChoiceX) and not isinstance(arg, ValueChoice):\n reprs.append('(' + repr(arg) + ')') # add parenthesis for operator priority\n else:\n reprs.append(repr(arg))\n return self.repr_template.format(*reprs)\n\n # the following are a series of methods to create \"ValueChoiceX\"\n # which is a transformed version of value choice\n # https://docs.python.org/3/reference/datamodel.html#special-method-names\n\n # Special operators that can be useful in place of built-in conditional operators.\n @staticmethod\n @_valuechoice_staticmethod_helper\n def to_int(obj: 'ValueChoiceOrAny') -> Union['ValueChoiceX', int]:\n \"\"\"\n Convert a ``ValueChoice`` to an integer.\n \"\"\"\n if isinstance(obj, ValueChoiceX):\n return ValueChoiceX(int, 'int({})', [obj])\n return int(obj)\n\n @staticmethod\n @_valuechoice_staticmethod_helper\n def to_float(obj: 'ValueChoiceOrAny') -> Union['ValueChoiceX', float]:\n \"\"\"\n Convert a ``ValueChoice`` to a float.\n \"\"\"\n if isinstance(obj, ValueChoiceX):\n return ValueChoiceX(float, 'float({})', [obj])\n return float(obj)\n\n @staticmethod\n @_valuechoice_staticmethod_helper\n def condition(pred: 'ValueChoiceOrAny',\n true: 'ValueChoiceOrAny',\n false: 'ValueChoiceOrAny') -> 'ValueChoiceOrAny':\n \"\"\"\n Return ``true`` if the predicate ``pred`` is true else ``false``.\n\n Examples\n --------\n >>> ValueChoice.condition(ValueChoice([1, 2]) > ValueChoice([0, 3]), 2, 1)\n \"\"\"\n if any(isinstance(obj, ValueChoiceX) for obj in [pred, true, false]):\n return ValueChoiceX(lambda t, c, f: t if c else f, 
'{} if {} else {}', [true, pred, false])\n return true if pred else false\n\n @staticmethod\n @_valuechoice_staticmethod_helper\n def max(arg0: Union[Iterable['ValueChoiceOrAny'], 'ValueChoiceOrAny'],\n *args: List['ValueChoiceOrAny']) -> 'ValueChoiceOrAny':\n \"\"\"\n Returns the maximum value from a list of value choices.\n The usage should be similar to Python's built-in value choices,\n where the parameters could be an iterable, or at least two arguments.\n \"\"\"\n if not args:\n return ValueChoiceX.max(*list(arg0))\n lst = [arg0] + list(args)\n if any(isinstance(obj, ValueChoiceX) for obj in lst):\n return ValueChoiceX(max, 'max({})', lst)\n return max(lst)\n\n @staticmethod\n @_valuechoice_staticmethod_helper\n def min(arg0: Union[Iterable['ValueChoiceOrAny'], 'ValueChoiceOrAny'],\n *args: List['ValueChoiceOrAny']) -> 'ValueChoiceOrAny':\n \"\"\"\n Returns the minunum value from a list of value choices.\n The usage should be similar to Python's built-in value choices,\n where the parameters could be an iterable, or at least two arguments.\n \"\"\"\n if not args:\n return ValueChoiceX.min(*list(arg0))\n lst = [arg0] + list(args)\n if any(isinstance(obj, ValueChoiceX) for obj in lst):\n return ValueChoiceX(min, 'min({})', lst)\n return min(lst)\n\n def __hash__(self):\n # this is required because we have implemented ``__eq__``\n return id(self)\n\n # NOTE:\n # Write operations are not supported. Reasons follow:\n # - Semantics are not clear. It can be applied to \"all\" the inner candidates, or only the chosen one.\n # - Implementation effort is too huge.\n # As a result, inplace operators like +=, *=, magic methods like `__getattr__` are not included in this list.\n\n def __getitem__(self, key: Any) -> 'ValueChoiceX':\n return ValueChoiceX(lambda x, y: x[y], '{}[{}]', [self, key])\n\n # region implement int, float, round, trunc, floor, ceil\n # because I believe sometimes we need them to calculate #channels\n # `__int__` and `__float__` are not supported because `__int__` is required to return int.\n def __round__(self, ndigits: Optional[Any] = None) -> 'ValueChoiceX':\n if ndigits is not None:\n return ValueChoiceX(round, 'round({}, {})', [self, ndigits])\n return ValueChoiceX(round, 'round({})', [self])\n\n def __trunc__(self) -> 'ValueChoiceX':\n raise RuntimeError(\"Try to use `ValueChoice.to_int()` instead of `math.trunc()` on value choices.\")\n\n def __floor__(self) -> 'ValueChoiceX':\n return ValueChoiceX(math.floor, 'math.floor({})', [self])\n\n def __ceil__(self) -> 'ValueChoiceX':\n return ValueChoiceX(math.ceil, 'math.ceil({})', [self])\n\n def __index__(self) -> NoReturn:\n # https://docs.python.org/3/reference/datamodel.html#object.__index__\n raise RuntimeError(\"`__index__` is not allowed on ValueChoice, which means you can't \"\n \"use int(), float(), complex(), range() on a ValueChoice.\")\n\n def __bool__(self) -> NoReturn:\n raise RuntimeError('Cannot use bool() on ValueChoice. That means, using ValueChoice in a if-clause is illegal. 
'\n 'Please try methods like `ValueChoice.max(a, b)` to see whether that meets your needs.')\n # endregion\n\n # region the following code is generated with codegen (see above)\n # Annotated with \"region\" because I want to collapse them in vscode\n def __neg__(self) -> 'ValueChoiceX':\n return ValueChoiceX(operator.neg, '-{}', [self])\n\n def __pos__(self) -> 'ValueChoiceX':\n return ValueChoiceX(operator.pos, '+{}', [self])\n\n def __invert__(self) -> 'ValueChoiceX':\n return ValueChoiceX(operator.invert, '~{}', [self])\n\n def __add__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.add, '{} + {}', [self, other])\n\n def __radd__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.add, '{} + {}', [other, self])\n\n def __sub__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.sub, '{} - {}', [self, other])\n\n def __rsub__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.sub, '{} - {}', [other, self])\n\n def __mul__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.mul, '{} * {}', [self, other])\n\n def __rmul__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.mul, '{} * {}', [other, self])\n\n def __matmul__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.matmul, '{} @ {}', [self, other])\n\n def __rmatmul__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.matmul, '{} @ {}', [other, self])\n\n def __truediv__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.truediv, '{} // {}', [self, other])\n\n def __rtruediv__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.truediv, '{} // {}', [other, self])\n\n def __floordiv__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.floordiv, '{} / {}', [self, other])\n\n def __rfloordiv__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.floordiv, '{} / {}', [other, self])\n\n def __mod__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.mod, '{} % {}', [self, other])\n\n def __rmod__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.mod, '{} % {}', [other, self])\n\n def __lshift__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.lshift, '{} << {}', [self, other])\n\n def __rlshift__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.lshift, '{} << {}', [other, self])\n\n def __rshift__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.rshift, '{} >> {}', [self, other])\n\n def __rrshift__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.rshift, '{} >> {}', [other, self])\n\n def __and__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.and_, '{} & {}', [self, other])\n\n def __rand__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.and_, '{} & {}', [other, self])\n\n def __xor__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.xor, '{} ^ {}', [self, other])\n\n def __rxor__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.xor, '{} ^ {}', [other, self])\n\n def __or__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.or_, '{} | {}', [self, other])\n\n def __ror__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.or_, '{} | {}', [other, self])\n\n def __lt__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.lt, '{} < {}', [self, other])\n\n def __le__(self, other: Any) -> 'ValueChoiceX':\n return 
ValueChoiceX(operator.le, '{} <= {}', [self, other])\n\n def __eq__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.eq, '{} == {}', [self, other])\n\n def __ne__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.ne, '{} != {}', [self, other])\n\n def __ge__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.ge, '{} >= {}', [self, other])\n\n def __gt__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(operator.gt, '{} > {}', [self, other])\n # endregion\n\n # __pow__, __divmod__, __abs__ are special ones.\n # Not easy to cover those cases with codegen.\n def __pow__(self, other: Any, modulo: Optional[Any] = None) -> 'ValueChoiceX':\n if modulo is not None:\n return ValueChoiceX(pow, 'pow({}, {}, {})', [self, other, modulo])\n return ValueChoiceX(lambda a, b: a ** b, '{} ** {}', [self, other])\n\n def __rpow__(self, other: Any, modulo: Optional[Any] = None) -> 'ValueChoiceX':\n if modulo is not None:\n return ValueChoiceX(pow, 'pow({}, {}, {})', [other, self, modulo])\n return ValueChoiceX(lambda a, b: a ** b, '{} ** {}', [other, self])\n\n def __divmod__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(divmod, 'divmod({}, {})', [self, other])\n\n def __rdivmod__(self, other: Any) -> 'ValueChoiceX':\n return ValueChoiceX(divmod, 'divmod({}, {})', [other, self])\n\n def __abs__(self) -> 'ValueChoiceX':\n return ValueChoiceX(abs, 'abs({})', [self])\n\n\nValueChoiceOrAny = TypeVar('ValueChoiceOrAny', ValueChoiceX, Any)\n\n\nclass ValueChoice(ValueChoiceX, Mutable):\n \"\"\"\n ValueChoice is to choose one from ``candidates``.\n\n In most use scenarios, ValueChoice should be passed to the init parameters of a serializable module. For example,\n\n .. code-block:: python\n\n class Net(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv = nn.Conv2d(3, nn.ValueChoice([32, 64]), kernel_size=nn.ValueChoice([3, 5, 7]))\n\n def forward(self, x):\n return self.conv(x)\n\n In case, you want to search a parameter that is used repeatedly, this is also possible by sharing the same value choice instance.\n (Sharing the label should have the same effect.) For example,\n\n .. code-block:: python\n\n class Net(nn.Module):\n def __init__(self):\n super().__init__()\n hidden_dim = nn.ValueChoice([128, 512])\n self.fc = nn.Sequential(\n nn.Linear(64, hidden_dim),\n nn.Linear(hidden_dim, 10)\n )\n\n # the following code has the same effect.\n # self.fc = nn.Sequential(\n # nn.Linear(64, nn.ValueChoice([128, 512], label='dim')),\n # nn.Linear(nn.ValueChoice([128, 512], label='dim'), 10)\n # )\n\n def forward(self, x):\n return self.fc(x)\n\n Note that ValueChoice should be used directly. Transformations like ``nn.Linear(32, nn.ValueChoice([64, 128]) * 2)``\n are not supported.\n\n Another common use case is to initialize the values to choose from in init and call the module in forward to get the chosen value.\n Usually, this is used to pass a mutable value to a functional API like ``torch.xxx`` or ``nn.functional.xxx```.\n For example,\n\n .. 
code-block:: python\n\n class Net(nn.Module):\n def __init__(self):\n super().__init__()\n self.dropout_rate = nn.ValueChoice([0., 1.])\n\n def forward(self, x):\n return F.dropout(x, self.dropout_rate())\n\n Parameters\n ----------\n candidates : list\n List of values to choose from.\n prior : list of float\n Prior distribution to sample from.\n label : str\n Identifier of the value choice.\n \"\"\"\n\n # FIXME: prior is designed but not supported yet\n\n @classmethod\n def create_fixed_module(cls, candidates: List[Any], *, label: Optional[str] = None, **kwargs):\n value = get_fixed_value(label)\n if value not in candidates:\n raise ValueError(f'Value {value} does not belong to the candidates: {candidates}.')\n return value\n\n def __init__(self, candidates: List[Any], *, prior: Optional[List[float]] = None, label: Optional[str] = None):\n super().__init__(None, None, None)\n self.candidates = candidates\n self.prior = prior or [1 / len(candidates) for _ in range(len(candidates))]\n assert abs(sum(self.prior) - 1) < 1e-5, 'Sum of prior distribution is not 1.'\n self._label = generate_new_label(label)\n self._accessor = []\n\n @property\n def label(self):\n return self._label\n\n def forward(self):\n warnings.warn('You should not run forward of this module directly.')\n return self.candidates[0]\n\n def inner_choices(self) -> Iterable['ValueChoice']:\n # yield self because self is the only value choice here\n yield self\n\n def dry_run(self) -> Any:\n return self.candidates[0]\n\n def _evaluate(self, values: Iterable[Any], dry_run: bool = False) -> Any:\n if dry_run:\n return self.candidates[0]\n try:\n value = next(values)\n except StopIteration:\n raise ValueError(f'Value list {values} is exhausted when trying to get a chosen value of {self}.')\n if value not in self.candidates:\n raise ValueError(f'Value {value} does not belong to the candidates of {self}.')\n return value\n\n def __repr__(self):\n return f'ValueChoice({self.candidates}, label={repr(self.label)})'\n\n\n@basic_unit\nclass Placeholder(nn.Module):\n \"\"\"\n The API that creates an empty module for later mutations.\n For advanced usages only.\n \"\"\"\n\n def __init__(self, label, **related_info):\n self.label = label\n self.related_info = related_info\n super().__init__()\n\n def forward(self, x):\n return x\n",
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\nimport os\nimport logging\nimport torch\nimport numpy as np\nfrom .shape_dependency import ChannelDependency, GroupDependency, InputChannelDependency\nfrom .utils import get_module_by_name\n# logging.basicConfig(level = logging.DEBUG)\n_logger = logging.getLogger('FixMaskConflict')\n\n\ndef fix_mask_conflict(masks, model, dummy_input, traced=None):\n \"\"\"\n MaskConflict fix the mask conflict for the channel dependencies\n and group dependency.\n\n Parameters\n ----------\n masks : dict/str\n A dict object that stores the masks or the path of the mask file\n model : torch.nn.Module\n model to fix the mask conflict\n dummy_input : torch.Tensor/list of tensors/dict of tensors\n input example to trace the model\n traced : torch._C.torch.jit.TopLevelTracedModule\n the traced model of the target model, is this parameter is not None,\n we donnot use the model and dummpy_input to get the trace graph.\n \"\"\"\n if isinstance(masks, str):\n # if the input is the path of the mask_file\n assert os.path.exists(masks)\n masks = torch.load(masks)\n assert len(masks) > 0, 'Mask tensor cannot be empty'\n # if the user uses the model and dummy_input to trace the model, we\n # should get the traced model handly, so that, we only trace the\n # model once, GroupMaskConflict and ChannelMaskConflict will reuse\n # this traced model.\n if traced is None:\n assert model is not None and dummy_input is not None\n training = model.training\n # We need to trace the model in eval mode\n model.eval()\n kw_args = {}\n if torch.__version__ >= '1.6.0':\n # only pytorch with version greater than 1.6.0 has the strict option\n kw_args['strict'] = False\n traced = torch.jit.trace(model, dummy_input, **kw_args)\n model.train(training)\n\n fix_group_mask = GroupMaskConflict(masks, model, dummy_input, traced)\n masks = fix_group_mask.fix_mask()\n fix_channel_mask = ChannelMaskConflict(masks, model, dummy_input, traced)\n masks = fix_channel_mask.fix_mask()\n return masks\n\n\nclass MaskFix:\n def __init__(self, masks, model=None, dummy_input=None, traced=None):\n # check if the parameters are valid\n parameter_valid = False\n if traced is not None:\n parameter_valid = True\n elif (model is not None) and (dummy_input is not None):\n parameter_valid = True\n if not parameter_valid:\n raise Exception('The input parameters is invalid!')\n self.model = model\n self.dummy_input = dummy_input\n self.traced = traced\n self.masks = masks\n\n def fix_mask(self):\n raise NotImplementedError\n\n def export(self, path):\n \"\"\"\n Export the masks after fixing the conflict to file.\n \"\"\"\n torch.save(self.masks, path)\n\n\nclass GroupMaskConflict(MaskFix):\n def __init__(self, masks, model, dummy_input, traced=None):\n \"\"\"\n GroupMaskConflict fix the mask conflict between the layers that\n has group dependecy with each other.\n\n Parameters\n ----------\n masks : dict\n a dict object that stores the masks\n model : torch.nn.Module\n model to fix the mask conflict\n dummy_input : torch.Tensor\n input example to trace the model\n traced : torch._C.torch.jit.TopLevelTracedModule\n the traced model of the target model, is this parameter is not None,\n we donnot use the model and dummpy_input to get the trace graph.\n \"\"\"\n super(GroupMaskConflict, self).__init__(\n masks, model, dummy_input, traced)\n\n def fix_mask(self):\n \"\"\"\n Fix the mask conflict before the mask inference for the layers that\n has group dependencies. 
This function should be called before the\n mask inference of the 'speedup' module.\n \"\"\"\n group_depen = GroupDependency(\n self.model, self.dummy_input, self.traced)\n depens = group_depen.dependency\n min_groups = group_depen.min_groups\n _logger.info(depens)\n for layername in depens:\n group_max = depens[layername]\n group_min = min_groups[layername]\n if layername not in self.masks:\n # this layer not pruned\n continue\n w_mask = self.masks[layername]['weight']\n shape = w_mask.size()\n count = np.prod(shape[1:])\n all_ones = (w_mask.flatten(1).sum(-1) == count).nonzero().squeeze(1).tolist()\n all_zeros = (w_mask.flatten(1).sum(-1) == 0).nonzero().squeeze(1).tolist()\n if len(all_ones) + len(all_zeros) < w_mask.size(0):\n # In fine-grained pruning, skip this layer\n _logger.info('Layers %s using fine-grained pruning', layername)\n continue\n assert shape[0] % group_max == 0\n # Find the number of masked filter for each group (mini_masked).\n # Because we have to keep the pruned filter can still\n # be divided into the same number of groups, so we only can\n # prune mini_masked filters for each group.\n step = shape[0] / group_max\n group_masked = []\n for i in range(group_max):\n _start = step * i\n _end = step * (i + 1)\n _tmp_list = list(\n filter(lambda x: _start <= x and x < _end, all_zeros))\n group_masked.append(_tmp_list)\n mini_masked = min([len(x) for x in group_masked])\n need_unmask = set()\n for gm in group_masked:\n for i in range(mini_masked, len(gm)):\n # To keep the output channel number still being divisible to\n # groups, we set the masks of following filters to be zero.\n pos = gm[i]\n need_unmask.add(pos)\n step = shape[0] / group_min\n for i in range(group_min):\n _start = step * i\n _end = step * (i+1)\n _tmp_list = list(\n filter(lambda x: _start <= x and x < _end, all_zeros))\n if len(_tmp_list) == step:\n # if the whole group is removed, then we don't have to unmask for\n # the filters in this group\n for pos in _tmp_list:\n if pos in need_unmask:\n need_unmask.remove(pos)\n for pos in need_unmask:\n self.masks[layername]['weight'][pos] = torch.ones(shape[1:])\n if hasattr(self.masks[layername], 'bias'):\n self.masks[layername]['bias'][pos] = 1\n return self.masks\n\n\nclass ChannelMaskConflict(MaskFix):\n def __init__(self, masks, model, dummy_input, traced=None):\n \"\"\"\n ChannelMaskConflict fix the mask conflict between the layers that\n has channel dependecy with each other.\n\n Parameters\n ----------\n masks : dict\n a dict object that stores the masks\n model : torch.nn.Module\n model to fix the mask conflict\n dummy_input : torch.Tensor\n input example to trace the model\n graph : torch._C.torch.jit.TopLevelTracedModule\n the traced graph of the target model, is this parameter is not None,\n we donnot use the model and dummpy_input to get the trace graph.\n \"\"\"\n super(ChannelMaskConflict, self).__init__(\n masks, model, dummy_input, traced)\n self.conv_prune_dim = detect_mask_prune_dim(masks, model)\n self.channel_prune_type = detect_channel_prune_type(masks, model)\n _logger.info('Dectected conv prune dim\" %d', self.conv_prune_dim)\n\n def fix_mask(self):\n \"\"\"\n Fix the mask conflict before the mask inference for the layers that\n has shape dependencies. This function should be called before the\n mask inference of the 'speedup' module. 
Only structured pruning masks\n are supported.\n \"\"\"\n if self.conv_prune_dim == 0:\n channel_depen = ChannelDependency(\n self.model, self.dummy_input, self.traced, self.channel_prune_type)\n\n else:\n channel_depen = InputChannelDependency(\n self.model, self.dummy_input, self.traced)\n depen_sets = channel_depen.dependency_sets\n sum_idx = (1, 2, 3) if self.conv_prune_dim == 0 else (0, 2, 3)\n\n (_tmp_name, _tmp_tensor) = list(self.masks.items())[0]\n device = _tmp_tensor['weight'].device\n\n for dset in depen_sets:\n if len(dset) <= 1:\n continue\n # channel_masks is a list, each element is None or a vector, for example:\n # [[0, 1, 1, 0, 0], [0, 0, 1, 1, 0], None], None means no channel\n # is pruned.\n channel_masks = []\n fine_grained = False\n for name in dset:\n if name in self.masks:\n _, m = get_module_by_name(self.model, name)\n assert m is not None\n mask = self.masks[name]['weight']\n if type(m).__name__ == 'Conv2d':\n channel_mask = (mask.abs().sum(sum_idx) != 0).int()\n channel_masks.append(channel_mask)\n if (channel_mask.sum() * (mask.numel() / mask.shape[self.conv_prune_dim])).item() != (mask > 0).sum().item():\n fine_grained = True\n elif type(m).__name__ == 'Linear':\n if self.conv_prune_dim == 1:\n channel_masks.append(\n (mask.abs().sum(0) != 0).int())\n else:\n channel_masks.append(\n (mask.abs().sum(1) != 0).int())\n elif type(m).__name__ == 'BatchNorm2d':\n channel_masks.append(mask.int())\n elif type(m).__name__ == 'ConvTranspose2d':\n # convtranspose have difference memory layout, so that we need create\n # a tmp_sum_idx for conv_transpose\n tmp_sum_idx = (\n 0, 2, 3) if self.conv_prune_dim == 0 else (1, 2, 3)\n channel_mask = (mask.abs().sum(tmp_sum_idx) != 0).int()\n channel_masks.append(channel_mask)\n if (channel_mask.sum() * (mask.numel() / mask.shape[1 - self.conv_prune_dim])).item() != (mask > 0).sum().item():\n fine_grained = True\n else:\n raise RuntimeError(\n f'unsupported module type: {type(m).__name__}')\n else:\n # no mask means not pruned, equivlent to full masks\n channel_masks.append(None)\n if fine_grained:\n _logger.info(\"Fine-grianed mask detected\")\n if all(x is None for x in channel_masks):\n continue\n num_channels_list = [len(x)\n for x in channel_masks if x is not None]\n # number of channels in same set should be identical\n assert len(set(num_channels_list)) == 1\n num_channels = num_channels_list[0]\n\n for i, dim_mask in enumerate(channel_masks):\n if dim_mask is None:\n channel_masks[i] = torch.ones(\n num_channels).int().to(device)\n\n # merge masks with 'or'\n merged_channel_mask = channel_masks[0].clone()\n for i in range(1, len(channel_masks)):\n merged_channel_mask = (\n (merged_channel_mask + channel_masks[i]) != 0).int()\n\n merged_index = torch.nonzero(merged_channel_mask, as_tuple=True)[0]\n\n for name in dset:\n if name not in self.masks:\n assert all(merged_channel_mask)\n continue\n orig_mask = self.masks[name]['weight']\n _, m = get_module_by_name(self.model, name)\n new_mask = torch.zeros_like(orig_mask)\n if type(m).__name__ == 'Conv2d':\n if self.conv_prune_dim == 0:\n new_mask[merged_index, :, :, :] = 1.\n else:\n new_mask[:, merged_index, :, :] = 1.\n elif type(m).__name__ == 'Linear':\n if self.conv_prune_dim == 0:\n new_mask[merged_index, :] = 1\n elif self.conv_prune_dim == 1:\n new_mask[:, merged_index] = 1.\n elif type(m).__name__ == 'BatchNorm2d':\n new_mask = merged_channel_mask.type_as(orig_mask)\n else:\n raise RuntimeError(\n f'unsupported module type: {type(m).__name__}')\n 
self.masks[name]['weight'] = new_mask\n if 'bias' in self.masks[name] and self.masks[name]['bias'] is not None:\n if type(m).__name__ == 'Conv2d':\n assert self.conv_prune_dim == 0\n if self.conv_prune_dim == 0:\n self.masks[name]['bias'] = merged_channel_mask.type_as(\n self.masks[name]['bias'])\n\n return self.masks\n\ndef detect_channel_prune_type(masks, model):\n \"\"\"\n User can prune a channel through two ways: 1) prune\n the corresponding filter of the conv layer(all the\n filter related pruner), 2) prune the BN layers that\n followed after a conv(Slim pruner). This function find\n the pruning type of the masks.\n\n Parameters\n ----------\n masks: dict\n A dict object that stores the masks.\n model: nn.Module\n Model object which the mask can be applied on.\n\n Returns:\n -------\n prune_type: str\n Could be Filter or Batchnorm\n \"\"\"\n prune_type = 'Filter'\n all_batch_norm = True\n for layer_name in masks:\n _, m = get_module_by_name(model, layer_name)\n if m is None or (not isinstance(m, torch.nn.BatchNorm2d)):\n all_batch_norm = False\n break\n if all_batch_norm:\n # if all masks are for batchnorm layers, then the prune_type is BatchNorm\n # Note, actually we currently do not support pruning both Conv and BatchNorm\n # at the same time.\n prune_type = 'Batchnorm'\n return prune_type\n\ndef detect_mask_prune_dim(masks, model):\n \"\"\"\n Detect how the masks of convolutional layers are pruned.\n\n Parameters\n ----------\n masks: dict\n A dict object that stores the masks.\n model: nn.Module\n Model object which the mask can be applied on.\n Returns:\n -------\n How the masks of convolutional layers are pruned, this depends on pruning algorithms, it should\n return 1 for masks generated by AMCPruner, and returns 0 for masks generated by the rest\n NNI builtin pruners.\n 0: filter pruning, prune filters of weights which causes channels of output feature maps are pruned.\n 1: channel pruning, prune kernels corresponding to each input channels which causes channels of\n input feature maps are pruned.\n \"\"\"\n dim0_preserved, dim1_preserved = 0., 0.\n dim0_num, dim1_num = 0., 0.\n for module_name in masks:\n _, m = get_module_by_name(model, module_name)\n if m is None or type(m).__name__ != 'Conv2d':\n continue\n\n mask = masks[module_name]['weight'].clone()\n assert (mask >= 0).sum() == mask.numel(), \\\n \"mask values should be greater than or equal to 0.\"\n mask = (mask > 0).int()\n mask = mask.view(mask.shape[0], mask.shape[1], -1)\n dim0_mask = (mask.sum((1, 2)) > 0).int()\n dim1_mask = (mask.sum((0, 2)) > 0).int()\n dim0_preserved += dim0_mask.sum().item()\n dim1_preserved += dim1_mask.sum().item()\n dim0_num += len(dim0_mask)\n dim1_num += len(dim1_mask)\n\n if dim0_num == 0 or dim1_num == 0:\n _logger.warning('no multi-dimension masks found.')\n return 0\n\n dim0_sparsity, dim1_sparsity = 1. - dim0_preserved / \\\n dim0_num, 1. - dim1_preserved / dim1_num\n _logger.info('dim0 sparsity: %f', dim0_sparsity)\n _logger.info('dim1 sparsity: %f', dim1_sparsity)\n\n if dim0_sparsity == dim1_sparsity == 0.:\n _logger.warning('nothing masked.')\n\n if dim0_sparsity > 0 and dim1_sparsity > 0:\n _logger.warning('both dim0 and dim1 masks found.')\n\n return 0 if dim0_sparsity >= dim1_sparsity else 1\n",
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport logging\nimport random\n\nimport torch\nimport torch.nn as nn\n\nfrom ..interface import BaseOneShotTrainer\nfrom .utils import AverageMeterGroup, replace_layer_choice, replace_input_choice, to_device\n\n\n_logger = logging.getLogger(__name__)\n\n\ndef _get_mask(sampled, total):\n multihot = [i == sampled or (isinstance(sampled, list) and i in sampled) for i in range(total)]\n return torch.tensor(multihot, dtype=torch.bool) # pylint: disable=not-callable\n\n\nclass PathSamplingLayerChoice(nn.Module):\n \"\"\"\n Mixed module, in which fprop is decided by exactly one or multiple (sampled) module.\n If multiple module is selected, the result will be sumed and returned.\n\n Attributes\n ----------\n sampled : int or list of int\n Sampled module indices.\n mask : tensor\n A multi-hot bool 1D-tensor representing the sampled mask.\n \"\"\"\n\n def __init__(self, layer_choice):\n super(PathSamplingLayerChoice, self).__init__()\n self.op_names = []\n for name, module in layer_choice.named_children():\n self.add_module(name, module)\n self.op_names.append(name)\n assert self.op_names, 'There has to be at least one op to choose from.'\n self.sampled = None # sampled can be either a list of indices or an index\n\n def forward(self, *args, **kwargs):\n assert self.sampled is not None, 'At least one path needs to be sampled before fprop.'\n if isinstance(self.sampled, list):\n return sum([getattr(self, self.op_names[i])(*args, **kwargs) for i in self.sampled]) # pylint: disable=not-an-iterable\n else:\n return getattr(self, self.op_names[self.sampled])(*args, **kwargs) # pylint: disable=invalid-sequence-index\n\n def __len__(self):\n return len(self.op_names)\n\n @property\n def mask(self):\n return _get_mask(self.sampled, len(self))\n\n\nclass PathSamplingInputChoice(nn.Module):\n \"\"\"\n Mixed input. Take a list of tensor as input, select some of them and return the sum.\n\n Attributes\n ----------\n sampled : int or list of int\n Sampled module indices.\n mask : tensor\n A multi-hot bool 1D-tensor representing the sampled mask.\n \"\"\"\n\n def __init__(self, input_choice):\n super(PathSamplingInputChoice, self).__init__()\n self.n_candidates = input_choice.n_candidates\n self.n_chosen = input_choice.n_chosen\n self.sampled = None\n\n def forward(self, input_tensors):\n if isinstance(self.sampled, list):\n return sum([input_tensors[t] for t in self.sampled]) # pylint: disable=not-an-iterable\n else:\n return input_tensors[self.sampled]\n\n def __len__(self):\n return self.n_candidates\n\n @property\n def mask(self):\n return _get_mask(self.sampled, len(self))\n\n\nclass SinglePathTrainer(BaseOneShotTrainer):\n \"\"\"\n Single-path trainer. Samples a path every time and backpropagates on that path.\n\n Parameters\n ----------\n model : nn.Module\n Model with mutables.\n loss : callable\n Called with logits and targets. Returns a loss tensor.\n metrics : callable\n Returns a dict that maps metrics keys to metrics data.\n optimizer : Optimizer\n Optimizer that optimizes the model.\n num_epochs : int\n Number of epochs of training.\n dataset_train : Dataset\n Dataset of training.\n dataset_valid : Dataset\n Dataset of validation.\n batch_size : int\n Batch size.\n workers: int\n Number of threads for data preprocessing. Not used for this trainer. Maybe removed in future.\n device : torch.device\n Device object. Either ``torch.device(\"cuda\")`` or ``torch.device(\"cpu\")``. 
When ``None``, trainer will\n automatic detects GPU and selects GPU first.\n log_frequency : int\n Number of mini-batches to log metrics.\n \"\"\"\n\n def __init__(self, model, loss, metrics,\n optimizer, num_epochs, dataset_train, dataset_valid,\n batch_size=64, workers=4, device=None, log_frequency=None):\n self.model = model\n self.loss = loss\n self.metrics = metrics\n self.optimizer = optimizer\n self.num_epochs = num_epochs\n self.dataset_train = dataset_train\n self.dataset_valid = dataset_valid\n self.batch_size = batch_size\n self.workers = workers\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device is None else device\n self.log_frequency = log_frequency\n self.model.to(self.device)\n\n self.nas_modules = []\n replace_layer_choice(self.model, PathSamplingLayerChoice, self.nas_modules)\n replace_input_choice(self.model, PathSamplingInputChoice, self.nas_modules)\n for _, module in self.nas_modules:\n module.to(self.device)\n\n self.train_loader = torch.utils.data.DataLoader(self.dataset_train,\n batch_size=batch_size,\n num_workers=workers)\n self.valid_loader = torch.utils.data.DataLoader(self.dataset_valid,\n batch_size=batch_size,\n num_workers=workers)\n\n def _resample(self):\n result = {}\n for name, module in self.nas_modules:\n if name not in result:\n result[name] = random.randint(0, len(module) - 1)\n module.sampled = result[name]\n return result\n\n def _train_one_epoch(self, epoch):\n self.model.train()\n meters = AverageMeterGroup()\n for step, (x, y) in enumerate(self.train_loader):\n x, y = to_device(x, self.device), to_device(y, self.device)\n self.optimizer.zero_grad()\n self._resample()\n logits = self.model(x)\n loss = self.loss(logits, y)\n loss.backward()\n self.optimizer.step()\n\n metrics = self.metrics(logits, y)\n metrics[\"loss\"] = loss.item()\n meters.update(metrics)\n if self.log_frequency is not None and step % self.log_frequency == 0:\n _logger.info(\"Epoch [%s/%s] Step [%s/%s] %s\", epoch + 1,\n self.num_epochs, step + 1, len(self.train_loader), meters)\n\n def _validate_one_epoch(self, epoch):\n self.model.eval()\n meters = AverageMeterGroup()\n with torch.no_grad():\n for step, (x, y) in enumerate(self.valid_loader):\n x, y = to_device(x, self.device), to_device(y, self.device)\n self._resample()\n logits = self.model(x)\n loss = self.loss(logits, y)\n metrics = self.metrics(logits, y)\n metrics[\"loss\"] = loss.item()\n meters.update(metrics)\n if self.log_frequency is not None and step % self.log_frequency == 0:\n _logger.info(\"Epoch [%s/%s] Validation Step [%s/%s] %s\", epoch + 1,\n self.num_epochs, step + 1, len(self.valid_loader), meters)\n\n def fit(self):\n for i in range(self.num_epochs):\n self._train_one_epoch(i)\n self._validate_one_epoch(i)\n\n def export(self):\n return self._resample()\n\n\nRandomTrainer = SinglePathTrainer\n"
] | [
[
"torch.cat"
],
[
"torch.ones",
"torch.jit.trace",
"torch.load",
"torch.zeros_like",
"numpy.prod",
"torch.nonzero",
"torch.save"
],
[
"torch.no_grad",
"torch.utils.data.DataLoader",
"torch.cuda.is_available",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
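A small, self-contained sketch of the multi-hot mask helper used by PathSamplingLayerChoice and PathSamplingInputChoice in the nni/retiarii code above (it mirrors the module-level _get_mask shown there); the only assumption is that PyTorch is installed, and the function name here is local to this sketch.

import torch

def get_mask(sampled, total):
    # One boolean per candidate: True where the index was sampled.
    # `sampled` may be a single index or a list of indices.
    multihot = [i == sampled or (isinstance(sampled, list) and i in sampled)
                for i in range(total)]
    return torch.tensor(multihot, dtype=torch.bool)

print(get_mask(2, 5))       # tensor([False, False,  True, False, False])
print(get_mask([0, 3], 5))  # tensor([ True, False, False,  True, False])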
neochristou/tensorflow | [
"1fb338b1c42930c0eef4d0b4d8d5fdf24a678654",
"50b55bfc5c9132c3bd82505181380bffbb47a5ff",
"1fb338b1c42930c0eef4d0b4d8d5fdf24a678654"
] | [
"tensorflow/python/distribute/collective_all_reduce_strategy.py",
"tensorflow/python/data/experimental/ops/data_service_ops.py",
"tensorflow/tools/docs/generate2_test.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Class CollectiveAllReduceStrategy implementing DistributionStrategy.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport threading\nimport time\nimport weakref\n\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.core.protobuf import tensorflow_server_pb2\nfrom tensorflow.python.distribute import collective_util\nfrom tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib\nfrom tensorflow.python.distribute import cross_device_utils\nfrom tensorflow.python.distribute import device_util\nfrom tensorflow.python.distribute import distribute_lib\nfrom tensorflow.python.distribute import distribute_utils\nfrom tensorflow.python.distribute import distribution_strategy_context as ds_context\nfrom tensorflow.python.distribute import input_lib\nfrom tensorflow.python.distribute import mirrored_strategy\nfrom tensorflow.python.distribute import multi_worker_util\nfrom tensorflow.python.distribute import numpy_dataset\nfrom tensorflow.python.distribute import reduce_util\nfrom tensorflow.python.distribute import values\nfrom tensorflow.python.distribute.cluster_resolver import ClusterResolver\nfrom tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver\nfrom tensorflow.python.distribute.cluster_resolver import TFConfigClusterResolver\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import collective_ops\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training.tracking import base\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n# pylint: disable=line-too-long\n@tf_export(\"distribute.MultiWorkerMirroredStrategy\", v1=[])\nclass CollectiveAllReduceStrategy(distribute_lib.Strategy):\n \"\"\"A distribution strategy for synchronous training on multiple workers.\n\n This strategy implements synchronous distributed training across multiple\n workers, each with potentially multiple GPUs. Similar to\n `tf.distribute.MirroredStrategy`, it replicates all variables and computations\n to each local device. The difference is that it uses a distributed collective\n implementation (e.g. all-reduce), so that multiple workers can work together.\n\n You need to launch your program on each worker and configure\n `cluster_resolver` correctly. For example, if you are using\n `tf.distribute.cluster_resolver.TFConfigClusterResolver`, each worker needs to\n have its corresponding `task_type` and `task_id` set in the `TF_CONFIG`\n environment variable. 
An example TF_CONFIG on worker-0 of a two worker cluster\n is:\n\n ```\n TF_CONFIG = '{\"cluster\": {\"worker\": [\"localhost:12345\", \"localhost:23456\"]}, \"task\": {\"type\": \"worker\", \"index\": 0} }'\n ```\n\n Your program runs on each worker as-is. Note that collectives require each\n worker to participate. All `tf.distribute` and non `tf.distribute` API may use\n collectives internally, e.g. checkpointing and saving since reading a\n `tf.Variable` with `tf.VariableSynchronization.ON_READ` all-reduces the value.\n Therefore it's recommended to run exactly the same program on each worker.\n Dispatching based on `task_type` or `task_id` of the worker is error-prone.\n\n `cluster_resolver.num_accelerators()` determines the number of GPUs the\n strategy uses. If it's zero, the strategy uses the CPU. All workers need to\n use the same number of devices, otherwise the behavior is undefined.\n\n This strategy is not intended for TPU. Use `tf.distribute.TPUStrategy`\n instead.\n\n After setting up TF_CONFIG, using this strategy is similar to using\n `tf.distribute.MirroredStrategy` and `tf.distribute.TPUStrategy`.\n\n ```\n strategy = tf.distribute.MultiWorkerMirroredStrategy()\n\n with strategy.scope():\n model = tf.keras.Sequential([\n tf.keras.layers.Dense(2, input_shape=(5,)),\n ])\n optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)\n\n def dataset_fn(ctx):\n x = np.random.random((2, 5)).astype(np.float32)\n y = np.random.randint(2, size=(2, 1))\n dataset = tf.data.Dataset.from_tensor_slices((x, y))\n return dataset.repeat().batch(1, drop_remainder=True)\n dist_dataset = strategy.distribute_datasets_from_function(dataset_fn)\n\n model.compile()\n model.fit(dist_dataset)\n ```\n\n You can also write your own training loop:\n\n ```\n @tf.function\n def train_step(iterator):\n\n def step_fn(inputs):\n features, labels = inputs\n with tf.GradientTape() as tape:\n logits = model(features, training=True)\n loss = tf.keras.losses.sparse_categorical_crossentropy(\n labels, logits)\n\n grads = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(grads, model.trainable_variables))\n\n strategy.run(step_fn, args=(next(iterator),))\n\n for _ in range(NUM_STEP):\n train_step(iterator)\n ```\n\n See\n [Multi-worker training with Keras](https://www.tensorflow.org/tutorials/distribute/multi_worker_with_keras)\n for a detailed tutorial.\n\n __Saving__\n\n You need to save and checkpoint on all workers instead of just one. This is\n because variables whose synchronization=ON_READ triggers aggregation during\n saving. It's recommended to save to a different path on each worker to avoid\n race conditions. Each worker saves the same thing. See\n [Multi-worker training with Keras](https://www.tensorflow.org/tutorials/distribute/multi_worker_with_keras#model_saving_and_loading)\n tutorial for examples.\n\n __Known Issues__\n\n * `tf.distribute.cluster_resolver.TFConfigClusterResolver` does not return the\n correct number of accelerators. The strategy uses all available GPUs if\n `cluster_resolver` is `tf.distribute.cluster_resolver.TFConfigClusterResolver`\n or `None`.\n * In eager mode, the strategy needs to be created before calling any other\n Tensorflow API.\n\n \"\"\"\n # pylint: enable=line-too-long\n\n # TODO(anjalisridhar): Update our guides with examples showing how we can use\n # the cluster_resolver argument.\n\n # The starting number for collective keys. 
This should only be set in tests.\n _collective_key_base = 0\n\n def __init__(self,\n cluster_resolver=None,\n communication_options=None):\n \"\"\"Creates the strategy.\n\n Args:\n cluster_resolver: optional\n `tf.distribute.cluster_resolver.ClusterResolver`. If `None`,\n `tf.distribute.cluster_resolver.TFConfigClusterResolver` is used.\n communication_options: optional\n `tf.distribute.experimental.CommunicationOptions`. This configures the\n default options for cross device communications. It can be overridden by\n options provided to the communication APIs like\n `tf.distribute.ReplicaContext.all_reduce`. See\n `tf.distribute.experimental.CommunicationOptions` for details.\n \"\"\"\n if communication_options is None:\n communication_options = collective_util.Options()\n super(CollectiveAllReduceStrategy, self).__init__(\n CollectiveAllReduceExtended(\n self,\n cluster_resolver=cluster_resolver,\n communication_options=communication_options))\n\n distribute_lib.distribution_strategy_gauge.get_cell(\"V2\").set(\n \"MultiWorkerMirroredStrategy\")\n # pylint: disable=protected-access\n distribute_lib.distribution_strategy_replica_gauge.get_cell(\n \"num_workers\").set(self.extended._num_workers)\n distribute_lib.distribution_strategy_replica_gauge.get_cell(\n \"num_replicas_per_worker\").set(self.extended._num_gpus_per_worker)\n\n @classmethod\n def _from_local_devices(cls, devices, communication_options=None):\n \"\"\"A convenience method to create an object with a list of devices.\"\"\"\n obj = cls(communication_options=communication_options)\n obj.extended._initialize_local(TFConfigClusterResolver(), devices=devices) # pylint: disable=protected-access\n return obj\n\n @property\n def cluster_resolver(self):\n \"\"\"Returns the cluster resolver associated with this strategy.\n\n As a multi-worker strategy, `tf.distribute.MultiWorkerMirroredStrategy`\n provides the associated `tf.distribute.cluster_resolver.ClusterResolver`. If\n the user provides one in `__init__`, that instance is returned; if the user\n does not, a default `TFConfigClusterResolver` is provided.\n \"\"\"\n return self.extended._cluster_resolver # pylint: disable=protected-access\n\n\nclass _CollectiveAllReduceStrategyExperimentalMeta(type):\n\n @classmethod\n def __instancecheck__(cls, instance):\n # This is to make isinstance(tf.distribute.MultiWorkerMirroredStrategy(),\n # tf.distribute.experimental.MultiWorkerMirroredStrategy). Some libraries is\n # performing such check.\n return isinstance(instance, CollectiveAllReduceStrategy)\n\n\n@tf_export(\"distribute.experimental.MultiWorkerMirroredStrategy\", v1=[])\nclass _CollectiveAllReduceStrategyExperimental(\n CollectiveAllReduceStrategy,\n metaclass=_CollectiveAllReduceStrategyExperimentalMeta):\n\n __doc__ = CollectiveAllReduceStrategy.__doc__\n\n @deprecation.deprecated(\n None, \"use distribute.MultiWorkerMirroredStrategy instead\")\n def __init__(self,\n communication=collective_util.CommunicationImplementation.AUTO,\n cluster_resolver=None):\n \"\"\"Creates the strategy.\n\n Args:\n communication: optional\n `tf.distribute.experimental.CommunicationImplementation`. This is a hint\n on the preferred collective communication implementation. Possible\n values include `AUTO`, `RING`, and `NCCL`.\n cluster_resolver: optional\n `tf.distribute.cluster_resolver.ClusterResolver`. 
If `None`,\n        `tf.distribute.cluster_resolver.TFConfigClusterResolver` is used.\n    \"\"\"\n    communication_options = collective_util.Options(\n        implementation=communication)\n    super(_CollectiveAllReduceStrategyExperimental,\n          self).__init__(cluster_resolver, communication_options)\n\n  @classmethod\n  def _from_local_devices(\n      cls,\n      devices,\n      communication=collective_util.CommunicationImplementation.AUTO):\n    \"\"\"A convenience method to create an object with a list of devices.\"\"\"\n    obj = cls(communication)\n    obj.extended._initialize_local(TFConfigClusterResolver(), devices=devices)  # pylint: disable=protected-access\n    return obj\n\n\n_CollectiveAllReduceStrategyExperimental.__name__ = CollectiveAllReduceStrategy.__name__\n\n\n@tf_export(v1=[\"distribute.experimental.MultiWorkerMirroredStrategy\"])  # pylint: disable=missing-docstring\nclass CollectiveAllReduceStrategyV1(distribute_lib.StrategyV1):\n\n  __doc__ = CollectiveAllReduceStrategy.__doc__\n\n  # The starting number for collective keys. This should only be set in tests.\n  _collective_key_base = 0\n\n  def __init__(self,\n               communication=collective_util.CommunicationImplementation.AUTO,\n               cluster_resolver=None):\n    \"\"\"Initializes the object.\"\"\"\n    communication_options = collective_util.Options(\n        implementation=communication)\n    super(CollectiveAllReduceStrategyV1, self).__init__(\n        CollectiveAllReduceExtended(\n            self,\n            cluster_resolver=cluster_resolver,\n            communication_options=communication_options))\n    distribute_lib.distribution_strategy_gauge.get_cell(\"V1\").set(\n        \"MultiWorkerMirroredStrategy\")\n    # pylint: disable=protected-access\n    distribute_lib.distribution_strategy_replica_gauge.get_cell(\n        \"num_workers\").set(self.extended._num_workers)\n    distribute_lib.distribution_strategy_replica_gauge.get_cell(\n        \"num_gpu_per_worker\").set(self.extended._num_gpus_per_worker)\n\n\nclass CollectiveAllReduceExtended(mirrored_strategy.MirroredExtended):\n  \"\"\"Implementation of CollectiveAllReduceStrategy.\"\"\"\n\n  # Whether to periodically check the health of the cluster. If any worker is\n  # not reachable, collectives are aborted and the user program should get a\n  # tf.errors.UnavailableError. A restart is required to recover.\n  _enable_check_health = True\n  # Check health interval in seconds.\n  _check_health_interval = 30\n  # Timeout in seconds for the first check health. 
The first check health needs\n # to wait for cluster, which may make a longer time.\n _check_health_initial_timeout = 0\n # Times to retry before considering the peer is down.\n _check_health_retry_limit = 3\n # Timeout in seconds the each check health.\n _check_health_timeout = 10\n\n def __init__(self, container_strategy, cluster_resolver,\n communication_options):\n if not isinstance(communication_options, collective_util.Options):\n raise ValueError(\"communication_options must be an instance of \"\n \"tf.distribute.experimental.CommunicationOptions\")\n self._cluster_resolver = cluster_resolver or TFConfigClusterResolver()\n if not isinstance(self._cluster_resolver, ClusterResolver):\n raise ValueError(\"cluster_resolver must be an instance of \"\n \"tf.distribute.cluster_resolver.ClusterResolver\")\n distribute_lib.StrategyExtendedV1.__init__(self, container_strategy)\n self._communication_options = communication_options\n self._collective_key_base = container_strategy._collective_key_base # pylint: disable=protected-access\n self._initialize_strategy(self._cluster_resolver)\n self._cfer_fn_cache = weakref.WeakKeyDictionary()\n self.experimental_enable_get_next_as_optional = True\n assert isinstance(self._cross_device_ops,\n cross_device_ops_lib.CollectiveAllReduce)\n\n def _use_merge_call(self):\n \"\"\"XLA is not supported for multi-worker strategy.\"\"\"\n return True\n\n def _initialize_strategy(self, cluster_resolver):\n if cluster_resolver.cluster_spec().as_dict():\n self._initialize_multi_worker(cluster_resolver)\n else:\n self._initialize_local(cluster_resolver)\n\n def _initialize_local(self, cluster_resolver, devices=None):\n \"\"\"Initializes the object for local training.\"\"\"\n self._is_chief = True\n self._num_workers = 1\n\n if ops.executing_eagerly_outside_functions():\n try:\n context.context().configure_collective_ops(\n scoped_allocator_enabled_ops=(\"CollectiveReduce\",))\n except RuntimeError:\n logging.warning(\"Collective ops is not configured at program startup. \"\n \"Some performance features may not be enabled.\")\n self._collective_ops_configured = True\n\n # TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in\n # some cases.\n if isinstance(cluster_resolver, TFConfigClusterResolver):\n num_gpus = context.num_gpus()\n else:\n num_gpus = cluster_resolver.num_accelerators().get(\"GPU\", 0)\n\n if devices:\n local_devices = devices\n else:\n if num_gpus:\n local_devices = tuple(\"/device:GPU:%d\" % i for i in range(num_gpus))\n else:\n local_devices = (\"/device:CPU:0\",)\n\n self._worker_device = device_util.canonicalize(\"/device:CPU:0\")\n self._host_input_device = numpy_dataset.SingleDevice(self._worker_device)\n\n self._collective_keys = cross_device_utils.CollectiveKeys(\n group_key_start=1 + self._collective_key_base)\n self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(\n devices=local_devices,\n group_size=len(local_devices),\n collective_keys=self._collective_keys)\n # CrossDeviceOps for per host tensors.\n self._host_cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(\n devices=[self._worker_device],\n group_size=self._num_workers,\n collective_keys=self._collective_keys)\n super(CollectiveAllReduceExtended, self)._initialize_single_worker(\n local_devices)\n\n self._cluster_spec = None\n self._task_type = None\n self._task_id = None\n self._id_in_cluster = 0\n\n # This is a mark to tell whether we are running with standalone client or\n # independent worker. 
Right now with standalone client, strategy object is\n # created as local strategy and then turn into multi-worker strategy via\n # configure call.\n self._local_or_standalone_client_mode = True\n\n # Save the num_gpus_per_worker and rpc_layer for configure method.\n self._num_gpus_per_worker = num_gpus\n self._rpc_layer = cluster_resolver.rpc_layer\n self._warn_nccl_no_gpu()\n\n logging.info(\n \"Single-worker MultiWorkerMirroredStrategy with local_devices \"\n \"= %r, communication = %s\", local_devices,\n self._communication_options.implementation)\n\n def _initialize_multi_worker(self, cluster_resolver):\n \"\"\"Initializes the object for multi-worker training.\"\"\"\n cluster_spec = multi_worker_util.normalize_cluster_spec(\n cluster_resolver.cluster_spec())\n task_type = cluster_resolver.task_type\n task_id = cluster_resolver.task_id\n if task_type is None or task_id is None:\n raise ValueError(\"When `cluster_spec` is given, you must also specify \"\n \"`task_type` and `task_id`.\")\n self._cluster_spec = cluster_spec\n self._task_type = task_type\n self._task_id = task_id\n self._id_in_cluster = multi_worker_util.id_in_cluster(\n self._cluster_spec, self._task_type, self._task_id)\n\n self._num_workers = multi_worker_util.worker_count(cluster_spec, task_type)\n if not self._num_workers:\n raise ValueError(\"No `worker`, `chief` or `evaluator` tasks can be found \"\n \"in `cluster_spec`.\")\n\n self._is_chief = multi_worker_util.is_chief(cluster_spec, task_type,\n task_id)\n\n self._worker_device = \"/job:%s/task:%d\" % (task_type, task_id)\n self._host_input_device = numpy_dataset.SingleDevice(self._worker_device)\n\n if (ops.executing_eagerly_outside_functions() and\n not getattr(self, \"_local_or_standalone_client_mode\", False)):\n context.context().configure_collective_ops(\n collective_leader=multi_worker_util.collective_leader(\n cluster_spec, task_type, task_id),\n scoped_allocator_enabled_ops=(\"CollectiveReduce\",),\n device_filters=(\"/job:%s/task:%d\" % (task_type, task_id),))\n self._collective_ops_configured = True\n\n # Starting a std server in eager mode and in independent worker mode.\n if (context.executing_eagerly() and\n not getattr(self, \"_std_server_started\", False) and\n not getattr(self, \"_local_or_standalone_client_mode\", False)):\n # Checking _local_or_standalone_client_mode as well because we should not\n # create the std server in standalone client mode.\n config_proto = copy.deepcopy(context.context().config)\n config_proto = self._update_config_proto(config_proto)\n\n # If coordination service is enabled, use its internal heartbeat to detect\n # peer failures instead of the Python-level health check.\n if config_proto.experimental.coordination_service:\n self._enable_check_health = False\n\n if hasattr(cluster_resolver, \"port\"):\n port = cluster_resolver.port\n else:\n port = 0\n server_def = tensorflow_server_pb2.ServerDef(\n cluster=cluster_spec.as_cluster_def(),\n default_session_config=config_proto,\n job_name=task_type,\n task_index=task_id,\n protocol=cluster_resolver.rpc_layer or \"grpc\",\n port=port)\n context.context().enable_collective_ops(server_def)\n self._std_server_started = True\n # The `ensure_initialized` is needed before calling\n # `context.context().devices()`.\n context.context().ensure_initialized()\n logging.info(\n \"Enabled multi-worker collective ops with available devices: %r\",\n context.context().devices())\n\n # TODO(yuefengz): The `num_gpus` is only for this particular task. 
It\n # assumes all workers have the same number of GPUs. We should remove this\n # assumption by querying all tasks for their numbers of GPUs.\n # TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in\n # some cases.\n if isinstance(cluster_resolver, TFConfigClusterResolver):\n num_gpus = context.num_gpus()\n else:\n num_gpus = cluster_resolver.num_accelerators().get(\"GPU\", 0)\n\n if num_gpus:\n local_devices = tuple(\"%s/device:GPU:%d\" % (self._worker_device, i)\n for i in range(num_gpus))\n else:\n local_devices = (self._worker_device,)\n\n self._collective_keys = cross_device_utils.CollectiveKeys(\n group_key_start=1 + self._collective_key_base)\n self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(\n devices=local_devices,\n group_size=len(local_devices) * self._num_workers,\n collective_keys=self._collective_keys)\n # CrossDeviceOps for per host tensors.\n self._host_cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(\n devices=[self._worker_device],\n group_size=self._num_workers,\n collective_keys=self._collective_keys)\n super(CollectiveAllReduceExtended, self)._initialize_single_worker(\n local_devices)\n\n # Add a default device so that ops without specified devices will not end up\n # on other workers.\n self._default_device = \"/job:%s/task:%d\" % (task_type, task_id)\n\n # Save the num_gpus_per_worker and rpc_layer for configure method.\n self._num_gpus_per_worker = num_gpus\n self._rpc_layer = cluster_resolver.rpc_layer\n self._warn_nccl_no_gpu()\n\n if self._enable_check_health and context.executing_eagerly():\n self._start_check_health_thread()\n else:\n logging.info(\"Check health not enabled.\")\n\n logging.info(\n \"MultiWorkerMirroredStrategy with cluster_spec = %r, task_type = %r, \"\n \"task_id = %r, num_workers = %r, local_devices = %r, \"\n \"communication = %s\", cluster_spec.as_dict(), task_type, task_id,\n self._num_workers, local_devices,\n self._communication_options.implementation)\n\n def __del__(self):\n self._stop_check_health_thread()\n\n def _input_workers_with_options(self, options=None):\n host_device = device_util.get_host_for_device(self._worker_device)\n if not options or options.experimental_fetch_to_device:\n return input_lib.InputWorkers([(host_device, self.worker_devices)])\n else:\n return input_lib.InputWorkers([(\n host_device,\n [device_util.get_host_for_device(worker) for worker in\n self.worker_devices])])\n\n @property\n def _input_workers(self):\n return self._input_workers_with_options()\n\n def _get_variable_creator_initial_value(self,\n replica_id,\n device,\n primary_var,\n **kwargs):\n if replica_id == 0: # First replica on each worker.\n assert device is not None\n assert primary_var is None\n\n def initial_value_fn(): # pylint: disable=g-missing-docstring\n # Only the first device participates in the broadcast of initial values.\n group_key = self._collective_keys.get_group_key([device])\n group_size = self._num_workers\n collective_instance_key = (\n self._collective_keys.get_instance_key(group_key, device))\n\n with ops.device(device):\n initial_value = kwargs[\"initial_value\"]\n if callable(initial_value):\n initial_value = initial_value()\n if isinstance(initial_value, base.CheckpointInitialValue):\n initial_value = initial_value.wrapped_value\n assert not callable(initial_value)\n initial_value = ops.convert_to_tensor(\n initial_value, dtype=kwargs.get(\"dtype\", None))\n\n if self._num_workers > 1:\n if self._is_chief:\n bcast_send = collective_ops.broadcast_send(\n 
initial_value, initial_value.shape, initial_value.dtype,\n group_size, group_key, collective_instance_key)\n with ops.control_dependencies([bcast_send]):\n return array_ops.identity(initial_value)\n else:\n return collective_ops.broadcast_recv(initial_value.shape,\n initial_value.dtype,\n group_size, group_key,\n collective_instance_key)\n return initial_value\n\n return initial_value_fn\n else:\n return super(CollectiveAllReduceExtended,\n self)._get_variable_creator_initial_value(\n replica_id=replica_id,\n device=device,\n primary_var=primary_var,\n **kwargs)\n\n def _make_input_context(self):\n input_context = distribute_lib.InputContext(\n num_input_pipelines=self._num_workers,\n input_pipeline_id=self._id_in_cluster,\n num_replicas_in_sync=self._num_replicas_in_sync)\n return input_context\n\n def _experimental_distribute_dataset(self, dataset, options):\n if (options and options.experimental_replication_mode ==\n distribute_lib.InputReplicationMode.PER_REPLICA):\n raise NotImplementedError(\n \"InputReplicationMode.PER_REPLICA \"\n \"is only supported in \"\n \"`distribute_datasets_from_function` \"\n \"of tf.distribute.MirroredStrategy\"\n )\n input_context = self._make_input_context()\n return input_lib.get_distributed_dataset(\n dataset,\n self._input_workers_with_options(options),\n self._container_strategy(),\n num_replicas_in_sync=self._num_replicas_in_sync,\n input_context=input_context,\n options=options)\n\n def _distribute_datasets_from_function(self, dataset_fn, options):\n if (options and options.experimental_replication_mode ==\n distribute_lib.InputReplicationMode.PER_REPLICA):\n raise NotImplementedError(\n \"InputReplicationMode.PER_REPLICA \"\n \"is only supported in \"\n \"`distribute_datasets_from_function` \"\n \"of tf.distribute.MirroredStrategy\")\n input_context = self._make_input_context()\n return input_lib.get_distributed_datasets_from_function(\n dataset_fn=dataset_fn,\n input_workers=self._input_workers_with_options(options),\n input_contexts=[input_context],\n strategy=self._container_strategy(),\n options=options)\n\n def _experimental_distribute_values_from_function(self, value_fn):\n per_replica_values = []\n num_local_replicas = len(self.worker_devices)\n for local_replica_id in range(num_local_replicas):\n replica_id = (self._id_in_cluster * num_local_replicas +\n local_replica_id)\n value_context = distribute_lib.ValueContext(\n replica_id, self._num_replicas_in_sync)\n per_replica_values.append(value_fn(value_context))\n return distribute_utils.regroup(per_replica_values, always_wrap=True)\n\n def _make_dataset_iterator(self, dataset):\n \"\"\"Distributes the dataset to each local GPU.\"\"\"\n input_context = self._make_input_context()\n return input_lib.DatasetIterator(\n dataset,\n self._input_workers,\n self._container_strategy(),\n num_replicas_in_sync=self._num_replicas_in_sync,\n input_context=input_context)\n\n def _make_input_fn_iterator(\n self,\n input_fn,\n replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):\n \"\"\"Distributes the input function to each local GPU.\"\"\"\n input_context = self._make_input_context()\n return input_lib.InputFunctionIterator(input_fn, self._input_workers,\n [input_context],\n self._container_strategy())\n\n def _configure(self,\n session_config=None,\n cluster_spec=None,\n task_type=None,\n task_id=None):\n \"\"\"Configures the object.\n\n Args:\n session_config: a `tf.compat.v1.ConfigProto`\n cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the\n cluster configurations.\n 
task_type: the current task type, such as \"worker\".\n task_id: the current task id.\n\n Raises:\n ValueError: if `task_type` is not in the `cluster_spec`.\n \"\"\"\n if cluster_spec:\n # Use the num_gpus_per_worker recorded in constructor since _configure\n # doesn't take num_gpus.\n cluster_resolver = SimpleClusterResolver(\n cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec),\n task_type=task_type,\n task_id=task_id,\n num_accelerators={\"GPU\": self._num_gpus_per_worker},\n rpc_layer=self._rpc_layer)\n self._initialize_multi_worker(cluster_resolver)\n assert isinstance(self._cross_device_ops,\n cross_device_ops_lib.CollectiveAllReduce)\n\n if session_config:\n session_config.CopyFrom(self._update_config_proto(session_config))\n\n def _update_config_proto(self, config_proto):\n updated_config = copy.deepcopy(config_proto)\n # Enable the scoped allocator optimization for CollectiveOps. This\n # optimization converts many small all-reduces into fewer larger\n # all-reduces.\n rewrite_options = updated_config.graph_options.rewrite_options\n rewrite_options.scoped_allocator_optimization = (\n rewriter_config_pb2.RewriterConfig.ON)\n # We turn on ScopedAllocator only for CollectiveReduce op, i.e. enable_op =\n # [\"CollectiveReduce\"]. Since we can't assign to a repeated proto field, we\n # clear and then append.\n del rewrite_options.scoped_allocator_opts.enable_op[:]\n rewrite_options.scoped_allocator_opts.enable_op.append(\"CollectiveReduce\")\n\n if (not ops.executing_eagerly_outside_functions() and\n self._communication_options.implementation ==\n collective_util.CommunicationImplementation.NCCL):\n updated_config.experimental.collective_nccl = True\n\n if not self._cluster_spec:\n return updated_config\n\n assert self._task_type\n assert self._task_id is not None\n\n # Collective group leader is needed for collective ops to coordinate\n # workers.\n updated_config.experimental.collective_group_leader = (\n multi_worker_util.collective_leader(self._cluster_spec, self._task_type,\n self._task_id))\n\n # The device filters prevent communication between workers.\n del updated_config.device_filters[:]\n updated_config.device_filters.append(\n \"/job:%s/task:%d\" % (self._task_type, self._task_id))\n\n return updated_config\n\n def _get_cross_device_ops(self, value):\n # CollectiveAllReduce works on a predefined set of devices. In most cases\n # they should be the compute devices, but certain use cases may reduce host\n # tensors as well (e.g. early stopping). We infer the cross_device_ops to\n # use based on the number of devices, since inputs don't always have device\n # annotations. 
The compute devices one is preferred since we can potentially\n # leverage NCCL.\n if isinstance(value, values.DistributedValues):\n num_devices = len(value._values) # pylint: disable=protected-access\n else:\n num_devices = 1\n if num_devices == len(self.worker_devices):\n return self._cross_device_ops\n else:\n return self._host_cross_device_ops\n\n def _gather_to_implementation(self, value, destinations, axis, options):\n return self._get_cross_device_ops(value)._gather( # pylint: disable=protected-access\n value,\n destinations=destinations,\n axis=axis,\n options=options)\n\n def _reduce_to(self, reduce_op, value, destinations, options):\n if (isinstance(value, values.Mirrored) and\n reduce_op == reduce_util.ReduceOp.MEAN):\n return value\n assert not isinstance(value, values.Mirrored)\n\n if (isinstance(value, values.DistributedValues) and\n len(self.worker_devices) == 1):\n value = value.values[0]\n\n # When there are multiple workers, we need to reduce across workers using\n # collective ops.\n if (not isinstance(value, values.DistributedValues) and\n self._num_workers == 1):\n # This function handles reducing values that are not PerReplica or\n # Mirrored values. For example, the same value could be present on all\n # replicas in which case `value` would be a single value or value could\n # be 0.\n return cross_device_ops_lib.reduce_non_distributed_value(\n reduce_op, value, destinations, len(self.worker_devices))\n return self._get_cross_device_ops(value).reduce(\n reduce_op,\n value,\n destinations=destinations,\n options=self._communication_options.merge(options))\n\n def _replica_ctx_all_reduce(self, reduce_op, value, options=None):\n \"\"\"Implements `StrategyExtendedV2._replica_ctx_all_reduce`.\"\"\"\n # This implementation avoids using `merge_call` and just launches collective\n # ops in one replica.\n if options is None:\n options = collective_util.Options()\n\n if context.executing_eagerly():\n # In eager mode, falls back to the default implemenation that uses\n # `merge_call`. Replica functions are running sequentially in eager mode,\n # and due to the blocking nature of collective ops, execution will hang if\n # collective ops are to be launched sequentially.\n return super()._replica_ctx_all_reduce(reduce_op, value, options)\n\n replica_context = ds_context.get_replica_context()\n assert replica_context, (\n \"`StrategyExtended._replica_ctx_all_reduce` must be called in a \"\n \"replica context\")\n return self._cross_device_ops._all_reduce( # pylint: disable=protected-access\n reduce_op,\n value,\n replica_context._replica_id, # pylint: disable=protected-access\n options)\n\n def _check_health(self):\n while True:\n if self._check_health_thread_should_stop.is_set():\n return\n for job in self._cluster_spec.jobs:\n for task_id in range(self._cluster_spec.num_tasks(job)):\n peer = \"/job:{}/replica:0/task:{}\".format(job, task_id)\n attempts = 0\n while True:\n attempts += 1\n try:\n context.context().check_collective_ops_peer_health(\n peer, timeout_in_ms=self._check_health_timeout * 1000)\n # If check_collective_ops_peer_health doesn't raise an Exception,\n # the peer is healthy.\n break\n except (errors.UnavailableError, errors.FailedPreconditionError,\n errors.DeadlineExceededError) as e:\n # TODO(b/151232436): Always raise UnavailableError when a peer\n # fails. Now there could be many kinds of errors:\n # - Unavailable: when the peer is not reachable, e.g. 
it's down.\n # - FailedPrecondition: when the peer has restarted.\n if attempts < self._check_health_retry_limit:\n logging.warning(\"%s seems down, retrying %d/%d\", peer, attempts,\n self._check_health_retry_limit)\n continue\n logging.error(\n \"Cluster check alive failed, %s is down, \"\n \"aborting collectives: %s\", peer, e)\n context.context().abort_collective_ops(\n errors.UNAVAILABLE,\n \"cluster check alive failed, {} is down\".format(peer))\n return\n except Exception as e: # pylint: disable=broad-except\n logging.error(\"Unexpected exception in check alive: %s\", e)\n context.context().abort_collective_ops(\n errors.INTERNAL,\n \"unexecpted exception in check alive: %s\" % e)\n return\n time.sleep(self._check_health_interval)\n\n def _start_check_health_thread(self):\n # Use a dummy all-reduce as a barrier to wait for all workers to be up,\n # otherwise the check health may fail immediately.\n\n # Use array_ops.identity to create the dummy tensor so that we have a new\n # Tensor. If we use constant it may be a cached from on a /job:localhost\n # device, which will cause some code that relies on tensor.device to error.\n #\n # TODO(b/151232436): change to an explicit barrier if we have it.\n dummy_value = array_ops.identity([])\n logging.info(\"Waiting for the cluster, timeout = %s\",\n self._check_health_initial_timeout or \"inf\")\n try:\n self._host_cross_device_ops.reduce(\n reduce_util.ReduceOp.SUM,\n dummy_value,\n dummy_value,\n options=collective_util.Options(\n timeout_seconds=self._check_health_initial_timeout,\n implementation=collective_util.CommunicationImplementation.RING))\n if context.is_async():\n context.async_wait()\n except errors.DeadlineExceededError:\n raise RuntimeError(\n \"Timeout waiting for the cluster, timeout is %d seconds\" %\n self._check_health_initial_timeout)\n logging.info(\"Cluster is ready.\")\n self._check_health_thread_should_stop = threading.Event()\n # Start the thread as daemon to avoid it blocking the program from exiting.\n # We try best to shutdown the thread but __del__ is not guaranteed to be\n # called when program exists.\n self._check_health_thread = threading.Thread(\n target=self._check_health,\n daemon=True)\n self._check_health_thread.start()\n\n def _stop_check_health_thread(self):\n if getattr(self, \"_check_health_thread\", None):\n logging.info(\"stopping check health thread\")\n self._check_health_thread_should_stop.set()\n self._check_health_thread.join()\n self._check_health_thread = None\n logging.info(\"check health thread stopped\")\n\n def _warn_nccl_no_gpu(self):\n if ((self._communication_options.implementation ==\n collective_util.CommunicationImplementation.NCCL) and\n self._num_gpus_per_worker == 0):\n logging.warning(\"Enabled NCCL communication but no GPUs detected/\"\n \"specified.\")\n\n def _in_multi_worker_mode(self):\n \"\"\"Whether this strategy indicates working in multi-worker settings.\"\"\"\n return self._num_workers > 1\n\n @property\n def experimental_between_graph(self):\n return True\n\n @property\n def experimental_should_init(self):\n return True\n\n @property\n def should_checkpoint(self):\n return self._is_chief\n\n @property\n def should_save_summary(self):\n return self._is_chief\n\n @property\n def _num_replicas_in_sync(self):\n return len(self.worker_devices) * self._num_workers\n\n # TODO(priyag): Delete this once all strategies use global batch size.\n @property\n def _global_batch_size(self):\n \"\"\"`make_dataset_iterator` and `make_numpy_iterator` use global batch size.\n\n 
`make_input_fn_iterator` assumes per-replica batching.\n\n Returns:\n Boolean.\n \"\"\"\n return True\n\n def _get_replica_id_in_sync_group(self, replica_id):\n return self._id_in_cluster * len(self.worker_devices) + replica_id\n\n def _get_local_replica_id(self, replica_id_in_sync_group):\n return (replica_id_in_sync_group -\n self._id_in_cluster * len(self.worker_devices))\n\n def __deepcopy__(self, memo):\n # We check the check health thread instead of whether we are in eager mode\n # to limit the backward incompatibility.\n if hasattr(self, \"_check_health_thread\"):\n raise ValueError(\n \"MultiWorkerMirroredStrategy cannot be deep copied in eager mode. \"\n \"If you're using Estimator and see this error message, call \"\n \"tf.compat.v1.disable_eager_execution() at the beginning of your \"\n \"program\")\n # Otherwise, do a regular deepcopy.\n cls = self.__class__\n result = cls.__new__(cls)\n memo[id(self)] = result\n for k, v in self.__dict__.items():\n setattr(result, k, copy.deepcopy(v, memo))\n return result\n",
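The file above documents `tf.distribute.MultiWorkerMirroredStrategy` largely through its docstrings. The following is a small, self-contained sketch of that documented public usage, not code taken from the file itself, assuming TensorFlow 2.x. With no `TF_CONFIG` set, `TFConfigClusterResolver` reports an empty cluster spec and the strategy falls back to the single-worker path (`_initialize_local`), so the sketch runs on one machine; the commented-out `TF_CONFIG` shows what worker 0 of a two-worker cluster would export. The model, data shapes, and hyperparameters are illustrative only.

# Hedged sketch: minimal custom training step with MultiWorkerMirroredStrategy.
# Assumes TensorFlow 2.x; the model and data below are made up for illustration.
import json
import os

import tensorflow as tf

# On a real cluster, each worker exports its own TF_CONFIG before the strategy
# is created, e.g. for worker 0 of a two-worker cluster:
# os.environ["TF_CONFIG"] = json.dumps({
#     "cluster": {"worker": ["host0:12345", "host1:23456"]},
#     "task": {"type": "worker", "index": 0},
# })

# Create the strategy early, before building the model or data (see the
# "Known Issues" note about eager mode in the docstring above).
options = tf.distribute.experimental.CommunicationOptions(
    implementation=tf.distribute.experimental.CommunicationImplementation.AUTO)
strategy = tf.distribute.MultiWorkerMirroredStrategy(
    communication_options=options)

with strategy.scope():
  # Variables created under the scope are replicated and kept in sync.
  model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
  optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)


@tf.function
def train_step(features, labels):
  def step_fn(x, y):
    with tf.GradientTape() as tape:
      loss = tf.reduce_mean(tf.square(model(x, training=True) - y))
    grads = tape.gradient(loss, model.trainable_variables)
    # apply_gradients aggregates gradients across replicas and workers.
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss

  per_replica_loss = strategy.run(step_fn, args=(features, labels))
  return strategy.reduce(
      tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=None)


features = tf.random.normal((8, 4))
labels = tf.random.normal((8, 1))
print("loss:", float(train_step(features, labels)))

In a real multi-worker run the per-worker inputs would normally come from `strategy.distribute_datasets_from_function`, as shown in the Keras example in the docstring above, rather than plain tensors.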
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Python API for executing a tf.data.Dataset using a tf.data service.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport enum\nimport functools\nimport six\n\nfrom tensorflow.core.protobuf import data_service_pb2\nfrom tensorflow.python import tf2\nfrom tensorflow.python.compat import compat\nfrom tensorflow.python.data.experimental.ops import compression_ops\nfrom tensorflow.python.data.experimental.service import _pywrap_server_lib\nfrom tensorflow.python.data.experimental.service import _pywrap_utils\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.ops import options as options_lib\nfrom tensorflow.python.data.ops.options import AutoShardPolicy\nfrom tensorflow.python.data.ops.options import ExternalStatePolicy\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import gen_experimental_dataset_ops\nfrom tensorflow.python.ops import string_ops\nfrom tensorflow.python.util import lazy_loader\nfrom tensorflow.python.util.tf_export import tf_export\n\nCOMPRESSION_AUTO = \"AUTO\"\nCOMPRESSION_NONE = None\n_PARALLEL_EPOCHS = \"parallel_epochs\"\n_DISTRIBUTED_EPOCH = \"distributed_epoch\"\n\n# TODO(b/176933539): Use the regular import.\nnested_structure_coder = lazy_loader.LazyLoader(\n \"nested_structure_coder\", globals(),\n \"tensorflow.python.saved_model.nested_structure_coder\")\n\n\n@tf_export(\"data.experimental.service.ShardingPolicy\")\nclass ShardingPolicy(enum.IntEnum):\n \"\"\"Specifies how to shard data among tf.data service workers.\n\n OFF: No sharding will be performed. Each worker produces the entire dataset\n without any sharding. With this mode, the best practice is to shuffle the\n dataset nondeterministically so that workers process the dataset in different\n orders. If workers are restarted or join the cluster mid-job, they will begin\n processing the dataset from the beginning.\n\n DYNAMIC: The input dataset is dynamically split among workers at runtime. Each\n worker gets the next split when it reads data from the dispatcher. Data is\n produced non-deterministically in this mode. Dynamic sharding works well with\n varying-sized tf.data service clusters, e.g., when you need to auto-scale your\n workers. Dynamic sharding provides at-most once visitation guarantees. No\n examples will be repeated, but some may be missed if a tf.data service worker\n gets restarted while processing a file.\n\n The following are static sharding policies. The semantics are similar to\n `tf.data.experimental.AutoShardPolicy`. 
These policies require:\n * The tf.data service cluster is configured with a fixed list of workers\n in DispatcherConfig.\n * Each client only reads from the local tf.data service worker.\n\n If a worker is restarted while performing static sharding, the worker will\n begin processing its shard again from the beginning.\n\n FILE: Shards by input files (i.e. each worker will get a fixed set of files to\n process). When this option is selected, make sure that there is at least as\n many files as workers. If there are fewer input files than workers, a runtime\n error will be raised.\n\n DATA: Shards by elements produced by the dataset. Each worker will process the\n whole dataset and discard the portion that is not for itself. Note that for\n this mode to correctly partition the dataset elements, the dataset needs to\n produce elements in a deterministic order.\n\n FILE_OR_DATA: Attempts FILE-based sharding, falling back to DATA-based\n sharding on failure.\n\n HINT: Looks for the presence of `shard(SHARD_HINT, ...)` which is treated as a\n placeholder to replace with `shard(num_workers, worker_index)`.\n \"\"\"\n\n # LINT.IfChange(tf_data_service_sharding_policy)\n OFF = 0\n DYNAMIC = 1\n FILE = 2\n DATA = 3\n FILE_OR_DATA = 4\n HINT = 5\n # LINT.ThenChange()\n\n def _to_proto(self):\n \"\"\"Converts the policy to ProcessingModeDef proto enum.\"\"\"\n\n if self == ShardingPolicy.OFF:\n return data_service_pb2.ProcessingModeDef.OFF\n if self == ShardingPolicy.DYNAMIC:\n return data_service_pb2.ProcessingModeDef.DYNAMIC\n if self == ShardingPolicy.FILE:\n return data_service_pb2.ProcessingModeDef.FILE\n if self == ShardingPolicy.DATA:\n return data_service_pb2.ProcessingModeDef.DATA\n if self == ShardingPolicy.FILE_OR_DATA:\n return data_service_pb2.ProcessingModeDef.FILE_OR_DATA\n if self == ShardingPolicy.HINT:\n return data_service_pb2.ProcessingModeDef.HINT\n raise ValueError(\n f\"Unable to convert sharding policy {self!r} to proto. Please verify \"\n \"the policy mapping.\")\n\n\ndef _get_validated_sharding_policy(processing_mode):\n \"\"\"Validates `processing_mode` and converts it to ShardingPolicy.\"\"\"\n\n if isinstance(processing_mode, ShardingPolicy):\n return processing_mode\n if compat.forward_compatible(2021, 8, 24):\n if processing_mode == _PARALLEL_EPOCHS:\n return ShardingPolicy.OFF\n if processing_mode == _DISTRIBUTED_EPOCH:\n return ShardingPolicy.DYNAMIC\n elif processing_mode in [_PARALLEL_EPOCHS, _DISTRIBUTED_EPOCH]:\n return processing_mode\n\n raise ValueError(\n \"tf.data service processing mode should be a ShardingPolicy, \"\n \"`\\\"parallel_epochs\\\"`, or `\\\"distributed_epoch\\\"`. Got \"\n f\"{processing_mode!r}.\")\n\n\ndef _serialize(processing_mode):\n \"\"\"Serializes `processing_mode`.\"\"\"\n\n processing_mode = _get_validated_sharding_policy(processing_mode)\n if isinstance(processing_mode, ShardingPolicy):\n # pylint: disable=protected-access\n processing_mode_def = data_service_pb2.ProcessingModeDef(\n sharding_policy=_get_validated_sharding_policy(\n processing_mode)._to_proto())\n return processing_mode_def.SerializeToString()\n if processing_mode in [_PARALLEL_EPOCHS, _DISTRIBUTED_EPOCH]:\n return processing_mode\n\n raise ValueError(\n \"tf.data service processing mode should be a ShardingPolicy, \"\n \"`\\\"parallel_epochs\\\"`, or `\\\"distributed_epoch\\\"`. 
Got \"\n f\"{processing_mode!r}.\")\n\n\ndef _validate_job_name(job_name):\n if job_name is None:\n return\n if not isinstance(job_name, six.string_types):\n raise ValueError(\"job_name must be a string, but job_name was of type \"\n \"{0}. job_name={1}\".format(type(job_name), job_name))\n if not job_name:\n raise ValueError(\"job_name must not be empty\")\n\n\nclass _DataServiceDatasetV2(dataset_ops.DatasetSource):\n \"\"\"A `Dataset` that reads elements from the tf.data service.\"\"\"\n\n def __init__(self,\n dataset_id,\n processing_mode,\n address,\n element_spec,\n protocol,\n data_transfer_protocol,\n job_name=None,\n consumer_index=None,\n num_consumers=None,\n max_outstanding_requests=None,\n task_refresh_interval_hint_ms=None,\n target_workers=\"AUTO\"):\n \"\"\"Constructs a _DataServiceDatasetV2.\n\n Args:\n dataset_id: The dataset id for the dataset to read from.\n processing_mode: A `tf.data.experimental.service.ShardingPolicy`\n specifying how to shard the dataset among tf.data workers. See\n `tf.data.experimental.service.ShardingPolicy` for details. For backwards\n compatibility, `processing_mode` may also be set to the strings\n `\"parallel_epochs\"` or `\"distributed_epoch\"`, which are respectively\n equivalent to `ShardingPolicy.OFF` and `ShardingPolicy.DYNAMIC`.\n address: The tf.data service address, e.g. \"localhost:5000\".\n element_spec: The dataset element spec for the dataset to read from.\n protocol: The protocol to use for communicating with the tf.data service,\n e.g. \"grpc\".\n data_transfer_protocol: (Optional.) The protocol to use for transferring\n data with the tf.data service. By default, data is transferred using\n gRPC.\n job_name: (Optional.) The name of the job. If provided, it must be a\n non-empty string or Tensor. This argument makes it possible\n for multiple datasets to share the same job. The default behavior is\n that the dataset creates anonymous, exclusively owned jobs.\n consumer_index: (Optional.) The index of the consumer in the range from\n `0` to `num_consumers`. Must be specified alongside `num_consumers`.\n When specified, consumers will read from the job in a strict round-robin\n order, instead of the default first-come-first-served order.\n num_consumers: (Optional.) The number of consumers which will consume from\n the job. Must be specified alongside `consumer_index`. When specified,\n consumers will read from the job in a strict round-robin order, instead\n of the default first-come-first-served order. When `num_consumers` is\n specified, the dataset must have infinite cardinality to prevent a\n producer from running out of data early and causing consumers to go out\n of sync.\n max_outstanding_requests: (Optional.) A limit on how many elements may be\n requested at the same time. You can use this option to control the\n amount of memory used, since `distribute` won't use more than\n `element_size` * `max_outstanding_requests` of memory.\n task_refresh_interval_hint_ms: (Optional.) A hint for how often to query\n the dispatcher for task changes.\n target_workers: (Optional.) Which workers to read from. If `\"AUTO\"`,\n tf.data runtime decides which workers to read from. If `\"ANY\"`, reads\n from any tf.data service workers. If `\"LOCAL\"`, only reads from local\n in-processs tf.data service workers. `\"AUTO\"` works well for most cases,\n while users can specify other targets. For example, `\"LOCAL\"` helps\n avoid RPCs and data copy if every TF worker colocates with a tf.data\n service worker. 
Consumers of a shared job must use the same\n        `target_workers`. Defaults to `\"AUTO\"`.\n    \"\"\"\n    processing_mode = _serialize(\n        _get_validated_sharding_policy(processing_mode))\n    if (consumer_index is None) != (num_consumers is None):\n      raise ValueError(\n          \"Must either set both consumer_index and num_consumers, or neither. \"\n          \"consumer_index: {}, num_consumers: {}\".format(\n              consumer_index, num_consumers))\n    if num_consumers is not None and job_name is None:\n      raise ValueError(\"job_name must be set when setting num_consumers\")\n\n    if job_name is None:\n      job_name = \"\"\n    if max_outstanding_requests is None:\n      max_outstanding_requests = dataset_ops.AUTOTUNE\n    if task_refresh_interval_hint_ms is None:\n      task_refresh_interval_hint_ms = dataset_ops.AUTOTUNE\n\n    self._dataset_id = ops.convert_to_tensor(\n        dataset_id, dtype=dtypes.int64, name=\"dataset_id\")\n    self._processing_mode = ops.convert_to_tensor(\n        processing_mode, dtype=dtypes.string, name=\"processing_mode\")\n    self._address = ops.convert_to_tensor(\n        address, dtype=dtypes.string, name=\"address\")\n    self._protocol = ops.convert_to_tensor(\n        protocol, dtype=dtypes.string, name=\"protocol\")\n    self._job_name = ops.convert_to_tensor(\n        job_name, dtype=dtypes.string, name=\"job_name\")\n    self._consumer_index = ops.convert_to_tensor(\n        -1 if consumer_index is None else consumer_index,\n        dtype=dtypes.int64,\n        name=\"consumer_index\")\n    self._num_consumers = ops.convert_to_tensor(\n        -1 if num_consumers is None else num_consumers,\n        dtype=dtypes.int64,\n        name=\"num_consumers\")\n    self._max_outstanding_requests = ops.convert_to_tensor(\n        max_outstanding_requests,\n        dtype=dtypes.int64,\n        name=\"max_outstanding_requests\")\n    self._element_spec = element_spec\n    self._target_workers = target_workers\n\n    compat_kwargs = {}\n    if data_transfer_protocol is not None:\n      compat_kwargs[\"data_transfer_protocol\"] = data_transfer_protocol\n    if compat.forward_compatible(2021, 7, 12) or target_workers != \"AUTO\":\n      compat_kwargs[\"target_workers\"] = target_workers\n\n    variant_tensor = gen_experimental_dataset_ops.data_service_dataset_v2(\n        dataset_id=self._dataset_id,\n        processing_mode=self._processing_mode,\n        address=self._address,\n        protocol=self._protocol,\n        job_name=self._job_name,\n        consumer_index=self._consumer_index,\n        num_consumers=self._num_consumers,\n        max_outstanding_requests=self._max_outstanding_requests,\n        task_refresh_interval_hint_ms=task_refresh_interval_hint_ms,\n        iteration_counter=gen_experimental_dataset_ops.dummy_iteration_counter(\n        ),\n        **compat_kwargs,\n        **self._flat_structure)\n    super(_DataServiceDatasetV2, self).__init__(variant_tensor)\n\n  @property\n  def element_spec(self):\n    return self._element_spec\n\n\nclass _DataServiceDatasetV1(dataset_ops.DatasetV1Adapter):\n  \"\"\"A `Dataset` that executes its input through the tf.data service.\"\"\"\n\n  @functools.wraps(_DataServiceDatasetV2.__init__)\n  def __init__(self, dataset_id, processing_mode, address, element_spec,\n               protocol, data_transfer_protocol, job_name, consumer_index,\n               num_consumers, max_outstanding_requests,\n               task_refresh_interval_hint_ms, target_workers):\n\n    self._wrapped = _DataServiceDatasetV2(\n        dataset_id=dataset_id,\n        processing_mode=processing_mode,\n        address=address,\n        element_spec=element_spec,\n        protocol=protocol,\n        data_transfer_protocol=data_transfer_protocol,\n        job_name=job_name,\n        consumer_index=consumer_index,\n        num_consumers=num_consumers,\n        max_outstanding_requests=max_outstanding_requests,\n        
task_refresh_interval_hint_ms=task_refresh_interval_hint_ms,\n target_workers=target_workers)\n super(_DataServiceDatasetV1, self).__init__(self._wrapped)\n\n\nif tf2.enabled():\n _DataServiceDataset = _DataServiceDatasetV2\nelse:\n _DataServiceDataset = _DataServiceDatasetV1\n\n\ndef _parse_service(service):\n \"\"\"Converts a tf.data service string into a (protocol, address) tuple.\n\n Args:\n service: A string in the format \"protocol://address\" or just \"address\". If\n the string is only an address, the default protocol will be used.\n\n Returns:\n The (protocol, address) tuple\n \"\"\"\n if not isinstance(service, six.string_types):\n raise ValueError(\n \"service must be a string, but service was of type {0}. service={1}\"\n .format(type(service), service))\n if not service:\n raise ValueError(\"service must not be empty\")\n parts = service.split(\"://\")\n if len(parts) == 2:\n protocol, address = parts\n elif len(parts) == 1:\n address = parts[0]\n protocol = _pywrap_utils.TF_DATA_DefaultProtocol()\n else:\n raise ValueError(\"malformed service string has multiple '://': %s\" %\n service)\n # TODO(aaudibert): Considering validating reachability of address here.\n return (protocol, address)\n\n\ndef _decide_compression(compression, data_transfer_protocol):\n if compression == COMPRESSION_AUTO and data_transfer_protocol is not None:\n return COMPRESSION_NONE\n return compression\n\n\ndef _distribute(processing_mode,\n service,\n job_name=None,\n consumer_index=None,\n num_consumers=None,\n max_outstanding_requests=None,\n task_refresh_interval_hint_ms=None,\n data_transfer_protocol=None,\n compression=\"AUTO\",\n target_workers=\"AUTO\"):\n \"\"\"A transformation that moves dataset processing to the tf.data service.\n\n This transformation is similar to `distribute`, but supports additional\n parameters which we do not yet want to add to the public Python API.\n\n Args:\n processing_mode: A `tf.data.experimental.service.ShardingPolicy` specifying\n how to shard the dataset among tf.data workers. See\n `tf.data.experimental.service.ShardingPolicy` for details. For backwards\n compatibility, `processing_mode` may also be set to the strings\n `\"parallel_epochs\"` or `\"distributed_epoch\"`, which are respectively\n equivalent to `ShardingPolicy.OFF` and `ShardingPolicy.DYNAMIC`.\n service: A string or a tuple indicating how to connect to the tf.data\n service. If it's a string, it should be in the format\n `[<protocol>://]<address>`, where `<address>` identifies the dispatcher\n address and `<protocol>` can optionally be used to override the default\n protocol to use. If it's a tuple, it should be (protocol, address).\n job_name: (Optional.) The name of the job. If provided, it must be a\n non-empty string. This argument makes it possible\n for multiple datasets to share the same job. The default behavior is that\n the dataset creates anonymous, exclusively owned jobs.\n consumer_index: (Optional.) The index of the consumer in the range from `0`\n to `num_consumers`. Must be specified alongside `num_consumers`. When\n specified, consumers will read from the job in a strict round-robin order,\n instead of the default first-come-first-served order.\n num_consumers: (Optional.) The number of consumers which will consume from\n the job. Must be specified alongside `consumer_index`. When specified,\n consumers will read from the job in a strict round-robin order, instead of\n the default first-come-first-served order. 
When `num_consumers` is\n      specified, the dataset must have infinite cardinality to prevent a\n      producer from running out of data early and causing consumers to go out of\n      sync.\n    max_outstanding_requests: (Optional.) A limit on how many elements may be\n      requested at the same time. You can use this option to control the amount\n      of memory used, since `distribute` won't use more than `element_size` *\n      `max_outstanding_requests` of memory.\n    task_refresh_interval_hint_ms: (Optional.) A hint for how often to query the\n      dispatcher for task changes.\n    data_transfer_protocol: (Optional.) The protocol to use for transferring\n      data with the tf.data service. By default, data is transferred using gRPC.\n    compression: How to compress the dataset's elements before transferring them\n      over the network. \"AUTO\" leaves the decision of how to compress up to the\n      tf.data service runtime. `None` indicates not to compress.\n    target_workers: (Optional.) Which workers to read from. If `\"AUTO\"`, tf.data\n      runtime decides which workers to read from. If `\"ANY\"`, reads from any\n      tf.data service workers. If `\"LOCAL\"`, only reads from local in-process\n      tf.data service workers. `\"AUTO\"` works well for most cases, while users\n      can specify other targets. For example, `\"LOCAL\"` helps avoid RPCs and\n      data copy if every TF worker colocates with a tf.data service worker.\n      Consumers of a shared job must use the same `target_workers`. Defaults\n      to `\"AUTO\"`.\n\n  Returns:\n    Dataset: A `Dataset` of the elements produced by the data service.\n  \"\"\"\n  processing_mode = _get_validated_sharding_policy(processing_mode)\n  valid_compressions = [COMPRESSION_AUTO, COMPRESSION_NONE]\n  if compression not in valid_compressions:\n    raise ValueError(\n        \"Invalid compression argument: {}. 
Must be one of {}\".format(\n compression, valid_compressions))\n compression = _decide_compression(compression, data_transfer_protocol)\n\n def _apply_fn(dataset): # pylint: disable=missing-docstring\n dataset_id = _register_dataset(service, dataset, compression=compression)\n return _from_dataset_id(\n processing_mode,\n service,\n dataset_id,\n dataset.element_spec,\n job_name=job_name,\n consumer_index=consumer_index,\n num_consumers=num_consumers,\n max_outstanding_requests=max_outstanding_requests,\n task_refresh_interval_hint_ms=task_refresh_interval_hint_ms,\n data_transfer_protocol=data_transfer_protocol,\n compression=compression,\n target_workers=target_workers)\n\n return _apply_fn\n\n\n@tf_export(\"data.experimental.service.distribute\")\ndef distribute(processing_mode,\n service,\n job_name=None,\n consumer_index=None,\n num_consumers=None,\n max_outstanding_requests=None,\n data_transfer_protocol=None,\n compression=\"AUTO\",\n target_workers=\"AUTO\"):\n \"\"\"A transformation that moves dataset processing to the tf.data service.\n\n When you iterate over a dataset containing the `distribute` transformation,\n the tf.data service creates a \"job\" which produces data for the dataset\n iteration.\n\n The tf.data service uses a cluster of workers to prepare data for training\n your model.\n The `processing_mode` argument to `tf.data.experimental.service.distribute`\n describes how to leverage multiple workers to process the input dataset.\n Currently, there are two processing modes to choose from: \"distributed_epoch\"\n and \"parallel_epochs\".\n\n \"distributed_epoch\" means that the dataset will be split across all tf.data\n service workers.\n The dispatcher produces \"splits\" for the dataset and sends them to workers for\n further processing. For example, if a dataset begins with a list of filenames,\n the dispatcher will iterate through the filenames and send the filenames to\n tf.data workers, which will perform the rest of the dataset transformations on\n those files. \"distributed_epoch\" is useful when your model needs to see each\n element of the dataset exactly once, or if it needs to see the data in a\n generally-sequential order. \"distributed_epoch\" only works for datasets with\n splittable sources, such as `Dataset.from_tensor_slices`,\n `Dataset.list_files`, or `Dataset.range`.\n\n \"parallel_epochs\" means that the entire input dataset will be processed\n independently by each of the tf.data service workers.\n For this reason, it is important to shuffle data (e.g. filenames)\n non-deterministically, so that each worker will process the elements of the\n dataset in a different order. \"parallel_epochs\" can be used to distribute\n datasets that aren't splittable.\n\n With two workers, \"parallel_epochs\" will produce every element of the dataset\n twice:\n\n >>> dispatcher = tf.data.experimental.service.DispatchServer()\n >>> dispatcher_address = dispatcher.target.split(\"://\")[1]\n >>> # Start two workers\n >>> workers = [\n ... tf.data.experimental.service.WorkerServer(\n ... tf.data.experimental.service.WorkerConfig(\n ... dispatcher_address=dispatcher_address)) for _ in range(2)\n ... ]\n >>> dataset = tf.data.Dataset.range(10)\n >>> dataset = dataset.apply(tf.data.experimental.service.distribute(\n ... 
processing_mode=\"parallel_epochs\", service=dispatcher.target))\n >>> print(sorted(list(dataset.as_numpy_iterator())))\n [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9]\n\n \"distributed_epoch\", on the other hand, will still produce each element once:\n\n >>> dispatcher = tf.data.experimental.service.DispatchServer()\n >>> dispatcher_address = dispatcher.target.split(\"://\")[1]\n >>> workers = [\n ... tf.data.experimental.service.WorkerServer(\n ... tf.data.experimental.service.WorkerConfig(\n ... dispatcher_address=dispatcher_address)) for _ in range(2)\n ... ]\n >>> dataset = tf.data.Dataset.range(10)\n >>> dataset = dataset.apply(tf.data.experimental.service.distribute(\n ... processing_mode=\"distributed_epoch\", service=dispatcher.target))\n >>> print(sorted(list(dataset.as_numpy_iterator())))\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n When using `apply(tf.data.experimental.service.distribute(...))`, the dataset\n before the `apply` transformation executes within the tf.data service, while\n the operations after `apply` happen within the local process.\n\n >>> dispatcher = tf.data.experimental.service.DispatchServer()\n >>> dispatcher_address = dispatcher.target.split(\"://\")[1]\n >>> workers = [\n ... tf.data.experimental.service.WorkerServer(\n ... tf.data.experimental.service.WorkerConfig(\n ... dispatcher_address=dispatcher_address)) for _ in range(2)\n ... ]\n >>> dataset = tf.data.Dataset.range(5)\n >>> dataset = dataset.map(lambda x: x*x)\n >>> dataset = dataset.apply(\n ... tf.data.experimental.service.distribute(\"parallel_epochs\",\n ... dispatcher.target))\n >>> dataset = dataset.map(lambda x: x+1)\n >>> print(sorted(list(dataset.as_numpy_iterator())))\n [1, 1, 2, 2, 5, 5, 10, 10, 17, 17]\n\n In the above example, the dataset operations (before applying the `distribute`\n function on the elements) will be executed on the tf.data workers,\n and the elements are provided over RPC. The remaining transformations\n (after the call to `distribute`) will be executed locally. The dispatcher\n and the workers will bind to usused free ports (which are chosen at random),\n in order to communicate with each other. However, to bind them to specific\n ports, the `port` parameter can be passed.\n\n The `job_name` argument allows jobs to be shared across multiple\n datasets. Instead of each dataset creating its own job, all\n datasets with the same `job_name` will consume from the same job. A new job\n will be created for each iteration of the dataset (with each repetition of\n `Dataset.repeat` counting as a new iteration). Suppose the `DispatchServer`\n is serving on `localhost:5000` and two training workers (in either a single\n client or multi-client setup) iterate over the below dataset, and there is a\n single tf.data worker:\n\n ```\n range5_dataset = tf.data.Dataset.range(5)\n dataset = range5_dataset.apply(tf.data.experimental.service.distribute(\n \"parallel_epochs\", \"localhost:5000\", job_name=\"my_job_name\"))\n for iteration in range(3):\n print(list(dataset))\n ```\n\n The elements of each job will be split between the two processes, with\n elements being consumed by the processes on a first-come first-served basis.\n One possible result is that process 1 prints\n\n ```\n [0, 2, 4]\n [0, 1, 3]\n [1]\n ```\n\n and process 2 prints\n\n ```\n [1, 3]\n [2, 4]\n [0, 2, 3, 4]\n ```\n\n Job names must not be re-used across different training jobs within the\n lifetime of the tf.data service. 
In general, the tf.data service is expected\n to live for the duration of a single training job.\n To use the tf.data service with multiple training jobs, make sure to use\n different job names to avoid conflicts. For example, suppose a training job\n calls `distribute` with `job_name=\"job\"` and reads until end of input. If\n another independent job connects to the same tf.data service and tries to read\n from `job_name=\"job\"`, it will immediately receive end of input, without\n getting any data.\n\n **Round Robin data consumption**\n\n By default, when multiple consumers read from the same job, they receive data\n on a first-come first-served basis. In some use cases, it works better to use\n a strict round-robin order. For example, the tf.data service can be used to\n coordinate example sizes across a cluster during sychronous training, so that\n during each step all replicas train on similar-sized elements. To achieve\n this, define a dataset which generates rounds of `num_consumers` consecutive\n similar-sized batches, then enable round-robin reads by setting\n `consumer_index` and `num_consumers`.\n\n Consumers read data by cycling through all workers, reading one element from\n each. First, each consumer will read an element from the first worker, then\n each consumer will read an element from the second worker, and so on.\n\n NOTE: To keep consumers in sync, round robin data consumption requires that\n the dataset have infinite cardinality. You can get this by adding `.repeat()`\n at the end of the dataset definition.\n\n **Keras and Distribution Strategies**\n\n The dataset produced by the `distribute` transformation can be passed to\n Keras' `Model.fit` or Distribution Strategy's\n `tf.distribute.Strategy.experimental_distribute_dataset` like any other\n `tf.data.Dataset`. We recommend setting a `job_name` on the call to\n `distribute` so that if there are multiple workers, they read data from the\n same job. Note that the autosharding normally performed by\n `experimental_distribute_dataset` will be disabled when setting a `job_name`,\n since sharing the job already results in splitting data across the workers.\n When using a shared job, data will be dynamically balanced across workers, so\n that they reach end of input about the same time. This results in better\n worker utilization than with autosharding, where each worker processes an\n independent set of files, and some workers may run out of data earlier than\n others.\n\n Args:\n processing_mode: A `tf.data.experimental.service.ShardingPolicy` specifying\n how to shard the dataset among tf.data workers. See\n `tf.data.experimental.service.ShardingPolicy` for details. For backwards\n compatibility, `processing_mode` may also be set to the strings\n `\"parallel_epochs\"` or `\"distributed_epoch\"`, which are respectively\n equivalent to `ShardingPolicy.OFF` and `ShardingPolicy.DYNAMIC`.\n service: A string or a tuple indicating how to connect to the tf.data\n service. If it's a string, it should be in the format\n `[<protocol>://]<address>`, where `<address>` identifies the dispatcher\n address and `<protocol>` can optionally be used to override the default\n protocol to use. If it's a tuple, it should be (protocol, address).\n job_name: (Optional.) The name of the job. If provided, it must be a\n non-empty string. This argument makes it possible\n for multiple datasets to share the same job. The default behavior is that\n the dataset creates anonymous, exclusively owned jobs.\n consumer_index: (Optional.) 
The index of the consumer in the range from `0`\n to `num_consumers`. Must be specified alongside `num_consumers`. When\n specified, consumers will read from the job in a strict round-robin order,\n instead of the default first-come-first-served order.\n num_consumers: (Optional.) The number of consumers which will consume from\n the job. Must be specified alongside `consumer_index`. When specified,\n consumers will read from the job in a strict round-robin order, instead of\n the default first-come-first-served order. When `num_consumers` is\n specified, the dataset must have infinite cardinality to prevent a\n producer from running out of data early and causing consumers to go out of\n sync.\n max_outstanding_requests: (Optional.) A limit on how many elements may be\n requested at the same time. You can use this option to control the amount\n of memory used, since `distribute` won't use more than `element_size` *\n `max_outstanding_requests` of memory.\n data_transfer_protocol: (Optional.) The protocol to use for transferring\n data with the tf.data service. By default, data is transferred using gRPC.\n compression: How to compress the dataset's elements before transferring them\n over the network. \"AUTO\" leaves the decision of how to compress up to the\n tf.data service runtime. `None` indicates not to compress.\n target_workers: (Optional.) Which workers to read from. If `\"AUTO\"`, tf.data\n runtime decides which workers to read from. If `\"ANY\"`, reads from any\n tf.data service workers. If `\"LOCAL\"`, only reads from local in-processs\n tf.data service workers. `\"AUTO\"` works well for most cases, while users\n can specify other targets. For example, `\"LOCAL\"` helps avoid RPCs and\n data copy if every TF worker colocates with a tf.data service worker.\n Consumers of a shared job must use the same `target_workers`. Defaults\n to `\"AUTO\"`.\n\n Returns:\n Dataset: A `Dataset` of the elements produced by the data service.\n \"\"\"\n _validate_job_name(job_name)\n return _distribute(\n processing_mode=processing_mode,\n service=service,\n job_name=job_name,\n consumer_index=consumer_index,\n num_consumers=num_consumers,\n max_outstanding_requests=max_outstanding_requests,\n data_transfer_protocol=data_transfer_protocol,\n compression=compression,\n target_workers=target_workers)\n\n\ndef _register_dataset(service, dataset, compression):\n \"\"\"Registers a dataset with the tf.data service.\n\n This transformation is similar to `register_dataset`, but supports additional\n parameters which we do not yet want to add to the public Python API.\n\n Args:\n service: A string or a tuple indicating how to connect to the tf.data\n service. If it's a string, it should be in the format\n `[<protocol>://]<address>`, where `<address>` identifies the dispatcher\n address and `<protocol>` can optionally be used to override the default\n protocol to use. If it's a tuple, it should be (protocol, address).\n dataset: A `tf.data.Dataset` to register with the tf.data service.\n compression: How to compress the dataset's elements before transferring them\n over the network. \"AUTO\" leaves the decision of how to compress up to the\n tf.data service runtime. `None` indicates not to compress.\n\n Returns:\n A scalar int64 tensor of the registered dataset's id.\n \"\"\"\n valid_compressions = [COMPRESSION_AUTO, COMPRESSION_NONE]\n if compression not in valid_compressions:\n raise ValueError(\n \"Invalid compression argument: {}. 
Must be one of {}\".format(\n compression, valid_compressions))\n if isinstance(service, tuple):\n protocol, address = service\n else:\n protocol, address = _parse_service(service)\n external_state_policy = dataset.options().experimental_external_state_policy\n if external_state_policy is None:\n external_state_policy = ExternalStatePolicy.WARN\n\n encoded_spec = \"\"\n if context.executing_eagerly():\n coder = nested_structure_coder.StructureCoder()\n encoded_spec = coder.encode_structure(\n dataset.element_spec).SerializeToString()\n\n if compression == COMPRESSION_AUTO:\n dataset = dataset.map(\n lambda *x: compression_ops.compress(x),\n num_parallel_calls=dataset_ops.AUTOTUNE)\n dataset = dataset.prefetch(dataset_ops.AUTOTUNE)\n dataset = dataset._apply_debug_options() # pylint: disable=protected-access\n\n dataset_id = gen_experimental_dataset_ops.register_dataset(\n dataset._variant_tensor, # pylint: disable=protected-access\n address=address,\n protocol=protocol,\n external_state_policy=external_state_policy.value,\n element_spec=encoded_spec)\n\n return dataset_id\n\n\n@tf_export(\"data.experimental.service.register_dataset\")\ndef register_dataset(service, dataset, compression=\"AUTO\"):\n \"\"\"Registers a dataset with the tf.data service.\n\n `register_dataset` registers a dataset with the tf.data service so that\n datasets can be created later with\n `tf.data.experimental.service.from_dataset_id`. This is useful when the\n dataset\n is registered by one process, then used in another process. When the same\n process is both registering and reading from the dataset, it is simpler to use\n `tf.data.experimental.service.distribute` instead.\n\n If the dataset is already registered with the tf.data service,\n `register_dataset` returns the already-registered dataset's id.\n\n >>> dispatcher = tf.data.experimental.service.DispatchServer()\n >>> dispatcher_address = dispatcher.target.split(\"://\")[1]\n >>> worker = tf.data.experimental.service.WorkerServer(\n ... tf.data.experimental.service.WorkerConfig(\n ... dispatcher_address=dispatcher_address))\n >>> dataset = tf.data.Dataset.range(10)\n >>> dataset_id = tf.data.experimental.service.register_dataset(\n ... dispatcher.target, dataset)\n >>> dataset = tf.data.experimental.service.from_dataset_id(\n ... processing_mode=\"parallel_epochs\",\n ... service=dispatcher.target,\n ... dataset_id=dataset_id,\n ... element_spec=dataset.element_spec)\n >>> print(list(dataset.as_numpy_iterator()))\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n Args:\n service: A string or a tuple indicating how to connect to the tf.data\n service. If it's a string, it should be in the format\n `[<protocol>://]<address>`, where `<address>` identifies the dispatcher\n address and `<protocol>` can optionally be used to override the default\n protocol to use. If it's a tuple, it should be (protocol, address).\n dataset: A `tf.data.Dataset` to register with the tf.data service.\n compression: (Optional.) How to compress the dataset's elements before\n transferring them over the network. \"AUTO\" leaves the decision of how to\n compress up to the tf.data service runtime. 
`None` indicates not to\n compress.\n\n Returns:\n A scalar int64 tensor of the registered dataset's id.\n \"\"\"\n return _register_dataset(service, dataset, compression)\n\n\ndef _from_dataset_id(processing_mode,\n service,\n dataset_id,\n element_spec,\n job_name=None,\n consumer_index=None,\n num_consumers=None,\n max_outstanding_requests=None,\n task_refresh_interval_hint_ms=None,\n data_transfer_protocol=None,\n compression=\"AUTO\",\n target_workers=\"AUTO\"):\n \"\"\"Creates a dataset which reads data from the tf.data service.\n\n This transformation is similar to `from_dataset_id`, but supports additional\n parameters which we do not yet want to add to the public Python API.\n\n Args:\n processing_mode: A `tf.data.experimental.service.ShardingPolicy` specifying\n how to shard the dataset among tf.data workers. See\n `tf.data.experimental.service.ShardingPolicy` for details. For backwards\n compatibility, `processing_mode` may also be set to the strings\n `\"parallel_epochs\"` or `\"distributed_epoch\"`, which are respectively\n equivalent to `ShardingPolicy.OFF` and `ShardingPolicy.DYNAMIC`.\n service: A string or a tuple indicating how to connect to the tf.data\n service. If it's a string, it should be in the format\n `[<protocol>://]<address>`, where `<address>` identifies the dispatcher\n address and `<protocol>` can optionally be used to override the default\n protocol to use. If it's a tuple, it should be (protocol, address).\n dataset_id: The id of the dataset to read from. This id is returned by\n `register_dataset` when the dataset is registered with the tf.data\n service.\n element_spec: A nested structure of `tf.TypeSpec`s representing the type of\n elements produced by the dataset. This argument is only required inside a\n tf.function. Use `tf.data.Dataset.element_spec` to get the element spec\n for a given dataset.\n job_name: (Optional.) The name of the job. If provided, it must be a\n non-empty string or tensor. This argument makes it possible\n for multiple datasets to share the same job. The default behavior is that\n the dataset creates anonymous, exclusively owned jobs.\n consumer_index: (Optional.) The index of the consumer in the range from `0`\n to `num_consumers`. Must be specified alongside `num_consumers`. When\n specified, consumers will read from the job in a strict round-robin order,\n instead of the default first-come-first-served order.\n num_consumers: (Optional.) The number of consumers which will consume from\n the job. Must be specified alongside `consumer_index`. When specified,\n consumers will read from the job in a strict round-robin order, instead of\n the default first-come-first-served order. When `num_consumers` is\n specified, the dataset must have infinite cardinality to prevent a\n producer from running out of data early and causing consumers to go out of\n sync.\n max_outstanding_requests: (Optional.) A limit on how many elements may be\n requested at the same time. You can use this option to control the amount\n of memory used, since `distribute` won't use more than `element_size` *\n `max_outstanding_requests` of memory.\n task_refresh_interval_hint_ms: (Optional.) A hint for how often to query the\n dispatcher for task changes.\n data_transfer_protocol: (Optional.) The protocol to use for transferring\n data with the tf.data service. 
By default, data is transferred using gRPC.\n compression: An indication of how the dataset's elements were compressed, so\n that `from_dataset_id` can uncompress them if necessary.\n target_workers: (Optional.) Which workers to read from. If `\"AUTO\"`, tf.data\n runtime decides which workers to read from. If `\"ANY\"`, reads from any\n tf.data service workers. If `\"LOCAL\"`, only reads from local in-processs\n tf.data service workers. `\"AUTO\"` works well for most cases, while users\n can specify other targets. For example, `\"LOCAL\"` helps avoid RPCs and\n data copy if every TF worker colocates with a tf.data service worker.\n Consumers of a shared job must use the same `target_workers`. Defaults\n to `\"AUTO\"`.\n\n Returns:\n A `tf.data.Dataset` which reads from the tf.data service.\n \"\"\"\n processing_mode = _get_validated_sharding_policy(processing_mode)\n valid_compressions = [COMPRESSION_AUTO, COMPRESSION_NONE]\n if isinstance(service, tuple):\n protocol, address = service\n else:\n protocol, address = _parse_service(service)\n\n if compression not in valid_compressions:\n raise ValueError(\n \"Invalid compression argument: {}. Must be one of {}\".format(\n compression, valid_compressions))\n if job_name is not None:\n if not isinstance(job_name, six.string_types) and not isinstance(\n job_name, ops.Tensor):\n raise ValueError(\n \"job_name must be a string or Tensor, but job_name was of type \"\n \"{0}. job_name={1}\".format(type(job_name), job_name))\n\n if element_spec is None:\n if not context.executing_eagerly():\n raise ValueError(\"In graph mode element_spec must be provided manually.\")\n\n dataset_id_val = tensor_util.constant_value(dataset_id)\n try:\n encoded_spec = _pywrap_server_lib.TF_DATA_GetElementSpec(\n dataset_id_val, address, protocol)\n\n except NotImplementedError as err:\n raise ValueError(\"The tf.data service is running an earlier version of \"\n \"TensorFlow that requires specifying `element_spec` as \"\n \"an argument to `from_dataset_id`. Please either supply \"\n \"an element spec or update the tf.data service to the \"\n \"latest version.\") from err\n\n except RuntimeError as err:\n raise ValueError(\"Failed to fetch element spec for dataset id \" +\n str(dataset_id_val) + \" from tf.data service. 
If the \"\n \"dataset was registered in graph mode or inside a \"\n \"tf.function, the `element_spec` must be specified as \"\n \"an argument to `from_dataset_id`.\") from err\n\n struct_pb = nested_structure_coder.struct_pb2.StructuredValue()\n struct_pb.ParseFromString(encoded_spec)\n coder = nested_structure_coder.StructureCoder()\n element_spec = coder.decode_proto(struct_pb)\n\n # If we compress, the data service side dataset will produce scalar variants.\n compression = _decide_compression(compression, data_transfer_protocol)\n data_service_element_spec = (\n tensor_spec.TensorSpec(shape=(), dtype=dtypes.variant)\n if compression == COMPRESSION_AUTO else element_spec)\n\n dataset = _DataServiceDataset(\n dataset_id=dataset_id,\n processing_mode=processing_mode,\n address=address,\n element_spec=data_service_element_spec,\n protocol=protocol,\n data_transfer_protocol=data_transfer_protocol,\n job_name=job_name,\n consumer_index=consumer_index,\n num_consumers=num_consumers,\n max_outstanding_requests=max_outstanding_requests,\n task_refresh_interval_hint_ms=task_refresh_interval_hint_ms,\n target_workers=target_workers)\n if compression == COMPRESSION_AUTO:\n dataset = dataset.map(\n lambda x: compression_ops.uncompress(x, output_spec=element_spec),\n num_parallel_calls=dataset_ops.AUTOTUNE)\n\n # Disable autosharding for shared jobs.\n if job_name is not None:\n options = options_lib.Options()\n options.experimental_distribute.auto_shard_policy = AutoShardPolicy.OFF\n dataset = dataset.with_options(options)\n return dataset\n\n\n@tf_export(\"data.experimental.service.from_dataset_id\")\ndef from_dataset_id(processing_mode,\n service,\n dataset_id,\n element_spec=None,\n job_name=None,\n consumer_index=None,\n num_consumers=None,\n max_outstanding_requests=None,\n data_transfer_protocol=None,\n target_workers=\"AUTO\"):\n \"\"\"Creates a dataset which reads data from the tf.data service.\n\n This is useful when the dataset is registered by one process, then used in\n another process. When the same process is both registering and reading from\n the dataset, it is simpler to use `tf.data.experimental.service.distribute`\n instead.\n\n Before using `from_dataset_id`, the dataset must have been registered with the\n tf.data service using `tf.data.experimental.service.register_dataset`.\n `register_dataset` returns a dataset id for the registered dataset. That is\n the `dataset_id` which should be passed to `from_dataset_id`.\n\n The `element_spec` argument indicates the `tf.TypeSpec`s for the elements\n produced by the dataset. Currently `element_spec` must be explicitly\n specified, and match the dataset registered under `dataset_id`. `element_spec`\n defaults to `None` so that in the future we can support automatically\n discovering the `element_spec` by querying the tf.data service.\n\n `tf.data.experimental.service.distribute` is a convenience method which\n combines `register_dataset` and `from_dataset_id` into a dataset\n transformation.\n See the documentation for `tf.data.experimental.service.distribute` for more\n detail about how `from_dataset_id` works.\n\n >>> dispatcher = tf.data.experimental.service.DispatchServer()\n >>> dispatcher_address = dispatcher.target.split(\"://\")[1]\n >>> worker = tf.data.experimental.service.WorkerServer(\n ... tf.data.experimental.service.WorkerConfig(\n ... dispatcher_address=dispatcher_address))\n >>> dataset = tf.data.Dataset.range(10)\n >>> dataset_id = tf.data.experimental.service.register_dataset(\n ... 
dispatcher.target, dataset)\n >>> dataset = tf.data.experimental.service.from_dataset_id(\n ... processing_mode=\"parallel_epochs\",\n ... service=dispatcher.target,\n ... dataset_id=dataset_id,\n ... element_spec=dataset.element_spec)\n >>> print(list(dataset.as_numpy_iterator()))\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n Args:\n processing_mode: A `tf.data.experimental.service.ShardingPolicy` specifying\n how to shard the dataset among tf.data workers. See\n `tf.data.experimental.service.ShardingPolicy` for details. For backwards\n compatibility, `processing_mode` may also be set to the strings\n `\"parallel_epochs\"` or `\"distributed_epoch\"`, which are respectively\n equivalent to `ShardingPolicy.OFF` and `ShardingPolicy.DYNAMIC`.\n service: A string or a tuple indicating how to connect to the tf.data\n service. If it's a string, it should be in the format\n `[<protocol>://]<address>`, where `<address>` identifies the dispatcher\n address and `<protocol>` can optionally be used to override the default\n protocol to use. If it's a tuple, it should be (protocol, address).\n dataset_id: The id of the dataset to read from. This id is returned by\n `register_dataset` when the dataset is registered with the tf.data\n service.\n element_spec: A nested structure of `tf.TypeSpec`s representing the type of\n elements produced by the dataset. This argument is only required inside a\n tf.function. Use `tf.data.Dataset.element_spec` to get the element spec\n for a given dataset.\n job_name: (Optional.) The name of the job. If provided, it must be a\n non-empty string. This argument makes it possible\n for multiple datasets to share the same job. The default behavior is that\n the dataset creates anonymous, exclusively owned jobs.\n consumer_index: (Optional.) The index of the consumer in the range from `0`\n to `num_consumers`. Must be specified alongside `num_consumers`. When\n specified, consumers will read from the job in a strict round-robin order,\n instead of the default first-come-first-served order.\n num_consumers: (Optional.) The number of consumers which will consume from\n the job. Must be specified alongside `consumer_index`. When specified,\n consumers will read from the job in a strict round-robin order, instead of\n the default first-come-first-served order. When `num_consumers` is\n specified, the dataset must have infinite cardinality to prevent a\n producer from running out of data early and causing consumers to go out of\n sync.\n max_outstanding_requests: (Optional.) A limit on how many elements may be\n requested at the same time. You can use this option to control the amount\n of memory used, since `distribute` won't use more than `element_size` *\n `max_outstanding_requests` of memory.\n data_transfer_protocol: (Optional.) The protocol to use for transferring\n data with the tf.data service. By default, data is transferred using gRPC.\n target_workers: (Optional.) Which workers to read from. If `\"AUTO\"`, tf.data\n runtime decides which workers to read from. If `\"ANY\"`, reads from any\n tf.data service workers. If `\"LOCAL\"`, only reads from local in-processs\n tf.data service workers. `\"AUTO\"` works well for most cases, while users\n can specify other targets. For example, `\"LOCAL\"` helps avoid RPCs and\n data copy if every TF worker colocates with a tf.data service worker.\n Consumers of a shared job must use the same `target_workers`. 
Defaults\n to `\"AUTO\"`.\n\n Returns:\n A `tf.data.Dataset` which reads from the tf.data service.\n \"\"\"\n _validate_job_name(job_name)\n if job_name is not None:\n job_name = string_ops.string_join(\n [\"dataset_id=\", string_ops.as_string(dataset_id), job_name], \"/\")\n\n return _from_dataset_id(\n processing_mode=processing_mode,\n service=service,\n dataset_id=dataset_id,\n element_spec=element_spec,\n job_name=job_name,\n consumer_index=consumer_index,\n num_consumers=num_consumers,\n max_outstanding_requests=max_outstanding_requests,\n data_transfer_protocol=data_transfer_protocol,\n target_workers=target_workers)\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow.tools.docs.generate2.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport shutil\nimport types\nfrom unittest import mock\n\nimport tensorflow as tf\n\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.tools.docs import generate2\n\n# Make a mock tensorflow package that won't take too long to test.\nfake_tf = types.ModuleType('FakeTensorFlow')\nfake_tf.estimator = tf.estimator\nfake_tf.keras = tf.keras\nfake_tf.nn = tf.nn\nfake_tf.summary = tf.summary\nfake_tf.raw_ops = types.ModuleType('raw_ops')\nfake_tf.Module = tf.Module\n\nfor name in sorted(dir(tf.raw_ops))[:5]:\n setattr(fake_tf.raw_ops, name, getattr(tf.raw_ops, name))\n\n\nclass Generate2Test(googletest.TestCase):\n\n @mock.patch.object(generate2, 'tf', fake_tf)\n def test_end_to_end(self):\n output_dir = os.path.join(googletest.GetTempDir(), 'output')\n if os.path.exists(output_dir):\n shutil.rmtree(output_dir)\n os.makedirs(output_dir)\n with self.assertRaisesRegex(ValueError, '2000 files'):\n generate2.build_docs(\n output_dir=output_dir,\n code_url_prefix='',\n search_hints=True,\n )\n\n\nif __name__ == '__main__':\n googletest.main()\n"
] | [
[
"tensorflow.python.distribute.distribute_lib.StrategyExtendedV1.__init__",
"tensorflow.python.platform.tf_logging.error",
"tensorflow.python.eager.context.async_wait",
"tensorflow.python.distribute.numpy_dataset.SingleDevice",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions",
"tensorflow.python.distribute.multi_worker_util.id_in_cluster",
"tensorflow.python.distribute.distribute_lib.distribution_strategy_gauge.get_cell",
"tensorflow.python.framework.ops.device",
"tensorflow.python.eager.context.is_async",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.eager.context.context",
"tensorflow.python.distribute.distribute_lib.ValueContext",
"tensorflow.python.distribute.device_util.get_host_for_device",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.distribute.distribute_lib.InputContext",
"tensorflow.python.distribute.multi_worker_util.worker_count",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.util.deprecation.deprecated",
"tensorflow.python.distribute.distribute_lib.distribution_strategy_replica_gauge.get_cell",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.distribute.distribute_utils.regroup",
"tensorflow.python.distribute.collective_util.Options",
"tensorflow.python.distribute.device_util.canonicalize",
"tensorflow.python.distribute.distribution_strategy_context.get_replica_context",
"tensorflow.python.ops.collective_ops.broadcast_recv",
"tensorflow.python.distribute.cross_device_ops.CollectiveAllReduce",
"tensorflow.python.distribute.multi_worker_util.is_chief",
"tensorflow.python.distribute.cross_device_utils.CollectiveKeys",
"tensorflow.python.eager.context.num_gpus",
"tensorflow.python.distribute.multi_worker_util.collective_leader",
"tensorflow.python.distribute.multi_worker_util.normalize_cluster_spec",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.distribute.cluster_resolver.TFConfigClusterResolver",
"tensorflow.python.ops.collective_ops.broadcast_send",
"tensorflow.python.distribute.input_lib.InputWorkers"
],
[
"tensorflow.python.data.experimental.service._pywrap_server_lib.TF_DATA_GetElementSpec",
"tensorflow.python.ops.gen_experimental_dataset_ops.register_dataset",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.ops.gen_experimental_dataset_ops.dummy_iteration_counter",
"tensorflow.python.ops.string_ops.as_string",
"tensorflow.python.data.ops.options.Options",
"tensorflow.python.compat.compat.forward_compatible",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.data.experimental.service._pywrap_utils.TF_DATA_DefaultProtocol",
"tensorflow.python.data.experimental.ops.compression_ops.uncompress",
"tensorflow.python.data.experimental.ops.compression_ops.compress",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.python.tf2.enabled",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.eager.context.executing_eagerly"
],
[
"tensorflow.python.platform.googletest.GetTempDir",
"tensorflow.python.platform.googletest.main",
"tensorflow.tools.docs.generate2.build_docs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.4",
"2.9",
"2.5",
"2.6",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.4",
"2.3",
"2.9",
"2.5",
"2.6",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
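Note on the record above: the embedded `data_service_ops.py` docstrings describe round-robin consumption (`consumer_index` / `num_consumers`) only in prose, without a doctest. The sketch below shows how those parameters would be wired up, assuming an in-process dispatcher and worker as in the doctests and that each consumer dataset is iterated by its own trainer process; it is a minimal illustration, not code from the record.

```python
import tensorflow as tf

# Minimal sketch of round-robin reads (see the distribute() docstring above).
# Assumes an in-process dispatcher/worker; in practice each consumer dataset
# below would be iterated by a separate trainer process.
dispatcher = tf.data.experimental.service.DispatchServer()
worker = tf.data.experimental.service.WorkerServer(
    tf.data.experimental.service.WorkerConfig(
        dispatcher_address=dispatcher.target.split("://")[1]))

NUM_CONSUMERS = 2

def make_consumer(consumer_index):
  # Round-robin requires an infinite dataset, hence the trailing repeat().
  dataset = tf.data.Dataset.range(1000).batch(4).repeat()
  return dataset.apply(
      tf.data.experimental.service.distribute(
          processing_mode="parallel_epochs",
          service=dispatcher.target,
          job_name="round_robin_job",       # all consumers share one job
          consumer_index=consumer_index,
          num_consumers=NUM_CONSUMERS))

consumers = [make_consumer(i) for i in range(NUM_CONSUMERS)]
# Each trainer would then iterate its own entry of `consumers`; within every
# round, consumer 0 and consumer 1 receive one element each, in strict order.
```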
SparkJiao/MERIt | [
"e887dd11bd2969345a5fb07c47d49bd0245e41e6"
] | [
"reclor_trainer_base_v2.py"
] | [
"# coding=utf-8\n#\n# Copyright 2020 Heinrich Heine University Duesseldorf\n#\n# Part of this code is based on the source code of BERT-DST\n# (arXiv:1907.03040)\n# Part of this code is based on the source code of Transformers\n# (arXiv:1910.03771)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport glob\nimport json\nimport logging\nimport os\nimport sys\nfrom typing import Dict, Union\n\nimport hydra\nimport numpy as np\nimport torch\nimport transformers\nfrom fairscale.nn.data_parallel.fully_sharded_data_parallel import FullyShardedDataParallel as FullyShardedDDP\nfrom fairscale.nn.wrap.auto_wrap import auto_wrap\nfrom fairscale.optim.grad_scaler import ShardedGradScaler\nfrom omegaconf import DictConfig, OmegaConf\nfrom torch import distributed as dist\nfrom torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset)\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm import tqdm, trange\nfrom transformers import (get_linear_schedule_with_warmup, AutoTokenizer, PreTrainedTokenizer)\n\nfrom general_util.logger import setting_logger\nfrom general_util.training_utils import batch_to_device, unwrap_model, set_seed, note_best_checkpoint, initialize_optimizer\n\nlogger: logging.Logger\n\n# transformers.logging.set_verbosity_error()\n\n\ndef save_model(model: Union[torch.nn.Module, FullyShardedDDP], cfg: DictConfig, output_dir: str, tokenizer: PreTrainedTokenizer = None):\n # Save model checkpoint.\n if cfg.local_rank != -1:\n state_dict = model.state_dict()\n if cfg.local_rank == 0:\n unwrap_model(model).save_pretrained(output_dir, state_dict=state_dict)\n else:\n model.save_pretrained(output_dir)\n\n # Save tokenizer and training args.\n if cfg.local_rank in [-1, 0]:\n if tokenizer is not None:\n tokenizer.save_pretrained(output_dir)\n OmegaConf.save(cfg, os.path.join(output_dir, \"training_config.yaml\"))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n\ndef forward_step(model, inputs: Dict[str, torch.Tensor], cfg, scaler):\n if cfg.fp16:\n with torch.cuda.amp.autocast():\n outputs = model(**inputs)\n loss = outputs[\"loss\"] # model outputs are always tuple in transformers (see doc)\n else:\n outputs = model(**inputs)\n loss = outputs[\"loss\"] # model outputs are always tuple in pytorch-transformers (see doc)\n\n if cfg.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training\n if cfg.gradient_accumulation_steps > 1:\n loss = loss / cfg.gradient_accumulation_steps\n\n if cfg.fp16:\n scaler.scale(loss).backward()\n else:\n loss.backward()\n\n return loss.item()\n\n\ndef train(cfg, train_dataset, features, model, tokenizer, continue_from_global_step=0):\n \"\"\" Train the model \"\"\"\n if cfg.local_rank in [-1, 0]:\n _dir_splits = cfg.output_dir.split('/')\n _log_dir = '/'.join([_dir_splits[0], 'runs'] + _dir_splits[1:])\n tb_writer = SummaryWriter(log_dir=_log_dir)\n else:\n tb_writer = None\n\n cfg.train_batch_size = 
cfg.per_gpu_train_batch_size * max(1, cfg.n_gpu)\n train_sampler = RandomSampler(train_dataset) if cfg.local_rank == -1 else DistributedSampler(train_dataset)\n train_collator = hydra.utils.instantiate(cfg.collator) if \"collator\" in cfg and cfg.collator else None\n train_dataloader = DataLoader(dataset=train_dataset, sampler=train_sampler, batch_size=cfg.train_batch_size,\n collate_fn=train_collator, num_workers=cfg.num_workers, pin_memory=True,\n prefetch_factor=cfg.prefetch_factor)\n\n if \"extended_vocab\" in cfg and cfg.extended_vocab:\n logger.info(f\"Extended extra vocab size: {cfg.extended_vocab}\")\n model.resize_token_embeddings(model.config.vocab_size + cfg.extended_vocab)\n\n if cfg.max_steps > 0:\n t_total = cfg.max_steps\n cfg.num_train_epochs = cfg.max_steps // (len(train_dataloader) // cfg.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // cfg.gradient_accumulation_steps * cfg.num_train_epochs\n\n num_warmup_steps = int(t_total * cfg.warmup_proportion) if cfg.warmup_proportion else cfg.warmup_steps\n\n optimizer = scheduler = None\n # Prepare optimizer and schedule (linear warmup and decay)\n if cfg.local_rank == -1:\n no_decay = ['bias', 'LayerNorm.weight', 'layer_norm.weight']\n optimizer_grouped_parameters = [\n {\n 'params': [p for n, p in model.named_parameters() if (not any(nd in n for nd in no_decay)) and p.requires_grad],\n 'weight_decay': cfg.weight_decay\n },\n {\n 'params': [p for n, p in model.named_parameters() if (any(nd in n for nd in no_decay)) and p.requires_grad],\n 'weight_decay': 0.0\n }\n ]\n optimizer = initialize_optimizer(cfg, optimizer_grouped_parameters)\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total)\n\n if cfg.fp16:\n if cfg.local_rank != -1:\n scaler = ShardedGradScaler()\n else:\n from torch.cuda.amp.grad_scaler import GradScaler\n\n scaler = GradScaler()\n else:\n scaler = None\n\n # multi-gpu training (should be after apex fp16 initialization)\n model_single_gpu = model\n if cfg.n_gpu > 1:\n model = torch.nn.DataParallel(model_single_gpu)\n\n # Distributed training (should be after apex fp16 initialization)\n if cfg.local_rank != -1:\n model = auto_wrap(model)\n model = FullyShardedDDP(model,\n mixed_precision=cfg.fp16,\n flatten_parameters=getattr(cfg, \"flatten_parameters\", True),\n reshard_after_forward=cfg.reshard_after_forward,\n move_grads_to_cpu=cfg.move_grads_to_cpu,\n move_params_to_cpu=cfg.move_params_to_cpu)\n if not cfg.move_params_to_cpu:\n model = model.to(cfg.device)\n\n no_decay = ['bias', 'LayerNorm.weight', 'layer_norm.weight']\n optimizer_grouped_parameters = [\n {\n 'params': [p for n, p in model.named_parameters() if (not any(nd in n for nd in no_decay)) and p.requires_grad],\n 'weight_decay': cfg.weight_decay\n },\n {\n 'params': [p for n, p in model.named_parameters() if (any(nd in n for nd in no_decay)) and p.requires_grad],\n 'weight_decay': 0.0\n }\n ]\n optimizer = initialize_optimizer(cfg, optimizer_grouped_parameters)\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total)\n\n logger.info(optimizer)\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", cfg.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", cfg.per_gpu_train_batch_size)\n logger.info(\" Total train batch size (w. 
parallel, distributed & accumulation) = %d\",\n cfg.train_batch_size * cfg.gradient_accumulation_steps * (dist.get_world_size() if cfg.local_rank != -1 else 1))\n logger.info(\" Gradient Accumulation steps = %d\", cfg.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n logger.info(\" Warmup steps = %d\", num_warmup_steps)\n\n if continue_from_global_step > 0:\n logger.info(\"Fast forwarding to global step %d to resume training from latest checkpoint...\", continue_from_global_step)\n\n global_step = 0\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n train_iterator = trange(int(cfg.num_train_epochs), desc=\"Epoch\", disable=cfg.local_rank not in [-1, 0])\n set_seed(cfg) # Added here for reproducibility (even between python 2 and 3)\n\n for epoch in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=cfg.local_rank not in [-1, 0], dynamic_ncols=True)\n if cfg.local_rank != -1:\n train_dataloader.sampler.set_epoch(epoch)\n\n for step, batch in enumerate(epoch_iterator):\n # If training is continued from a checkpoint, fast forward\n # to the state of that checkpoint.\n if global_step < continue_from_global_step:\n if (step + 1) % cfg.gradient_accumulation_steps == 0:\n scheduler.step() # Update learning rate schedule\n global_step += 1\n continue\n\n model.train()\n batch = batch_to_device(batch, cfg.device)\n\n if (step + 1) % cfg.gradient_accumulation_steps != 0 and cfg.local_rank != -1:\n # Avoid unnecessary DDP synchronization since there will be no backward pass on this example.\n with model.no_sync():\n loss = forward_step(model, batch, cfg, scaler)\n else:\n loss = forward_step(model, batch, cfg, scaler)\n\n tr_loss += loss\n if (step + 1) % cfg.gradient_accumulation_steps == 0:\n if cfg.fp16:\n scaler.unscale_(optimizer)\n\n if cfg.max_grad_norm:\n if hasattr(optimizer, \"clip_grad_norm\"):\n optimizer.clip_grad_norm(cfg.max_grad_norm)\n elif hasattr(model, \"clip_grad_norm_\"):\n model.clip_grad_norm_(cfg.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), cfg.max_grad_norm)\n\n if cfg.fp16:\n scaler.step(optimizer)\n scaler.update()\n else:\n optimizer.step()\n\n scheduler.step() # Update learning rate schedule\n model.zero_grad(set_to_none=True)\n global_step += 1\n\n # Log metrics\n if cfg.local_rank in [-1, 0] and cfg.logging_steps > 0 and global_step % cfg.logging_steps == 0:\n tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)\n tb_writer.add_scalar('loss', (tr_loss - logging_loss) / cfg.logging_steps, global_step)\n logging_loss = tr_loss\n\n # Save model checkpoint\n if cfg.save_steps > 0 and global_step % cfg.save_steps == 0:\n output_dir = os.path.join(cfg.output_dir, 'checkpoint-{}'.format(global_step))\n if cfg.local_rank in [-1, 0] and not os.path.exists(output_dir):\n os.makedirs(output_dir)\n save_model(model, cfg, output_dir, tokenizer)\n\n # Evaluation\n if cfg.evaluate_during_training and cfg.eval_steps > 0 and global_step % cfg.eval_steps == 0:\n state_dict = model.state_dict()\n\n if cfg.local_rank in [-1, 0]:\n results = evaluate(cfg, model, tokenizer, prefix=str(global_step), _split=\"dev\")\n for key, value in results.items():\n tb_writer.add_scalar(f\"eval/{key}\", value, global_step)\n\n sub_path = os.path.join(cfg.output_dir, 'checkpoint-{}'.format(global_step))\n flag = note_best_checkpoint(cfg, results, sub_path)\n if cfg.save_best and flag:\n if cfg.local_rank == 0:\n unwrap_model(model).save_pretrained(cfg.output_dir, 
state_dict=state_dict)\n else:\n model.save_pretrained(cfg.output_dir)\n\n tokenizer.save_pretrained(cfg.output_dir)\n OmegaConf.save(cfg, os.path.join(cfg.output_dir, \"training_config.yaml\"))\n logger.info(\"Saving best model checkpoint to %s\", cfg.output_dir)\n\n if 0 < cfg.max_steps < global_step:\n epoch_iterator.close()\n break\n\n if 0 < cfg.max_steps < global_step:\n train_iterator.close()\n break\n\n if cfg.local_rank in [-1, 0]:\n tb_writer.close()\n\n return global_step, tr_loss / global_step\n\n\ndef evaluate(cfg, model, tokenizer: PreTrainedTokenizer, prefix=\"\", _split=\"dev\"):\n dataset, features = load_and_cache_examples(cfg, tokenizer, _split=_split)\n\n if not os.path.exists(os.path.join(cfg.output_dir, prefix)):\n os.makedirs(os.path.join(cfg.output_dir, prefix))\n\n cfg.eval_batch_size = cfg.per_gpu_eval_batch_size\n eval_sampler = SequentialSampler(dataset) # Note that DistributedSampler samples randomly\n eval_collator = hydra.utils.instantiate(cfg.collator) if \"collator\" in cfg and cfg.collator else None\n eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=cfg.eval_batch_size,\n collate_fn=eval_collator)\n single_model_gpu = unwrap_model(model)\n single_model_gpu.get_eval_log(reset=True)\n # Eval!\n torch.cuda.empty_cache()\n logger.info(\"***** Running evaluation {}.{} *****\".format(_split, prefix))\n logger.info(\" Num examples = %d\", len(dataset))\n logger.info(\" Batch size = %d\", cfg.eval_batch_size)\n # Seems FSDP does not need to unwrap the model for evaluating.\n model.eval()\n pred_list = []\n prob_list = []\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\", dynamic_ncols=True):\n batch = batch_to_device(batch, cfg.device)\n with torch.cuda.amp.autocast():\n with torch.no_grad():\n outputs = model(**batch)\n probs = outputs[\"logits\"].softmax(dim=-1).detach().float().cpu()\n prob, pred = probs.max(dim=-1)\n pred_list.extend(pred.tolist())\n prob_list.extend(prob.tolist())\n\n metric_log, results = single_model_gpu.get_eval_log(reset=True)\n logger.info(\"****** Evaluation Results ******\")\n logger.info(f\"Global Steps: {prefix}\")\n logger.info(metric_log)\n\n prediction_file = os.path.join(cfg.output_dir, prefix, \"eval_predictions.npy\")\n np.save(prediction_file, pred_list)\n json.dump(prob_list, open(os.path.join(cfg.output_dir, prefix, \"eval_probs.json\"), \"w\"))\n\n return results\n\n\ndef load_and_cache_examples(cfg, tokenizer: PreTrainedTokenizer, _split=\"train\"):\n if cfg.local_rank not in [-1, 0] and _split == \"train\":\n dist.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n\n if _split == \"train\":\n input_file = cfg.train_file\n elif _split == \"dev\":\n input_file = cfg.dev_file\n elif _split == \"test\":\n input_file = cfg.test_file\n else:\n raise RuntimeError(_split)\n\n examples, features, tensors = hydra.utils.call(cfg.read_tensor, file_path=input_file, tokenizer=tokenizer)\n\n if cfg.local_rank == 0 and _split == \"train\":\n dist.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n\n dataset = TensorDataset(*tensors)\n\n return dataset, features\n\n\[email protected](config_path=\"conf\", config_name=\"config\")\ndef main(cfg: DictConfig):\n if cfg.local_rank == -1 or cfg.no_cuda:\n device = str(torch.device(\"cuda\" if torch.cuda.is_available() and not cfg.no_cuda else \"cpu\"))\n cfg.n_gpu = torch.cuda.device_count()\n else: # Initializes the 
distributed backend which will take care of synchronizing nodes/GPUs\n torch.cuda.set_device(cfg.local_rank)\n device = str(torch.device(\"cuda\", cfg.local_rank))\n dist.init_process_group(backend='nccl')\n cfg.n_gpu = 1\n cfg.world_size = dist.get_world_size()\n cfg.device = device\n\n global logger\n logger = setting_logger(cfg.output_dir, local_rank=cfg.local_rank)\n logger.warning(\"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n cfg.local_rank, device, cfg.n_gpu, bool(cfg.local_rank != -1), cfg.fp16)\n\n # Set seed\n set_seed(cfg)\n\n # Load pre-trained model and tokenizer\n if cfg.local_rank not in [-1, 0]:\n dist.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n if cfg.pretrain:\n pretrain_state_dict = torch.load(cfg.pretrain, map_location='cpu')\n else:\n pretrain_state_dict = None\n\n tokenizer = AutoTokenizer.from_pretrained(cfg.model_name_or_path)\n model = hydra.utils.call(cfg.model, cfg.model_name_or_path, state_dict=pretrain_state_dict)\n\n if cfg.local_rank == 0:\n dist.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n if cfg.local_rank == -1: # For FullyShardedDDP, place the model on cpu first.\n model.to(cfg.device)\n\n # logger.info(\"Training/evaluation parameters %s\", OmegaConf.to_yaml(cfg))\n if cfg.local_rank in [-1, 0] and cfg.do_train:\n if not os.path.exists(cfg.output_dir):\n os.makedirs(cfg.output_dir)\n OmegaConf.save(cfg, os.path.join(cfg.output_dir, \"training_config.yaml\"))\n\n # Training\n if cfg.do_train:\n # TODO: Add option for continuously training from checkpoint.\n # The operation should be introduced in ``train`` method since both the state dict\n # of schedule and optimizer (and scaler, if any) should be loaded.\n # If output files already exists, assume to continue training from latest checkpoint (unless overwrite_output_dir is set)\n continue_from_global_step = 0 # If set to 0, start training from the beginning\n # if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:\n # checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/*/' + WEIGHTS_NAME, recursive=True)))\n # if len(checkpoints) > 0:\n # checkpoint = checkpoints[-1]\n # logger.info(\"Resuming training from the latest checkpoint: %s\", checkpoint)\n # continue_from_global_step = int(checkpoint.split('-')[-1])\n # model = model_class.from_pretrained(checkpoint)\n # model.to(args.device)\n\n train_dataset, features = load_and_cache_examples(cfg, tokenizer, _split=\"train\")\n global_step, tr_loss = train(cfg, train_dataset, features, model, tokenizer, continue_from_global_step)\n logger.info(\" global_step = %s, average loss = %s\", global_step, tr_loss)\n\n # Test\n results = {}\n if cfg.do_eval and cfg.local_rank in [-1, 0]:\n checkpoints = [cfg.output_dir]\n if cfg.save_best:\n logging.getLogger(\"transformers.modeling_utils\").setLevel(logging.WARN) # Reduce logging\n elif cfg.prediction_cfg.best_checkpoint and os.path.exists(cfg.prediction_cfg.best_checkpoint):\n checkpoints = [cfg.prediction_cfg.best_checkpoint]\n logging.getLogger(\"transformers.modeling_utils\").setLevel(logging.WARN) # Reduce logging\n elif cfg.eval_sub_path:\n checkpoints = list(\n os.path.dirname(c) for c in\n sorted(glob.glob(cfg.output_dir + f\"/{cfg.eval_sub_path}/\" + \"pytorch_model.bin\", recursive=True))\n )\n 
logging.getLogger(\"transformers.modeling_utils\").setLevel(logging.WARN) # Reduce logging\n logger.info(\" the following checkpoints: %s\", checkpoints)\n for checkpoint in checkpoints:\n global_step = checkpoint.split(\"-\")[-1] if len(checkpoints) > 1 else \"\"\n prefix = checkpoint.split(\"/\")[-1] if checkpoint.find(\"checkpoint\") != -1 else \"\"\n split = \"dev\"\n\n model = hydra.utils.call(cfg.model, checkpoint)\n model.to(device)\n\n if cfg.test_file:\n prefix = f'test' + (f'-{prefix}' if prefix != \"\" else \"\")\n split = \"test\"\n\n result = evaluate(cfg, model, tokenizer, prefix=prefix, _split=split)\n result = dict((k + \"_{}\".format(global_step), v) for k, v in result.items())\n results.update(result)\n\n return results\n\n\nif __name__ == \"__main__\":\n hydra_formatted_args = []\n # convert the cli params added by torch.distributed.launch into Hydra format\n for arg in sys.argv:\n if arg.startswith(\"--\"):\n hydra_formatted_args.append(arg[len(\"--\"):])\n else:\n hydra_formatted_args.append(arg)\n sys.argv = hydra_formatted_args\n\n main()\n"
] | [
[
"torch.load",
"torch.utils.data.DataLoader",
"torch.cuda.amp.grad_scaler.GradScaler",
"torch.cuda.amp.autocast",
"torch.no_grad",
"torch.utils.tensorboard.SummaryWriter",
"torch.cuda.is_available",
"torch.device",
"torch.distributed.init_process_group",
"torch.utils.data.distributed.DistributedSampler",
"torch.utils.data.TensorDataset",
"numpy.save",
"torch.distributed.barrier",
"torch.cuda.empty_cache",
"torch.cuda.device_count",
"torch.distributed.get_world_size",
"torch.cuda.set_device",
"torch.utils.data.SequentialSampler",
"torch.utils.data.RandomSampler",
"torch.nn.DataParallel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
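Note on the record above: `reclor_trainer_base_v2.py` combines FullyShardedDDP, mixed precision, and gradient accumulation. The following is a stripped-down sketch of just the accumulation-plus-AMP stepping pattern used in `forward_step`/`train`, with placeholder `model`, `loader`, `optimizer`, and `scheduler` objects; like the trainer, it assumes the model returns a dict containing a "loss" entry. It is not code from the repository.

```python
import torch

def train_epoch(model, loader, optimizer, scheduler, device="cuda", accum_steps=4):
    """Sketch of AMP + gradient accumulation as in forward_step()/train() above."""
    scaler = torch.cuda.amp.GradScaler()
    model.train()
    for step, batch in enumerate(loader):
        batch = {k: v.to(device) for k, v in batch.items()}
        with torch.cuda.amp.autocast():
            # assumes the model returns a dict with a "loss" entry
            loss = model(**batch)["loss"] / accum_steps
        scaler.scale(loss).backward()
        if (step + 1) % accum_steps == 0:
            scaler.unscale_(optimizer)   # so clipping acts on unscaled gradients
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            scaler.step(optimizer)
            scaler.update()
            scheduler.step()
            optimizer.zero_grad(set_to_none=True)
```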
ChandreyeeB/Blind-Image-Deconvolution-using-Deep-Generative-Priors | [
"4198bd2d325a32ffc4e714c486540e63440ab110"
] | [
"deblurring_celeba_algorithm_1.py"
] | [
"import tensorflow as tf\nimport keras.backend as K\nimport numpy as np\nfrom Utils import *\nfrom generators.MotionBlurGenerator import *\nfrom generators.CelebAGenerator import *\nK.set_learning_phase(0)\nfrom glob import glob\nimport os\n\n\n# paths\nOrig_Path = './results/CelebA/Original Images/*.png'\nRange_Path = './results/CelebA/Range Images/*.png'\nBlur_Path = './results/CelebA/Original Blurs/Test Blurs.npy'\n\n# constants\nREGULARIZORS = [0.01 , 0.01]\nRANDOM_RESTARTS = 10\nNOISE_STD = 0.01\nSTEPS = 10000\nIMAGE_RANGE = [-1,1]\n\ndef step_size(t):\n return 0.01 * np.exp( - t / 1000 )\n\nSAVE_PATH = './results/CelebA/deblurring - alg1 - ' +str(int(NOISE_STD*100)) + 'perc noise - ' +str(RANDOM_RESTARTS) + 'RR/deblurring_'\n# -----------------------------------------------------------------------\n\n# loading test blur images\nW = np.load(Blur_Path) \nBLUR_RES = W.shape[1]\n\n# loading test celeba images\nX_Orig = np.array([ imread(path) for path in glob(Orig_Path)])/255\nX_Range = np.array([ imread(path) for path in glob(Range_Path)])/255\nIMAGE_RES = X_Orig.shape[1]\nCHANNELS = X_Orig.shape[-1]\n\n# loading celeba generator\nCelebAGen = CelebAGenerator()\nCelebAGen.GenerateModel()\nCelebAGen.LoadWeights()\nCelebAGAN = CelebAGen.GetModels()\nceleba_latent_dim = CelebAGen.latent_dim\n\n# loading motion blur generator\nBLURGen = MotionBlur()\nBLURGen.GenerateModel()\nBLURGen.LoadWeights()\nblur_vae, blur_encoder, blur_decoder = BLURGen.GetModels()\nblur_latent_dim = BLURGen.latent_dim\n\n# check if save dir exists, if not create a new one\ntry:\n os.stat(SAVE_PATH[:-11])\nexcept:\n os.mkdir(SAVE_PATH[:-11])\n\n# generating blurry images from test\nY_np = []\nBlurry_Images = []\nfor i in tqdm(range(len(X_Orig)), ascii=True, desc ='Gen-Test-Blurry'):\n x_np = X_Orig[i]\n w_np = W[i]\n y_np, y_f = GenerateBlurry(x_np, w_np, noise_std = NOISE_STD )\n Y_np.append(y_np)\n for _ in range(RANDOM_RESTARTS):\n Blurry_Images.append(y_f)\n\nY_np = np.array(Y_np)\nBlurry_Images = np.array(Blurry_Images)\n\n# generating blurry images from range\nBlurry_Images_range = []\nY_np_range = []\nfor i in tqdm(range(len(X_Orig)), ascii=True, desc ='Gen-Range-Blurry'):\n y_np, y_f = GenerateBlurry(X_Range[i], W[i], noise_std = NOISE_STD )\n Y_np_range.append(y_np)\n for _ in range(RANDOM_RESTARTS):\n Blurry_Images_range.append(y_f)\n\nY_np_range = np.array(Y_np_range)\nBlurry_Images_range = np.array(Blurry_Images_range)\n\n\n# alternating gradient descent for test images\nimage_gradients, blur_gradients, get_loss = Generate_Gradient_Functions(rr = Blurry_Images.shape[0],\n reg = REGULARIZORS, image_range = IMAGE_RANGE,\n decoder = CelebAGAN, blur_decoder = blur_decoder,\n image_res = IMAGE_RES, blur_res = BLUR_RES,\n channels = CHANNELS)\nm_hat, h_hat, Loss = Optimize_Parallel(blurry_fourier = Blurry_Images, stepsize=step_size,steps = STEPS,\n image_grad = image_gradients , blur_grad = blur_gradients, \n getloss = get_loss, latent_image_dim = celeba_latent_dim , latent_blur_dim = blur_latent_dim)\nX_hat_test = []\nW_hat_test = []\nfor i in range(len(X_Orig)):\n m_hat_i = m_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]\n h_hat_i = h_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]\n Loss_i = Loss[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]\n x_hat_test, w_hat_test, loss_last_iter_test = Get_Min_Loss(Loss_i, m_hat_i, h_hat_i, decoder = CelebAGAN, blur_decoder = blur_decoder,\n latent_image_dim = celeba_latent_dim, latent_blur_dim = blur_latent_dim, print_grad=False) \n X_hat_test.append(x_hat_test)\n 
W_hat_test.append(w_hat_test)\n\nX_hat_test = np.array(X_hat_test)\nW_hat_test = np.array(W_hat_test)\n\n# alternating gradient descent for range images\nm_hat, h_hat, Loss = Optimize_Parallel(blurry_fourier = Blurry_Images_range, stepsize=step_size,steps = STEPS,\n image_grad = image_gradients , blur_grad = blur_gradients, \n getloss = get_loss, latent_image_dim = celeba_latent_dim , latent_blur_dim = blur_latent_dim)\nX_hat_range = []\nW_hat_range = []\nfor i in range(len(X_Orig)):\n m_hat_i = m_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]\n h_hat_i = h_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]\n Loss_i = Loss[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]\n x_hat_range, w_hat_range, loss_last_iter_range = Get_Min_Loss(Loss_i, m_hat_i, h_hat_i, decoder = CelebAGAN, blur_decoder = blur_decoder,\n latent_image_dim = celeba_latent_dim, latent_blur_dim = blur_latent_dim, print_grad=False) \n X_hat_range.append(x_hat_range)\n W_hat_range.append(w_hat_range)\n\nX_hat_range = np.array(X_hat_range)\nW_hat_range = np.array(W_hat_range)\n\nX_hat_test = (X_hat_test + 1)/2\nX_hat_range = (X_hat_range + 1)/2\nMax = 10**len(str(len(X_Orig)-1))\n\n# saving results\nfor i in range(len(X_Orig)):\n Save_Results(path = SAVE_PATH + str(i+Max)[1:], \n x_np = None, \n w_np = None,\n y_np = Y_np[i], \n y_np_range = Y_np_range[i] , \n x_hat_test = X_hat_test[i], \n w_hat_test = W_hat_test[i], \n x_range = None, \n x_hat_range = X_hat_range[i], \n w_hat_range = W_hat_range[i], clip=True)"
] | [
[
"numpy.exp",
"numpy.load",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
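Note on the record above: the deblurring script runs `RANDOM_RESTARTS` optimizations per test image and keeps the restart with the lowest final loss (via `Get_Min_Loss`). The helper below is a small NumPy sketch of that grouping-and-selection bookkeeping with placeholder arrays; the function name `select_best_restarts` is hypothetical and the actual generator-based optimization is not reproduced.

```python
import numpy as np

def select_best_restarts(latents, losses, num_images, restarts):
    """Pick, per image, the restart with the lowest final loss.

    latents: array of shape (num_images * restarts, latent_dim)
    losses:  array of shape (num_images * restarts,)
    """
    best = []
    for i in range(num_images):
        group = slice(i * restarts, (i + 1) * restarts)   # this image's restarts
        best_idx = np.argmin(losses[group])
        best.append(latents[group][best_idx])
    return np.stack(best)                                 # (num_images, latent_dim)
```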
dmontemayor/CRPM | [
"e896831fad7bed42d17574b137e600fc5adbf6b0",
"e896831fad7bed42d17574b137e600fc5adbf6b0"
] | [
"crpm/pvalue.py",
"crpm/gradientdecent.py"
] | [
"\"\"\" Calcualte p-values, ROC, AUC, and proportion of significant observations for\na set of observations given the null hypothesis distribution\n\n Args:\n variable: array of observed values\n hypothesis: optional null hypotheis distribution (beta distribution by default)\n alpha: optional significance parameter (.05 by default)\n Returns:\n pvalues: for every observation in variable\n ROC: on a grid of 1000 points\n AUC: integral of ROC\n proportion of significant observations\n\"\"\"\n\nimport numpy as np\n\ndef pvalue(variable=None, hypothesis=None, alpha=.05):\n \"\"\" calcualte pvalues, AUC and fraction of significant observations\n \"\"\"\n #set model\n if variable is None:\n variable = np.random.beta(a=3, b=5, size=5000)\n\n else:\n variable = np.array(variable)\n\n #set null-hypothesis\n if hypothesis is None:\n hypothesis = np.random.beta(a=5, b=5, size=1000)\n else:\n hypothesis = np.array(hypothesis)\n\n #calculate prob of left-tail event p(H<=x|H) for every instance of X\n prob = []\n for var in variable:\n prob.append((hypothesis <= var).sum())\n #normalize p\n prob = np.divide(prob, hypothesis.size)\n\n #scan alpha from 0 to 1 and find prob(p<=alpha)\n scanprob = []\n alphagrid = np.linspace(0, 1, num=1000)\n for val in alphagrid:\n #calculate prob p<=alpha\n scanprob.append((prob <= val).sum() / variable.size)\n\n return prob, scanprob, np.sum(prob) / alphagrid.size, (prob <= alpha).sum() /variable.size\n\ndef lefttailpvalue(variable=None, hypothesis=None):\n \"\"\" calcualte left-tail pvalues\n \"\"\"\n #set model\n if variable is None:\n variable = np.random.beta(a=3, b=5, size=5000)\n\n else:\n variable = np.array(variable)\n\n #set null-hypothesis\n if hypothesis is None:\n hypothesis = np.random.beta(a=5, b=5, size=1000)\n else:\n hypothesis = np.array(hypothesis)\n\n #calculate prob of left-tail event p(H<=x|H) for every instance of X\n prob = []\n for var in variable:\n prob.append((hypothesis <= var).sum())\n #normalize p\n prob = np.divide(prob, hypothesis.size)\n\n return prob\n\ndef righttailpvalue(variable=None, hypothesis=None):\n \"\"\" calcualte left-tail pvalues\n \"\"\"\n #set model\n if variable is None:\n variable = np.random.beta(a=3, b=5, size=5000)\n\n else:\n variable = np.array(variable)\n\n #set null-hypothesis\n if hypothesis is None:\n hypothesis = np.random.beta(a=5, b=5, size=1000)\n else:\n hypothesis = np.array(hypothesis)\n\n #calculate prob of right-tail event p(H>=x|H) for every instance of X\n prob = []\n for var in variable:\n prob.append((hypothesis >= var).sum())\n #normalize p\n prob = np.divide(prob, hypothesis.size)\n\n return prob\n",
"\"\"\" NN training by gradient decent\n\"\"\"\n\ndef gradientdecent(model, data, targets, lossname, validata=None,\n valitargets=None, maxepoch=1E6, earlystop=False,\n healforces=True, finetune=6):\n \"\"\"train fnn model by gradient decent\n\n Args:\n model: FFN object or as the body in FFN class\n data: training data with features in columns and observation in rows\n targets: labels with targets in columns and observation in rows\n lossname: loss function string defined in crmp.lossfunctions\n validata: data used to calculate out-sample error\n valitargets: targets used to calculate out-sample error\n maxiteration: hard limit of learning iterations default is 10000\n Returns: final predictions and cost along with exit condition.\n Exit conditions are 0) learning converged, 1) learning not\n converged, 2) learning was stopped early, and -1) learning diverged.\n Training will modify model.\n \"\"\"\n\n import numpy as np\n from crpm.dynamics import setupdynamics\n #from crpm.dynamics import normalizelearningrate\n from crpm.dynamics import computecost\n from crpm.dynamics import computeforces\n from crpm.dynamics import maxforce\n from crpm.ffn_bodyplan import copy_ffn\n from crpm.ffn import FFN\n\n #convergence test constants\n #alpha norm scales learning rate by max force relative to weight\n alpha_norm = 10**(-finetune)\n #alpha_norm = 1E-8#7#5E-6\n #alpha_norm = 1E-7#5 #scales learning rate by max force relative to weight\n nbuffer = 500\n maxslope = -1E-6 #max learning slope should be negative but close to zero\n tgrid = np.array(range(nbuffer))\n tsum = np.sum(tgrid)\n tvar = nbuffer*np.sum(np.multiply(tgrid, tgrid))-tsum*tsum\n\n #setup dynamics if requested (allows for reinit to heal bad forces)\n if healforces:\n forces = setupdynamics(model, data, targets, lossname)\n else:\n forces = computeforces(model, data, targets, lossname)\n\n #check if using validation set\n is_validating = not ((validata is None) or (valitargets is None))\n\n #define out-sample error calculator\n def out_sample_error():\n if is_validating:\n pred, cost = computecost(model, validata, valitargets, lossname)\n else:\n pred, cost = computecost(model, data, targets, lossname)\n return pred, cost\n #calculate out-sample error\n _, cost = out_sample_error()\n\n #init best error and model\n best_cost = np.copy(cost)\n if isinstance(model, FFN):\n best_model = model.copy()\n else:\n best_model = copy_ffn(model)\n\n\n #iterate training until:\n # 1) cost converges - defined as when slope of costbuffer is greater than to -1e-6\n # or\n # 2) out-sample error increases\n # or\n # 3) cost diverges - defined true when cost > 1E16\n # or\n # 4) too many iterations - hardcoded to ensure loop exit\n continuelearning = True\n #Do not do any learning if maxepoch is not a positive integer\n if maxepoch<1 :\n continuelearning = False\n count = 0\n exitcond = 0\n while continuelearning:\n\n #clear cost buffer\n costbuffer = []\n\n #normalize learning rate alpha based on current forces\n alpha = alpha_norm * maxforce(model, forces)\n #alpha = normalizelearningrate(model, forces, alpha_norm)\n\n #loop for training steps in buffer\n for i in tgrid:\n\n #update current learning step\n count += 1\n\n #update body wieghts and biases\n body = model\n if isinstance(model, FFN):\n body = model.body\n\n #loop over layer\n for layer in forces:\n index = layer[\"layer\"]\n body[index][\"weight\"] = body[index][\"weight\"] + alpha * layer[\"fweight\"]\n body[index][\"bias\"] = body[index][\"bias\"] + alpha * layer[\"fbias\"]\n\n #compute 
forces\n forces = computeforces(model, data, targets, lossname)\n\n #record cost\n _, cost = computecost(model, data, targets, lossname)\n costbuffer.append(cost)\n\n #calculate cost slope to check for convergence\n slope = nbuffer*np.sum(np.multiply(tgrid, costbuffer))-tsum*np.sum(costbuffer)\n slope = slope/tvar\n\n #calculate out-sample error\n _, cost = out_sample_error()\n\n #Record best error and save model\n if cost <= best_cost:\n best_cost = np.copy(cost)\n if isinstance(model, FFN):\n best_model = model.copy()\n else:\n best_model = copy_ffn(model)\n\n\n # - EXIT CONDITIONS -\n #exit if learning is taking too long\n if count > int(maxepoch):\n print(\"Warning gradientdecent.py: Training is taking a long time!\"+\n \" - Try increaseing maxepoch - Training will end\")\n exitcond = 1\n continuelearning = False\n #exit if learning has plateaued\n if slope > maxslope:\n exitcond = 0\n continuelearning = False\n #exit if early stopping and error has risen\n if earlystop and cost > best_cost:\n print(\"early stopping\")\n exitcond = 2\n continuelearning = False\n #exit if cost has diverged\n if cost > 1E16:\n print(\"Warning gradientdecent.py: diverging cost function \"+\n \"- try lowering learning rate or inc regularization constant \"+\n \"- training will end.\")\n exitcond = -1\n continuelearning = False\n\n #return best model\n if isinstance(model, FFN):\n best_model = model.copy()\n else:\n best_model = copy_ffn(model)\n\n #return predictions and cost\n return (*out_sample_error(),exitcond)\n"
] | [
[
"numpy.random.beta",
"numpy.linspace",
"numpy.array",
"numpy.sum",
"numpy.divide"
],
[
"numpy.copy",
"numpy.sum",
"numpy.multiply"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tywang89/pyprobml | [
"82cfdcb8daea653cda8f77e8737e585418476ca7"
] | [
"book/linreg_poly_vs_degree.py"
] | [
"# Plot polynomial regression on 1d problem\n# Based on https://github.com/probml/pmtk3/blob/master/demos/linregPolyVsDegree.m\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pyprobml_utils import save_fig\n\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import MinMaxScaler \nimport sklearn.metrics \nfrom sklearn.metrics import mean_squared_error as mse\n\ndef make_1dregression_data(n=21):\n np.random.seed(0)\n xtrain = np.linspace(0.0, 20, n)\n xtest = np.arange(0.0, 20, 0.1)\n sigma2 = 4\n w = np.array([-1.5, 1/9.])\n fun = lambda x: w[0]*x + w[1]*np.square(x)\n ytrain = fun(xtrain) + np.random.normal(0, 1, xtrain.shape) * \\\n np.sqrt(sigma2)\n ytest= fun(xtest) + np.random.normal(0, 1, xtest.shape) * \\\n np.sqrt(sigma2)\n return xtrain, ytrain, xtest, ytest\n\nxtrain, ytrain, xtest, ytest = make_1dregression_data(n=21)\n\n#Rescaling data\nscaler = MinMaxScaler(feature_range=(-1, 1))\nXtrain = scaler.fit_transform(xtrain.reshape(-1, 1))\nXtest = scaler.transform(xtest.reshape(-1, 1))\n\n\ndegs = np.arange(1, 21, 1)\nndegs = np.max(degs)\nmse_train = np.empty(ndegs)\nmse_test = np.empty(ndegs)\nytest_pred_stored = np.empty(ndegs, dtype=np.ndarray)\nytrain_pred_stored = np.empty(ndegs, dtype=np.ndarray)\nfor deg in degs:\n model = LinearRegression()\n poly_features = PolynomialFeatures(degree=deg, include_bias=False)\n Xtrain_poly = poly_features.fit_transform(Xtrain)\n model.fit(Xtrain_poly, ytrain)\n ytrain_pred = model.predict(Xtrain_poly)\n ytrain_pred_stored[deg-1] = ytrain_pred\n Xtest_poly = poly_features.transform(Xtest)\n ytest_pred = model.predict(Xtest_poly)\n mse_train[deg-1] = mse(ytrain_pred, ytrain) \n mse_test[deg-1] = mse(ytest_pred, ytest)\n ytest_pred_stored[deg-1] = ytest_pred\n \n# Plot MSE vs degree\nfig, ax = plt.subplots()\nmask = degs <= 15\nax.plot(degs[mask], mse_test[mask], color = 'r', marker = 'x',label='test')\nax.plot(degs[mask], mse_train[mask], color='b', marker = 's', label='train')\nax.legend(loc='upper right', shadow=True)\nplt.xlabel('degree')\nplt.ylabel('mse')\nsave_fig('polyfitVsDegree.pdf')\nplt.show()\n\n# Plot fitted functions\nchosen_degs = [1, 2, 14, 20]\nfor deg in chosen_degs:\n fig, ax = plt.subplots()\n ax.scatter(xtrain, ytrain)\n ax.plot(xtest, ytest_pred_stored[deg-1])\n ax.set_ylim((-10, 15))\n plt.title('degree {}'.format(deg))\n save_fig('polyfitDegree{}.pdf'.format(deg))\n plt.show()\n \n# Plot residuals\n#https://blog.minitab.com/blog/adventures-in-statistics-2/why-you-need-to-check-your-residual-plots-for-regression-analysis\nchosen_degs = [1, 2, 14, 20]\nfor deg in chosen_degs:\n fig, ax = plt.subplots()\n ypred = ytrain_pred_stored[deg-1]\n residuals = ytrain - ypred\n ax.plot(ypred, residuals, 'o')\n ax.set_xlabel('predicted y')\n ax.set_ylabel('residual')\n plt.title('degree {}. 
Predictions on the training set'.format(deg))\n save_fig('polyfitDegree{}Residuals.pdf'.format(deg))\n plt.show()\n\n\n# Plot fit vs actual\n# https://blog.minitab.com/blog/adventures-in-statistics-2/regression-analysis-how-do-i-interpret-r-squared-and-assess-the-goodness-of-fit \nchosen_degs = [1, 2, 14, 20]\nfor deg in chosen_degs:\n for train in [True, False]:\n if train:\n ytrue = ytrain\n ypred = ytrain_pred_stored[deg-1]\n dataset = 'Train'\n else:\n ytrue = ytest\n ypred = ytest_pred_stored[deg-1]\n dataset = 'Test'\n fig, ax = plt.subplots()\n ax.scatter(ytrue, ypred)\n ax.plot(ax.get_xlim(), ax.get_ylim(), ls=\"--\", c=\".3\")\n ax.set_xlabel('true y')\n ax.set_ylabel('predicted y')\n r2 = sklearn.metrics.r2_score(ytrue, ypred)\n plt.title('degree {}. R2 on {} = {:0.3f}'.format(deg, dataset, r2))\n save_fig('polyfitDegree{}FitVsActual{}.pdf'.format(deg, dataset))\n plt.show()"
] | [
[
"numpy.square",
"numpy.sqrt",
"numpy.random.seed",
"numpy.linspace",
"numpy.arange",
"matplotlib.pyplot.subplots",
"sklearn.preprocessing.PolynomialFeatures",
"sklearn.metrics.mean_squared_error",
"numpy.max",
"numpy.random.normal",
"sklearn.linear_model.LinearRegression",
"sklearn.preprocessing.MinMaxScaler",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.empty",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
haymrpig/Pytorch_template | [
"9a0eda43b2da27807461b305ed42e1bd7c1341dd"
] | [
"baseline/utils/mainFunctions.py"
] | [
"import numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\nfrom tqdm import tqdm\n\nclass _BaseWrapper():\n def __init__(self, model):\n super().__init__()\n self.model = model\n self.handlers = []\n\n def forward(self, images):\n self.image_shape = images.shape[2:]\n print(self.image_shape)\n self.logits = self.model(images)\n self.probs = F.softmax(self.logits, dim=1)\n return self.probs.sort(dim=1, descending=True)\n\n def backward(self, ids):\n one_hot = F.one_hot(ids, self.logits.shape[-1])\n one_hot = one_hot.squeeze()\n self.model.zero_grad()\n self.logits.backward(gradient=one_hot, retain_graph=True)\n # gradient는 해당 index에 대해서만 미분을 통한 backpropagation을 하겠다는 의미이다. \n # 즉, 내가 확인하고 싶은 class에 대해서 featuremap이 얼마나 영향을 미쳤는지 확인할 수 있다. \n\n def generate(self):\n raise NotImplementedError\n\n\nclass GradCAM(_BaseWrapper):\n def __init__(self, model, layers=None):\n super().__init__(model)\n self.feature_map = {}\n self.grad_map = {}\n self.layers = layers\n\n def save_fmaps(key):\n def forward_hook(module, input, output):\n self.feature_map[key]=output.detach()\n\n return forward_hook\n\n def save_grads(key):\n def backward_hook(modeul, grad_in, grad_out):\n self.grad_map[key] = grad_out[0].detach()\n\n return backward_hook\n\n for name, module in self.model.named_modules():\n if self.layers is None or name in self.layers:\n self.handlers.append(module.register_forward_hook(save_fmaps(name)))\n self.handlers.append(module.register_backward_hook(save_grads(name)))\n\n def findLayers(self, layers, target_layer):\n if target_layer in layers.keys():\n return layers[target_layer]\n else:\n raise ValueError(f\"{target_layer} not exists\")\n\n def generate(self, target_layer):\n feature_maps = self.findLayers(self.feature_map, target_layer)\n grad_maps = self.findLayers(self.grad_map, target_layer)\n weights = F.adaptive_avg_pool2d(grad_maps, 1)\n grad_cam = torch.mul(feature_maps, weights).sum(dim=1, keepdim=True)\n grad_cam = F.relu(grad_cam)\n grad_cam = F.interpolate(grad_cam, self.image_shape, mode=\"bilinear\", align_corners=False)\n B, C, H, W = grad_cam.shape\n # C는 1인듯?\n\n grad_cam = grad_cam.view(B, -1)\n grad_cam -= grad_cam.min(dim=1, keepdim=True)[0]\n # 양수 만들어주려고 하는듯\n grad_cam /= grad_cam.max(dim=1, keepdim=True)[0]\n grad_cam = grad_cam.view(B, C, H, W)\n\n return grad_cam\n\n"
] | [
[
"torch.nn.functional.softmax",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.functional.relu",
"torch.mul",
"torch.nn.functional.interpolate",
"torch.nn.functional.one_hot"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JamesBrofos/Thresholds-in-Hamiltonian-Monte-Carlo | [
"7ee1b530db0eb536666dbc872fbf8200e53dd49b",
"7ee1b530db0eb536666dbc872fbf8200e53dd49b",
"7ee1b530db0eb536666dbc872fbf8200e53dd49b"
] | [
"hmc/tests/test_cox_poisson.py",
"hmc/applications/cox_poisson/prior.py",
"hmc/averaging.py"
] | [
"import unittest\n\nimport numpy as np\n\nfrom hmc.applications.cox_poisson import forward_transform, inverse_transform, generate_data, gaussian_posterior_factory, hyperparameter_posterior_factory\nfrom hmc.applications.cox_poisson.prior import log_prior, grad_log_prior, hess_log_prior, grad_hess_log_prior\n\n\nclass TestCoxPoisson(unittest.TestCase):\n def test_prior(self):\n def transformed_log_prior(qt):\n return log_prior(*inverse_transform(qt)[0])\n\n transformed_grad_log_prior = lambda qt: grad_log_prior(*qt)\n transformed_hess_log_prior = lambda qt: hess_log_prior(*qt)\n transformed_grad_hess_log_prior = lambda qt: grad_hess_log_prior(*qt)\n\n q = np.random.uniform(size=(2, ))\n qt, _ = forward_transform(q)\n\n delta = 1e-5\n\n u = np.random.normal(size=qt.shape)\n fd = (transformed_log_prior(qt + 0.5*delta*u) - transformed_log_prior(qt - 0.5*delta*u)) / delta\n dd = transformed_grad_log_prior(qt)@u\n self.assertTrue(np.allclose(fd, dd))\n\n fd = (transformed_grad_log_prior(qt + 0.5*delta*u) - transformed_grad_log_prior(qt - 0.5*delta*u)) / delta\n dd = transformed_hess_log_prior(qt)@u\n self.assertTrue(np.allclose(fd, dd))\n\n fd = (transformed_hess_log_prior(qt + 0.5*delta*u) - transformed_hess_log_prior(qt - 0.5*delta*u)) / delta\n dd = transformed_grad_hess_log_prior(qt)@u\n self.assertTrue(np.allclose(fd, dd))\n\n def test_gaussian_posterior(self):\n sigmasq, beta = np.random.uniform(size=(2, ))\n mu = np.log(126.0) - sigmasq / 2.0\n dist, x, y = generate_data(10, mu, beta, sigmasq)\n\n euclidean_auxiliaries, metric = gaussian_posterior_factory(dist, mu, sigmasq, beta, y)\n log_posterior = lambda x: euclidean_auxiliaries(x)[0]\n grad_log_posterior = lambda x: euclidean_auxiliaries(x)[1]\n delta = 1e-6\n\n u = np.random.normal(size=x.shape)\n fd = (log_posterior(x + 0.5*delta*u) - log_posterior(x - 0.5*delta*u)) / delta\n dd = grad_log_posterior(x)@u\n self.assertTrue(np.allclose(fd, dd))\n\n def test_hyperparameter_posterior(self):\n sigmasq, beta = np.random.uniform(size=(2, ))\n mu = np.log(126.0) - sigmasq / 2.0\n dist, x, y = generate_data(16, mu, beta, sigmasq)\n\n log_posterior, metric, _, euclidean_auxiliaries, riemannian_auxiliaries = hyperparameter_posterior_factory(dist, mu, x, y)\n\n grad_log_posterior = lambda qt: euclidean_auxiliaries(qt)[1]\n grad_metric = lambda qt: riemannian_auxiliaries(qt)[3]\n\n q = np.array([sigmasq, beta])\n qt, _ = forward_transform(q)\n\n delta = 1e-4\n u = np.random.normal(size=(2, ))\n fd = (log_posterior(qt + 0.5*delta*u) - log_posterior(qt - 0.5*delta*u)) / delta\n dd = grad_log_posterior(qt)@u\n self.assertTrue(np.allclose(fd, dd))\n\n fd = (metric(qt + 0.5*delta*u) - metric(qt - 0.5*delta*u)) / delta\n dd = grad_metric(qt)@u\n self.assertTrue(np.allclose(fd, dd))\n",
"from typing import Tuple\n\nimport numpy as np\n\n\ndef gamma_logpdf(x: float, k: float, theta: float) -> float:\n \"\"\"Log-density of the Gamma distribution up to a constant factor.\n\n Args:\n x: Positive number at which to evaluate the Gamma distribution.\n k: Shape parameter of the Gamma distribution.\n theta: Scale parameter of the Gamma distribution.\n\n Returns:\n out: The log-density of the Gamma distribution.\n\n \"\"\"\n return (k - 1.0)*np.log(x) - x / theta\n\ndef grad_gamma_logpdf(x: float, k: float, theta: float) -> float:\n \"\"\"Gradient of the log-density of the Gamma distribution.\n\n Args:\n x: Positive number at which to evaluate the Gamma distribution.\n k: Shape parameter of the Gamma distribution.\n theta: Scale parameter of the Gamma distribution.\n\n Returns:\n out: The gradient of the log-density of the Gamma distribution.\n\n \"\"\"\n return (k - 1.0) / x - np.reciprocal(theta)\n\ndef hess_gamma_logpdf(x: float, k: float, theta: float) -> float:\n \"\"\"Hessian of the log-density of the Gamma distribution.\n\n Args:\n x: Positive number at which to evaluate the Gamma distribution.\n k: Shape parameter of the Gamma distribution.\n theta: Scale parameter of the Gamma distribution.\n\n Returns:\n out: The Hessian of the log-density of the Gamma distribution.\n\n \"\"\"\n return -(k - 1.0) / np.square(x)\n\ndef grad_hess_gamma_logpdf(x: float, k: float, theta: float) -> float:\n \"\"\"Third-order derivatives of the log-density of the Gamma distribution.\n\n Args:\n x: Positive number at which to evaluate the Gamma distribution.\n k: Shape parameter of the Gamma distribution.\n theta: Scale parameter of the Gamma distribution.\n\n Returns:\n out: The third-order derivative of the log-density of the Gamma\n distribution.\n\n \"\"\"\n return 2.0*(k - 1.0) / np.power(x, 3.0)\n\ndef log_prior(sigmasq: float, beta: float) -> float:\n \"\"\"The log-prior of the log-Gaussian Cox-Poisson model.\n\n Args:\n sigmasq: Amplitude of the Gaussian process kernel.\n beta: Length scale of the Gaussian process kernel.\n\n Returns:\n lp: The log-density of the prior distribution.\n\n \"\"\"\n lp = gamma_logpdf(beta, 2.0, 0.5)\n lp += gamma_logpdf(sigmasq, 2.0, 0.5)\n return lp\n\ndef grad_log_prior(phis: float, phib: float) -> Tuple[float]:\n \"\"\"Gradient of the log-prior with respect to the reparameterized model\n parameters that are unconstrained.\n\n Args:\n phis: Reparameterized aplitude of the Gaussian process kernel.\n phib: Reparameterized length scale of the Gaussian process kernel.\n\n Returns:\n out: The gradient of the log-prior with respect to the reparameterized\n model parameters.\n\n \"\"\"\n sigmasq = np.exp(phis)\n beta = np.exp(phib)\n dphis = grad_gamma_logpdf(sigmasq, 2.0, 0.5) * sigmasq\n dphib = grad_gamma_logpdf(beta, 2.0, 0.5) * beta\n return np.array((dphis, dphib))\n\ndef hess_log_prior(phis: float, phib: float) -> np.ndarray:\n \"\"\"Compute the hessian of the log-prior with respect to the reparameterized\n model parameters.\n\n Args:\n phis: Reparameterized aplitude of the Gaussian process kernel.\n phib: Reparameterized length scale of the Gaussian process kernel.\n\n Returns:\n H: The Hessian of the log-prior with respect to the reparameterized model\n parameters.\n\n \"\"\"\n sigmasq = np.exp(phis)\n beta = np.exp(phib)\n H = np.array([\n [grad_gamma_logpdf(sigmasq, 2.0, 0.5)*sigmasq + np.square(sigmasq)*hess_gamma_logpdf(sigmasq, 2.0, 0.5), 0.0],\n [0.0, grad_gamma_logpdf(beta, 2.0, 0.5)*beta + np.square(beta)*hess_gamma_logpdf(beta, 2.0, 0.5)]\n ])\n 
return H\n\ndef grad_hess_log_prior(phis: float, phib: float) -> np.ndarray:\n \"\"\"Compute the third-order derivatives of the log-prior with respect to the\n reparameterized model parameters.\n\n Args:\n phis: Reparameterized aplitude of the Gaussian process kernel.\n phib: Reparameterized length scale of the Gaussian process kernel.\n\n Returns:\n dH: The third-order derivatives of the log-prior.\n\n \"\"\"\n sigmasq = np.exp(phis)\n beta = np.exp(phib)\n dH = np.zeros((2, 2, 2))\n a = sigmasq*grad_gamma_logpdf(sigmasq, 2.0, 0.5)\n a += np.square(sigmasq)*hess_gamma_logpdf(sigmasq, 2.0, 0.5)\n a += 2.0*sigmasq*hess_gamma_logpdf(sigmasq, 2.0, 0.5)\n a += np.square(sigmasq)*grad_hess_gamma_logpdf(sigmasq, 2.0, 0.5)\n b = beta*grad_gamma_logpdf(beta, 2.0, 0.5)\n b += np.square(beta)*hess_gamma_logpdf(beta, 2.0, 0.5)\n b += 2.0*beta*hess_gamma_logpdf(beta, 2.0, 0.5)\n b += np.square(beta)*grad_hess_gamma_logpdf(beta, 2.0, 0.5)\n dH = np.array([\n [[a, 0.0], [0.0, 0.0]],\n [[0.0, 0.0], [0.0, b]]\n ])\n return dH\n",
"import numpy as np\n\n\nclass DualAveraging:\n def __init__(\n self,\n x0: float,\n mu: float,\n gamma: float,\n t0: int,\n omega: float,\n maxval: float=np.inf,\n minval: float=-np.inf\n ):\n self.x = x0\n self.xb = x0\n self.mu = mu\n self.gamma = gamma\n self.t0 = t0\n self.omega = omega\n self.t = 0\n self.summ = 0.0\n self.maxval, self.minval = maxval, minval\n\n @property\n def eta(self):\n return self.t**-self.omega\n\n def update(self, new_term: float):\n self.t += 1\n rt = np.sqrt(self.t) / (self.gamma * (self.t + self.t0))\n self.summ += new_term\n self.x = self.mu - rt*self.summ\n self.x = np.clip(self.x, self.minval, self.maxval)\n eta = self.eta\n self.xb = eta*self.x + (1-eta)*self.xb\n\nclass RuppertAveraging:\n def __init__(\n self,\n x0: float,\n omega: float,\n maxval: float=np.inf,\n minval: float=-np.inf\n ):\n self.x = x0\n self.xb = x0\n self.omega = omega\n self.t = 0\n self.maxval, self.minval = maxval, minval\n\n @property\n def eta(self):\n return self.t**-self.omega\n\n def update(self, new_term: float):\n self.t += 1\n self.x -= self.eta*new_term\n self.x = np.clip(self.x, self.minval, self.maxval)\n self.xb = self.t / (self.t + 1) * self.xb + self.x / (self.t + 1)\n"
] | [
[
"numpy.log",
"numpy.allclose",
"numpy.random.normal",
"numpy.random.uniform",
"numpy.array"
],
[
"numpy.square",
"numpy.log",
"numpy.power",
"numpy.exp",
"numpy.reciprocal",
"numpy.array",
"numpy.zeros"
],
[
"numpy.sqrt",
"numpy.clip"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
UF-f1tenth/F1tenth-UFL | [
"93b0a822c67b2b425664642955342138e65974f4"
] | [
"Object detection and depth estimation/catkin_ws/src/f110-fall2018-skeltons/labs/wall_following/scripts/utils/other.py"
] | [
"\"\"\"\nCreated on Fri Oct 29 18:54:18 2021\n\n@author: Krishna Nuthalapati\n\"\"\"\n\nimport numpy as np\n\ndef iou(boxA, boxB):\n\t# determine the (x, y)-coordinates of the intersection rectangle\n\txA = max(boxA[0], boxB[0])\n\tyA = max(boxA[1], boxB[1])\n\txB = min(boxA[2], boxB[2])\n\tyB = min(boxA[3], boxB[3])\n\t# compute the area of intersection rectangle\n\tinterArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)\n\t# compute the area of both the prediction and ground-truth\n\t# rectangles\n\tboxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)\n\tboxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)\n\t# compute the intersection over union by taking the intersection\n\t# area and dividing it by the sum of prediction + ground-truth\n\t# areas - the interesection area\n\tiou_score = interArea / float(boxAArea + boxBArea - interArea)\n\t# return the intersection over union value\n\treturn iou_score\n\ndef nms(boxes, scores, thresh):\n num_boxes = boxes.shape[0]\n indices = np.zeros((num_boxes), dtype=int)\n # print(\"PRINTING : \", num_boxes)\n for i in range(num_boxes):\n if indices[i] == -1:\n continue\n for j in range(i+1, num_boxes):\n if indices[j] == -1:\n continue\n \n base_box = boxes[i]\n curr_box = boxes[j]\n iou_score = iou(base_box, curr_box)\n \n if iou_score >= thresh:\n if scores[i]>scores[j]:\n indices[i] = 1\n indices[j] = -1\n continue\n indices[j] = 1\n indices[i] = -1\n \n idxs = np.where(indices == 1)[0]\n \n return idxs\n"
] | [
[
"numpy.zeros",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
alstonlo/fgh-gnn | [
"099aee925a3c5077070803d31b6e45793972239c"
] | [
"fgh_gnn/data/graph_builder.py"
] | [
"import itertools\n\nimport dgl\nimport torch\nfrom rdkit import Chem\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse.csgraph import minimum_spanning_tree\n\nfrom fgh_gnn.utils import FGROUP_MOLS, get_ring_fragments, ogb_graph_to_mol\n\n\nclass FGroupHetGraphBuilder:\n\n def __init__(self, vocab):\n self.vocab = vocab\n\n self.fgroup_vocab = vocab.loc[vocab['type'] == 'fgroup']\n\n self.ring_vocab = vocab.loc[vocab['type'] == 'ring']\n self.ring_smiles_set = set(self.ring_vocab['name'].unique())\n self.misc_ring_idx = len(vocab) - 1\n\n def build_fgroup_heterograph(self, raw_graph):\n\n atom_feats = torch.from_numpy(raw_graph['node_feat'])\n bond_feats = torch.from_numpy(raw_graph['edge_feat'])\n a2a_edges = torch.from_numpy(raw_graph['edge_index'])\n\n # build tree\n mol = ogb_graph_to_mol(raw_graph)\n clusters = self._make_clusters(mol)\n cluster_feats = torch.tensor([c.features for c in clusters],\n dtype=torch.long)\n\n c2atom_edges, atom2c_edges = self._make_inter_edges(clusters)\n c2c_edges, overlap_feats = \\\n self._make_intracluster_edges(raw_graph, clusters)\n\n data_dict = {\n ('atom', 'bond', 'atom'): (a2a_edges[0], a2a_edges[1]),\n ('cluster', 'refine', 'atom'): (c2atom_edges[0], c2atom_edges[1]),\n ('atom', 'pool', 'cluster'): (atom2c_edges[0], atom2c_edges[1]),\n ('cluster', 'overlap', 'cluster'): (c2c_edges[0], c2c_edges[1])\n }\n num_nodes_dict = {\n 'atom': raw_graph['num_nodes'],\n 'cluster': len(clusters)\n }\n\n g = dgl.heterograph(data_dict=data_dict, num_nodes_dict=num_nodes_dict)\n\n g.nodes['atom'].data['x'] = atom_feats\n g.nodes['cluster'].data['x'] = cluster_feats\n\n g.edges['bond'].data['x'] = bond_feats\n g.edges['overlap'].data['x'] = overlap_feats\n\n return g\n\n def _make_clusters(self, mol):\n\n clusters = []\n\n # add all functional groups\n for row in self.fgroup_vocab.itertuples():\n\n row_idx = row.Index\n\n fgroup_query = FGROUP_MOLS[row.name]\n matches = mol.GetSubstructMatches(fgroup_query)\n\n for match_idxs in matches:\n clusters.append(Cluster(row_idx, 'fgroup', match_idxs))\n\n # add all rings\n for ring_idxs in get_ring_fragments(mol):\n\n ring_smiles = Chem.MolFragmentToSmiles(mol, list(ring_idxs),\n isomericSmiles=False,\n kekuleSmiles=True)\n\n if ring_smiles in self.ring_smiles_set:\n row_idx = self.ring_vocab.index[self.ring_vocab['name']\n == ring_smiles]\n row_idx = int(row_idx[0])\n else:\n row_idx = self.misc_ring_idx\n\n clusters.append(Cluster(row_idx, 'ring', ring_idxs))\n\n # add all remaining singular atoms\n leftover_atoms = set(range(mol.GetNumAtoms()))\n for cluster in clusters:\n leftover_atoms.difference_update(cluster.atom_idxs)\n\n for atom_idx in leftover_atoms:\n atomic_num = mol.GetAtomWithIdx(atom_idx).GetAtomicNum()\n clusters.append(Cluster(atomic_num, 'atom', (atom_idx,)))\n\n return clusters\n\n def _make_inter_edges(self, clusters):\n\n c2atom_edges = [[], []]\n atom2c_edges = [[], []]\n\n for cluster_idx, cluster in enumerate(clusters):\n for atom_idx in cluster.atom_idxs:\n c2atom_edges[0].append(cluster_idx)\n c2atom_edges[1].append(atom_idx)\n\n atom2c_edges[0].append(atom_idx)\n atom2c_edges[1].append(cluster_idx)\n\n c2atom_edges = torch.tensor(c2atom_edges, dtype=torch.long)\n atom2c_edges = torch.tensor(atom2c_edges, dtype=torch.long)\n\n return c2atom_edges, atom2c_edges\n\n def _make_intracluster_edges(self, raw_graph, clusters):\n\n edge_index = raw_graph['edge_index']\n\n edge_dict = {i: set() for i in range(raw_graph['num_nodes'])}\n for i, j in zip(edge_index[0], edge_index[1]):\n 
edge_dict[i].add(j)\n\n num_clusters = len(clusters)\n adj_matrix = [[0] * num_clusters for _ in range(num_clusters)]\n\n cluster_neighbours = []\n for cluster in clusters:\n neighbours = set()\n for atom_idx in cluster.atom_idxs:\n neighbours.add(atom_idx)\n neighbours.update(edge_dict[atom_idx])\n cluster_neighbours.append(neighbours)\n\n for i, j in itertools.combinations(range(num_clusters), r=2):\n ci, cj = clusters[i], clusters[j]\n\n if ci.atom_idxs & cj.atom_idxs:\n edge_weight = len(ci.atom_idxs & cj.atom_idxs) + 1\n elif cluster_neighbours[i] & cluster_neighbours[j]:\n edge_weight = 1\n else:\n continue\n\n adj_matrix[i][j] = edge_weight\n adj_matrix[j][i] = edge_weight\n\n # build spanning tree\n adj_matrix = csr_matrix(adj_matrix)\n span_tree = minimum_spanning_tree(adj_matrix, overwrite=True)\n adj_matrix = torch.from_numpy(span_tree.toarray()).long()\n adj_matrix = to_bidirectional(adj_matrix)\n\n # represent as sparse matrix\n adj_matrix = adj_matrix.to_sparse().coalesce()\n edge_index = adj_matrix.indices()\n edge_feats = adj_matrix.values()\n\n return edge_index, edge_feats\n\n\nclass Cluster:\n\n def __init__(self, vocab_id, cluster_type, atom_idxs):\n\n # for sanity\n if not isinstance(vocab_id, int):\n raise ValueError()\n\n self.vocab_id = vocab_id\n self.cluster_type_idx = ('fgroup', 'ring', 'atom').index(cluster_type)\n self.atom_idxs = frozenset(atom_idxs)\n\n self.features = [self.vocab_id, self.cluster_type_idx]\n\n\n# Helper Method\n\ndef to_bidirectional(X):\n X_T = X.t()\n sym_sum = X + X_T\n X_min = torch.min(X, X_T)\n\n return torch.where(X_min > 0, X_min, sym_sum)\n"
] | [
[
"torch.min",
"torch.from_numpy",
"scipy.sparse.csr_matrix",
"torch.tensor",
"scipy.sparse.csgraph.minimum_spanning_tree",
"torch.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wangjiangtao-NJPI/MachineLearning | [
"78124b56a26ec68efb3c517a4a2420860b6e4a75",
"78124b56a26ec68efb3c517a4a2420860b6e4a75",
"78124b56a26ec68efb3c517a4a2420860b6e4a75"
] | [
"g_CNN/Optimizers.py",
"Notebooks/SVM/zh-cn/Util.py",
"NN/PyTorch/__Dev/Networks.py"
] | [
"import os\nimport sys\nroot_path = os.path.abspath(\"../\")\nif root_path not in sys.path:\n sys.path.append(root_path)\n\nimport tensorflow as tf\n\n\nclass Optimizer:\n def __init__(self, lr=1e-3):\n self._lr = lr\n self._opt = None\n\n @property\n def name(self):\n return str(self)\n\n def minimize(self, x, *args, **kwargs):\n return self._opt.minimize(x, *args, **kwargs)\n\n def __str__(self):\n return self.__class__.__name__\n\n def __repr__(self):\n return str(self)\n\n\nclass MBGD(Optimizer):\n def __init__(self, lr=1e-3):\n Optimizer.__init__(self, lr)\n self._opt = tf.train.GradientDescentOptimizer(self._lr)\n\n\nclass Momentum(Optimizer):\n def __init__(self, lr=1e-3, momentum=0.8):\n Optimizer.__init__(self, lr)\n self._opt = tf.train.MomentumOptimizer(self._lr, momentum)\n\n\nclass NAG(Optimizer):\n def __init__(self, lr=1e-3, momentum=0.8):\n Optimizer.__init__(self, lr)\n self._opt = tf.train.MomentumOptimizer(self._lr, momentum, use_nesterov=True)\n\n\nclass AdaDelta(Optimizer):\n def __init__(self, lr=1e-3, rho=0.95, eps=1e-8):\n Optimizer.__init__(self, lr)\n self._opt = tf.train.AdadeltaOptimizer(self._lr, rho, eps)\n\n\nclass AdaGrad(Optimizer):\n def __init__(self, lr=1e-3, init=0.1):\n Optimizer.__init__(self, lr)\n self._opt = tf.train.AdagradOptimizer(self._lr, init)\n\n\nclass Adam(Optimizer):\n def __init__(self, lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8):\n Optimizer.__init__(self, lr)\n self._opt = tf.train.AdamOptimizer(self._lr, beta1, beta2, eps)\n\n\nclass RMSProp(Optimizer):\n def __init__(self, lr=1e-3, decay=0.9, momentum=0.0, eps=1e-10):\n Optimizer.__init__(self, lr)\n self._opt = tf.train.RMSPropOptimizer(self._lr, decay, momentum, eps)\n\n\n# Factory\n\nclass OptFactory:\n\n available_optimizers = {\n \"MBGD\": MBGD, \"Momentum\": Momentum, \"NAG\": NAG,\n \"AdaDelta\": AdaDelta, \"AdaGrad\": AdaGrad,\n \"Adam\": Adam, \"RMSProp\": RMSProp\n }\n\n def get_optimizer_by_name(self, name, lr, *args, **kwargs):\n try:\n optimizer = self.available_optimizers[name](lr, *args, **kwargs)\n return optimizer\n except KeyError:\n raise NotImplementedError(\"Undefined Optimizer '{}' found\".format(name))\n",
"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom math import pi\n\nnp.random.seed(142857)\n\n# 生成简单的测试数据集\ndef gen_two_clusters(size=100, center=0, scale=1, dis=2):\n center1 = (np.random.random(2) + center - 0.5) * scale + dis\n center2 = (np.random.random(2) + center - 0.5) * scale - dis\n cluster1 = (np.random.randn(size, 2) + center1) * scale\n cluster2 = (np.random.randn(size, 2) + center2) * scale\n data = np.vstack((cluster1, cluster2)).astype(np.float32)\n labels = np.array([1] * size + [-1] * size)\n indices = np.random.permutation(size * 2)\n data, labels = data[indices], labels[indices]\n return data, labels\n\n# 生成螺旋线数据集\ndef gen_spiral(size=50, n=4, scale=2):\n xs = np.zeros((size * n, 2), dtype=np.float32)\n ys = np.zeros(size * n, dtype=np.int8)\n for i in range(n):\n ix = range(size * i, size * (i + 1))\n r = np.linspace(0.0, 1, size+1)[1:]\n t = np.linspace(2 * i * pi / n, 2 * (i + scale) * pi / n, size) + np.random.random(size=size) * 0.1\n xs[ix] = np.c_[r * np.sin(t), r * np.cos(t)]\n ys[ix] = 2 * (i % 2) - 1\n return xs, ys\n\n# 画出决策边界;如果只关心算法本身,可以略去这一段代码不看\ndef visualize2d(clf, x, y, draw_background=False):\n axis, labels = np.array(x).T, np.array(y)\n decision_function = lambda xx: clf.predict(xx)\n\n nx, ny, padding = 400, 400, 0.2\n x_min, x_max = np.min(axis[0]), np.max(axis[0])\n y_min, y_max = np.min(axis[1]), np.max(axis[1])\n x_padding = max(abs(x_min), abs(x_max)) * padding\n y_padding = max(abs(y_min), abs(y_max)) * padding\n x_min -= x_padding\n x_max += x_padding\n y_min -= y_padding\n y_max += y_padding\n\n def get_base(nx, ny):\n xf = np.linspace(x_min, x_max, nx)\n yf = np.linspace(y_min, y_max, ny)\n n_xf, n_yf = np.meshgrid(xf, yf)\n return xf, yf, np.c_[n_xf.ravel(), n_yf.ravel()]\n\n xf, yf, base_matrix = get_base(nx, ny)\n z = decision_function(base_matrix).reshape((nx, ny))\n \n labels[labels == -1] = 0\n n_label = len(np.unique(labels))\n xy_xf, xy_yf = np.meshgrid(xf, yf, sparse=True)\n colors = plt.cm.rainbow([i / n_label for i in range(n_label)])[labels]\n\n plt.figure()\n if draw_background:\n plt.pcolormesh(xy_xf, xy_yf, z, cmap=plt.cm.Paired)\n else:\n plt.contour(xf, yf, z, c='k-', levels=[0])\n plt.scatter(axis[0], axis[1], c=colors)\n plt.xlim(x_min, x_max)\n plt.ylim(y_min, y_max)\n plt.show()\n\n",
"import os\nimport cv2\nimport time\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom math import sqrt, ceil\nfrom torch.autograd import Variable\n\nfrom NN.PyTorch.Auto.Layers import *\nfrom NN.Basic.Networks import NNConfig, NNVerbose\nfrom NN.PyTorch.Auto.Optimizers import OptFactory\n\nfrom Util.Util import VisUtil\nfrom Util.ProgressBar import ProgressBar\nfrom Util.Bases import TorchAutoClassifierBase\n\n\n# PyTorch Implementation with auto-grad & custom Optimizers\n\nclass NNDist(TorchAutoClassifierBase):\n NNTiming = Timing()\n\n def __init__(self, **kwargs):\n super(NNDist, self).__init__(**kwargs)\n self._layers, self._weights, self._bias = [], [], []\n self._layer_names, self._layer_shapes, self._layer_params = [], [], []\n self._lr, self._epoch, self._regularization_param = 0, 0, 0\n self.verbose = 1\n\n self._apply_bias = False\n self._current_dimension = 0\n\n self._logs = {}\n self._metrics, self._metric_names = [], []\n\n self._x_min, self._x_max = 0, 0\n self._y_min, self._y_max = 0, 0\n\n self._layer_factory = LayerFactory()\n self._optimizer_factory = OptFactory()\n\n self._available_metrics = {\n \"acc\": NNDist.acc, \"_acc\": NNDist.acc,\n \"f1\": NNDist.f1_score, \"_f1_score\": NNDist.f1_score\n }\n\n @NNTiming.timeit(level=4, prefix=\"[Initialize] \")\n def initialize(self):\n self._layers, self._weights, self._bias = [], [], []\n self._layer_names, self._layer_shapes, self._layer_params = [], [], []\n self._lr, self._epoch, self._regularization_param = 0, 0, 0\n self.verbose = 1\n\n self._apply_bias = False\n self._current_dimension = 0\n\n self._logs = []\n self._metrics, self._metric_names = [], []\n\n self._x_min, self._x_max = 0, 0\n self._y_min, self._y_max = 0, 0\n\n # Property\n\n @property\n def name(self):\n return (\n \"-\".join([str(_layer.shape[1]) for _layer in self._layers]) +\n \" at {}\".format(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n )\n\n @property\n def layer_names(self):\n return [layer.name for layer in self._layers]\n\n @layer_names.setter\n def layer_names(self, value):\n self._layer_names = value\n\n @property\n def layer_shapes(self):\n return [layer.size() for layer in self._layers]\n\n @layer_shapes.setter\n def layer_shapes(self, value):\n self._layer_shapes = value\n\n @property\n def layer_params(self):\n return self._layer_params\n\n @layer_params.setter\n def layer_params(self, value):\n self.layer_params = value\n\n @property\n def layer_special_params(self):\n return [layer.special_params for layer in self._layers]\n\n @layer_special_params.setter\n def layer_special_params(self, value):\n for layer, sp_param in zip(self._layers, value):\n if sp_param is not None:\n layer.set_special_params(sp_param)\n\n # Utils\n\n @NNTiming.timeit(level=4)\n def _get_min_max(self, x, y):\n x, y = x.data.numpy(), y.data.numpy()\n self._x_min, self._x_max = np.min(x), np.max(x)\n self._y_min, self._y_max = np.min(y), np.max(y)\n\n @NNTiming.timeit(level=4, prefix=\"[API] \")\n def _split_data(self, x, y, x_test, y_test,\n train_only, training_scale=NNConfig.TRAINING_SCALE):\n if train_only:\n if x_test is not None and y_test is not None:\n x, y = torch.cat((x, x_test)), torch.cat((y, y_test))\n x_train, y_train, x_test, y_test = x, y, x, y\n else:\n shuffle_suffix = torch.randperm(len(x))\n x, y = x[shuffle_suffix], y[shuffle_suffix]\n if x_test is None or y_test is None:\n train_len = int(len(x) * training_scale)\n x_train, y_train = x[:train_len], y[:train_len]\n x_test, y_test = x[train_len:], 
y[train_len:]\n elif x_test is None or y_test is None:\n raise BuildNetworkError(\"Please provide test sets if you want to split data on your own\")\n else:\n x_train, y_train = x, y\n if NNConfig.BOOST_LESS_SAMPLES:\n if y_train.shape[1] != 2:\n raise BuildNetworkError(\"It is not permitted to boost less samples in multiple classification\")\n y_train_arg = torch.max(y_train, dim=1)[1]\n y0 = y_train_arg == 0\n y1 = ~y0\n y_len, y0_len = len(y_train), torch.sum(y0) # type: float\n if y0_len > int(0.5 * y_len):\n y0, y1 = y1, y0\n y0_len = y_len - y0_len\n boost_suffix = torch.IntTensor(y_len - y0_len).random_(y0_len)\n x_train = torch.cat((x_train[y1], x_train[y0][boost_suffix]))\n y_train = torch.cat((y_train[y1], y_train[y0][boost_suffix]))\n shuffle_suffix = torch.randperm(len(x_train))\n x_train, y_train = x_train[shuffle_suffix], y_train[shuffle_suffix]\n return (x_train, x_test), (y_train, y_test)\n\n @NNTiming.timeit(level=4)\n def _add_weight(self, shape, conv_channel=None, fc_shape=None):\n if fc_shape is not None:\n self._weights.append(Variable(torch.randn(fc_shape, shape[1]), requires_grad=True))\n self._bias.append(Variable(torch.zeros((1, shape[1])), requires_grad=True))\n elif conv_channel is not None:\n if len(shape[1]) <= 2:\n self._weights.append(Variable(\n torch.randn(conv_channel, conv_channel, shape[1][0], shape[1][1]),\n requires_grad=True\n ))\n else:\n self._weights.append(Variable(\n torch.randn(shape[1][0], conv_channel, shape[1][1], shape[1][2]),\n requires_grad=True\n ))\n self._bias.append(Variable(torch.zeros((1, shape[1][0])), requires_grad=True))\n else:\n self._weights.append(Variable(torch.randn(*shape), requires_grad=True))\n self._bias.append(Variable(torch.zeros((1, shape[1])), requires_grad=True))\n\n @NNTiming.timeit(level=4)\n def _add_layer(self, layer, *args, **kwargs):\n if not self._layers and isinstance(layer, str):\n _layer = self._layer_factory.get_root_layer_by_name(layer, *args, **kwargs)\n if _layer:\n self.add(_layer)\n return\n _parent = self._layers[-1]\n if isinstance(_parent, CostLayer):\n raise BuildLayerError(\"Adding layer after CostLayer is not permitted\")\n if isinstance(layer, str):\n layer, shape = self._layer_factory.get_layer_by_name(\n layer, _parent, self._current_dimension, *args, **kwargs\n )\n if shape is None:\n self.add(layer)\n return\n _current, _next = shape\n else:\n _current, _next = args\n if isinstance(layer, SubLayer):\n _parent.child = layer\n layer.is_sub_layer = True\n layer.root = layer.root\n layer.root.last_sub_layer = layer\n self.parent = _parent\n self._layers.append(layer)\n self._weights.append(torch.Tensor(0))\n self._bias.append(torch.Tensor(0))\n self._current_dimension = _next\n else:\n fc_shape, conv_channel, last_layer = None, None, self._layers[-1]\n if isinstance(last_layer, ConvLayer):\n if isinstance(layer, ConvLayer):\n conv_channel = last_layer.n_filters\n _current = (conv_channel, last_layer.out_h, last_layer.out_w)\n layer.feed_shape((_current, _next))\n else:\n layer.is_fc = True\n last_layer.is_fc_base = True\n fc_shape = last_layer.out_h * last_layer.out_w * last_layer.n_filters\n self._layers.append(layer)\n self._add_weight((_current, _next), conv_channel, fc_shape)\n self._current_dimension = _next\n self._update_layer_information(layer)\n\n @NNTiming.timeit(level=4)\n def _update_layer_information(self, layer):\n self._layer_params.append(layer.params)\n if len(self._layer_params) > 1 and not layer.is_sub_layer:\n self._layer_params[-1] = ((self._layer_params[-1][0][1],), 
*self._layer_params[-1][1:])\n\n @NNTiming.timeit(level=1)\n def _get_prediction(self, x, name=None, batch_size=1e6, verbose=None):\n if verbose is None:\n verbose = self.verbose\n fc_shape = np.prod(x.size()[1:]) # type: int\n single_batch = int(batch_size / fc_shape)\n if not single_batch:\n single_batch = 1\n if single_batch >= len(x):\n return self._get_activations(x, predict=True).pop()\n epoch = int(len(x) / single_batch)\n if not len(x) % single_batch:\n epoch += 1\n name = \"Prediction\" if name is None else \"Prediction ({})\".format(name)\n sub_bar = ProgressBar(max_value=epoch, name=name, start=False)\n if verbose >= NNVerbose.METRICS:\n sub_bar.start()\n rs, count = [self._get_activations(x[:single_batch], predict=True).pop()], single_batch\n if verbose >= NNVerbose.METRICS:\n sub_bar.update()\n while count < len(x):\n count += single_batch\n if count >= len(x):\n rs.append(self._get_activations(x[count-single_batch:], predict=True).pop())\n else:\n rs.append(self._get_activations(x[count-single_batch:count], predict=True).pop())\n if verbose >= NNVerbose.METRICS:\n sub_bar.update()\n return torch.cat(rs)\n\n @NNTiming.timeit(level=1)\n def _get_activations(self, x, predict=False):\n activations = [self._layers[0].activate(x, self._weights[0], self._bias[0], predict)]\n for i, layer in enumerate(self._layers[1:]):\n activations.append(layer.activate(\n activations[-1], self._weights[i + 1], self._bias[i + 1], predict))\n return activations\n\n @NNTiming.timeit(level=1)\n def _get_final_activation(self, x, predict=False):\n activation = self._layers[0].activate(x, self._weights[0], self._bias[0], predict)\n for i, layer in enumerate(self._layers[1:]):\n activation = layer.activate(activation, self._weights[i + 1], self._bias[i + 1], predict)\n return activation\n\n @NNTiming.timeit(level=3)\n def _append_log(self, x, y, name, get_loss=True):\n y_pred = self._get_prediction(x, name)\n for i, metric in enumerate(self._metrics):\n self._logs[name][i].append(metric(\n torch.max(y, dim=1)[1].data.numpy(),\n torch.max(y_pred, dim=1)[1].data.numpy()\n ))\n if get_loss:\n self._logs[name][-1].append(\n (self._layers[-1].calculate(y, y_pred) / len(y)).data.numpy()[0]\n )\n\n @NNTiming.timeit(level=3)\n def _print_metric_logs(self, show_loss, data_type):\n print()\n print(\"=\" * 47)\n for i, name in enumerate(self._metric_names):\n print(\"{:<16s} {:<16s}: {:12.8}\".format(\n data_type, name, self._logs[data_type][i][-1]))\n if show_loss:\n print(\"{:<16s} {:<16s}: {:12.8}\".format(\n data_type, \"loss\", self._logs[data_type][-1][-1]))\n print(\"=\" * 47)\n\n # TODO\n @NNTiming.timeit(level=1)\n def _draw_2d_network(self, radius=6, width=1200, height=800, padding=0.2,\n plot_scale=2, plot_precision=0.03,\n sub_layer_height_scale=0, **kwargs):\n if not kwargs[\"show\"] and not kwargs[\"mp4\"]:\n return\n layers = len(self._layers) + 1\n units = [layer.shape[0] for layer in self._layers] + [self._layers[-1].shape[1]]\n whether_sub_layers = np.array([False] + [isinstance(layer, SubLayer) for layer in self._layers])\n n_sub_layers = np.sum(whether_sub_layers) # type: int\n\n plot_num = int(1 / plot_precision)\n if plot_num % 2 == 1:\n plot_num += 1\n half_plot_num = int(plot_num * 0.5)\n xf = torch.linspace(self._x_min * plot_scale, self._x_max * plot_scale, plot_num)\n yf = torch.linspace(self._x_min * plot_scale, self._x_max * plot_scale, plot_num) * -1\n input_xs = Variable(torch.stack([\n xf.repeat(plot_num), yf.repeat(plot_num, 1).t().contiguous().view(-1)\n ], 1))\n\n activations = 
[\n activation.data.numpy().T.reshape(units[i + 1], plot_num, plot_num)\n for i, activation in enumerate(\n self._get_activations(input_xs, predict=True)\n )]\n graphs = []\n for j, activation in enumerate(activations):\n graph_group = []\n if j == len(activations) - 1:\n classes = np.argmax(activation, axis=0)\n else:\n classes = None\n for k, ac in enumerate(activation):\n data = np.zeros((plot_num, plot_num, 3), np.uint8)\n if j != len(activations) - 1:\n mask = ac >= np.average(ac)\n else:\n mask = classes == k\n data[mask], data[~mask] = [0, 165, 255], [255, 165, 0]\n graph_group.append(data)\n graphs.append(graph_group)\n\n img = np.full([height, width, 3], 255, dtype=np.uint8)\n axis0_padding = int(height / (layers - 1 + 2 * padding)) * padding + plot_num\n axis0_step = (height - 2 * axis0_padding) / layers\n sub_layer_decrease = int((1 - sub_layer_height_scale) * axis0_step)\n axis0 = np.linspace(\n axis0_padding,\n height + n_sub_layers * sub_layer_decrease - axis0_padding,\n layers, dtype=np.int)\n axis0 -= sub_layer_decrease * np.cumsum(whether_sub_layers)\n axis1_padding = plot_num\n axis1 = [np.linspace(axis1_padding, width - axis1_padding, unit + 2, dtype=np.int)\n for unit in units]\n axis1 = [axis[1:-1] for axis in axis1]\n\n colors, thicknesses, masks = [], [], []\n for weight in self._weights:\n line_info = VisUtil.get_line_info(weight.data.numpy().copy())\n colors.append(line_info[0])\n thicknesses.append(line_info[1])\n masks.append(line_info[2])\n\n for i, (y, xs) in enumerate(zip(axis0, axis1)):\n for j, x in enumerate(xs):\n if i == 0:\n cv2.circle(img, (x, y), radius, (20, 215, 20), int(radius / 2))\n else:\n graph = graphs[i - 1][j]\n img[y - half_plot_num:y + half_plot_num, x - half_plot_num:x + half_plot_num] = graph\n if i > 0:\n cv2.putText(img, self._layers[i - 1].name, (12, y - 36), cv2.LINE_AA, 0.6, (0, 0, 0), 1)\n for i, y in enumerate(axis0):\n if i == len(axis0) - 1:\n break\n for j, x in enumerate(axis1[i]):\n new_y = axis0[i + 1]\n whether_sub_layer = isinstance(self._layers[i], SubLayer)\n for k, new_x in enumerate(axis1[i + 1]):\n if whether_sub_layer and j != k:\n continue\n if masks[i][j][k]:\n cv2.line(img, (x, y + half_plot_num), (new_x, new_y - half_plot_num),\n colors[i][j][k], thicknesses[i][j][k])\n\n return img\n\n # Optimizing Process\n\n @NNTiming.timeit(level=4)\n def _init_optimizer(self):\n if not isinstance(self._optimizer, Optimizer):\n self._optimizer = self._optimizer_factory.get_optimizer_by_name(\n self._optimizer, self._model_parameters, self._lr, self._epoch)\n\n # Batch Work\n\n @NNTiming.timeit(level=2)\n def _batch_work(self, i, sub_bar, x, y, x_test, y_test,\n draw_weights, weight_trace, show_loss):\n if draw_weights:\n for i, weight in enumerate(self._weights):\n for j, new_weight in enumerate(weight.copy()):\n weight_trace[i][j].append(new_weight)\n if self.verbose >= NNVerbose.DEBUG:\n pass\n if self.verbose >= NNVerbose.ITER:\n if sub_bar.update() and self.verbose >= NNVerbose.METRICS_DETAIL:\n self._append_log(x, y, \"train\", get_loss=show_loss)\n self._append_log(x_test, y_test, \"cv\", get_loss=show_loss)\n self._print_metric_logs(show_loss, \"train\")\n self._print_metric_logs(show_loss, \"cv\")\n\n @NNTiming.timeit(level=2)\n def _predict(self, x, get_raw_results=False, **kwargs):\n rs = self._get_final_activation(x)\n if get_raw_results:\n return rs\n return torch.sign(rs)\n\n # API\n\n @NNTiming.timeit(level=4, prefix=\"[API] \")\n def add(self, layer, *args, **kwargs):\n if isinstance(layer, str):\n # 
noinspection PyTypeChecker\n self._add_layer(layer, *args, **kwargs)\n else:\n if not isinstance(layer, Layer):\n raise BuildLayerError(\"Invalid Layer provided (should be subclass of Layer)\")\n if not self._layers:\n if isinstance(layer, SubLayer):\n raise BuildLayerError(\"Invalid Layer provided (first layer should not be subclass of SubLayer)\")\n if len(layer.shape) != 2:\n raise BuildLayerError(\"Invalid input Layer provided (shape should be {}, {} found)\".format(\n 2, len(layer.shape)\n ))\n self._layers, self._current_dimension = [layer], layer.shape[1]\n self._update_layer_information(layer)\n if isinstance(layer, ConvLayer):\n self._add_weight(layer.shape, layer.n_channels)\n else:\n self._add_weight(layer.shape)\n else:\n if len(layer.shape) > 2:\n raise BuildLayerError(\"Invalid Layer provided (shape should be {}, {} found)\".format(\n 2, len(layer.shape)\n ))\n if len(layer.shape) == 2:\n _current, _next = layer.shape\n if isinstance(layer, SubLayer):\n if _next != self._current_dimension:\n raise BuildLayerError(\"Invalid SubLayer provided (shape[1] should be {}, {} found)\".format(\n self._current_dimension, _next\n ))\n elif not isinstance(layer, ConvLayer) and _current != self._current_dimension:\n raise BuildLayerError(\"Invalid Layer provided (shape[0] should be {}, {} found)\".format(\n self._current_dimension, _current\n ))\n self._add_layer(layer, _current, _next)\n\n elif len(layer.shape) == 1:\n _next = layer.shape[0]\n layer.shape = (self._current_dimension, _next)\n self._add_layer(layer, self._current_dimension, _next)\n else:\n raise LayerError(\"Invalid Layer provided (invalid shape '{}' found)\".format(layer.shape))\n\n # TODO\n @NNTiming.timeit(level=4, prefix=\"[API] \")\n def build(self, units=\"build\"):\n if isinstance(units, str):\n if units == \"build\":\n for name, param in zip(self._layer_names, self._layer_params):\n self.add(name, *param)\n else:\n raise NotImplementedError(\"Invalid param '{}' provided to 'build' method\".format(units))\n else:\n try:\n units = list(units)\n except ValueError as err:\n raise BuildLayerError(err)\n if len(units) < 2:\n raise BuildLayerError(\"At least 2 layers are needed\")\n _input_shape = (units[0], units[1])\n self.initialize()\n self.add(ReLU(_input_shape))\n for unit_num in units[2:-1]:\n self.add(ReLU((unit_num,)))\n self.add(\"CrossEntropy\", (units[-1],))\n\n @NNTiming.timeit(level=4, prefix=\"[API] \")\n def preview(self):\n if not self._layers:\n rs = \"None\"\n else:\n rs = (\n \"Input : {:<10s} - {}\\n\".format(\"Dimension\", self._layers[0].shape[0]) +\n \"\\n\".join([\n \"Layer : {:<16s} - {} {}\".format(\n _layer.name, _layer.shape[1], _layer.description\n ) if isinstance(_layer, SubLayer) else\n \"Layer : {:<16s} - {:<14s} - strides: {:2d} - padding: {:2d} - out: {}\".format(\n _layer.name, str(_layer.shape[1]), _layer.stride, _layer.padding,\n (_layer.n_filters, _layer.out_h, _layer.out_w)\n ) if isinstance(_layer, ConvLayer) else \"Layer : {:<10s} - {}\".format(\n _layer.name, _layer.shape[1]\n ) for _layer in self._layers[:-1]\n ]) + \"\\nCost : {:<10s}\".format(str(self._layers[-1]))\n )\n print(\"=\" * 30 + \"\\n\" + \"Structure\\n\" + \"-\" * 30 + \"\\n\" + rs + \"\\n\" + \"-\" * 30 + \"\\n\")\n\n @NNTiming.timeit(level=1, prefix=\"[API] \")\n def fit(self,\n x, y, x_test=None, y_test=None,\n batch_size=128, record_period=1, train_only=False,\n optimizer=\"Adam\", lr=0.001, lb=0.001, epoch=20, weight_scale=1, apply_bias=True,\n show_loss=True, metrics=None, do_log=True, verbose=None,\n 
visualize=False, visualize_setting=None,\n draw_weights=False, animation_params=None):\n\n self._lr, self._epoch = lr, epoch\n for weight in self._weights:\n weight.data *= weight_scale\n self._model_parameters = self._weights\n if apply_bias:\n self._model_parameters += self._bias\n self._optimizer = optimizer\n self._init_optimizer()\n assert isinstance(self._optimizer, Optimizer)\n print()\n print(\"=\" * 30)\n print(\"Optimizers\")\n print(\"-\" * 30)\n print(self._optimizer)\n print(\"-\" * 30)\n\n if not self._layers:\n raise BuildNetworkError(\"Please provide layers before fitting data\")\n if y.shape[1] != self._current_dimension:\n raise BuildNetworkError(\"Output layer's shape should be {}, {} found\".format(\n self._current_dimension, y.shape[1]))\n\n x, y = self._arr_to_variable(False, x, y)\n if x_test is not None and y_test is not None:\n x_test, y_test = self._arr_to_variable(False, x_test, y_test)\n (x_train, x_test), (y_train, y_test) = self._split_data(\n x, y, x_test, y_test, train_only)\n train_len = len(x_train)\n batch_size = min(batch_size, train_len)\n do_random_batch = train_len > batch_size\n train_repeat = 1 if not do_random_batch else int(train_len / batch_size) + 1\n self._regularization_param = 1 - lb * lr / batch_size\n self._get_min_max(x_train, y_train)\n\n self._metrics = [\"acc\"] if metrics is None else metrics\n for i, metric in enumerate(self._metrics):\n if isinstance(metric, str):\n if metric not in self._available_metrics:\n raise BuildNetworkError(\"Metric '{}' is not implemented\".format(metric))\n self._metrics[i] = self._available_metrics[metric]\n self._metric_names = [_m.__name__ for _m in self._metrics]\n\n self._logs = {\n name: [[] for _ in range(len(self._metrics) + 1)] for name in (\"train\", \"cv\", \"test\")\n }\n if verbose is not None:\n self.verbose = verbose\n\n self._apply_bias = apply_bias\n\n bar = ProgressBar(max_value=max(1, epoch // record_period), name=\"Epoch\", start=False)\n if self.verbose >= NNVerbose.EPOCH:\n bar.start()\n img, ims = None, []\n\n if draw_weights:\n weight_trace = [[[org] for org in weight] for weight in self._weights]\n else:\n weight_trace = []\n\n loss_function = self._layers[-1].calculate\n args = (\n x_train, y_train, x_test, y_test,\n draw_weights, weight_trace, show_loss\n )\n\n *animation_properties, animation_params = self._get_animation_params(animation_params)\n sub_bar = ProgressBar(max_value=train_repeat * record_period - 1, name=\"Iteration\", start=False)\n for counter in range(epoch):\n self._optimizer.update()\n if self.verbose >= NNVerbose.ITER and counter % record_period == 0:\n sub_bar.start()\n self.batch_training(\n x_train, y_train, batch_size, train_repeat, loss_function, sub_bar, *args\n )\n if self.verbose >= NNVerbose.ITER:\n sub_bar.update()\n self._handle_animation(\n counter, x, y, ims, animation_params, *animation_properties,\n img=self._draw_2d_network(**animation_params), name=\"Neural Network\"\n )\n if do_log:\n self._append_log(x, y, \"train\", get_loss=show_loss)\n self._append_log(x_test, y_test, \"cv\", get_loss=show_loss)\n if (counter + 1) % record_period == 0:\n if do_log and self.verbose >= NNVerbose.METRICS:\n self._print_metric_logs(show_loss, \"train\")\n self._print_metric_logs(show_loss, \"cv\")\n if visualize:\n if visualize_setting is None:\n self.visualize2d(x_test, y_test)\n else:\n self.visualize2d(x_test, y_test, *visualize_setting)\n if self.verbose >= NNVerbose.EPOCH:\n bar.update(counter // record_period + 1)\n if self.verbose >= NNVerbose.ITER:\n 
sub_bar = ProgressBar(max_value=train_repeat * record_period - 1, name=\"Iteration\", start=False)\n\n if do_log:\n self._append_log(x_test, y_test, \"test\", get_loss=show_loss)\n if img is not None:\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n if draw_weights:\n ts = np.arange(epoch * train_repeat + 1)\n for i, weight in enumerate(self._weights):\n plt.figure()\n for j in range(len(weight)):\n plt.plot(ts, weight_trace[i][j])\n plt.title(\"Weights toward layer {} ({})\".format(i + 1, self._layers[i].name))\n plt.show()\n self._handle_mp4(ims, animation_properties, \"NN\")\n return self._logs\n\n # TODO\n @NNTiming.timeit(level=2, prefix=\"[API] \")\n def save(self, path=None, name=None, overwrite=True):\n path = os.path.join(\"Models\", \"Cache\") if path is None else os.path.join(\"Models\", path)\n name = \"Model.nn\" if name is None else name\n if not os.path.exists(path):\n os.makedirs(path)\n _dir = os.path.join(path, name)\n if not overwrite and os.path.isfile(_dir):\n _count = 1\n _new_dir = _dir + \"({})\".format(_count)\n while os.path.isfile(_new_dir):\n _count += 1\n _new_dir = _dir + \"({})\".format(_count)\n _dir = _new_dir\n print()\n print(\"=\" * 60)\n print(\"Saving Model to {}...\".format(_dir))\n print(\"-\" * 60)\n with open(_dir, \"wb\") as file:\n pickle.dump({\n \"structures\": {\n \"_layer_names\": self.layer_names,\n \"_layer_params\": self._layer_params,\n \"_cost_layer\": self._layers[-1].name,\n \"_next_dimension\": self._current_dimension\n },\n \"params\": {\n \"_logs\": self._logs,\n \"_metric_names\": self._metric_names,\n \"_weights\": self._weights,\n \"_bias\": self._bias,\n \"_optimizer\": self._optimizer,\n \"layer_special_params\": self.layer_special_params,\n }\n }, file)\n print(\"Done\")\n print(\"=\" * 60)\n\n # TODO\n @NNTiming.timeit(level=2, prefix=\"[API] \")\n def load(self, path=os.path.join(\"Models\", \"Cache\", \"Model.nn\")):\n self.initialize()\n try:\n with open(path, \"rb\") as file:\n dic = pickle.load(file)\n for key, value in dic[\"structures\"].items():\n setattr(self, key, value)\n self.build()\n for key, value in dic[\"params\"].items():\n setattr(self, key, value)\n self._init_optimizer()\n for i in range(len(self._metric_names) - 1, -1, -1):\n name = self._metric_names[i]\n if name not in self._available_metrics:\n self._metric_names.pop(i)\n else:\n self._metrics.insert(0, self._available_metrics[name])\n print()\n print(\"=\" * 30)\n print(\"Model restored\")\n print(\"=\" * 30)\n return dic\n except Exception as err:\n raise BuildNetworkError(\"Failed to load Network ({}), structure initialized\".format(err))\n\n @NNTiming.timeit(level=4, prefix=\"[API] \")\n def predict(self, x, get_raw_results=False, **kwargs):\n if not isinstance(x, Variable):\n x = Variable(torch.from_numpy(np.asarray(x, dtype=np.float32)))\n if len(x.size()) == 1:\n x = x.view(1, -1)\n y_pred = self._get_prediction(x).data.numpy()\n return y_pred if get_raw_results else np.argmax(y_pred, axis=1)\n\n def draw_results(self):\n metrics_log, loss_log = {}, {}\n for key, value in sorted(self._logs.items()):\n metrics_log[key], loss_log[key] = value[:-1], value[-1]\n\n for i, name in enumerate(sorted(self._metric_names)):\n plt.figure()\n plt.title(\"Metric Type: {}\".format(name))\n for key, log in sorted(metrics_log.items()):\n if key == \"test\":\n continue\n xs = np.arange(len(log[i])) + 1\n plt.plot(xs, log[i], label=\"Data Type: {}\".format(key))\n plt.legend(loc=4)\n plt.show()\n plt.close()\n\n plt.figure()\n plt.title(\"Cost\")\n for key, loss in 
sorted(loss_log.items()):\n if key == \"test\":\n continue\n xs = np.arange(len(loss)) + 1\n plt.plot(xs, loss, label=\"Data Type: {}\".format(key))\n plt.legend()\n plt.show()\n\n def draw_conv_weights(self):\n for i, (name, weight) in enumerate(zip(self.layer_names, self._weights)):\n if len(weight.size()) != 4:\n return\n for j, _w in enumerate(weight):\n for k, _ww in enumerate(_w):\n VisUtil.show_img(_ww, \"{} {} filter {} channel {}\".format(name, i+1, j+1, k+1))\n\n def draw_conv_series(self, x, shape=None):\n for xx in x:\n VisUtil.show_img(VisUtil.trans_img(xx, shape), \"Original\")\n activations = self._get_activations(np.array([xx]), predict=True)\n for i, (layer, ac) in enumerate(zip(self._layers, activations)):\n if len(ac.shape) == 4:\n for n in ac:\n _n, height, width = n.shape\n a = int(ceil(sqrt(_n)))\n g = np.ones((a * height + a, a * width + a), n.dtype)\n g *= np.min(n)\n _i = 0\n for y in range(a):\n for x in range(a):\n if _i < _n:\n g[y * height + y:(y + 1) * height + y, x * width + x:(x + 1) * width + x] = n[\n _i, :, :]\n _i += 1\n # normalize to [0,1]\n max_g = g.max()\n min_g = g.min()\n g = (g - min_g) / (max_g - min_g)\n VisUtil.show_img(g, \"Layer {} ({})\".format(i + 1, layer.name))\n else:\n ac = ac[0]\n length = sqrt(np.prod(ac.shape))\n if length < 10:\n continue\n (height, width) = xx.shape[1:] if shape is None else shape[1:]\n sqrt_shape = sqrt(height * width)\n oh, ow = int(length * height / sqrt_shape), int(length * width / sqrt_shape)\n VisUtil.show_img(ac[:oh*ow].reshape(oh, ow), \"Layer {} ({})\".format(i + 1, layer.name))\n"
] | [
[
"tensorflow.train.AdagradOptimizer",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.train.AdadeltaOptimizer",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.train.MomentumOptimizer",
"tensorflow.train.AdamOptimizer"
],
[
"numpy.linspace",
"numpy.max",
"numpy.random.randn",
"numpy.unique",
"numpy.sin",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.min",
"matplotlib.pyplot.ylim",
"numpy.meshgrid",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.random.random",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"numpy.cos",
"matplotlib.pyplot.xlim",
"numpy.random.permutation",
"matplotlib.pyplot.contour",
"matplotlib.pyplot.pcolormesh",
"numpy.vstack"
],
[
"matplotlib.pyplot.legend",
"numpy.linspace",
"numpy.asarray",
"numpy.cumsum",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.arange",
"numpy.full",
"numpy.argmax",
"matplotlib.pyplot.close",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.min",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.ones",
"numpy.prod",
"numpy.average"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
fshp971/mcmc-unlearning | [
"3113dedca6de33bcaf316b804cb9c1e636db7fd5"
] | [
"BNN/forget.py"
] | [
"from datetime import datetime\nimport os\nimport pickle\nimport argparse\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom mcmc_unlearner import sgmcmcUnlearner\nimport utils\nimport models\n\n\nclass myUnlearner(sgmcmcUnlearner):\n def _apply_sample(self, z):\n x, y = z\n if not self.cpu: x, y = x.cuda(), y.cuda()\n self.model.train()\n lo = -self.model.log_prior() + F.cross_entropy(self.model(x), y) * self.model.n\n self.optimizer.zero_grad()\n lo.backward()\n self.optimizer.step()\n\n def _fun(self, z):\n x, y = z\n if not self.cpu: x, y = x.cuda(), y.cuda()\n self.model.train()\n return -self.model.log_prior() + F.cross_entropy(self.model(x), y) * self.model.n\n\n def _z_fun(self, z):\n x, y = z\n if not self.cpu: x, y = x.cuda(), y.cuda()\n self.model.train()\n return F.cross_entropy(self.model(x), y, reduction='sum')\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n utils.add_shared_args(parser)\n\n parser.add_argument('--rm-idx-path', type=str, default=None)\n parser.add_argument('--save-freq', type=int, default=-1)\n\n return parser.parse_args()\n\n\ndef get_forget_idx(dataset, kill_num):\n kill_val = 0\n\n if 'targets' in vars(dataset).keys():\n labels = np.array(dataset.targets)\n elif 'labels' in vars(dataset).keys():\n labels = np.array(dataset.labels)\n else:\n raise NotImplementedError\n\n randidx = np.random.permutation( np.where(labels==kill_val)[0] )\n return randidx[:kill_num]\n\n\ndef evaluate(model, loader, cpu):\n ''' average log predictive probability '''\n loss = utils.AverageMeter()\n acc = utils.AverageMeter()\n\n n = len(loader.sampler.indices)\n\n model.eval()\n for x, y in loader:\n if not cpu: x, y = x.cuda(), y.cuda()\n\n with torch.no_grad():\n _y = model(x)\n lo = - model.log_prior() + F.cross_entropy(_y,y) * n\n lo = lo.item()\n ac = (_y.argmax(dim=1) == y).sum().item() / len(y)\n\n loss.update(lo, len(y))\n acc.update(ac, len(y))\n\n return loss.average(), acc.average()\n\n\ndef forget_eval_one_time(model, train_loader, forgetted_train_loader, test_loader, log):\n remain_train_loss, remain_train_acc = evaluate(model, train_loader, args.cpu)\n forgetted_train_loss, forgetted_train_acc = evaluate(model, forgetted_train_loader, args.cpu)\n test_loss, test_acc = evaluate(model, test_loader, args.cpu)\n\n utils.add_log(log, 'remain_train_loss', remain_train_loss)\n utils.add_log(log, 'remain_train_acc', remain_train_acc)\n utils.add_log(log,'forgetted_train_loss', forgetted_train_loss)\n utils.add_log(log,'forgetted_train_acc', forgetted_train_acc)\n utils.add_log(log, 'test_loss', test_loss)\n utils.add_log(log, 'test_acc', test_acc)\n\n logger.info('remaining train loss {:.2e} \\t train acc {:.2%}'\n .format(remain_train_loss, remain_train_acc))\n logger.info('forgetted train loss {:.2e} \\t train acc {:.2%}'\n .format(forgetted_train_loss, forgetted_train_acc))\n logger.info('test loss {:.2e} \\t test acc {:.2%}'\n .format(test_loss, test_acc))\n logger.info('')\n\n\ndef save_checkpoint(save_dir, save_name, log, model, optimizer):\n with open('{}/{}-log.pkl'.format(save_dir, save_name), 'wb') as f:\n pickle.dump(log, f)\n torch.save({\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n }, '{}/{}-model.pkl'.format(save_dir, save_name))\n\n\ndef main(args, logger):\n ''' retrieve lots of data '''\n trainset, testset = utils.get_dataset(args.dataset)\n\n if args.rm_idx_path is not None:\n with open(args.rm_idx_path, 'rb') as f:\n forgetted_idx = pickle.load(f)\n else:\n forgetted_idx 
= get_forget_idx(trainset, args.ifs_kill_num)\n\n forgetted_idx_loader = utils.IndexBatchSampler(\n batch_size=args.ifs_rm_bs, indices=forgetted_idx)\n\n train_sampler = utils.DataSampler(trainset, args.batch_size)\n\n train_loader = utils.DataLoader(trainset, args.batch_size)\n train_loader.remove(forgetted_idx)\n\n forgetted_train_loader = utils.DataLoader(trainset, args.batch_size)\n forgetted_train_loader.set_sampler_indices(forgetted_idx)\n\n test_loader = utils.DataLoader(testset, args.batch_size)\n ''' end of retrieving data '''\n\n model = utils.get_mcmc_bnn_arch(args.arch, args.dataset, args.prior_sig)\n\n if not args.cpu:\n model.cuda()\n\n args.lr /= len(trainset)\n optimizer = utils.get_optim(model.parameters(), args.optim,\n lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay, sghmc_alpha=args.sghmc_alpha)\n\n model.n = len(train_sampler)\n\n ''' restore model / sampler '''\n state_dict = torch.load(args.resume_path)\n model.load_state_dict(state_dict['model_state_dict'])\n optimizer.load_state_dict(state_dict['optimizer_state_dict'])\n\n ''' for backward compatibility '''\n for group in optimizer.param_groups:\n if 'lr_decay' in group:\n group['lr'] *= group['lr_decay']\n group.pop('lr_decay')\n\n del state_dict\n\n unlearner = myUnlearner(\n model = model,\n optimizer = optimizer,\n params = model.parameters(),\n cpu = args.cpu,\n iter_T = args.ifs_iter_T,\n scaling = args.ifs_scaling,\n samp_T = args.ifs_samp_T,)\n\n log = dict()\n log['user_time'] = 0\n utils.add_log(log, 'forgetted_idx', forgetted_idx)\n\n forget_eval_one_time(model, train_loader, forgetted_train_loader, test_loader, log)\n\n removed_nums = 0\n freq_counter = 0\n\n for ii in forgetted_idx_loader:\n ''' create forget-batch '''\n xx, yy = [], []\n for i in ii:\n x, y = trainset[i]\n if len(x.shape) == 3: x = x.reshape(1, *x.shape)\n xx.append(x)\n yy.append(y)\n xx, yy = torch.cat(xx), torch.tensor(yy)\n ''' end '''\n\n scaling = args.ifs_scaling / len(train_sampler)\n unlearner.param_dict['scaling'] = scaling\n\n ''' start calculation of time '''\n start_time = datetime.now()\n\n unlearner.remove([xx,yy], train_sampler)\n\n torch.cuda.synchronize()\n end_time = datetime.now()\n user_time = (end_time - start_time).total_seconds()\n ''' end calculation of time '''\n\n log['user_time'] += user_time\n\n train_sampler.remove(ii)\n ''' after removal, update the number of remaining datums '''\n unlearner.model.n = len(train_sampler)\n\n removed_nums += len(ii)\n freq_counter += len(ii)\n\n ''' update mcmc sampler '''\n for group in unlearner.optimizer.param_groups:\n group['lr'] *= (len(train_sampler) + len(ii)) / len(train_sampler)\n\n logger.info('remaining trainset size {}'.format(len(train_sampler)))\n logger.info('user time {:.3f} sec \\t'\n 'cumulated user time {:.3f} mins'\n .format(user_time, log['user_time']/60) )\n\n if (args.save_freq > 0) and (freq_counter >= args.save_freq):\n freq_counter = 0\n save_checkpoint(args.save_dir, '{}-ckpt-{}'.format(args.save_name, removed_nums), log, model, optimizer)\n\n forget_eval_one_time(model, train_loader, forgetted_train_loader, test_loader, log)\n\n save_checkpoint(args.save_dir, args.save_name, log, model, optimizer)\n\n return\n\n\nif __name__ == '__main__':\n args = get_args()\n logger = utils.generic_init(args)\n\n try:\n main(args, logger)\n except Exception as e:\n logger.exception('Unexpected exception! %s', e)\n"
] | [
[
"torch.cuda.synchronize",
"torch.load",
"torch.cat",
"torch.nn.functional.cross_entropy",
"torch.tensor",
"torch.no_grad",
"numpy.array",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Nina-pinheiro/Data-Science-Python | [
"b6b2bc28f2f8f925e1b43408330641bd72388232"
] | [
"files/regressao_linear/regressaolinear1.py"
] | [
"# Importar as bibliotecas necessárias\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\nimport seaborn as sns\nfrom sklearn.linear_model import LinearRegression\n\n# Leitura do dataset\n\ndf = pd.read_csv(\"dataset/consumo.csv\") \n\n# Converter uma coluna para numerica\n\ndf['Temperatura Maxima (C)'] = df['Temperatura Maxima (C)'].str.replace(',','.').astype(float)\ndf['Temperatura Minima (C)'] = df['Temperatura Minima (C)'].str.replace(',','.').astype(float)\ndf['Precipitacao (mm)'] = df['Precipitacao (mm)'].str.replace(',','.').astype(float)\ndf['Temperatura Media (C)'] = df['Temperatura Media (C)'].str.replace(',','.').astype(float)\n\n# Análise descritiva\n\ndf.describe()\ndf.head()\ndf.dtypes\ndf.info()\ndf.tail()\ndf.shape\n\n# Verificar quais são os valores faltantes\n\ndf.isnull().sum()\n\n# Remover todos os valores faltantes\ndf.dropna(how = \"all\", inplace = True)\n\n# Copiando um data frame em uma nova variável \n\ndf_feature = df.copy()\n\n# Criação de uma nova feature\n\ndf_feature['variacao'] = (df_feature['Temperatura Maxima (C)']) - (df_feature['Temperatura Minima (C)'])\ndf_feature\n\n# Plotando o gráfico da nova feature\ndf_feature.plot(x='variacao', y = 'Consumo de cerveja (litros)')\nplt.xlabel('variacao', fontsize = 15)\nplt.ylabel('Consumo de cerveja (litros)',fontsize = 15)\nplt.grid()\n\n# Excluindo a coluna data\ndf_feature = df_feature.drop(columns = 'Data')\n\n# Realizar a matriz de correlação\n\ndf_feature.corr().round(3)\n\n# Gráficos\n\nplt.figure()\nsns.pairplot(df_feature,x_vars=['Temperatura Minima (C)','Temperatura Media (C)','Temperatura Maxima (C)','Precipitacao (mm)','variacao'],\n y_vars=['Consumo de cerveja (litros)'],hue='Final de Semana',diag_kind=None)\n\n# Realizar o gráfico de final de semana e consumo de cerveja\nplt.figure(2)\nsns.swarmplot(x='Final de Semana',y='Consumo de cerveja (litros)',data= df_feature)\nplt.grid()\nplt.xlabel('Final de semana')\nplt.ylabel('Consumo de cerveja [L]')\n\n# Realizar o gráfico de final de semana e variacao(nova feature criada)\n\nplt.figure(3)\nsns.swarmplot(x = 'Final de Semana', y = 'variacao', data = df_feature)\nplt.grid()\nplt.xlabel('Final de semana')\nplt.ylabel('variacao')\n\n\n# Utilizando o modelo de regressão linear\nmodelo = LinearRegression()\n\n# Colocando a variável target\ny = df_feature['Consumo de cerveja (litros)'].values #target\n\n# colocando as variaveis independentes neste exemplo pega todos menos consumo de cerveja\nx = df_feature.drop(columns='Consumo de cerveja (litros)').values #fetures\nxColunas = df_feature.drop(columns='Consumo de cerveja (litros)').columns\n\n# Realizando o treinamento \n\nxTrain,xTest,yTrain,yTest = train_test_split(x,y, test_size = 0.3, random_state = 54564541)\n\n# Fitando o modelo\n\nmodelo.fit(xTrain,yTrain)\nyPred = modelo.predict(xTest)\n\n# Calcular os resíduos\n\nres = yPred - yTest\n\n# Testes\n\nprint('Valor de R2: {}'.format(modelo.score(xTest,yTest)))\nprint('Valor MSE: {}' .format(mean_squared_error(yTest,yPred)))\nprint('Coeficientes da regressão: {}'.format(modelo.coef_))\nprint('Intercept da regressão: {} \\n'.format(modelo.intercept_))\n\n"
] | [
[
"pandas.read_csv",
"sklearn.metrics.mean_squared_error",
"matplotlib.pyplot.ylabel",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
alancsouza/chip_clas | [
"e6df8713ae7dd70a5719af83b3b6cb5686f87e29"
] | [
"Experimental setup/Window size test/data6.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\ndata6 = Breast cancer\n\n\"\"\"\nfrom chip_clas_new import chip_clas_new\nimport statistics\nfrom functions import remove_noise\nfrom sklearn.model_selection import train_test_split, KFold\nfrom sklearn.preprocessing import MinMaxScaler\nimport numpy as np\nimport pandas as pd\n\ndata_name = \"Breast cancer\"\nprint(data_name)\n\nurl = 'https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data'\ndata1 = pd.read_csv(url, sep=',', header=None, skiprows=1)\n\ndata = data1.iloc[:,1:].copy() # the first is the id\n\n# converting object data into category dtype\ndata.iloc[:,5] = data.iloc[:,5].astype('category') \n# encoding labels\ndata.iloc[:,5] = data.iloc[:,5].cat.codes\n\nX = data.iloc[:,:-1]\nmin_max_scaler = MinMaxScaler(feature_range=(-1, 1)) # Normalizing data between -1 and 1\nX = pd.DataFrame(min_max_scaler.fit_transform(X))\n\ny = data.iloc[:,-1].copy() # Class: (2 for benign, 4 for malignant cancer)\ny[y == 2] = 1\ny[y == 4] = -1\n\n# Filtering data:\nX_new, y_new = remove_noise(X, y)\n\nX_train, X_test, y_train, y_test = train_test_split(X_new, y_new, test_size=0.2, random_state=42)\n\nf = open(\"results_window_size.txt\", \"a+\")\nf.write(\"\\n\\nDatabase: %s \\n\" % data_name)\nf.write(\"Size before filter: %d \\n\" % X.shape[0])\nf.write(\"Dimension: %d \\n\" % X.shape[1])\n\nf.write(\"Size after filter: %d \\n\" % X_new.shape[0])\nf.write(\"Train Size: %d \\n\" % X_train.shape[0])\n\nwindow_size = [50, 30, 20, 10, 5, 1]\n\nfor split in window_size:\n\n y_hat, y_test, result, runtime, final_split_size, arestas_suporte_size = chip_clas_new(X_train, X_test, y_train, y_test, method = \"parallel\", split_size = split)\n\n\n f.write(\"\\nSplit: %d \\n\" % split)\n f.write(\"AUC: %f \\n\" % result)\n f.write(\"Runtime: %d \\n\" % runtime)\n f.write(\"Final_split_size: %d \\n\" % final_split_size)\n f.write(\"arestas_suporte_size: %d \\n\" % arestas_suporte_size)\n \nf.write(\"#######################################################################\") \nf.close()"
] | [
[
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.MinMaxScaler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
lhcezx/Deteciton_3D | [
"e98b9bb0dd96dfa112e196ec93129caf1ffef39e",
"e98b9bb0dd96dfa112e196ec93129caf1ffef39e"
] | [
"sfa/data_process/transformation.py",
"sfa/data_process/kitti_dataloader.py"
] | [
"import os\nimport sys\nimport math\n\nimport numpy as np\nimport torch\n\nsrc_dir = os.path.dirname(os.path.realpath(__file__))\nwhile not src_dir.endswith(\"sfa\"):\n src_dir = os.path.dirname(src_dir)\nif src_dir not in sys.path:\n sys.path.append(src_dir)\n\nfrom config import kitti_config as cnf\n\n\ndef angle_in_limit(angle):\n # To limit the angle in -pi/2 - pi/2\n limit_degree = 5\n while angle >= np.pi / 2:\n angle -= np.pi\n while angle < -np.pi / 2:\n angle += np.pi\n if abs(angle + np.pi / 2) < limit_degree / 180 * np.pi:\n angle = np.pi / 2\n return angle\n\n# 相机坐标系转雷达坐标系\ndef camera_to_lidar(x, y, z, V2C=None, R0=None, P2=None):\n p = np.array([x, y, z, 1]) # \n if V2C is None or R0 is None:\n p = np.matmul(cnf.R0_inv, p)\n p = np.matmul(cnf.Tr_velo_to_cam_inv, p)\n else:\n # 建立坐标变化矩阵\n R0_i = np.zeros((4, 4))\n R0_i[:3, :3] = R0\n R0_i[3, 3] = 1\n p = np.matmul(np.linalg.inv(R0_i), p) # np.linalg.inv() 求逆矩阵\n p = np.matmul(inverse_rigid_trans(V2C), p)\n p = p[0:3]\n return tuple(p)\n\n# 雷达坐标系转图像坐标系\ndef lidar_to_camera(x, y, z, V2C=None, R0=None, P2=None):\n p = np.array([x, y, z, 1]) # 先将点(x,y,z)变为齐次坐标系\n if V2C is None or R0 is None:\n p = np.matmul(cnf.Tr_velo_to_cam, p) # 将坐标系从雷达坐标坐标系转为相机坐标系\n p = np.matmul(cnf.R0, p) # 将Velodyne坐标中的点x投影到编号为0的相机中点进行修正\n else:\n p = np.matmul(V2C, p)\n p = np.matmul(R0, p)\n p = p[0:3]\n return tuple(p)\n\n\ndef camera_to_lidar_point(points):\n # (N, 3) -> (N, 3)\n N = points.shape[0]\n points = np.hstack([points, np.ones((N, 1))]).T # (N,4) -> (4,N)\n\n points = np.matmul(cnf.R0_inv, points)\n points = np.matmul(cnf.Tr_velo_to_cam_inv, points).T # (4, N) -> (N, 4)\n points = points[:, 0:3]\n return points.reshape(-1, 3)\n\n# \ndef lidar_to_camera_point(points, V2C=None, R0=None):\n # (N, 3) -> (N, 3)\n N = points.shape[0]\n points = np.hstack([points, np.ones((N, 1))]).T # 在水平方向上拼接一个(N,1)的单位向量并转置\n\n if V2C is None or R0 is None:\n points = np.matmul(cnf.Tr_velo_to_cam, points)\n points = np.matmul(cnf.R0, points).T\n else:\n points = np.matmul(V2C, points)\n points = np.matmul(R0, points).T\n points = points[:, 0:3]\n return points.reshape(-1, 3)\n\n# 将相机坐标系下的x,y,z转到雷达坐标系下,同时输出对应的bbox所有信息(x, y, z, h, w, l, rz/y)\ndef camera_to_lidar_box(boxes, V2C=None, R0=None, P2=None):\n # (N, 7) -> (N, 7) x,y,z,h,w,l,r\n ret = []\n for box in boxes:\n x, y, z, h, w, l, ry = box\n # 把相机坐标系x,y,z转换为雷达坐标系x,y,z,并通过ry计算出rz\n (x, y, z), h, w, l, rz = camera_to_lidar(x, y, z, V2C=V2C, R0=R0, P2=P2), h, w, l, -ry - np.pi / 2\n # rz = angle_in_limit(rz)\n ret.append([x, y, z, h, w, l, rz])\n return np.array(ret).reshape(-1, 7)\n\n# 将雷达坐标系下的x,y,z转到相机坐标系下,同时输出对应的bbox所有信息(x, y, z, h, w, l, ry)\ndef lidar_to_camera_box(boxes, V2C=None, R0=None, P2=None):\n # (N, 7) -> (N, 7) x,y,z,h,w,l,r\n # Test模式下读取的prediction结果里面还多一个score\n ret = []\n for box in boxes:\n # x, y, z, h, w, l, rz, score = box\n x, y, z, h, w, l, rz = box\n # 把雷达坐标系下的x,y,z转换为相机坐标系x,y,z\n # (x, y, z), h, w, l, ry, score = lidar_to_camera(x, y, z, V2C=V2C, R0=R0, P2=P2), h, w, l, -rz - np.pi / 2, score\n (x, y, z), h, w, l, ry = lidar_to_camera(x, y, z, V2C=V2C, R0=R0, P2=P2), h, w, l, -rz - np.pi / 2\n # ry = angle_in_limit(ry)\n # ret.append([x, y, z, h, w, l, ry, score])\n ret.append([x, y, z, h, w, l, ry])\n # return np.array(ret).reshape(-1, 8)\n return np.array(ret).reshape(-1, 7)\n\n\ndef center_to_corner_box2d(boxes_center, coordinate='lidar'):\n # (N, 5) -> (N, 4, 2)\n N = boxes_center.shape[0]\n boxes3d_center = np.zeros((N, 7))\n boxes3d_center[:, [0, 1, 4, 5, 6]] = boxes_center\n 
boxes3d_corner = center_to_corner_box3d(boxes3d_center, coordinate=coordinate)\n\n return boxes3d_corner[:, 0:4, 0:2]\n\n# 将中心点坐标表示法变成八个角点坐标表示3dbbox\ndef center_to_corner_box3d(boxes_center, coordinate='lidar'):\n # (N, 7) -> (N, 8, 3)\n N = boxes_center.shape[0]\n ret = np.zeros((N, 8, 3), dtype=np.float32) # 保存每一个样本的3Dbbox的八个角点坐标\n\n if coordinate == 'camera': \n boxes_center = camera_to_lidar_box(boxes_center) # 如果是相机坐标系,则需要转变到雷达坐标系下并输出3dbbox的信息\n # 样本循环\n for i in range(N):\n box = boxes_center[i] \n translation = box[0:3] # x,y,z\n size = box[3:6] # h,w,l\n rotation = [0, 0, box[-1]] # [0, 0, rz]\n\n h, w, l = size[0], size[1], size[2]\n # 3D bbox的八个点\n trackletBox = np.array([ # in velodyne coordinates around zero point and without orientation yet\n [-l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2], \\\n [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2], \\\n [0, 0, 0, 0, h, h, h, h]])\n\n # re-create 3D bounding box in velodyne coordinate system\n yaw = rotation[2] # 绕z轴的偏航角\n rotMat = np.array([\n [np.cos(yaw), -np.sin(yaw), 0.0],\n [np.sin(yaw), np.cos(yaw), 0.0],\n [0.0, 0.0, 1.0]])\n # 根据航向角调整bbox的方向rotation,然后对八个角都加上(x,y,z)中心点坐标,最终获得通过偏航角rz旋转后的3dbbox的八个点坐标\n cornerPosInVelo = np.dot(rotMat, trackletBox) + np.tile(translation, (8, 1)).T # 沿着Y轴复制8个同样的向量,沿着X轴保持不变,最后转置。\n box3d = cornerPosInVelo.transpose()\n ret[i] = box3d\n\n if coordinate == 'camera': # 如果是相机坐标系则需要从雷达坐标系变回相机坐标系\n for idx in range(len(ret)):\n ret[idx] = lidar_to_camera_point(ret[idx])\n\n return ret\n\n\nCORNER2CENTER_AVG = True\n\n# 3dbbox的八个角点表示法变成以3dbbox中心点坐标来表示\ndef corner_to_center_box3d(boxes_corner, coordinate='camera'):\n # (N, 8, 3) -> (N, 7) x,y,z,h,w,l,ry/z\n if coordinate == 'lidar': # 如果是雷达坐标系则需要先变为相机坐标系\n for idx in range(len(boxes_corner)):\n boxes_corner[idx] = lidar_to_camera_point(boxes_corner[idx]) \n\n ret = []\n for roi in boxes_corner:\n if CORNER2CENTER_AVG: # average version\n roi = np.array(roi) # roi = ()\n # 相机坐标系下y轴代表高度\n h = abs(np.sum(roi[:4, 1] - roi[4:, 1]) / 4) # 前四个角点的y轴接近0,后四个角点y轴接近h,对他们四个取平均\n # 前后相邻的两个角点的欧式距离 w = sqrt(x^2+y^2),对四条边求平均值\n # [0, 2]表示x,y坐标\n w = np.sum(\n np.sqrt(np.sum((roi[0, [0, 2]] - roi[3, [0, 2]]) ** 2)) +\n np.sqrt(np.sum((roi[1, [0, 2]] - roi[2, [0, 2]]) ** 2)) +\n np.sqrt(np.sum((roi[4, [0, 2]] - roi[7, [0, 2]]) ** 2)) +\n np.sqrt(np.sum((roi[5, [0, 2]] - roi[6, [0, 2]]) ** 2))\n ) / 4\n # 左右相邻的两个角点的欧式距离 l = sqrt(x^2+y^2),对四条边求平均值\n l = np.sum(\n np.sqrt(np.sum((roi[0, [0, 2]] - roi[1, [0, 2]]) ** 2)) +\n np.sqrt(np.sum((roi[2, [0, 2]] - roi[3, [0, 2]]) ** 2)) +\n np.sqrt(np.sum((roi[4, [0, 2]] - roi[5, [0, 2]]) ** 2)) +\n np.sqrt(np.sum((roi[6, [0, 2]] - roi[7, [0, 2]]) ** 2))\n ) / 4\n x = np.sum(roi[:, 0], axis=0) / 8 # 对八个角点的x坐标求平均值\n y = np.sum(roi[0:4, 1], axis=0) / 4 # 对四个角点的y坐标求平均值\n z = np.sum(roi[:, 2], axis=0) / 8 # 对八个角点的z坐标求平均值\n # 对航向角求平均值\n ry = np.sum(\n math.atan2(roi[2, 0] - roi[1, 0], roi[2, 2] - roi[1, 2]) +\n math.atan2(roi[6, 0] - roi[5, 0], roi[6, 2] - roi[5, 2]) +\n math.atan2(roi[3, 0] - roi[0, 0], roi[3, 2] - roi[0, 2]) +\n math.atan2(roi[7, 0] - roi[4, 0], roi[7, 2] - roi[4, 2]) +\n math.atan2(roi[0, 2] - roi[1, 2], roi[1, 0] - roi[0, 0]) +\n math.atan2(roi[4, 2] - roi[5, 2], roi[5, 0] - roi[4, 0]) +\n math.atan2(roi[3, 2] - roi[2, 2], roi[2, 0] - roi[3, 0]) +\n math.atan2(roi[7, 2] - roi[6, 2], roi[6, 0] - roi[7, 0])\n ) / 8\n if w > l:\n w, l = l, w\n ry = ry - np.pi / 2\n elif l > w:\n l, w = w, l\n ry = ry - np.pi / 2\n ret.append([x, y, z, h, w, l, ry])\n\n else: # max version\n h = max(abs(roi[:4, 
1] - roi[4:, 1])) # 前四个角点的z轴接近0,后四个角点z轴接近h,对他们四个取最大\n w = np.max(\n np.sqrt(np.sum((roi[0, [0, 2]] - roi[3, [0, 2]]) ** 2)) +\n np.sqrt(np.sum((roi[1, [0, 2]] - roi[2, [0, 2]]) ** 2)) +\n np.sqrt(np.sum((roi[4, [0, 2]] - roi[7, [0, 2]]) ** 2)) +\n np.sqrt(np.sum((roi[5, [0, 2]] - roi[6, [0, 2]]) ** 2))\n )\n l = np.max(\n np.sqrt(np.sum((roi[0, [0, 2]] - roi[1, [0, 2]]) ** 2)) +\n np.sqrt(np.sum((roi[2, [0, 2]] - roi[3, [0, 2]]) ** 2)) +\n np.sqrt(np.sum((roi[4, [0, 2]] - roi[5, [0, 2]]) ** 2)) +\n np.sqrt(np.sum((roi[6, [0, 2]] - roi[7, [0, 2]]) ** 2))\n )\n x = np.sum(roi[:, 0], axis=0) / 8\n y = np.sum(roi[0:4, 1], axis=0) / 4\n z = np.sum(roi[:, 2], axis=0) / 8\n ry = np.sum(\n math.atan2(roi[2, 0] - roi[1, 0], roi[2, 2] - roi[1, 2]) +\n math.atan2(roi[6, 0] - roi[5, 0], roi[6, 2] - roi[5, 2]) +\n math.atan2(roi[3, 0] - roi[0, 0], roi[3, 2] - roi[0, 2]) +\n math.atan2(roi[7, 0] - roi[4, 0], roi[7, 2] - roi[4, 2]) +\n math.atan2(roi[0, 2] - roi[1, 2], roi[1, 0] - roi[0, 0]) +\n math.atan2(roi[4, 2] - roi[5, 2], roi[5, 0] - roi[4, 0]) +\n math.atan2(roi[3, 2] - roi[2, 2], roi[2, 0] - roi[3, 0]) +\n math.atan2(roi[7, 2] - roi[6, 2], roi[6, 0] - roi[7, 0])\n ) / 8\n if w > l:\n w, l = l, w\n ry = angle_in_limit(ry + np.pi / 2)\n ret.append([x, y, z, h, w, l, ry])\n\n if coordinate == 'lidar':\n ret = camera_to_lidar_box(np.array(ret))\n\n return np.array(ret)\n\n\ndef point_transform(points, tx, ty, tz, rx=0, ry=0, rz=0):\n # Input:\n # points: (N, 3)\n # rx/y/z: in radians\n # Output:\n # points: (N, 3)\n N = points.shape[0]\n points = np.hstack([points, np.ones((N, 1))])\n\n # 点云数据平移\n mat1 = np.eye(4)\n mat1[3, 0:3] = tx, ty, tz\n points = np.matmul(points, mat1)\n \n # 点云数据旋转\n # 4x4围绕x轴旋转的矩阵\n if rx != 0:\n mat = np.zeros((4, 4))\n mat[0, 0] = 1\n mat[3, 3] = 1\n mat[1, 1] = np.cos(rx)\n mat[1, 2] = -np.sin(rx)\n mat[2, 1] = np.sin(rx)\n mat[2, 2] = np.cos(rx)\n points = np.matmul(points, mat)\n\n # 4x4围绕y轴旋转的矩阵\n if ry != 0:\n mat = np.zeros((4, 4))\n mat[1, 1] = 1\n mat[3, 3] = 1\n mat[0, 0] = np.cos(ry)\n mat[0, 2] = np.sin(ry)\n mat[2, 0] = -np.sin(ry)\n mat[2, 2] = np.cos(ry)\n points = np.matmul(points, mat)\n\n # 4x4围绕z轴旋转的矩阵\n if rz != 0:\n mat = np.zeros((4, 4))\n mat[2, 2] = 1\n mat[3, 3] = 1\n mat[0, 0] = np.cos(rz)\n mat[0, 1] = -np.sin(rz)\n mat[1, 0] = np.sin(rz)\n mat[1, 1] = np.cos(rz)\n points = np.matmul(points, mat)\n\n return points[:, 0:3]\n\n\n# 返回旋转过后的label标签,如果雷达坐标系下则返回雷达label,反之camera_label\ndef box_transform(boxes, tx, ty, tz, r=0, coordinate='lidar'):\n # Input:\n # boxes: (N, 7) x y z h w l rz/y\n # Output:\n # boxes: (N, 7) x y z h w l rz/y\n # 将每个样本的label中心点坐标根据长宽高变为其3dbbox八个角点的坐标(这个过程需要在雷达坐标系下进行),如果input_label是雷达坐标系则返回雷达坐标,如果是camera坐标系则需要把雷达坐标变回camera坐标\n boxes_corner = center_to_corner_box3d(boxes, coordinate=coordinate) # (N, 8, 3) \n for idx in range(len(boxes_corner)):\n if coordinate == 'lidar':\n boxes_corner[idx] = point_transform(boxes_corner[idx], tx, ty, tz, rz=r) # 如果是lidar坐标系的话偏向角是沿z轴旋转\n else:\n boxes_corner[idx] = point_transform(boxes_corner[idx], tx, ty, tz, ry=r) # 如果是camera坐标系的话偏向角是沿y轴旋转\n\n return corner_to_center_box3d(boxes_corner, coordinate=coordinate) \n\n# 刚体的坐标变换\ndef inverse_rigid_trans(Tr):\n ''' Inverse a rigid body transform matrix (3x4 as [R|t])\n [R'|-R't; 0|1]\n '''\n inv_Tr = np.zeros_like(Tr) # 3x4\n inv_Tr[0:3, 0:3] = np.transpose(Tr[0:3, 0:3])\n inv_Tr[0:3, 3] = np.dot(-np.transpose(Tr[0:3, 0:3]), Tr[0:3, 3])\n return inv_Tr\n\n# 选择多个方法结合进行数据增强\nclass Compose(object):\n def __init__(self, transforms, p=1.0):\n 
self.transforms = transforms\n self.p = p\n\n def __call__(self, lidar, labels):\n if np.random.random() <= self.p:\n for t in self.transforms:\n lidar, labels = t(lidar, labels)\n return lidar, labels\n\n# 选择一个方法进行数据增强\nclass OneOf(object):\n def __init__(self, transforms, p=1.0):\n self.transforms = transforms\n self.p = p\n\n def __call__(self, lidar, labels):\n if np.random.random() <= self.p:\n choice = np.random.randint(low=0, high=len(self.transforms))\n lidar, labels = self.transforms[choice](lidar, labels)\n\n return lidar, labels\n\n\nclass Random_Rotation(object):\n def __init__(self, limit_angle=np.pi / 4, p=0.5):\n self.limit_angle = limit_angle\n self.p = p\n\n def __call__(self, lidar, labels):\n \"\"\"\n :param labels: # (N', 7) x, y, z, h, w, l, r\n :return:\n \"\"\"\n if np.random.random() <= self.p:\n # 随机取一个角度在-limit_angle到limit_angle之间\n angle = np.random.uniform(-self.limit_angle, self.limit_angle)\n # 点云数据绕Z轴旋转\n lidar[:, 0:3] = point_transform(lidar[:, 0:3], 0, 0, 0, rz=angle)\n # 把数据对应的label也旋转\n labels = box_transform(labels, 0, 0, 0, r=angle, coordinate='lidar')\n\n return lidar, labels\n\n\nclass Random_Scaling(object):\n def __init__(self, scaling_range=(0.95, 1.05), p=0.5):\n self.scaling_range = scaling_range\n self.p = p\n\n def __call__(self, lidar, labels):\n \"\"\"\n :param labels: # (N', 7) x, y, z, h, w, l, r\n :return:\n \"\"\"\n if np.random.random() <= self.p:\n # 数据缩放因子\n factor = np.random.uniform(self.scaling_range[0], self.scaling_range[0])\n # lidar和label数据缩放\n lidar[:, 0:3] = lidar[:, 0:3] * factor\n labels[:, 0:6] = labels[:, 0:6] * factor\n\n return lidar, labels\n\n\nclass Cutout(object):\n \"\"\"Randomly mask out one or more patches from an image.\n Args:\n n_holes (int): Number of patches to cut out of each image.\n length (int): The length (in pixels) of each square patch.\n Refer from: https://github.com/uoguelph-mlrg/Cutout/blob/master/util/cutout.py\n \"\"\"\n\n def __init__(self, n_holes, ratio, fill_value=0., p=1.0):\n self.n_holes = n_holes\n self.ratio = ratio\n assert 0. <= fill_value <= 1., \"the fill value is in a range of 0 to 1\"\n self.fill_value = fill_value\n self.p = p\n\n def __call__(self, img, targets):\n \"\"\"\n Args:\n img (Tensor): Tensor image of size (C, H, W).\n Returns:\n Tensor: Image with n_holes of dimension length x length cut out of it.\n \"\"\"\n if np.random.random() <= self.p:\n h = img.size(1)\n w = img.size(2)\n\n h_cutout = int(self.ratio * h)\n w_cutout = int(self.ratio * w)\n\n for n in range(self.n_holes):\n y = np.random.randint(h)\n x = np.random.randint(w)\n\n y1 = np.clip(y - h_cutout // 2, 0, h)\n y2 = np.clip(y + h_cutout // 2, 0, h)\n x1 = np.clip(x - w_cutout // 2, 0, w)\n x2 = np.clip(x + w_cutout // 2, 0, w)\n\n img[:, y1: y2, x1: x2] = self.fill_value # Zero out the selected area\n # Remove targets that are in the selected area\n keep_target = []\n for target_idx, target in enumerate(targets):\n _, _, target_x, target_y, target_w, target_l, _, _ = target\n if (x1 <= target_x * w <= x2) and (y1 <= target_y * h <= y2):\n continue\n keep_target.append(target_idx)\n targets = targets[keep_target]\n\n return img, targets\n",
"import os\nimport sys\n\nimport torch\nfrom torch.utils.data import DataLoader\nimport numpy as np\n\nsrc_dir = os.path.dirname(os.path.realpath(__file__))\nwhile not src_dir.endswith(\"sfa\"):\n src_dir = os.path.dirname(src_dir)\nif src_dir not in sys.path:\n sys.path.append(src_dir)\n\nfrom data_process.kitti_dataset import KittiDataset\nfrom data_process.transformation import OneOf, Random_Rotation, Random_Scaling\n\n\ndef create_train_dataloader(configs):\n \"\"\"Create dataloader for training\"\"\"\n # 训练集的雷达数据增强\n # 选择其中之一的方式进行数据增强\n train_lidar_aug = OneOf([\n Random_Rotation(limit_angle=np.pi / 4, p=1.0),\n Random_Scaling(scaling_range=(0.95, 1.05), p=1.0),\n ], p=0.66)\n train_dataset = KittiDataset(configs, mode='train', lidar_aug=train_lidar_aug, hflip_prob=configs.hflip_prob,\n num_samples=configs.num_samples)\n train_sampler = None\n if configs.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, batch_size=configs.batch_size, shuffle=(train_sampler is None),\n pin_memory=configs.pin_memory, num_workers=configs.num_workers, sampler=train_sampler)\n\n return train_dataloader, train_sampler\n\n\ndef create_val_dataloader(configs):\n \"\"\"Create dataloader for validation\"\"\"\n val_sampler = None\n val_dataset = KittiDataset(configs, mode='trainval', lidar_aug=None, hflip_prob=0., num_samples=configs.num_samples)\n if configs.distributed:\n val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False)\n val_dataloader = DataLoader(val_dataset, batch_size=configs.batch_size, shuffle=False,\n pin_memory=configs.pin_memory, num_workers=configs.num_workers, sampler=val_sampler)\n\n return val_dataloader\n\n\ndef create_test_dataloader(configs):\n \"\"\"Create dataloader for testing phase\"\"\"\n\n test_dataset = KittiDataset(configs, mode='test', lidar_aug=None, hflip_prob=0., num_samples=configs.num_samples)\n test_sampler = None\n if configs.distributed:\n test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset)\n test_dataloader = DataLoader(test_dataset, batch_size=configs.batch_size, shuffle=False,\n pin_memory=configs.pin_memory, num_workers=configs.num_workers, sampler=test_sampler)\n\n return test_dataloader\n"
] | [
[
"numpy.dot",
"numpy.random.random",
"numpy.clip",
"numpy.linalg.inv",
"numpy.eye",
"numpy.matmul",
"numpy.cos",
"numpy.tile",
"numpy.sin",
"numpy.ones",
"numpy.zeros_like",
"numpy.transpose",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.random.randint"
],
[
"torch.utils.data.DataLoader",
"torch.utils.data.distributed.DistributedSampler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
art-vish/neuro-comma | [
"148ff7150e92d734d926a576c50bcabf1ae0ec0a"
] | [
"src/neuro_comma/dataset.py"
] | [
"from typing import Dict, List, Optional, Tuple, Union\nfrom typing_extensions import TypedDict\n\nimport numpy as np\nimport torch\nfrom torch import Tensor\nfrom tqdm import tqdm\nfrom transformers import PreTrainedTokenizer\n\nfrom neuro_comma.augmentation import AUGMENTATIONS\nfrom neuro_comma.pretrained import TOKEN_IDX\n\n\nclass BaseDataset(torch.utils.data.Dataset):\n def __init__(self,\n files: Union[str, List[str]],\n tokenizer: PreTrainedTokenizer,\n targets: Dict[str, int],\n sequence_len: int,\n token_style: str,\n *args,\n **kwargs) -> None:\n\n self.tokenizer = tokenizer\n self.targets = targets\n self.seq_len = sequence_len\n self.token_style = token_style\n\n if isinstance(files, list):\n self.data = []\n for file in files:\n self.data += self._parse_data(file, *args, **kwargs)\n else:\n self.data = self._parse_data(files, *args, **kwargs)\n\n def _parse_data(self, file_path: str, *args, **kwargs) -> List[List[List[int]]]:\n \"\"\"Parse file to train data\n\n Args:\n file_path (`str`): text file path that contains tokens and punctuations separated by tab in lines\n Returns:\n list[Batch]: each having sequence_len punctuation_mask is used to ignore special indices like padding and intermediate sub-word token during evaluation\n \"\"\"\n with open(file_path, 'r', encoding='utf-8') as file:\n x, y = [], []\n for i, line in enumerate(file):\n if (line.strip()):\n line = line.strip()\n token = line.rsplit('\\t', 1)\n if len(token) == 2:\n x.append(token[0])\n target = self.targets[token[1]]\n y.append(target)\n else:\n continue\n\n data = self.parse_tokens(x, self.tokenizer, self.seq_len, self.token_style, y, *args, **kwargs)\n return data\n\n @classmethod\n def parse_tokens(cls,\n tokens: Union[List[str], Tuple[str]],\n tokenizer: PreTrainedTokenizer,\n seq_len: int,\n token_style: str,\n targets: Optional[List[int]] = None,\n *args,\n **kwargs) -> List[List[List[int]]]:\n \"\"\"\n Convert tokenized data for model prediction\n\n Args:\n tokens (`Union[list[str], tuple[str]]`): splited tokens\n tokenizer (`PreTrainedTokenizer`): tokenizer which split tokens to subtokens\n seq_len (`int`): sequence length\n token_style (`str`): token_style from pretrained.TOKEN_IDX\n\n Returns:\n (`list[BatchWithoutTarget]`): list of bathces\n\n ```txt\n tokens : [token token ##token PAD ]\n x : [321 1233 23121 101 ]\n y : [tar 0 tar 0 ]\n y_mask : [1 0 1 0 ]\n attn_mask : [1 1 1 0 ]\n ```\n\n \"\"\"\n data_items = []\n # loop until end of the entire text\n idx = 0\n\n debug = kwargs.get('debug')\n if debug:\n pbar = tqdm(total=len(tokens))\n\n while idx < len(tokens):\n x = [TOKEN_IDX[token_style]['START_SEQ']]\n w_id = [-1] # word indexes\n y = [0]\n y_mask = [1] if targets else [0]\n\n # loop until we have required sequence length\n # -1 because we will have a special end of sequence token at the end\n while len(x) < seq_len - 1 and idx < len(tokens):\n word_pieces = tokenizer.tokenize(tokens[idx])\n\n # if taking these tokens exceeds sequence length we finish\n # current sequence with padding\n # then start next sequence from this token\n if len(word_pieces) + len(x) >= seq_len:\n break\n for i in range(len(word_pieces) - 1):\n x.append(tokenizer.convert_tokens_to_ids(word_pieces[i]))\n w_id.append(idx)\n y.append(0)\n y_mask.append(0)\n if len(word_pieces) > 0:\n x.append(tokenizer.convert_tokens_to_ids(word_pieces[-1]))\n else:\n x.append(TOKEN_IDX[token_style]['UNK'])\n\n w_id.append(idx)\n\n if targets:\n y.append(targets[idx])\n else:\n y.append(0)\n\n y_mask.append(1)\n\n idx += 1\n if 
debug:\n pbar.update(1)\n\n x.append(TOKEN_IDX[token_style]['END_SEQ'])\n w_id.append(-1)\n y.append(0)\n if targets:\n y_mask.append(1)\n else:\n y_mask.append(0)\n\n # Fill with pad tokens\n if len(x) < seq_len:\n x = x + [TOKEN_IDX[token_style]['PAD'] for _ in range(seq_len - len(x))]\n w_id = w_id + [-100 for _ in range(seq_len - len(w_id))]\n y = y + [0 for _ in range(seq_len - len(y))]\n y_mask = y_mask + [0 for _ in range(seq_len - len(y_mask))]\n\n attn_mask = [1 if token != TOKEN_IDX[token_style]['PAD'] else 0 for token in x]\n\n data_items.append([x, w_id, attn_mask, y, y_mask])\n\n if debug:\n pbar.close()\n\n return data_items\n\n def __len__(self) -> int:\n return len(self.data)\n\n def __getitem__(self, index: int) -> Tuple[Tensor, Tensor, Tensor, Tensor]:\n x = self.data[index][0]\n attn_mask = self.data[index][2]\n y = self.data[index][3]\n y_mask = self.data[index][4]\n\n x = torch.tensor(x) # type: ignore\n attn_mask = torch.tensor(attn_mask) # type: ignore\n y = torch.tensor(y) # type: ignore\n y_mask = torch.tensor(y_mask) # type: ignore\n\n return x, y, attn_mask, y_mask # type: ignore\n\n\nclass RepunctDataset(BaseDataset):\n def __init__(self,\n files: Union[str, List[str]],\n tokenizer: PreTrainedTokenizer,\n targets: Dict[str, int],\n sequence_len: int,\n token_style: str,\n is_train=False,\n augment_rate=0.,\n augment_type='substitute',\n *args,\n **kwargs) -> None:\n \"\"\"Preprocess data for restore punctuation\n\n Args:\n files (`Union[str, list[str]]`): single file or list of text files containing tokens and punctuations separated by tab in lines\n tokenizer (`PreTrainedTokenizer`): tokenizer that will be used to further tokenize word for BERT like models\n targets (`dict[str, int]`): dict with targets\n sequence_len (`int`): length of each sequence\n token_style (`str`): For getting index of special tokens in pretrained.TOKEN_IDX\n is_train (`bool, optional`): if false do not apply augmentation. Defaults to False.\n augment_rate (`float, optional`): percent of data which should be augmented. Defaults to 0.0.\n augment_type (`str, optional`): augmentation type. 
Defaults to 'substitute'.\n \"\"\"\n super().__init__(files, tokenizer, targets, sequence_len, token_style, *args, **kwargs)\n\n self.is_train = is_train\n self.augment_type = augment_type\n self.augment_rate = augment_rate\n\n def _augment(self, x, y, y_mask):\n x_aug = []\n y_aug = []\n y_mask_aug = []\n for i in range(len(x)):\n r = np.random.rand()\n if r < self.augment_rate:\n AUGMENTATIONS[self.augment_type](x, y, y_mask, x_aug, y_aug, y_mask_aug, i, self.token_style)\n else:\n x_aug.append(x[i])\n y_aug.append(y[i])\n y_mask_aug.append(y_mask[i])\n\n if len(x_aug) > self.seq_len:\n # len increased due to insert\n x_aug = x_aug[:self.seq_len]\n y_aug = y_aug[:self.seq_len]\n y_mask_aug = y_mask_aug[:self.seq_len]\n elif len(x_aug) < self.seq_len:\n # len decreased due to delete\n x_aug = x_aug + [TOKEN_IDX[self.token_style]['PAD'] for _ in range(self.seq_len - len(x_aug))]\n y_aug = y_aug + [0 for _ in range(self.seq_len - len(y_aug))]\n y_mask_aug = y_mask_aug + [0 for _ in range(self.seq_len - len(y_mask_aug))]\n\n attn_mask = [1 if token != TOKEN_IDX[self.token_style]['PAD'] else 0 for token in x]\n return x_aug, y_aug, attn_mask, y_mask_aug\n\n def __getitem__(self, index: int) -> Tuple[Tensor, Tensor, Tensor, Tensor]:\n x = self.data[index][0]\n attn_mask = self.data[index][2]\n y = self.data[index][3]\n y_mask = self.data[index][4]\n\n if self.is_train and self.augment_rate > 0:\n x, y, attn_mask, y_mask = self._augment(x, y, y_mask)\n\n x = torch.tensor(x) # type: ignore\n attn_mask = torch.tensor(attn_mask) # type: ignore\n y = torch.tensor(y) # type: ignore\n y_mask = torch.tensor(y_mask) # type: ignore\n\n return x, y, attn_mask, y_mask # type: ignore\n"
] | [
[
"numpy.random.rand",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ajayiagbebaku/NFL-Model | [
"afcc67a85ca7138c58c3334d45988ada2da158ed",
"afcc67a85ca7138c58c3334d45988ada2da158ed",
"afcc67a85ca7138c58c3334d45988ada2da158ed",
"afcc67a85ca7138c58c3334d45988ada2da158ed",
"afcc67a85ca7138c58c3334d45988ada2da158ed",
"afcc67a85ca7138c58c3334d45988ada2da158ed",
"afcc67a85ca7138c58c3334d45988ada2da158ed",
"afcc67a85ca7138c58c3334d45988ada2da158ed",
"afcc67a85ca7138c58c3334d45988ada2da158ed",
"afcc67a85ca7138c58c3334d45988ada2da158ed",
"afcc67a85ca7138c58c3334d45988ada2da158ed",
"afcc67a85ca7138c58c3334d45988ada2da158ed",
"afcc67a85ca7138c58c3334d45988ada2da158ed",
"afcc67a85ca7138c58c3334d45988ada2da158ed",
"afcc67a85ca7138c58c3334d45988ada2da158ed",
"afcc67a85ca7138c58c3334d45988ada2da158ed",
"afcc67a85ca7138c58c3334d45988ada2da158ed",
"afcc67a85ca7138c58c3334d45988ada2da158ed",
"afcc67a85ca7138c58c3334d45988ada2da158ed",
"afcc67a85ca7138c58c3334d45988ada2da158ed"
] | [
"venv/Lib/site-packages/streamlit/caching/hashing.py",
"venv/Lib/site-packages/pandas/tests/test_optional_dependency.py",
"venv/Lib/site-packages/pandas/tests/indexing/test_coercion.py",
"venv/Lib/site-packages/pandas/core/indexes/period.py",
"venv/Lib/site-packages/pandas/tests/groupby/aggregate/test_other.py",
"venv/Lib/site-packages/pandas/tests/indexes/multi/test_names.py",
"venv/Lib/site-packages/pandas/tests/extension/base/setitem.py",
"venv/Lib/site-packages/pandas/tests/indexes/datetimes/test_date_range.py",
"venv/Lib/site-packages/numpy/typing/tests/data/reveal/ndarray_conversion.py",
"venv/Lib/site-packages/pandas/core/indexes/datetimes.py",
"venv/Lib/site-packages/pandas/tests/io/parser/test_converters.py",
"venv/Lib/site-packages/pandas/tests/series/methods/test_rank.py",
"venv/Lib/site-packages/pandas/tests/libs/test_hashtable.py",
"venv/Lib/site-packages/pandas/tests/scalar/test_nat.py",
"venv/Lib/site-packages/pandas/core/arrays/datetimelike.py",
"venv/Lib/site-packages/pandas/tests/tseries/offsets/test_offsets.py",
"venv/Lib/site-packages/pandas/tests/arrays/masked/test_function.py",
"venv/Lib/site-packages/pandas/tests/io/pytables/test_read.py",
"venv/Lib/site-packages/pandas/tests/arrays/sparse/test_array.py",
"venv/Lib/site-packages/altair/utils/tests/test_utils.py"
] | [
"# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Hashing for st.memo and st.singleton.\"\"\"\nimport collections\nimport functools\nimport hashlib\nimport inspect\nimport io\nimport os\nimport pickle\nimport sys\nimport tempfile\nimport threading\nimport unittest.mock\nimport weakref\nfrom typing import Any, Pattern, Optional, Dict, List\n\nfrom streamlit import type_util\nfrom streamlit import util\nfrom streamlit.logger import get_logger\nfrom streamlit.uploaded_file_manager import UploadedFile\nfrom .cache_errors import (\n CacheType,\n UnhashableTypeError,\n)\n\n_LOGGER = get_logger(__name__)\n\n\n# If a dataframe has more than this many rows, we consider it large and hash a sample.\n_PANDAS_ROWS_LARGE = 100000\n_PANDAS_SAMPLE_SIZE = 10000\n\n\n# Similar to dataframes, we also sample large numpy arrays.\n_NP_SIZE_LARGE = 1000000\n_NP_SAMPLE_SIZE = 100000\n\n\n# Arbitrary item to denote where we found a cycle in a hashed object.\n# This allows us to hash self-referencing lists, dictionaries, etc.\n_CYCLE_PLACEHOLDER = b\"streamlit-57R34ML17-hesamagicalponyflyingthroughthesky-CYCLE\"\n\n\ndef update_hash(val: Any, hasher, cache_type: CacheType) -> None:\n \"\"\"Updates a hashlib hasher with the hash of val.\n\n This is the main entrypoint to hashing.py.\n \"\"\"\n ch = _CacheFuncHasher(cache_type)\n ch.update(hasher, val)\n\n\nclass _HashStack:\n \"\"\"Stack of what has been hashed, for debug and circular reference detection.\n\n This internally keeps 1 stack per thread.\n\n Internally, this stores the ID of pushed objects rather than the objects\n themselves because otherwise the \"in\" operator inside __contains__ would\n fail for objects that don't return a boolean for \"==\" operator. 
For\n example, arr == 10 where arr is a NumPy array returns another NumPy array.\n This causes the \"in\" to crash since it expects a boolean.\n \"\"\"\n\n def __init__(self):\n self._stack: collections.OrderedDict[int, List[Any]] = collections.OrderedDict()\n\n def __repr__(self) -> str:\n return util.repr_(self)\n\n def push(self, val: Any):\n self._stack[id(val)] = val\n\n def pop(self):\n self._stack.popitem()\n\n def __contains__(self, val: Any):\n return id(val) in self._stack\n\n\nclass _HashStacks:\n \"\"\"Stacks of what has been hashed, with at most 1 stack per thread.\"\"\"\n\n def __init__(self):\n self._stacks: weakref.WeakKeyDictionary[\n threading.Thread, _HashStack\n ] = weakref.WeakKeyDictionary()\n\n def __repr__(self) -> str:\n return util.repr_(self)\n\n @property\n def current(self) -> _HashStack:\n current_thread = threading.current_thread()\n\n stack = self._stacks.get(current_thread, None)\n\n if stack is None:\n stack = _HashStack()\n self._stacks[current_thread] = stack\n\n return stack\n\n\nhash_stacks = _HashStacks()\n\n\ndef _int_to_bytes(i: int) -> bytes:\n num_bytes = (i.bit_length() + 8) // 8\n return i.to_bytes(num_bytes, \"little\", signed=True)\n\n\ndef _key(obj: Optional[Any]) -> Any:\n \"\"\"Return key for memoization.\"\"\"\n\n if obj is None:\n return None\n\n def is_simple(obj):\n return (\n isinstance(obj, bytes)\n or isinstance(obj, bytearray)\n or isinstance(obj, str)\n or isinstance(obj, float)\n or isinstance(obj, int)\n or isinstance(obj, bool)\n or obj is None\n )\n\n if is_simple(obj):\n return obj\n\n if isinstance(obj, tuple):\n if all(map(is_simple, obj)):\n return obj\n\n if isinstance(obj, list):\n if all(map(is_simple, obj)):\n return (\"__l\", tuple(obj))\n\n if (\n type_util.is_type(obj, \"pandas.core.frame.DataFrame\")\n or type_util.is_type(obj, \"numpy.ndarray\")\n or inspect.isbuiltin(obj)\n or inspect.isroutine(obj)\n or inspect.iscode(obj)\n ):\n return id(obj)\n\n return NoResult\n\n\nclass _CacheFuncHasher:\n \"\"\"A hasher that can hash objects with cycles.\"\"\"\n\n def __init__(self, cache_type: CacheType):\n self._hashes: Dict[Any, bytes] = {}\n\n # The number of the bytes in the hash.\n self.size = 0\n\n self.cache_type = cache_type\n\n def __repr__(self) -> str:\n return util.repr_(self)\n\n def to_bytes(self, obj: Any) -> bytes:\n \"\"\"Add memoization to _to_bytes and protect against cycles in data structures.\"\"\"\n tname = type(obj).__qualname__.encode()\n key = (tname, _key(obj))\n\n # Memoize if possible.\n if key[1] is not NoResult:\n if key in self._hashes:\n return self._hashes[key]\n\n # Break recursive cycles.\n if obj in hash_stacks.current:\n return _CYCLE_PLACEHOLDER\n\n hash_stacks.current.push(obj)\n\n try:\n # Hash the input\n b = b\"%s:%s\" % (tname, self._to_bytes(obj))\n\n # Hmmm... It's possible that the size calculation is wrong. 
When we\n # call to_bytes inside _to_bytes things get double-counted.\n self.size += sys.getsizeof(b)\n\n if key[1] is not NoResult:\n self._hashes[key] = b\n\n finally:\n # In case an UnhashableTypeError (or other) error is thrown, clean up the\n # stack so we don't get false positives in future hashing calls\n hash_stacks.current.pop()\n\n return b\n\n def update(self, hasher, obj: Any) -> None:\n \"\"\"Update the provided hasher with the hash of an object.\"\"\"\n b = self.to_bytes(obj)\n hasher.update(b)\n\n def _to_bytes(self, obj: Any) -> bytes:\n \"\"\"Hash objects to bytes, including code with dependencies.\n\n Python's built in `hash` does not produce consistent results across\n runs.\n \"\"\"\n\n if isinstance(obj, unittest.mock.Mock):\n # Mock objects can appear to be infinitely\n # deep, so we don't try to hash them at all.\n return self.to_bytes(id(obj))\n\n elif isinstance(obj, bytes) or isinstance(obj, bytearray):\n return obj\n\n elif isinstance(obj, str):\n return obj.encode()\n\n elif isinstance(obj, float):\n return self.to_bytes(hash(obj))\n\n elif isinstance(obj, int):\n return _int_to_bytes(obj)\n\n elif isinstance(obj, (list, tuple)):\n h = hashlib.new(\"md5\")\n for item in obj:\n self.update(h, item)\n return h.digest()\n\n elif isinstance(obj, dict):\n h = hashlib.new(\"md5\")\n for item in obj.items():\n self.update(h, item)\n return h.digest()\n\n elif obj is None:\n return b\"0\"\n\n elif obj is True:\n return b\"1\"\n\n elif obj is False:\n return b\"0\"\n\n elif type_util.is_type(obj, \"pandas.core.frame.DataFrame\") or type_util.is_type(\n obj, \"pandas.core.series.Series\"\n ):\n import pandas as pd\n\n if len(obj) >= _PANDAS_ROWS_LARGE:\n obj = obj.sample(n=_PANDAS_SAMPLE_SIZE, random_state=0)\n try:\n return b\"%s\" % pd.util.hash_pandas_object(obj).sum()\n except TypeError:\n # Use pickle if pandas cannot hash the object for example if\n # it contains unhashable objects.\n return b\"%s\" % pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)\n\n elif type_util.is_type(obj, \"numpy.ndarray\"):\n h = hashlib.new(\"md5\")\n self.update(h, obj.shape)\n\n if obj.size >= _NP_SIZE_LARGE:\n import numpy as np\n\n state = np.random.RandomState(0)\n obj = state.choice(obj.flat, size=_NP_SAMPLE_SIZE)\n\n self.update(h, obj.tobytes())\n return h.digest()\n\n elif inspect.isbuiltin(obj):\n return bytes(obj.__name__.encode())\n\n elif type_util.is_type(obj, \"builtins.mappingproxy\") or type_util.is_type(\n obj, \"builtins.dict_items\"\n ):\n return self.to_bytes(dict(obj))\n\n elif type_util.is_type(obj, \"builtins.getset_descriptor\"):\n return bytes(obj.__qualname__.encode())\n\n elif isinstance(obj, UploadedFile):\n # UploadedFile is a BytesIO (thus IOBase) but has a name.\n # It does not have a timestamp so this must come before\n # temproary files\n h = hashlib.new(\"md5\")\n self.update(h, obj.name)\n self.update(h, obj.tell())\n self.update(h, obj.getvalue())\n return h.digest()\n\n elif hasattr(obj, \"name\") and (\n isinstance(obj, io.IOBase)\n # Handle temporary files used during testing\n or isinstance(obj, tempfile._TemporaryFileWrapper)\n ):\n # Hash files as name + last modification date + offset.\n # NB: we're using hasattr(\"name\") to differentiate between\n # on-disk and in-memory StringIO/BytesIO file representations.\n # That means that this condition must come *before* the next\n # condition, which just checks for StringIO/BytesIO.\n h = hashlib.new(\"md5\")\n obj_name = getattr(obj, \"name\", \"wonthappen\") # Just to appease MyPy.\n self.update(h, 
obj_name)\n self.update(h, os.path.getmtime(obj_name))\n self.update(h, obj.tell())\n return h.digest()\n\n elif isinstance(obj, Pattern):\n return self.to_bytes([obj.pattern, obj.flags])\n\n elif isinstance(obj, io.StringIO) or isinstance(obj, io.BytesIO):\n # Hash in-memory StringIO/BytesIO by their full contents\n # and seek position.\n h = hashlib.new(\"md5\")\n self.update(h, obj.tell())\n self.update(h, obj.getvalue())\n return h.digest()\n\n elif type_util.is_type(obj, \"numpy.ufunc\"):\n # For numpy.remainder, this returns remainder.\n return bytes(obj.__name__.encode())\n\n elif inspect.ismodule(obj):\n # TODO: Figure out how to best show this kind of warning to the\n # user. In the meantime, show nothing. This scenario is too common,\n # so the current warning is quite annoying...\n # st.warning(('Streamlit does not support hashing modules. '\n # 'We did not hash `%s`.') % obj.__name__)\n # TODO: Hash more than just the name for internal modules.\n return self.to_bytes(obj.__name__)\n\n elif inspect.isclass(obj):\n # TODO: Figure out how to best show this kind of warning to the\n # user. In the meantime, show nothing. This scenario is too common,\n # (e.g. in every \"except\" statement) so the current warning is\n # quite annoying...\n # st.warning(('Streamlit does not support hashing classes. '\n # 'We did not hash `%s`.') % obj.__name__)\n # TODO: Hash more than just the name of classes.\n return self.to_bytes(obj.__name__)\n\n elif isinstance(obj, functools.partial):\n # The return value of functools.partial is not a plain function:\n # it's a callable object that remembers the original function plus\n # the values you pickled into it. So here we need to special-case it.\n h = hashlib.new(\"md5\")\n self.update(h, obj.args)\n self.update(h, obj.func)\n self.update(h, obj.keywords)\n return h.digest()\n\n else:\n # As a last resort, hash the output of the object's __reduce__ method\n h = hashlib.new(\"md5\")\n try:\n reduce_data = obj.__reduce__()\n except BaseException as e:\n raise UnhashableTypeError() from e\n\n for item in reduce_data:\n self.update(h, item)\n return h.digest()\n\n\nclass NoResult:\n \"\"\"Placeholder class for return values when None is meaningful.\"\"\"\n\n pass\n",
"import sys\nimport types\n\nimport pytest\n\nfrom pandas.compat._optional import (\n VERSIONS,\n import_optional_dependency,\n)\n\nimport pandas._testing as tm\n\n\ndef test_import_optional():\n match = \"Missing .*notapackage.* pip .* conda .* notapackage\"\n with pytest.raises(ImportError, match=match):\n import_optional_dependency(\"notapackage\")\n\n result = import_optional_dependency(\"notapackage\", errors=\"ignore\")\n assert result is None\n\n\ndef test_xlrd_version_fallback():\n pytest.importorskip(\"xlrd\")\n import_optional_dependency(\"xlrd\")\n\n\ndef test_bad_version(monkeypatch):\n name = \"fakemodule\"\n module = types.ModuleType(name)\n module.__version__ = \"0.9.0\"\n sys.modules[name] = module\n monkeypatch.setitem(VERSIONS, name, \"1.0.0\")\n\n match = \"Pandas requires .*1.0.0.* of .fakemodule.*'0.9.0'\"\n with pytest.raises(ImportError, match=match):\n import_optional_dependency(\"fakemodule\")\n\n # Test min_version parameter\n result = import_optional_dependency(\"fakemodule\", min_version=\"0.8\")\n assert result is module\n\n with tm.assert_produces_warning(UserWarning):\n result = import_optional_dependency(\"fakemodule\", errors=\"warn\")\n assert result is None\n\n module.__version__ = \"1.0.0\" # exact match is OK\n result = import_optional_dependency(\"fakemodule\")\n assert result is module\n\n\ndef test_submodule(monkeypatch):\n # Create a fake module with a submodule\n name = \"fakemodule\"\n module = types.ModuleType(name)\n module.__version__ = \"0.9.0\"\n sys.modules[name] = module\n sub_name = \"submodule\"\n submodule = types.ModuleType(sub_name)\n setattr(module, sub_name, submodule)\n sys.modules[f\"{name}.{sub_name}\"] = submodule\n monkeypatch.setitem(VERSIONS, name, \"1.0.0\")\n\n match = \"Pandas requires .*1.0.0.* of .fakemodule.*'0.9.0'\"\n with pytest.raises(ImportError, match=match):\n import_optional_dependency(\"fakemodule.submodule\")\n\n with tm.assert_produces_warning(UserWarning):\n result = import_optional_dependency(\"fakemodule.submodule\", errors=\"warn\")\n assert result is None\n\n module.__version__ = \"1.0.0\" # exact match is OK\n result = import_optional_dependency(\"fakemodule.submodule\")\n assert result is submodule\n\n\ndef test_no_version_raises(monkeypatch):\n name = \"fakemodule\"\n module = types.ModuleType(name)\n sys.modules[name] = module\n monkeypatch.setitem(VERSIONS, name, \"1.0.0\")\n\n with pytest.raises(ImportError, match=\"Can't determine .* fakemodule\"):\n import_optional_dependency(name)\n",
"from __future__ import annotations\n\nfrom datetime import timedelta\nimport itertools\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import (\n IS64,\n is_platform_windows,\n)\n\nimport pandas as pd\nimport pandas._testing as tm\n\n###############################################################\n# Index / Series common tests which may trigger dtype coercions\n###############################################################\n\n\[email protected](autouse=True, scope=\"class\")\ndef check_comprehensiveness(request):\n # Iterate over combination of dtype, method and klass\n # and ensure that each are contained within a collected test\n cls = request.cls\n combos = itertools.product(cls.klasses, cls.dtypes, [cls.method])\n\n def has_test(combo):\n klass, dtype, method = combo\n cls_funcs = request.node.session.items\n return any(\n klass in x.name and dtype in x.name and method in x.name for x in cls_funcs\n )\n\n opts = request.config.option\n if opts.lf or opts.keyword:\n # If we are running with \"last-failed\" or -k foo, we expect to only\n # run a subset of tests.\n yield\n\n else:\n\n for combo in combos:\n if not has_test(combo):\n raise AssertionError(\n f\"test method is not defined: {cls.__name__}, {combo}\"\n )\n\n yield\n\n\nclass CoercionBase:\n\n klasses = [\"index\", \"series\"]\n dtypes = [\n \"object\",\n \"int64\",\n \"float64\",\n \"complex128\",\n \"bool\",\n \"datetime64\",\n \"datetime64tz\",\n \"timedelta64\",\n \"period\",\n ]\n\n @property\n def method(self):\n raise NotImplementedError(self)\n\n\nclass TestSetitemCoercion(CoercionBase):\n\n method = \"setitem\"\n\n def _assert_setitem_series_conversion(\n self, original_series, loc_value, expected_series, expected_dtype\n ):\n \"\"\"test series value's coercion triggered by assignment\"\"\"\n temp = original_series.copy()\n temp[1] = loc_value\n tm.assert_series_equal(temp, expected_series)\n # check dtype explicitly for sure\n assert temp.dtype == expected_dtype\n\n # FIXME: dont leave commented-out\n # .loc works different rule, temporary disable\n # temp = original_series.copy()\n # temp.loc[1] = loc_value\n # tm.assert_series_equal(temp, expected_series)\n\n @pytest.mark.parametrize(\n \"val,exp_dtype\", [(1, object), (1.1, object), (1 + 1j, object), (True, object)]\n )\n def test_setitem_series_object(self, val, exp_dtype):\n obj = pd.Series(list(\"abcd\"))\n assert obj.dtype == object\n\n exp = pd.Series([\"a\", val, \"c\", \"d\"])\n self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)\n\n @pytest.mark.parametrize(\n \"val,exp_dtype\",\n [(1, np.int64), (1.1, np.float64), (1 + 1j, np.complex128), (True, object)],\n )\n def test_setitem_series_int64(self, val, exp_dtype, request):\n obj = pd.Series([1, 2, 3, 4])\n assert obj.dtype == np.int64\n\n if exp_dtype is np.float64:\n exp = pd.Series([1, 1, 3, 4])\n self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)\n mark = pytest.mark.xfail(reason=\"GH12747 The result must be float\")\n request.node.add_marker(mark)\n\n exp = pd.Series([1, val, 3, 4])\n self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)\n\n @pytest.mark.parametrize(\n \"val,exp_dtype\", [(np.int32(1), np.int8), (np.int16(2 ** 9), np.int16)]\n )\n def test_setitem_series_int8(self, val, exp_dtype, request):\n obj = pd.Series([1, 2, 3, 4], dtype=np.int8)\n assert obj.dtype == np.int8\n\n if exp_dtype is np.int16:\n exp = pd.Series([1, 0, 3, 4], dtype=np.int8)\n self._assert_setitem_series_conversion(obj, val, exp, np.int8)\n mark = pytest.mark.xfail(\n 
reason=\"BUG: it must be pd.Series([1, 1, 3, 4], dtype=np.int16\"\n )\n request.node.add_marker(mark)\n\n warn = None if exp_dtype is np.int8 else FutureWarning\n msg = \"Values are too large to be losslessly cast to int8\"\n with tm.assert_produces_warning(warn, match=msg):\n exp = pd.Series([1, val, 3, 4], dtype=np.int8)\n self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)\n\n @pytest.mark.parametrize(\n \"val,exp_dtype\",\n [(1, np.float64), (1.1, np.float64), (1 + 1j, np.complex128), (True, object)],\n )\n def test_setitem_series_float64(self, val, exp_dtype):\n obj = pd.Series([1.1, 2.2, 3.3, 4.4])\n assert obj.dtype == np.float64\n\n exp = pd.Series([1.1, val, 3.3, 4.4])\n self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)\n\n @pytest.mark.parametrize(\n \"val,exp_dtype\",\n [\n (1, np.complex128),\n (1.1, np.complex128),\n (1 + 1j, np.complex128),\n (True, object),\n ],\n )\n def test_setitem_series_complex128(self, val, exp_dtype):\n obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])\n assert obj.dtype == np.complex128\n\n exp = pd.Series([1 + 1j, val, 3 + 3j, 4 + 4j])\n self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)\n\n @pytest.mark.parametrize(\n \"val,exp_dtype\",\n [\n (1, object),\n (\"3\", object),\n (3, object),\n (1.1, object),\n (1 + 1j, object),\n (True, np.bool_),\n ],\n )\n def test_setitem_series_bool(self, val, exp_dtype):\n obj = pd.Series([True, False, True, False])\n assert obj.dtype == np.bool_\n\n exp = pd.Series([True, val, True, False], dtype=exp_dtype)\n self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)\n\n @pytest.mark.parametrize(\n \"val,exp_dtype\",\n [(pd.Timestamp(\"2012-01-01\"), \"datetime64[ns]\"), (1, object), (\"x\", object)],\n )\n def test_setitem_series_datetime64(self, val, exp_dtype):\n obj = pd.Series(\n [\n pd.Timestamp(\"2011-01-01\"),\n pd.Timestamp(\"2011-01-02\"),\n pd.Timestamp(\"2011-01-03\"),\n pd.Timestamp(\"2011-01-04\"),\n ]\n )\n assert obj.dtype == \"datetime64[ns]\"\n\n exp = pd.Series(\n [\n pd.Timestamp(\"2011-01-01\"),\n val,\n pd.Timestamp(\"2011-01-03\"),\n pd.Timestamp(\"2011-01-04\"),\n ]\n )\n self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)\n\n @pytest.mark.parametrize(\n \"val,exp_dtype\",\n [\n (pd.Timestamp(\"2012-01-01\", tz=\"US/Eastern\"), \"datetime64[ns, US/Eastern]\"),\n (pd.Timestamp(\"2012-01-01\", tz=\"US/Pacific\"), object),\n (pd.Timestamp(\"2012-01-01\"), object),\n (1, object),\n ],\n )\n def test_setitem_series_datetime64tz(self, val, exp_dtype):\n tz = \"US/Eastern\"\n obj = pd.Series(\n [\n pd.Timestamp(\"2011-01-01\", tz=tz),\n pd.Timestamp(\"2011-01-02\", tz=tz),\n pd.Timestamp(\"2011-01-03\", tz=tz),\n pd.Timestamp(\"2011-01-04\", tz=tz),\n ]\n )\n assert obj.dtype == \"datetime64[ns, US/Eastern]\"\n\n exp = pd.Series(\n [\n pd.Timestamp(\"2011-01-01\", tz=tz),\n val,\n pd.Timestamp(\"2011-01-03\", tz=tz),\n pd.Timestamp(\"2011-01-04\", tz=tz),\n ]\n )\n self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)\n\n @pytest.mark.parametrize(\n \"val,exp_dtype\",\n [(pd.Timedelta(\"12 day\"), \"timedelta64[ns]\"), (1, object), (\"x\", object)],\n )\n def test_setitem_series_timedelta64(self, val, exp_dtype):\n obj = pd.Series(\n [\n pd.Timedelta(\"1 day\"),\n pd.Timedelta(\"2 day\"),\n pd.Timedelta(\"3 day\"),\n pd.Timedelta(\"4 day\"),\n ]\n )\n assert obj.dtype == \"timedelta64[ns]\"\n\n exp = pd.Series(\n [pd.Timedelta(\"1 day\"), val, pd.Timedelta(\"3 day\"), pd.Timedelta(\"4 day\")]\n )\n 
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)\n\n def test_setitem_series_no_coercion_from_values_list(self):\n # GH35865 - int casted to str when internally calling np.array(ser.values)\n ser = pd.Series([\"a\", 1])\n ser[:] = list(ser.values)\n\n expected = pd.Series([\"a\", 1])\n\n tm.assert_series_equal(ser, expected)\n\n def _assert_setitem_index_conversion(\n self, original_series, loc_key, expected_index, expected_dtype\n ):\n \"\"\"test index's coercion triggered by assign key\"\"\"\n temp = original_series.copy()\n temp[loc_key] = 5\n exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)\n tm.assert_series_equal(temp, exp)\n # check dtype explicitly for sure\n assert temp.index.dtype == expected_dtype\n\n temp = original_series.copy()\n temp.loc[loc_key] = 5\n exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)\n tm.assert_series_equal(temp, exp)\n # check dtype explicitly for sure\n assert temp.index.dtype == expected_dtype\n\n @pytest.mark.parametrize(\n \"val,exp_dtype\", [(\"x\", object), (5, IndexError), (1.1, object)]\n )\n def test_setitem_index_object(self, val, exp_dtype):\n obj = pd.Series([1, 2, 3, 4], index=list(\"abcd\"))\n assert obj.index.dtype == object\n\n if exp_dtype is IndexError:\n temp = obj.copy()\n msg = \"index 5 is out of bounds for axis 0 with size 4\"\n with pytest.raises(exp_dtype, match=msg):\n temp[5] = 5\n else:\n exp_index = pd.Index(list(\"abcd\") + [val])\n self._assert_setitem_index_conversion(obj, val, exp_index, exp_dtype)\n\n @pytest.mark.parametrize(\n \"val,exp_dtype\", [(5, np.int64), (1.1, np.float64), (\"x\", object)]\n )\n def test_setitem_index_int64(self, val, exp_dtype):\n obj = pd.Series([1, 2, 3, 4])\n assert obj.index.dtype == np.int64\n\n exp_index = pd.Index([0, 1, 2, 3, val])\n self._assert_setitem_index_conversion(obj, val, exp_index, exp_dtype)\n\n @pytest.mark.parametrize(\n \"val,exp_dtype\", [(5, IndexError), (5.1, np.float64), (\"x\", object)]\n )\n def test_setitem_index_float64(self, val, exp_dtype, request):\n obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])\n assert obj.index.dtype == np.float64\n\n if exp_dtype is IndexError:\n # float + int -> int\n temp = obj.copy()\n msg = \"index 5 is out of bounds for axis 0 with size 4\"\n with pytest.raises(exp_dtype, match=msg):\n temp[5] = 5\n mark = pytest.mark.xfail(reason=\"TODO_GH12747 The result must be float\")\n request.node.add_marker(mark)\n exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, val])\n self._assert_setitem_index_conversion(obj, val, exp_index, exp_dtype)\n\n @pytest.mark.xfail(reason=\"Test not implemented\")\n def test_setitem_series_period(self):\n raise NotImplementedError\n\n @pytest.mark.xfail(reason=\"Test not implemented\")\n def test_setitem_index_complex128(self):\n raise NotImplementedError\n\n @pytest.mark.xfail(reason=\"Test not implemented\")\n def test_setitem_index_bool(self):\n raise NotImplementedError\n\n @pytest.mark.xfail(reason=\"Test not implemented\")\n def test_setitem_index_datetime64(self):\n raise NotImplementedError\n\n @pytest.mark.xfail(reason=\"Test not implemented\")\n def test_setitem_index_datetime64tz(self):\n raise NotImplementedError\n\n @pytest.mark.xfail(reason=\"Test not implemented\")\n def test_setitem_index_timedelta64(self):\n raise NotImplementedError\n\n @pytest.mark.xfail(reason=\"Test not implemented\")\n def test_setitem_index_period(self):\n raise NotImplementedError\n\n\nclass TestInsertIndexCoercion(CoercionBase):\n\n klasses = [\"index\"]\n method = \"insert\"\n\n def 
_assert_insert_conversion(self, original, value, expected, expected_dtype):\n \"\"\"test coercion triggered by insert\"\"\"\n target = original.copy()\n res = target.insert(1, value)\n tm.assert_index_equal(res, expected)\n assert res.dtype == expected_dtype\n\n @pytest.mark.parametrize(\n \"insert, coerced_val, coerced_dtype\",\n [\n (1, 1, object),\n (1.1, 1.1, object),\n (False, False, object),\n (\"x\", \"x\", object),\n ],\n )\n def test_insert_index_object(self, insert, coerced_val, coerced_dtype):\n obj = pd.Index(list(\"abcd\"))\n assert obj.dtype == object\n\n exp = pd.Index([\"a\", coerced_val, \"b\", \"c\", \"d\"])\n self._assert_insert_conversion(obj, insert, exp, coerced_dtype)\n\n @pytest.mark.parametrize(\n \"insert, coerced_val, coerced_dtype\",\n [\n (1, 1, np.int64),\n (1.1, 1.1, np.float64),\n (False, False, object), # GH#36319\n (\"x\", \"x\", object),\n ],\n )\n def test_insert_index_int64(self, insert, coerced_val, coerced_dtype):\n obj = pd.Int64Index([1, 2, 3, 4])\n assert obj.dtype == np.int64\n\n exp = pd.Index([1, coerced_val, 2, 3, 4])\n self._assert_insert_conversion(obj, insert, exp, coerced_dtype)\n\n @pytest.mark.parametrize(\n \"insert, coerced_val, coerced_dtype\",\n [\n (1, 1.0, np.float64),\n (1.1, 1.1, np.float64),\n (False, False, object), # GH#36319\n (\"x\", \"x\", object),\n ],\n )\n def test_insert_index_float64(self, insert, coerced_val, coerced_dtype):\n obj = pd.Float64Index([1.0, 2.0, 3.0, 4.0])\n assert obj.dtype == np.float64\n\n exp = pd.Index([1.0, coerced_val, 2.0, 3.0, 4.0])\n self._assert_insert_conversion(obj, insert, exp, coerced_dtype)\n\n @pytest.mark.parametrize(\n \"fill_val,exp_dtype\",\n [\n (pd.Timestamp(\"2012-01-01\"), \"datetime64[ns]\"),\n (pd.Timestamp(\"2012-01-01\", tz=\"US/Eastern\"), \"datetime64[ns, US/Eastern]\"),\n ],\n ids=[\"datetime64\", \"datetime64tz\"],\n )\n @pytest.mark.parametrize(\n \"insert_value\",\n [pd.Timestamp(\"2012-01-01\"), pd.Timestamp(\"2012-01-01\", tz=\"Asia/Tokyo\"), 1],\n )\n def test_insert_index_datetimes(self, request, fill_val, exp_dtype, insert_value):\n\n obj = pd.DatetimeIndex(\n [\"2011-01-01\", \"2011-01-02\", \"2011-01-03\", \"2011-01-04\"], tz=fill_val.tz\n )\n assert obj.dtype == exp_dtype\n\n exp = pd.DatetimeIndex(\n [\"2011-01-01\", fill_val.date(), \"2011-01-02\", \"2011-01-03\", \"2011-01-04\"],\n tz=fill_val.tz,\n )\n self._assert_insert_conversion(obj, fill_val, exp, exp_dtype)\n\n if fill_val.tz:\n\n # mismatched tzawareness\n ts = pd.Timestamp(\"2012-01-01\")\n result = obj.insert(1, ts)\n expected = obj.astype(object).insert(1, ts)\n assert expected.dtype == object\n tm.assert_index_equal(result, expected)\n\n # mismatched tz --> cast to object (could reasonably cast to common tz)\n ts = pd.Timestamp(\"2012-01-01\", tz=\"Asia/Tokyo\")\n result = obj.insert(1, ts)\n expected = obj.astype(object).insert(1, ts)\n assert expected.dtype == object\n tm.assert_index_equal(result, expected)\n\n else:\n # mismatched tzawareness\n ts = pd.Timestamp(\"2012-01-01\", tz=\"Asia/Tokyo\")\n result = obj.insert(1, ts)\n expected = obj.astype(object).insert(1, ts)\n assert expected.dtype == object\n tm.assert_index_equal(result, expected)\n\n item = 1\n result = obj.insert(1, item)\n expected = obj.astype(object).insert(1, item)\n assert expected[1] == item\n assert expected.dtype == object\n tm.assert_index_equal(result, expected)\n\n def test_insert_index_timedelta64(self):\n obj = pd.TimedeltaIndex([\"1 day\", \"2 day\", \"3 day\", \"4 day\"])\n assert obj.dtype == 
\"timedelta64[ns]\"\n\n # timedelta64 + timedelta64 => timedelta64\n exp = pd.TimedeltaIndex([\"1 day\", \"10 day\", \"2 day\", \"3 day\", \"4 day\"])\n self._assert_insert_conversion(\n obj, pd.Timedelta(\"10 day\"), exp, \"timedelta64[ns]\"\n )\n\n for item in [pd.Timestamp(\"2012-01-01\"), 1]:\n result = obj.insert(1, item)\n expected = obj.astype(object).insert(1, item)\n assert expected.dtype == object\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"insert, coerced_val, coerced_dtype\",\n [\n (pd.Period(\"2012-01\", freq=\"M\"), \"2012-01\", \"period[M]\"),\n (pd.Timestamp(\"2012-01-01\"), pd.Timestamp(\"2012-01-01\"), object),\n (1, 1, object),\n (\"x\", \"x\", object),\n ],\n )\n def test_insert_index_period(self, insert, coerced_val, coerced_dtype):\n obj = pd.PeriodIndex([\"2011-01\", \"2011-02\", \"2011-03\", \"2011-04\"], freq=\"M\")\n assert obj.dtype == \"period[M]\"\n\n data = [\n pd.Period(\"2011-01\", freq=\"M\"),\n coerced_val,\n pd.Period(\"2011-02\", freq=\"M\"),\n pd.Period(\"2011-03\", freq=\"M\"),\n pd.Period(\"2011-04\", freq=\"M\"),\n ]\n if isinstance(insert, pd.Period):\n exp = pd.PeriodIndex(data, freq=\"M\")\n self._assert_insert_conversion(obj, insert, exp, coerced_dtype)\n\n # string that can be parsed to appropriate PeriodDtype\n self._assert_insert_conversion(obj, str(insert), exp, coerced_dtype)\n\n else:\n result = obj.insert(0, insert)\n expected = obj.astype(object).insert(0, insert)\n tm.assert_index_equal(result, expected)\n\n # TODO: ATM inserting '2012-01-01 00:00:00' when we have obj.freq==\"M\"\n # casts that string to Period[M], not clear that is desirable\n if not isinstance(insert, pd.Timestamp):\n # non-castable string\n result = obj.insert(0, str(insert))\n expected = obj.astype(object).insert(0, str(insert))\n tm.assert_index_equal(result, expected)\n\n msg = r\"Unexpected keyword arguments {'freq'}\"\n with pytest.raises(TypeError, match=msg):\n with tm.assert_produces_warning(FutureWarning):\n # passing keywords to pd.Index\n pd.Index(data, freq=\"M\")\n\n @pytest.mark.xfail(reason=\"Test not implemented\")\n def test_insert_index_complex128(self):\n raise NotImplementedError\n\n @pytest.mark.xfail(reason=\"Test not implemented\")\n def test_insert_index_bool(self):\n raise NotImplementedError\n\n\nclass TestWhereCoercion(CoercionBase):\n\n method = \"where\"\n\n def _assert_where_conversion(\n self, original, cond, values, expected, expected_dtype\n ):\n \"\"\"test coercion triggered by where\"\"\"\n target = original.copy()\n res = target.where(cond, values)\n tm.assert_equal(res, expected)\n assert res.dtype == expected_dtype\n\n @pytest.mark.parametrize(\n \"fill_val,exp_dtype\",\n [(1, object), (1.1, object), (1 + 1j, object), (True, object)],\n )\n def test_where_object(self, index_or_series, fill_val, exp_dtype):\n klass = index_or_series\n obj = klass(list(\"abcd\"))\n assert obj.dtype == object\n cond = klass([True, False, True, False])\n\n if fill_val is True and klass is pd.Series:\n ret_val = 1\n else:\n ret_val = fill_val\n\n exp = klass([\"a\", ret_val, \"c\", ret_val])\n self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)\n\n if fill_val is True:\n values = klass([True, False, True, True])\n else:\n values = klass(x * fill_val for x in [5, 6, 7, 8])\n\n exp = klass([\"a\", values[1], \"c\", values[3]])\n self._assert_where_conversion(obj, cond, values, exp, exp_dtype)\n\n @pytest.mark.parametrize(\n \"fill_val,exp_dtype\",\n [(1, np.int64), (1.1, np.float64), (1 + 1j, np.complex128), 
(True, object)],\n )\n def test_where_int64(self, index_or_series, fill_val, exp_dtype):\n klass = index_or_series\n if klass is pd.Index and exp_dtype is np.complex128:\n pytest.skip(\"Complex Index not supported\")\n obj = klass([1, 2, 3, 4])\n assert obj.dtype == np.int64\n cond = klass([True, False, True, False])\n\n exp = klass([1, fill_val, 3, fill_val])\n self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)\n\n if fill_val is True:\n values = klass([True, False, True, True])\n else:\n values = klass(x * fill_val for x in [5, 6, 7, 8])\n exp = klass([1, values[1], 3, values[3]])\n self._assert_where_conversion(obj, cond, values, exp, exp_dtype)\n\n @pytest.mark.parametrize(\n \"fill_val, exp_dtype\",\n [(1, np.float64), (1.1, np.float64), (1 + 1j, np.complex128), (True, object)],\n )\n def test_where_float64(self, index_or_series, fill_val, exp_dtype):\n klass = index_or_series\n if klass is pd.Index and exp_dtype is np.complex128:\n pytest.skip(\"Complex Index not supported\")\n obj = klass([1.1, 2.2, 3.3, 4.4])\n assert obj.dtype == np.float64\n cond = klass([True, False, True, False])\n\n exp = klass([1.1, fill_val, 3.3, fill_val])\n self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)\n\n if fill_val is True:\n values = klass([True, False, True, True])\n else:\n values = klass(x * fill_val for x in [5, 6, 7, 8])\n exp = klass([1.1, values[1], 3.3, values[3]])\n self._assert_where_conversion(obj, cond, values, exp, exp_dtype)\n\n @pytest.mark.parametrize(\n \"fill_val,exp_dtype\",\n [\n (1, np.complex128),\n (1.1, np.complex128),\n (1 + 1j, np.complex128),\n (True, object),\n ],\n )\n def test_where_series_complex128(self, fill_val, exp_dtype):\n klass = pd.Series\n obj = klass([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])\n assert obj.dtype == np.complex128\n cond = klass([True, False, True, False])\n\n exp = klass([1 + 1j, fill_val, 3 + 3j, fill_val])\n self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)\n\n if fill_val is True:\n values = klass([True, False, True, True])\n else:\n values = klass(x * fill_val for x in [5, 6, 7, 8])\n exp = klass([1 + 1j, values[1], 3 + 3j, values[3]], dtype=exp_dtype)\n self._assert_where_conversion(obj, cond, values, exp, exp_dtype)\n\n @pytest.mark.parametrize(\n \"fill_val,exp_dtype\",\n [(1, object), (1.1, object), (1 + 1j, object), (True, np.bool_)],\n )\n def test_where_series_bool(self, fill_val, exp_dtype):\n klass = pd.Series\n\n obj = klass([True, False, True, False])\n assert obj.dtype == np.bool_\n cond = klass([True, False, True, False])\n\n exp = klass([True, fill_val, True, fill_val])\n self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)\n\n if fill_val is True:\n values = klass([True, False, True, True])\n else:\n values = klass(x * fill_val for x in [5, 6, 7, 8])\n exp = klass([True, values[1], True, values[3]])\n self._assert_where_conversion(obj, cond, values, exp, exp_dtype)\n\n @pytest.mark.parametrize(\n \"fill_val,exp_dtype\",\n [\n (pd.Timestamp(\"2012-01-01\"), \"datetime64[ns]\"),\n (pd.Timestamp(\"2012-01-01\", tz=\"US/Eastern\"), object),\n ],\n ids=[\"datetime64\", \"datetime64tz\"],\n )\n def test_where_series_datetime64(self, fill_val, exp_dtype):\n obj = pd.Series(\n [\n pd.Timestamp(\"2011-01-01\"),\n pd.Timestamp(\"2011-01-02\"),\n pd.Timestamp(\"2011-01-03\"),\n pd.Timestamp(\"2011-01-04\"),\n ]\n )\n assert obj.dtype == \"datetime64[ns]\"\n cond = pd.Series([True, False, True, False])\n\n exp = pd.Series(\n [pd.Timestamp(\"2011-01-01\"), fill_val, 
pd.Timestamp(\"2011-01-03\"), fill_val]\n )\n self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)\n\n values = pd.Series(pd.date_range(fill_val, periods=4))\n if fill_val.tz:\n exp = pd.Series(\n [\n pd.Timestamp(\"2011-01-01\"),\n pd.Timestamp(\"2012-01-02 00:00\", tz=\"US/Eastern\"),\n pd.Timestamp(\"2011-01-03\"),\n pd.Timestamp(\"2012-01-04 00:00\", tz=\"US/Eastern\"),\n ]\n )\n self._assert_where_conversion(obj, cond, values, exp, exp_dtype)\n\n exp = pd.Series(\n [\n pd.Timestamp(\"2011-01-01\"),\n values[1],\n pd.Timestamp(\"2011-01-03\"),\n values[3],\n ]\n )\n self._assert_where_conversion(obj, cond, values, exp, exp_dtype)\n\n @pytest.mark.parametrize(\n \"fill_val\",\n [\n pd.Timestamp(\"2012-01-01\"),\n pd.Timestamp(\"2012-01-01\").to_datetime64(),\n pd.Timestamp(\"2012-01-01\").to_pydatetime(),\n ],\n )\n def test_where_index_datetime(self, fill_val):\n exp_dtype = \"datetime64[ns]\"\n obj = pd.Index(\n [\n pd.Timestamp(\"2011-01-01\"),\n pd.Timestamp(\"2011-01-02\"),\n pd.Timestamp(\"2011-01-03\"),\n pd.Timestamp(\"2011-01-04\"),\n ]\n )\n assert obj.dtype == \"datetime64[ns]\"\n cond = pd.Index([True, False, True, False])\n\n result = obj.where(cond, fill_val)\n expected = pd.DatetimeIndex([obj[0], fill_val, obj[2], fill_val])\n tm.assert_index_equal(result, expected)\n\n values = pd.Index(pd.date_range(fill_val, periods=4))\n exp = pd.Index(\n [\n pd.Timestamp(\"2011-01-01\"),\n pd.Timestamp(\"2012-01-02\"),\n pd.Timestamp(\"2011-01-03\"),\n pd.Timestamp(\"2012-01-04\"),\n ]\n )\n\n self._assert_where_conversion(obj, cond, values, exp, exp_dtype)\n\n @pytest.mark.xfail(reason=\"GH 22839: do not ignore timezone, must be object\")\n def test_where_index_datetime64tz(self):\n fill_val = pd.Timestamp(\"2012-01-01\", tz=\"US/Eastern\")\n exp_dtype = object\n obj = pd.Index(\n [\n pd.Timestamp(\"2011-01-01\"),\n pd.Timestamp(\"2011-01-02\"),\n pd.Timestamp(\"2011-01-03\"),\n pd.Timestamp(\"2011-01-04\"),\n ]\n )\n assert obj.dtype == \"datetime64[ns]\"\n cond = pd.Index([True, False, True, False])\n\n msg = \"Index\\\\(\\\\.\\\\.\\\\.\\\\) must be called with a collection of some kind\"\n with pytest.raises(TypeError, match=msg):\n obj.where(cond, fill_val)\n\n values = pd.Index(pd.date_range(fill_val, periods=4))\n exp = pd.Index(\n [\n pd.Timestamp(\"2011-01-01\"),\n pd.Timestamp(\"2012-01-02\", tz=\"US/Eastern\"),\n pd.Timestamp(\"2011-01-03\"),\n pd.Timestamp(\"2012-01-04\", tz=\"US/Eastern\"),\n ],\n dtype=exp_dtype,\n )\n\n self._assert_where_conversion(obj, cond, values, exp, exp_dtype)\n\n @pytest.mark.xfail(reason=\"Test not implemented\")\n def test_where_index_complex128(self):\n raise NotImplementedError\n\n @pytest.mark.xfail(reason=\"Test not implemented\")\n def test_where_index_bool(self):\n raise NotImplementedError\n\n @pytest.mark.xfail(reason=\"Test not implemented\")\n def test_where_series_timedelta64(self):\n raise NotImplementedError\n\n @pytest.mark.xfail(reason=\"Test not implemented\")\n def test_where_series_period(self):\n raise NotImplementedError\n\n @pytest.mark.parametrize(\n \"value\", [pd.Timedelta(days=9), timedelta(days=9), np.timedelta64(9, \"D\")]\n )\n def test_where_index_timedelta64(self, value):\n tdi = pd.timedelta_range(\"1 Day\", periods=4)\n cond = np.array([True, False, False, True])\n\n expected = pd.TimedeltaIndex([\"1 Day\", value, value, \"4 Days\"])\n result = tdi.where(cond, value)\n tm.assert_index_equal(result, expected)\n\n # wrong-dtyped NaT\n dtnat = np.datetime64(\"NaT\", \"ns\")\n expected = 
pd.Index([tdi[0], dtnat, dtnat, tdi[3]], dtype=object)\n assert expected[1] is dtnat\n\n result = tdi.where(cond, dtnat)\n tm.assert_index_equal(result, expected)\n\n def test_where_index_period(self):\n dti = pd.date_range(\"2016-01-01\", periods=3, freq=\"QS\")\n pi = dti.to_period(\"Q\")\n\n cond = np.array([False, True, False])\n\n # Passinga valid scalar\n value = pi[-1] + pi.freq * 10\n expected = pd.PeriodIndex([value, pi[1], value])\n result = pi.where(cond, value)\n tm.assert_index_equal(result, expected)\n\n # Case passing ndarray[object] of Periods\n other = np.asarray(pi + pi.freq * 10, dtype=object)\n result = pi.where(cond, other)\n expected = pd.PeriodIndex([other[0], pi[1], other[2]])\n tm.assert_index_equal(result, expected)\n\n # Passing a mismatched scalar -> casts to object\n td = pd.Timedelta(days=4)\n expected = pd.Index([td, pi[1], td], dtype=object)\n result = pi.where(cond, td)\n tm.assert_index_equal(result, expected)\n\n per = pd.Period(\"2020-04-21\", \"D\")\n expected = pd.Index([per, pi[1], per], dtype=object)\n result = pi.where(cond, per)\n tm.assert_index_equal(result, expected)\n\n\nclass TestFillnaSeriesCoercion(CoercionBase):\n\n # not indexing, but place here for consistency\n\n method = \"fillna\"\n\n @pytest.mark.xfail(reason=\"Test not implemented\")\n def test_has_comprehensive_tests(self):\n raise NotImplementedError\n\n def _assert_fillna_conversion(self, original, value, expected, expected_dtype):\n \"\"\"test coercion triggered by fillna\"\"\"\n target = original.copy()\n res = target.fillna(value)\n tm.assert_equal(res, expected)\n assert res.dtype == expected_dtype\n\n @pytest.mark.parametrize(\n \"fill_val, fill_dtype\",\n [(1, object), (1.1, object), (1 + 1j, object), (True, object)],\n )\n def test_fillna_object(self, index_or_series, fill_val, fill_dtype):\n klass = index_or_series\n obj = klass([\"a\", np.nan, \"c\", \"d\"])\n assert obj.dtype == object\n\n exp = klass([\"a\", fill_val, \"c\", \"d\"])\n self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)\n\n @pytest.mark.parametrize(\n \"fill_val,fill_dtype\",\n [(1, np.float64), (1.1, np.float64), (1 + 1j, np.complex128), (True, object)],\n )\n def test_fillna_float64(self, index_or_series, fill_val, fill_dtype):\n klass = index_or_series\n obj = klass([1.1, np.nan, 3.3, 4.4])\n assert obj.dtype == np.float64\n\n exp = klass([1.1, fill_val, 3.3, 4.4])\n # float + complex -> we don't support a complex Index\n # complex for Series,\n # object for Index\n if fill_dtype == np.complex128 and klass == pd.Index:\n fill_dtype = object\n self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)\n\n @pytest.mark.parametrize(\n \"fill_val,fill_dtype\",\n [\n (1, np.complex128),\n (1.1, np.complex128),\n (1 + 1j, np.complex128),\n (True, object),\n ],\n )\n def test_fillna_series_complex128(self, fill_val, fill_dtype):\n obj = pd.Series([1 + 1j, np.nan, 3 + 3j, 4 + 4j])\n assert obj.dtype == np.complex128\n\n exp = pd.Series([1 + 1j, fill_val, 3 + 3j, 4 + 4j])\n self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)\n\n @pytest.mark.parametrize(\n \"fill_val,fill_dtype\",\n [\n (pd.Timestamp(\"2012-01-01\"), \"datetime64[ns]\"),\n (pd.Timestamp(\"2012-01-01\", tz=\"US/Eastern\"), object),\n (1, object),\n (\"x\", object),\n ],\n ids=[\"datetime64\", \"datetime64tz\", \"object\", \"object\"],\n )\n def test_fillna_datetime(self, index_or_series, fill_val, fill_dtype):\n klass = index_or_series\n obj = klass(\n [\n pd.Timestamp(\"2011-01-01\"),\n pd.NaT,\n 
pd.Timestamp(\"2011-01-03\"),\n pd.Timestamp(\"2011-01-04\"),\n ]\n )\n assert obj.dtype == \"datetime64[ns]\"\n\n exp = klass(\n [\n pd.Timestamp(\"2011-01-01\"),\n fill_val,\n pd.Timestamp(\"2011-01-03\"),\n pd.Timestamp(\"2011-01-04\"),\n ]\n )\n self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)\n\n @pytest.mark.parametrize(\n \"fill_val,fill_dtype\",\n [\n (pd.Timestamp(\"2012-01-01\", tz=\"US/Eastern\"), \"datetime64[ns, US/Eastern]\"),\n (pd.Timestamp(\"2012-01-01\"), object),\n (pd.Timestamp(\"2012-01-01\", tz=\"Asia/Tokyo\"), object),\n (1, object),\n (\"x\", object),\n ],\n )\n def test_fillna_datetime64tz(self, index_or_series, fill_val, fill_dtype):\n klass = index_or_series\n tz = \"US/Eastern\"\n\n obj = klass(\n [\n pd.Timestamp(\"2011-01-01\", tz=tz),\n pd.NaT,\n pd.Timestamp(\"2011-01-03\", tz=tz),\n pd.Timestamp(\"2011-01-04\", tz=tz),\n ]\n )\n assert obj.dtype == \"datetime64[ns, US/Eastern]\"\n\n exp = klass(\n [\n pd.Timestamp(\"2011-01-01\", tz=tz),\n fill_val,\n pd.Timestamp(\"2011-01-03\", tz=tz),\n pd.Timestamp(\"2011-01-04\", tz=tz),\n ]\n )\n self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)\n\n @pytest.mark.xfail(reason=\"Test not implemented\")\n def test_fillna_series_int64(self):\n raise NotImplementedError\n\n @pytest.mark.xfail(reason=\"Test not implemented\")\n def test_fillna_index_int64(self):\n raise NotImplementedError\n\n @pytest.mark.xfail(reason=\"Test not implemented\")\n def test_fillna_series_bool(self):\n raise NotImplementedError\n\n @pytest.mark.xfail(reason=\"Test not implemented\")\n def test_fillna_index_bool(self):\n raise NotImplementedError\n\n @pytest.mark.xfail(reason=\"Test not implemented\")\n def test_fillna_series_timedelta64(self):\n raise NotImplementedError\n\n @pytest.mark.xfail(reason=\"Test not implemented\")\n def test_fillna_series_period(self):\n raise NotImplementedError\n\n @pytest.mark.xfail(reason=\"Test not implemented\")\n def test_fillna_index_timedelta64(self):\n raise NotImplementedError\n\n @pytest.mark.xfail(reason=\"Test not implemented\")\n def test_fillna_index_period(self):\n raise NotImplementedError\n\n\nclass TestReplaceSeriesCoercion(CoercionBase):\n\n klasses = [\"series\"]\n method = \"replace\"\n\n rep: dict[str, list] = {}\n rep[\"object\"] = [\"a\", \"b\"]\n rep[\"int64\"] = [4, 5]\n rep[\"float64\"] = [1.1, 2.2]\n rep[\"complex128\"] = [1 + 1j, 2 + 2j]\n rep[\"bool\"] = [True, False]\n rep[\"datetime64[ns]\"] = [pd.Timestamp(\"2011-01-01\"), pd.Timestamp(\"2011-01-03\")]\n\n for tz in [\"UTC\", \"US/Eastern\"]:\n # to test tz => different tz replacement\n key = f\"datetime64[ns, {tz}]\"\n rep[key] = [\n pd.Timestamp(\"2011-01-01\", tz=tz),\n pd.Timestamp(\"2011-01-03\", tz=tz),\n ]\n\n rep[\"timedelta64[ns]\"] = [pd.Timedelta(\"1 day\"), pd.Timedelta(\"2 day\")]\n\n @pytest.fixture(params=[\"dict\", \"series\"])\n def how(self, request):\n return request.param\n\n @pytest.fixture(\n params=[\n \"object\",\n \"int64\",\n \"float64\",\n \"complex128\",\n \"bool\",\n \"datetime64[ns]\",\n \"datetime64[ns, UTC]\",\n \"datetime64[ns, US/Eastern]\",\n \"timedelta64[ns]\",\n ]\n )\n def from_key(self, request):\n return request.param\n\n @pytest.fixture(\n params=[\n \"object\",\n \"int64\",\n \"float64\",\n \"complex128\",\n \"bool\",\n \"datetime64[ns]\",\n \"datetime64[ns, UTC]\",\n \"datetime64[ns, US/Eastern]\",\n \"timedelta64[ns]\",\n ],\n ids=[\n \"object\",\n \"int64\",\n \"float64\",\n \"complex128\",\n \"bool\",\n \"datetime64\",\n \"datetime64tz\",\n 
\"datetime64tz\",\n \"timedelta64\",\n ],\n )\n def to_key(self, request):\n return request.param\n\n @pytest.fixture\n def replacer(self, how, from_key, to_key):\n \"\"\"\n Object we will pass to `Series.replace`\n \"\"\"\n if how == \"dict\":\n replacer = dict(zip(self.rep[from_key], self.rep[to_key]))\n elif how == \"series\":\n replacer = pd.Series(self.rep[to_key], index=self.rep[from_key])\n else:\n raise ValueError\n return replacer\n\n def test_replace_series(self, how, to_key, from_key, replacer):\n index = pd.Index([3, 4], name=\"xxx\")\n obj = pd.Series(self.rep[from_key], index=index, name=\"yyy\")\n assert obj.dtype == from_key\n\n if from_key.startswith(\"datetime\") and to_key.startswith(\"datetime\"):\n # tested below\n return\n elif from_key in [\"datetime64[ns, US/Eastern]\", \"datetime64[ns, UTC]\"]:\n # tested below\n return\n\n result = obj.replace(replacer)\n\n if (from_key == \"float64\" and to_key in (\"int64\")) or (\n from_key == \"complex128\" and to_key in (\"int64\", \"float64\")\n ):\n\n if not IS64 or is_platform_windows():\n pytest.skip(f\"32-bit platform buggy: {from_key} -> {to_key}\")\n\n # Expected: do not downcast by replacement\n exp = pd.Series(self.rep[to_key], index=index, name=\"yyy\", dtype=from_key)\n\n else:\n exp = pd.Series(self.rep[to_key], index=index, name=\"yyy\")\n assert exp.dtype == to_key\n\n tm.assert_series_equal(result, exp)\n\n @pytest.mark.parametrize(\n \"to_key\",\n [\"timedelta64[ns]\", \"bool\", \"object\", \"complex128\", \"float64\", \"int64\"],\n indirect=True,\n )\n @pytest.mark.parametrize(\n \"from_key\", [\"datetime64[ns, UTC]\", \"datetime64[ns, US/Eastern]\"], indirect=True\n )\n def test_replace_series_datetime_tz(self, how, to_key, from_key, replacer):\n index = pd.Index([3, 4], name=\"xyz\")\n obj = pd.Series(self.rep[from_key], index=index, name=\"yyy\")\n assert obj.dtype == from_key\n\n result = obj.replace(replacer)\n exp = pd.Series(self.rep[to_key], index=index, name=\"yyy\")\n assert exp.dtype == to_key\n\n tm.assert_series_equal(result, exp)\n\n @pytest.mark.parametrize(\n \"to_key\",\n [\"datetime64[ns]\", \"datetime64[ns, UTC]\", \"datetime64[ns, US/Eastern]\"],\n indirect=True,\n )\n @pytest.mark.parametrize(\n \"from_key\",\n [\"datetime64[ns]\", \"datetime64[ns, UTC]\", \"datetime64[ns, US/Eastern]\"],\n indirect=True,\n )\n def test_replace_series_datetime_datetime(self, how, to_key, from_key, replacer):\n index = pd.Index([3, 4], name=\"xyz\")\n obj = pd.Series(self.rep[from_key], index=index, name=\"yyy\")\n assert obj.dtype == from_key\n\n result = obj.replace(replacer)\n exp = pd.Series(self.rep[to_key], index=index, name=\"yyy\")\n assert exp.dtype == to_key\n\n tm.assert_series_equal(result, exp)\n\n @pytest.mark.xfail(reason=\"Test not implemented\")\n def test_replace_series_period(self):\n raise NotImplementedError\n",
"from __future__ import annotations\n\nfrom datetime import (\n datetime,\n timedelta,\n)\nfrom typing import Hashable\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import (\n index as libindex,\n lib,\n)\nfrom pandas._libs.tslibs import (\n BaseOffset,\n NaT,\n Period,\n Resolution,\n Tick,\n)\nfrom pandas._libs.tslibs.parsing import (\n DateParseError,\n parse_time_string,\n)\nfrom pandas._typing import (\n Dtype,\n DtypeObj,\n)\nfrom pandas.errors import InvalidIndexError\nfrom pandas.util._decorators import doc\n\nfrom pandas.core.dtypes.common import (\n is_datetime64_any_dtype,\n is_integer,\n is_scalar,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.dtypes import PeriodDtype\nfrom pandas.core.dtypes.missing import is_valid_na_for_dtype\n\nfrom pandas.core.arrays.period import (\n PeriodArray,\n period_array,\n raise_on_incompatible,\n validate_dtype_freq,\n)\nimport pandas.core.common as com\nimport pandas.core.indexes.base as ibase\nfrom pandas.core.indexes.base import maybe_extract_name\nfrom pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin\nfrom pandas.core.indexes.datetimes import (\n DatetimeIndex,\n Index,\n)\nfrom pandas.core.indexes.extension import inherit_names\nfrom pandas.core.indexes.numeric import Int64Index\n\n_index_doc_kwargs = dict(ibase._index_doc_kwargs)\n_index_doc_kwargs.update({\"target_klass\": \"PeriodIndex or list of Periods\"})\n_shared_doc_kwargs = {\n \"klass\": \"PeriodArray\",\n}\n\n# --- Period index sketch\n\n\ndef _new_PeriodIndex(cls, **d):\n # GH13277 for unpickling\n values = d.pop(\"data\")\n if values.dtype == \"int64\":\n freq = d.pop(\"freq\", None)\n values = PeriodArray(values, freq=freq)\n return cls._simple_new(values, **d)\n else:\n return cls(values, **d)\n\n\n@inherit_names(\n [\"strftime\", \"start_time\", \"end_time\"] + PeriodArray._field_ops,\n PeriodArray,\n wrap=True,\n)\n@inherit_names([\"is_leap_year\", \"_format_native_types\"], PeriodArray)\nclass PeriodIndex(DatetimeIndexOpsMixin):\n \"\"\"\n Immutable ndarray holding ordinal values indicating regular periods in time.\n\n Index keys are boxed to Period objects which carries the metadata (eg,\n frequency information).\n\n Parameters\n ----------\n data : array-like (1d int np.ndarray or PeriodArray), optional\n Optional period-like data to construct index with.\n copy : bool\n Make a copy of input ndarray.\n freq : str or period object, optional\n One of pandas period strings or corresponding objects.\n year : int, array, or Series, default None\n month : int, array, or Series, default None\n quarter : int, array, or Series, default None\n day : int, array, or Series, default None\n hour : int, array, or Series, default None\n minute : int, array, or Series, default None\n second : int, array, or Series, default None\n dtype : str or PeriodDtype, default None\n\n Attributes\n ----------\n day\n dayofweek\n day_of_week\n dayofyear\n day_of_year\n days_in_month\n daysinmonth\n end_time\n freq\n freqstr\n hour\n is_leap_year\n minute\n month\n quarter\n qyear\n second\n start_time\n week\n weekday\n weekofyear\n year\n\n Methods\n -------\n asfreq\n strftime\n to_timestamp\n\n See Also\n --------\n Index : The base pandas Index type.\n Period : Represents a period of time.\n DatetimeIndex : Index with datetime64 data.\n TimedeltaIndex : Index of timedelta64 data.\n period_range : Create a fixed-frequency PeriodIndex.\n\n Examples\n --------\n >>> idx = pd.PeriodIndex(year=[2000, 2002], quarter=[1, 3])\n >>> idx\n PeriodIndex(['2000Q1', '2002Q3'], 
dtype='period[Q-DEC]')\n \"\"\"\n\n _typ = \"periodindex\"\n _attributes = [\"name\"]\n\n _data: PeriodArray\n freq: BaseOffset\n\n _data_cls = PeriodArray\n _engine_type = libindex.PeriodEngine\n _supports_partial_string_indexing = True\n\n # --------------------------------------------------------------------\n # methods that dispatch to array and wrap result in Index\n # These are defined here instead of via inherit_names for mypy\n\n @doc(\n PeriodArray.asfreq,\n other=\"pandas.arrays.PeriodArray\",\n other_name=\"PeriodArray\",\n **_shared_doc_kwargs,\n )\n def asfreq(self, freq=None, how: str = \"E\") -> PeriodIndex:\n arr = self._data.asfreq(freq, how)\n return type(self)._simple_new(arr, name=self.name)\n\n @doc(PeriodArray.to_timestamp)\n def to_timestamp(self, freq=None, how=\"start\") -> DatetimeIndex:\n arr = self._data.to_timestamp(freq, how)\n return DatetimeIndex._simple_new(arr, name=self.name)\n\n # https://github.com/python/mypy/issues/1362\n # error: Decorated property not supported\n @property # type:ignore[misc]\n @doc(PeriodArray.hour.fget)\n def hour(self) -> Int64Index:\n return Int64Index(self._data.hour, name=self.name)\n\n # https://github.com/python/mypy/issues/1362\n # error: Decorated property not supported\n @property # type:ignore[misc]\n @doc(PeriodArray.minute.fget)\n def minute(self) -> Int64Index:\n return Int64Index(self._data.minute, name=self.name)\n\n # https://github.com/python/mypy/issues/1362\n # error: Decorated property not supported\n @property # type:ignore[misc]\n @doc(PeriodArray.second.fget)\n def second(self) -> Int64Index:\n return Int64Index(self._data.second, name=self.name)\n\n # ------------------------------------------------------------------------\n # Index Constructors\n\n def __new__(\n cls,\n data=None,\n ordinal=None,\n freq=None,\n dtype: Dtype | None = None,\n copy: bool = False,\n name: Hashable = None,\n **fields,\n ) -> PeriodIndex:\n\n valid_field_set = {\n \"year\",\n \"month\",\n \"day\",\n \"quarter\",\n \"hour\",\n \"minute\",\n \"second\",\n }\n\n if not set(fields).issubset(valid_field_set):\n argument = list(set(fields) - valid_field_set)[0]\n raise TypeError(f\"__new__() got an unexpected keyword argument {argument}\")\n\n name = maybe_extract_name(name, data, cls)\n\n if data is None and ordinal is None:\n # range-based.\n data, freq2 = PeriodArray._generate_range(None, None, None, freq, fields)\n # PeriodArray._generate range does validation that fields is\n # empty when really using the range-based constructor.\n freq = freq2\n\n data = PeriodArray(data, freq=freq)\n else:\n freq = validate_dtype_freq(dtype, freq)\n\n # PeriodIndex allow PeriodIndex(period_index, freq=different)\n # Let's not encourage that kind of behavior in PeriodArray.\n\n if freq and isinstance(data, cls) and data.freq != freq:\n # TODO: We can do some of these with no-copy / coercion?\n # e.g. 
D -> 2D seems to be OK\n data = data.asfreq(freq)\n\n if data is None and ordinal is not None:\n # we strangely ignore `ordinal` if data is passed.\n ordinal = np.asarray(ordinal, dtype=np.int64)\n data = PeriodArray(ordinal, freq=freq)\n else:\n # don't pass copy here, since we copy later.\n data = period_array(data=data, freq=freq)\n\n if copy:\n data = data.copy()\n\n return cls._simple_new(data, name=name)\n\n # ------------------------------------------------------------------------\n # Data\n\n @property\n def values(self) -> np.ndarray:\n return np.asarray(self, dtype=object)\n\n def _maybe_convert_timedelta(self, other):\n \"\"\"\n Convert timedelta-like input to an integer multiple of self.freq\n\n Parameters\n ----------\n other : timedelta, np.timedelta64, DateOffset, int, np.ndarray\n\n Returns\n -------\n converted : int, np.ndarray[int64]\n\n Raises\n ------\n IncompatibleFrequency : if the input cannot be written as a multiple\n of self.freq. Note IncompatibleFrequency subclasses ValueError.\n \"\"\"\n if isinstance(other, (timedelta, np.timedelta64, Tick, np.ndarray)):\n if isinstance(self.freq, Tick):\n # _check_timedeltalike_freq_compat will raise if incompatible\n delta = self._data._check_timedeltalike_freq_compat(other)\n return delta\n elif isinstance(other, BaseOffset):\n if other.base == self.freq.base:\n return other.n\n\n raise raise_on_incompatible(self, other)\n elif is_integer(other):\n # integer is passed to .shift via\n # _add_datetimelike_methods basically\n # but ufunc may pass integer to _add_delta\n return other\n\n # raise when input doesn't have freq\n raise raise_on_incompatible(self, None)\n\n def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:\n \"\"\"\n Can we compare values of the given dtype to our own?\n \"\"\"\n if not isinstance(dtype, PeriodDtype):\n return False\n return dtype.freq == self.freq\n\n # ------------------------------------------------------------------------\n # Index Methods\n\n def asof_locs(self, where: Index, mask: np.ndarray) -> np.ndarray:\n \"\"\"\n where : array of timestamps\n mask : np.ndarray[bool]\n Array of booleans where data is not NA.\n \"\"\"\n if isinstance(where, DatetimeIndex):\n where = PeriodIndex(where._values, freq=self.freq)\n elif not isinstance(where, PeriodIndex):\n raise TypeError(\"asof_locs `where` must be DatetimeIndex or PeriodIndex\")\n\n return super().asof_locs(where, mask)\n\n @doc(Index.astype)\n def astype(self, dtype, copy: bool = True, how=lib.no_default):\n dtype = pandas_dtype(dtype)\n\n if how is not lib.no_default:\n # GH#37982\n warnings.warn(\n \"The 'how' keyword in PeriodIndex.astype is deprecated and \"\n \"will be removed in a future version. 
\"\n \"Use index.to_timestamp(how=how) instead\",\n FutureWarning,\n stacklevel=2,\n )\n else:\n how = \"start\"\n\n if is_datetime64_any_dtype(dtype):\n # 'how' is index-specific, isn't part of the EA interface.\n tz = getattr(dtype, \"tz\", None)\n return self.to_timestamp(how=how).tz_localize(tz)\n\n return super().astype(dtype, copy=copy)\n\n @property\n def is_full(self) -> bool:\n \"\"\"\n Returns True if this PeriodIndex is range-like in that all Periods\n between start and end are present, in order.\n \"\"\"\n if len(self) == 0:\n return True\n if not self.is_monotonic_increasing:\n raise ValueError(\"Index is not monotonic\")\n values = self.asi8\n return ((values[1:] - values[:-1]) < 2).all()\n\n @property\n def inferred_type(self) -> str:\n # b/c data is represented as ints make sure we can't have ambiguous\n # indexing\n return \"period\"\n\n # ------------------------------------------------------------------------\n # Indexing Methods\n\n def _convert_tolerance(self, tolerance, target):\n # Returned tolerance must be in dtype/units so that\n # `|self._get_engine_target() - target._engine_target()| <= tolerance`\n # is meaningful. Since PeriodIndex returns int64 for engine_target,\n # we may need to convert timedelta64 tolerance to int64.\n tolerance = super()._convert_tolerance(tolerance, target)\n\n if self.dtype == target.dtype:\n # convert tolerance to i8\n tolerance = self._maybe_convert_timedelta(tolerance)\n\n return tolerance\n\n def get_loc(self, key, method=None, tolerance=None):\n \"\"\"\n Get integer location for requested label.\n\n Parameters\n ----------\n key : Period, NaT, str, or datetime\n String or datetime key must be parsable as Period.\n\n Returns\n -------\n loc : int or ndarray[int64]\n\n Raises\n ------\n KeyError\n Key is not present in the index.\n TypeError\n If key is listlike or otherwise not hashable.\n \"\"\"\n orig_key = key\n\n if not is_scalar(key):\n raise InvalidIndexError(key)\n\n if is_valid_na_for_dtype(key, self.dtype):\n key = NaT\n\n elif isinstance(key, str):\n\n try:\n loc = self._get_string_slice(key)\n return loc\n except (TypeError, ValueError):\n pass\n\n try:\n asdt, reso_str = parse_time_string(key, self.freq)\n except (ValueError, DateParseError) as err:\n # A string with invalid format\n raise KeyError(f\"Cannot interpret '{key}' as period\") from err\n\n reso = Resolution.from_attrname(reso_str)\n grp = reso.freq_group.value\n freqn = self.dtype.freq_group_code\n\n # _get_string_slice will handle cases where grp < freqn\n assert grp >= freqn\n\n # BusinessDay is a bit strange. 
It has a *lower* code, but we never parse\n # a string as \"BusinessDay\" resolution, just Day.\n if grp == freqn or (\n reso == Resolution.RESO_DAY and self.dtype.freq.name == \"B\"\n ):\n key = Period(asdt, freq=self.freq)\n loc = self.get_loc(key, method=method, tolerance=tolerance)\n return loc\n elif method is None:\n raise KeyError(key)\n else:\n key = asdt\n\n elif isinstance(key, Period):\n sfreq = self.freq\n kfreq = key.freq\n if not (\n sfreq.n == kfreq.n\n and sfreq._period_dtype_code == kfreq._period_dtype_code\n ):\n # GH#42247 For the subset of DateOffsets that can be Period freqs,\n # checking these two attributes is sufficient to check equality,\n # and much more performant than `self.freq == key.freq`\n raise KeyError(key)\n elif isinstance(key, datetime):\n try:\n key = Period(key, freq=self.freq)\n except ValueError as err:\n # we cannot construct the Period\n raise KeyError(orig_key) from err\n else:\n # in particular integer, which Period constructor would cast to string\n raise KeyError(key)\n\n try:\n key = Period(key, freq=self.freq)\n except ValueError as err:\n # we cannot construct the Period\n raise KeyError(orig_key) from err\n\n try:\n return Index.get_loc(self, key, method, tolerance)\n except KeyError as err:\n raise KeyError(orig_key) from err\n\n def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default):\n \"\"\"\n If label is a string or a datetime, cast it to Period.ordinal according\n to resolution.\n\n Parameters\n ----------\n label : object\n side : {'left', 'right'}\n kind : {'loc', 'getitem'}, or None\n\n Returns\n -------\n bound : Period or object\n\n Notes\n -----\n Value of `side` parameter should be validated in caller.\n\n \"\"\"\n assert kind in [\"loc\", \"getitem\", None, lib.no_default]\n self._deprecated_arg(kind, \"kind\", \"_maybe_cast_slice_bound\")\n\n if isinstance(label, datetime):\n return Period(label, freq=self.freq)\n elif isinstance(label, str):\n try:\n parsed, reso_str = parse_time_string(label, self.freq)\n except ValueError as err:\n # string cannot be parsed as datetime-like\n raise self._invalid_indexer(\"slice\", label) from err\n\n reso = Resolution.from_attrname(reso_str)\n lower, upper = self._parsed_string_to_bounds(reso, parsed)\n return lower if side == \"left\" else upper\n elif not isinstance(label, self._data._recognized_scalars):\n raise self._invalid_indexer(\"slice\", label)\n\n return label\n\n def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):\n grp = reso.freq_group\n iv = Period(parsed, freq=grp.value)\n return (iv.asfreq(self.freq, how=\"start\"), iv.asfreq(self.freq, how=\"end\"))\n\n def _validate_partial_date_slice(self, reso: Resolution):\n assert isinstance(reso, Resolution), (type(reso), reso)\n grp = reso.freq_group\n freqn = self.dtype.freq_group_code\n\n if not grp.value < freqn:\n # TODO: we used to also check for\n # reso in [\"day\", \"hour\", \"minute\", \"second\"]\n # why is that check not needed?\n raise ValueError\n\n def _get_string_slice(self, key: str):\n parsed, reso_str = parse_time_string(key, self.freq)\n reso = Resolution.from_attrname(reso_str)\n try:\n return self._partial_date_slice(reso, parsed)\n except KeyError as err:\n raise KeyError(key) from err\n\n\ndef period_range(\n start=None, end=None, periods: int | None = None, freq=None, name=None\n) -> PeriodIndex:\n \"\"\"\n Return a fixed frequency PeriodIndex.\n\n The day (calendar) is the default frequency.\n\n Parameters\n ----------\n start : str or period-like, default None\n Left 
bound for generating periods.\n end : str or period-like, default None\n Right bound for generating periods.\n periods : int, default None\n Number of periods to generate.\n freq : str or DateOffset, optional\n Frequency alias. By default the freq is taken from `start` or `end`\n if those are Period objects. Otherwise, the default is ``\"D\"`` for\n daily frequency.\n name : str, default None\n Name of the resulting PeriodIndex.\n\n Returns\n -------\n PeriodIndex\n\n Notes\n -----\n Of the three parameters: ``start``, ``end``, and ``periods``, exactly two\n must be specified.\n\n To learn more about the frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n >>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')\n PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05', '2017-06',\n '2017-07', '2017-08', '2017-09', '2017-10', '2017-11', '2017-12',\n '2018-01'],\n dtype='period[M]')\n\n If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor\n endpoints for a ``PeriodIndex`` with frequency matching that of the\n ``period_range`` constructor.\n\n >>> pd.period_range(start=pd.Period('2017Q1', freq='Q'),\n ... end=pd.Period('2017Q2', freq='Q'), freq='M')\n PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'],\n dtype='period[M]')\n \"\"\"\n if com.count_not_none(start, end, periods) != 2:\n raise ValueError(\n \"Of the three parameters: start, end, and periods, \"\n \"exactly two must be specified\"\n )\n if freq is None and (not isinstance(start, Period) and not isinstance(end, Period)):\n freq = \"D\"\n\n data, freq = PeriodArray._generate_range(start, end, periods, freq, fields={})\n data = PeriodArray(data, freq=freq)\n return PeriodIndex(data, name=name)\n",
"\"\"\"\ntest all other .agg behavior\n\"\"\"\n\nimport datetime as dt\nfrom functools import partial\n\nimport numpy as np\nimport pytest\n\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n PeriodIndex,\n Series,\n date_range,\n period_range,\n)\nimport pandas._testing as tm\nfrom pandas.core.base import SpecificationError\n\nfrom pandas.io.formats.printing import pprint_thing\n\n\ndef test_agg_api():\n # GH 6337\n # https://stackoverflow.com/questions/21706030/pandas-groupby-agg-function-column-dtype-error\n # different api for agg when passed custom function with mixed frame\n\n df = DataFrame(\n {\n \"data1\": np.random.randn(5),\n \"data2\": np.random.randn(5),\n \"key1\": [\"a\", \"a\", \"b\", \"b\", \"a\"],\n \"key2\": [\"one\", \"two\", \"one\", \"two\", \"one\"],\n }\n )\n grouped = df.groupby(\"key1\")\n\n def peak_to_peak(arr):\n return arr.max() - arr.min()\n\n with tm.assert_produces_warning(\n FutureWarning, match=\"Dropping invalid\", check_stacklevel=False\n ):\n expected = grouped.agg([peak_to_peak])\n expected.columns = [\"data1\", \"data2\"]\n\n with tm.assert_produces_warning(\n FutureWarning, match=\"Dropping invalid\", check_stacklevel=False\n ):\n result = grouped.agg(peak_to_peak)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_datetimes_mixed():\n data = [[1, \"2012-01-01\", 1.0], [2, \"2012-01-02\", 2.0], [3, None, 3.0]]\n\n df1 = DataFrame(\n {\n \"key\": [x[0] for x in data],\n \"date\": [x[1] for x in data],\n \"value\": [x[2] for x in data],\n }\n )\n\n data = [\n [\n row[0],\n (dt.datetime.strptime(row[1], \"%Y-%m-%d\").date() if row[1] else None),\n row[2],\n ]\n for row in data\n ]\n\n df2 = DataFrame(\n {\n \"key\": [x[0] for x in data],\n \"date\": [x[1] for x in data],\n \"value\": [x[2] for x in data],\n }\n )\n\n df1[\"weights\"] = df1[\"value\"] / df1[\"value\"].sum()\n gb1 = df1.groupby(\"date\").aggregate(np.sum)\n\n df2[\"weights\"] = df1[\"value\"] / df1[\"value\"].sum()\n gb2 = df2.groupby(\"date\").aggregate(np.sum)\n\n assert len(gb1) == len(gb2)\n\n\ndef test_agg_period_index():\n prng = period_range(\"2012-1-1\", freq=\"M\", periods=3)\n df = DataFrame(np.random.randn(3, 2), index=prng)\n rs = df.groupby(level=0).sum()\n assert isinstance(rs.index, PeriodIndex)\n\n # GH 3579\n index = period_range(start=\"1999-01\", periods=5, freq=\"M\")\n s1 = Series(np.random.rand(len(index)), index=index)\n s2 = Series(np.random.rand(len(index)), index=index)\n df = DataFrame.from_dict({\"s1\": s1, \"s2\": s2})\n grouped = df.groupby(df.index.month)\n list(grouped)\n\n\ndef test_agg_dict_parameter_cast_result_dtypes():\n # GH 12821\n\n df = DataFrame(\n {\n \"class\": [\"A\", \"A\", \"B\", \"B\", \"C\", \"C\", \"D\", \"D\"],\n \"time\": date_range(\"1/1/2011\", periods=8, freq=\"H\"),\n }\n )\n df.loc[[0, 1, 2, 5], \"time\"] = None\n\n # test for `first` function\n exp = df.loc[[0, 3, 4, 6]].set_index(\"class\")\n grouped = df.groupby(\"class\")\n tm.assert_frame_equal(grouped.first(), exp)\n tm.assert_frame_equal(grouped.agg(\"first\"), exp)\n tm.assert_frame_equal(grouped.agg({\"time\": \"first\"}), exp)\n tm.assert_series_equal(grouped.time.first(), exp[\"time\"])\n tm.assert_series_equal(grouped.time.agg(\"first\"), exp[\"time\"])\n\n # test for `last` function\n exp = df.loc[[0, 3, 4, 7]].set_index(\"class\")\n grouped = df.groupby(\"class\")\n tm.assert_frame_equal(grouped.last(), exp)\n tm.assert_frame_equal(grouped.agg(\"last\"), exp)\n 
tm.assert_frame_equal(grouped.agg({\"time\": \"last\"}), exp)\n tm.assert_series_equal(grouped.time.last(), exp[\"time\"])\n tm.assert_series_equal(grouped.time.agg(\"last\"), exp[\"time\"])\n\n # count\n exp = Series([2, 2, 2, 2], index=Index(list(\"ABCD\"), name=\"class\"), name=\"time\")\n tm.assert_series_equal(grouped.time.agg(len), exp)\n tm.assert_series_equal(grouped.time.size(), exp)\n\n exp = Series([0, 1, 1, 2], index=Index(list(\"ABCD\"), name=\"class\"), name=\"time\")\n tm.assert_series_equal(grouped.time.count(), exp)\n\n\ndef test_agg_cast_results_dtypes():\n # similar to GH12821\n # xref #11444\n u = [dt.datetime(2015, x + 1, 1) for x in range(12)]\n v = list(\"aaabbbbbbccd\")\n df = DataFrame({\"X\": v, \"Y\": u})\n\n result = df.groupby(\"X\")[\"Y\"].agg(len)\n expected = df.groupby(\"X\")[\"Y\"].count()\n tm.assert_series_equal(result, expected)\n\n\ndef test_aggregate_float64_no_int64():\n # see gh-11199\n df = DataFrame({\"a\": [1, 2, 3, 4, 5], \"b\": [1, 2, 2, 4, 5], \"c\": [1, 2, 3, 4, 5]})\n\n expected = DataFrame({\"a\": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5])\n expected.index.name = \"b\"\n\n result = df.groupby(\"b\")[[\"a\"]].mean()\n tm.assert_frame_equal(result, expected)\n\n expected = DataFrame({\"a\": [1, 2.5, 4, 5], \"c\": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5])\n expected.index.name = \"b\"\n\n result = df.groupby(\"b\")[[\"a\", \"c\"]].mean()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_aggregate_api_consistency():\n # GH 9052\n # make sure that the aggregates via dict\n # are consistent\n df = DataFrame(\n {\n \"A\": [\"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"foo\"],\n \"B\": [\"one\", \"one\", \"two\", \"two\", \"two\", \"two\", \"one\", \"two\"],\n \"C\": np.random.randn(8) + 1.0,\n \"D\": np.arange(8),\n }\n )\n\n grouped = df.groupby([\"A\", \"B\"])\n c_mean = grouped[\"C\"].mean()\n c_sum = grouped[\"C\"].sum()\n d_mean = grouped[\"D\"].mean()\n d_sum = grouped[\"D\"].sum()\n\n result = grouped[\"D\"].agg([\"sum\", \"mean\"])\n expected = pd.concat([d_sum, d_mean], axis=1)\n expected.columns = [\"sum\", \"mean\"]\n tm.assert_frame_equal(result, expected, check_like=True)\n\n result = grouped.agg([np.sum, np.mean])\n expected = pd.concat([c_sum, c_mean, d_sum, d_mean], axis=1)\n expected.columns = MultiIndex.from_product([[\"C\", \"D\"], [\"sum\", \"mean\"]])\n tm.assert_frame_equal(result, expected, check_like=True)\n\n result = grouped[[\"D\", \"C\"]].agg([np.sum, np.mean])\n expected = pd.concat([d_sum, d_mean, c_sum, c_mean], axis=1)\n expected.columns = MultiIndex.from_product([[\"D\", \"C\"], [\"sum\", \"mean\"]])\n tm.assert_frame_equal(result, expected, check_like=True)\n\n result = grouped.agg({\"C\": \"mean\", \"D\": \"sum\"})\n expected = pd.concat([d_sum, c_mean], axis=1)\n tm.assert_frame_equal(result, expected, check_like=True)\n\n result = grouped.agg({\"C\": [\"mean\", \"sum\"], \"D\": [\"mean\", \"sum\"]})\n expected = pd.concat([c_mean, c_sum, d_mean, d_sum], axis=1)\n expected.columns = MultiIndex.from_product([[\"C\", \"D\"], [\"mean\", \"sum\"]])\n\n msg = r\"Column\\(s\\) \\['r', 'r2'\\] do not exist\"\n with pytest.raises(KeyError, match=msg):\n grouped[[\"D\", \"C\"]].agg({\"r\": np.sum, \"r2\": np.mean})\n\n\ndef test_agg_dict_renaming_deprecation():\n # 15931\n df = DataFrame({\"A\": [1, 1, 1, 2, 2], \"B\": range(5), \"C\": range(5)})\n\n msg = r\"nested renamer is not supported\"\n with pytest.raises(SpecificationError, match=msg):\n df.groupby(\"A\").agg(\n {\"B\": {\"foo\": [\"sum\", 
\"max\"]}, \"C\": {\"bar\": [\"count\", \"min\"]}}\n )\n\n msg = r\"Column\\(s\\) \\['ma'\\] do not exist\"\n with pytest.raises(KeyError, match=msg):\n df.groupby(\"A\")[[\"B\", \"C\"]].agg({\"ma\": \"max\"})\n\n msg = r\"nested renamer is not supported\"\n with pytest.raises(SpecificationError, match=msg):\n df.groupby(\"A\").B.agg({\"foo\": \"count\"})\n\n\ndef test_agg_compat():\n # GH 12334\n df = DataFrame(\n {\n \"A\": [\"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"foo\"],\n \"B\": [\"one\", \"one\", \"two\", \"two\", \"two\", \"two\", \"one\", \"two\"],\n \"C\": np.random.randn(8) + 1.0,\n \"D\": np.arange(8),\n }\n )\n\n g = df.groupby([\"A\", \"B\"])\n\n msg = r\"nested renamer is not supported\"\n with pytest.raises(SpecificationError, match=msg):\n g[\"D\"].agg({\"C\": [\"sum\", \"std\"]})\n\n with pytest.raises(SpecificationError, match=msg):\n g[\"D\"].agg({\"C\": \"sum\", \"D\": \"std\"})\n\n\ndef test_agg_nested_dicts():\n # API change for disallowing these types of nested dicts\n df = DataFrame(\n {\n \"A\": [\"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"foo\"],\n \"B\": [\"one\", \"one\", \"two\", \"two\", \"two\", \"two\", \"one\", \"two\"],\n \"C\": np.random.randn(8) + 1.0,\n \"D\": np.arange(8),\n }\n )\n\n g = df.groupby([\"A\", \"B\"])\n\n msg = r\"nested renamer is not supported\"\n with pytest.raises(SpecificationError, match=msg):\n g.aggregate({\"r1\": {\"C\": [\"mean\", \"sum\"]}, \"r2\": {\"D\": [\"mean\", \"sum\"]}})\n\n with pytest.raises(SpecificationError, match=msg):\n g.agg({\"C\": {\"ra\": [\"mean\", \"std\"]}, \"D\": {\"rb\": [\"mean\", \"std\"]}})\n\n # same name as the original column\n # GH9052\n with pytest.raises(SpecificationError, match=msg):\n g[\"D\"].agg({\"result1\": np.sum, \"result2\": np.mean})\n\n with pytest.raises(SpecificationError, match=msg):\n g[\"D\"].agg({\"D\": np.sum, \"result2\": np.mean})\n\n\ndef test_agg_item_by_item_raise_typeerror():\n df = DataFrame(np.random.randint(10, size=(20, 10)))\n\n def raiseException(df):\n pprint_thing(\"----------------------------------------\")\n pprint_thing(df.to_string())\n raise TypeError(\"test\")\n\n with pytest.raises(TypeError, match=\"test\"):\n with tm.assert_produces_warning(FutureWarning, match=\"Dropping invalid\"):\n df.groupby(0).agg(raiseException)\n\n\ndef test_series_agg_multikey():\n ts = tm.makeTimeSeries()\n grouped = ts.groupby([lambda x: x.year, lambda x: x.month])\n\n result = grouped.agg(np.sum)\n expected = grouped.sum()\n tm.assert_series_equal(result, expected)\n\n\ndef test_series_agg_multi_pure_python():\n data = DataFrame(\n {\n \"A\": [\n \"foo\",\n \"foo\",\n \"foo\",\n \"foo\",\n \"bar\",\n \"bar\",\n \"bar\",\n \"bar\",\n \"foo\",\n \"foo\",\n \"foo\",\n ],\n \"B\": [\n \"one\",\n \"one\",\n \"one\",\n \"two\",\n \"one\",\n \"one\",\n \"one\",\n \"two\",\n \"two\",\n \"two\",\n \"one\",\n ],\n \"C\": [\n \"dull\",\n \"dull\",\n \"shiny\",\n \"dull\",\n \"dull\",\n \"shiny\",\n \"shiny\",\n \"dull\",\n \"shiny\",\n \"shiny\",\n \"shiny\",\n ],\n \"D\": np.random.randn(11),\n \"E\": np.random.randn(11),\n \"F\": np.random.randn(11),\n }\n )\n\n def bad(x):\n assert len(x.values.base) > 0\n return \"foo\"\n\n result = data.groupby([\"A\", \"B\"]).agg(bad)\n expected = data.groupby([\"A\", \"B\"]).agg(lambda x: \"foo\")\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_consistency():\n # agg with ([]) and () not consistent\n # GH 6715\n def P1(a):\n return np.percentile(a.dropna(), q=1)\n\n df = DataFrame(\n {\n 
\"col1\": [1, 2, 3, 4],\n \"col2\": [10, 25, 26, 31],\n \"date\": [\n dt.date(2013, 2, 10),\n dt.date(2013, 2, 10),\n dt.date(2013, 2, 11),\n dt.date(2013, 2, 11),\n ],\n }\n )\n\n g = df.groupby(\"date\")\n\n expected = g.agg([P1])\n expected.columns = expected.columns.levels[0]\n\n result = g.agg(P1)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_callables():\n # GH 7929\n df = DataFrame({\"foo\": [1, 2], \"bar\": [3, 4]}).astype(np.int64)\n\n class fn_class:\n def __call__(self, x):\n return sum(x)\n\n equiv_callables = [\n sum,\n np.sum,\n lambda x: sum(x),\n lambda x: x.sum(),\n partial(sum),\n fn_class(),\n ]\n\n expected = df.groupby(\"foo\").agg(sum)\n for ecall in equiv_callables:\n result = df.groupby(\"foo\").agg(ecall)\n tm.assert_frame_equal(result, expected)\n\n\[email protected]_array_manager_not_yet_implemented # TODO(ArrayManager) columns with ndarrays\ndef test_agg_over_numpy_arrays():\n # GH 3788\n df = DataFrame(\n [\n [1, np.array([10, 20, 30])],\n [1, np.array([40, 50, 60])],\n [2, np.array([20, 30, 40])],\n ],\n columns=[\"category\", \"arraydata\"],\n )\n gb = df.groupby(\"category\")\n\n expected_data = [[np.array([50, 70, 90])], [np.array([20, 30, 40])]]\n expected_index = Index([1, 2], name=\"category\")\n expected_column = [\"arraydata\"]\n expected = DataFrame(expected_data, index=expected_index, columns=expected_column)\n\n alt = gb.sum(numeric_only=False)\n tm.assert_frame_equal(alt, expected)\n\n result = gb.agg(\"sum\", numeric_only=False)\n tm.assert_frame_equal(result, expected)\n\n # FIXME: the original version of this test called `gb.agg(sum)`\n # and that raises TypeError if `numeric_only=False` is passed\n\n\[email protected](\"as_period\", [True, False])\ndef test_agg_tzaware_non_datetime_result(as_period):\n # discussed in GH#29589, fixed in GH#29641, operating on tzaware values\n # with function that is not dtype-preserving\n dti = date_range(\"2012-01-01\", periods=4, tz=\"UTC\")\n if as_period:\n dti = dti.tz_localize(None).to_period(\"D\")\n\n df = DataFrame({\"a\": [0, 0, 1, 1], \"b\": dti})\n gb = df.groupby(\"a\")\n\n # Case that _does_ preserve the dtype\n result = gb[\"b\"].agg(lambda x: x.iloc[0])\n expected = Series(dti[::2], name=\"b\")\n expected.index.name = \"a\"\n tm.assert_series_equal(result, expected)\n\n # Cases that do _not_ preserve the dtype\n result = gb[\"b\"].agg(lambda x: x.iloc[0].year)\n expected = Series([2012, 2012], name=\"b\")\n expected.index.name = \"a\"\n tm.assert_series_equal(result, expected)\n\n result = gb[\"b\"].agg(lambda x: x.iloc[-1] - x.iloc[0])\n expected = Series([pd.Timedelta(days=1), pd.Timedelta(days=1)], name=\"b\")\n expected.index.name = \"a\"\n if as_period:\n expected = Series([pd.offsets.Day(1), pd.offsets.Day(1)], name=\"b\")\n expected.index.name = \"a\"\n tm.assert_series_equal(result, expected)\n\n\ndef test_agg_timezone_round_trip():\n # GH 15426\n ts = pd.Timestamp(\"2016-01-01 12:00:00\", tz=\"US/Pacific\")\n df = DataFrame({\"a\": 1, \"b\": [ts + dt.timedelta(minutes=nn) for nn in range(10)]})\n\n result1 = df.groupby(\"a\")[\"b\"].agg(np.min).iloc[0]\n result2 = df.groupby(\"a\")[\"b\"].agg(lambda x: np.min(x)).iloc[0]\n result3 = df.groupby(\"a\")[\"b\"].min().iloc[0]\n\n assert result1 == ts\n assert result2 == ts\n assert result3 == ts\n\n dates = [\n pd.Timestamp(f\"2016-01-0{i:d} 12:00:00\", tz=\"US/Pacific\") for i in range(1, 5)\n ]\n df = DataFrame({\"A\": [\"a\", \"b\"] * 2, \"B\": dates})\n grouped = df.groupby(\"A\")\n\n ts = df[\"B\"].iloc[0]\n assert ts == 
grouped.nth(0)[\"B\"].iloc[0]\n assert ts == grouped.head(1)[\"B\"].iloc[0]\n assert ts == grouped.first()[\"B\"].iloc[0]\n\n # GH#27110 applying iloc should return a DataFrame\n assert ts == grouped.apply(lambda x: x.iloc[0]).iloc[0, 1]\n\n ts = df[\"B\"].iloc[2]\n assert ts == grouped.last()[\"B\"].iloc[0]\n\n # GH#27110 applying iloc should return a DataFrame\n assert ts == grouped.apply(lambda x: x.iloc[-1]).iloc[0, 1]\n\n\ndef test_sum_uint64_overflow():\n # see gh-14758\n # Convert to uint64 and don't overflow\n df = DataFrame([[1, 2], [3, 4], [5, 6]], dtype=object)\n df = df + 9223372036854775807\n\n index = Index(\n [9223372036854775808, 9223372036854775810, 9223372036854775812], dtype=np.uint64\n )\n expected = DataFrame(\n {1: [9223372036854775809, 9223372036854775811, 9223372036854775813]},\n index=index,\n )\n\n expected.index.name = 0\n result = df.groupby(0).sum(numeric_only=False)\n tm.assert_frame_equal(result, expected)\n\n # out column is non-numeric, so with numeric_only=True it is dropped\n result2 = df.groupby(0).sum(numeric_only=True)\n expected2 = expected[[]]\n tm.assert_frame_equal(result2, expected2)\n\n\[email protected](\n \"structure, expected\",\n [\n (tuple, DataFrame({\"C\": {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}})),\n (list, DataFrame({\"C\": {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}})),\n (\n lambda x: tuple(x),\n DataFrame({\"C\": {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}}),\n ),\n (\n lambda x: list(x),\n DataFrame({\"C\": {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}}),\n ),\n ],\n)\ndef test_agg_structs_dataframe(structure, expected):\n df = DataFrame(\n {\"A\": [1, 1, 1, 3, 3, 3], \"B\": [1, 1, 1, 4, 4, 4], \"C\": [1, 1, 1, 3, 4, 4]}\n )\n\n result = df.groupby([\"A\", \"B\"]).aggregate(structure)\n expected.index.names = [\"A\", \"B\"]\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"structure, expected\",\n [\n (tuple, Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name=\"C\")),\n (list, Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name=\"C\")),\n (lambda x: tuple(x), Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name=\"C\")),\n (lambda x: list(x), Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name=\"C\")),\n ],\n)\ndef test_agg_structs_series(structure, expected):\n # Issue #18079\n df = DataFrame(\n {\"A\": [1, 1, 1, 3, 3, 3], \"B\": [1, 1, 1, 4, 4, 4], \"C\": [1, 1, 1, 3, 4, 4]}\n )\n\n result = df.groupby(\"A\")[\"C\"].aggregate(structure)\n expected.index.name = \"A\"\n tm.assert_series_equal(result, expected)\n\n\ndef test_agg_category_nansum(observed):\n categories = [\"a\", \"b\", \"c\"]\n df = DataFrame(\n {\"A\": pd.Categorical([\"a\", \"a\", \"b\"], categories=categories), \"B\": [1, 2, 3]}\n )\n result = df.groupby(\"A\", observed=observed).B.agg(np.nansum)\n expected = Series(\n [3, 3, 0],\n index=pd.CategoricalIndex([\"a\", \"b\", \"c\"], categories=categories, name=\"A\"),\n name=\"B\",\n )\n if observed:\n expected = expected[expected != 0]\n tm.assert_series_equal(result, expected)\n\n\ndef test_agg_list_like_func():\n # GH 18473\n df = DataFrame({\"A\": [str(x) for x in range(3)], \"B\": [str(x) for x in range(3)]})\n grouped = df.groupby(\"A\", as_index=False, sort=False)\n result = grouped.agg({\"B\": lambda x: list(x)})\n expected = DataFrame(\n {\"A\": [str(x) for x in range(3)], \"B\": [[str(x)] for x in range(3)]}\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_lambda_with_timezone():\n # GH 23683\n df = DataFrame(\n {\n \"tag\": [1, 1],\n \"date\": [\n pd.Timestamp(\"2018-01-01\", tz=\"UTC\"),\n 
pd.Timestamp(\"2018-01-02\", tz=\"UTC\"),\n ],\n }\n )\n result = df.groupby(\"tag\").agg({\"date\": lambda e: e.head(1)})\n expected = DataFrame(\n [pd.Timestamp(\"2018-01-01\", tz=\"UTC\")],\n index=Index([1], name=\"tag\"),\n columns=[\"date\"],\n )\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"err_cls\",\n [\n NotImplementedError,\n RuntimeError,\n KeyError,\n IndexError,\n OSError,\n ValueError,\n ArithmeticError,\n AttributeError,\n ],\n)\ndef test_groupby_agg_err_catching(err_cls):\n # make sure we suppress anything other than TypeError or AssertionError\n # in _python_agg_general\n\n # Use a non-standard EA to make sure we don't go down ndarray paths\n from pandas.tests.extension.decimal.array import (\n DecimalArray,\n make_data,\n to_decimal,\n )\n\n data = make_data()[:5]\n df = DataFrame(\n {\"id1\": [0, 0, 0, 1, 1], \"id2\": [0, 1, 0, 1, 1], \"decimals\": DecimalArray(data)}\n )\n\n expected = Series(to_decimal([data[0], data[3]]))\n\n def weird_func(x):\n # weird function that raise something other than TypeError or IndexError\n # in _python_agg_general\n if len(x) == 0:\n raise err_cls\n return x.iloc[0]\n\n result = df[\"decimals\"].groupby(df[\"id1\"]).agg(weird_func)\n tm.assert_series_equal(result, expected, check_names=False)\n",
"import pytest\n\nimport pandas as pd\nfrom pandas import MultiIndex\nimport pandas._testing as tm\n\n\ndef check_level_names(index, names):\n assert [level.name for level in index.levels] == list(names)\n\n\ndef test_slice_keep_name():\n x = MultiIndex.from_tuples([(\"a\", \"b\"), (1, 2), (\"c\", \"d\")], names=[\"x\", \"y\"])\n assert x[1:].names == x.names\n\n\ndef test_index_name_retained():\n # GH9857\n result = pd.DataFrame({\"x\": [1, 2, 6], \"y\": [2, 2, 8], \"z\": [-5, 0, 5]})\n result = result.set_index(\"z\")\n result.loc[10] = [9, 10]\n df_expected = pd.DataFrame(\n {\"x\": [1, 2, 6, 9], \"y\": [2, 2, 8, 10], \"z\": [-5, 0, 5, 10]}\n )\n df_expected = df_expected.set_index(\"z\")\n tm.assert_frame_equal(result, df_expected)\n\n\ndef test_changing_names(idx):\n assert [level.name for level in idx.levels] == [\"first\", \"second\"]\n\n view = idx.view()\n copy = idx.copy()\n shallow_copy = idx._view()\n\n # changing names should not change level names on object\n new_names = [name + \"a\" for name in idx.names]\n idx.names = new_names\n check_level_names(idx, [\"firsta\", \"seconda\"])\n\n # and not on copies\n check_level_names(view, [\"first\", \"second\"])\n check_level_names(copy, [\"first\", \"second\"])\n check_level_names(shallow_copy, [\"first\", \"second\"])\n\n # and copies shouldn't change original\n shallow_copy.names = [name + \"c\" for name in shallow_copy.names]\n check_level_names(idx, [\"firsta\", \"seconda\"])\n\n\ndef test_take_preserve_name(idx):\n taken = idx.take([3, 0, 1])\n assert taken.names == idx.names\n\n\ndef test_copy_names():\n # Check that adding a \"names\" parameter to the copy is honored\n # GH14302\n with tm.assert_produces_warning(FutureWarning):\n # subclass-specific kwargs to pd.Index\n multi_idx = pd.Index([(1, 2), (3, 4)], names=[\"MyName1\", \"MyName2\"])\n multi_idx1 = multi_idx.copy()\n\n assert multi_idx.equals(multi_idx1)\n assert multi_idx.names == [\"MyName1\", \"MyName2\"]\n assert multi_idx1.names == [\"MyName1\", \"MyName2\"]\n\n multi_idx2 = multi_idx.copy(names=[\"NewName1\", \"NewName2\"])\n\n assert multi_idx.equals(multi_idx2)\n assert multi_idx.names == [\"MyName1\", \"MyName2\"]\n assert multi_idx2.names == [\"NewName1\", \"NewName2\"]\n\n multi_idx3 = multi_idx.copy(name=[\"NewName1\", \"NewName2\"])\n\n assert multi_idx.equals(multi_idx3)\n assert multi_idx.names == [\"MyName1\", \"MyName2\"]\n assert multi_idx3.names == [\"NewName1\", \"NewName2\"]\n\n # gh-35592\n with pytest.raises(ValueError, match=\"Length of new names must be 2, got 1\"):\n multi_idx.copy(names=[\"mario\"])\n\n with pytest.raises(TypeError, match=\"MultiIndex.name must be a hashable type\"):\n multi_idx.copy(names=[[\"mario\"], [\"luigi\"]])\n\n\ndef test_names(idx, index_names):\n\n # names are assigned in setup\n assert index_names == [\"first\", \"second\"]\n level_names = [level.name for level in idx.levels]\n assert level_names == index_names\n\n # setting bad names on existing\n index = idx\n with pytest.raises(ValueError, match=\"^Length of names\"):\n setattr(index, \"names\", list(index.names) + [\"third\"])\n with pytest.raises(ValueError, match=\"^Length of names\"):\n setattr(index, \"names\", [])\n\n # initializing with bad names (should always be equivalent)\n major_axis, minor_axis = idx.levels\n major_codes, minor_codes = idx.codes\n with pytest.raises(ValueError, match=\"^Length of names\"):\n MultiIndex(\n levels=[major_axis, minor_axis],\n codes=[major_codes, minor_codes],\n names=[\"first\"],\n )\n with pytest.raises(ValueError, 
match=\"^Length of names\"):\n MultiIndex(\n levels=[major_axis, minor_axis],\n codes=[major_codes, minor_codes],\n names=[\"first\", \"second\", \"third\"],\n )\n\n # names are assigned on index, but not transferred to the levels\n index.names = [\"a\", \"b\"]\n level_names = [level.name for level in index.levels]\n assert level_names == [\"a\", \"b\"]\n\n\ndef test_duplicate_level_names_access_raises(idx):\n # GH19029\n idx.names = [\"foo\", \"foo\"]\n with pytest.raises(ValueError, match=\"name foo occurs multiple times\"):\n idx._get_level_number(\"foo\")\n\n\ndef test_get_names_from_levels():\n idx = MultiIndex.from_product([[\"a\"], [1, 2]], names=[\"a\", \"b\"])\n\n assert idx.levels[0].name == \"a\"\n assert idx.levels[1].name == \"b\"\n\n\ndef test_setting_names_from_levels_raises():\n idx = MultiIndex.from_product([[\"a\"], [1, 2]], names=[\"a\", \"b\"])\n with pytest.raises(RuntimeError, match=\"set_names\"):\n idx.levels[0].name = \"foo\"\n\n with pytest.raises(RuntimeError, match=\"set_names\"):\n idx.levels[1].name = \"foo\"\n\n new = pd.Series(1, index=idx.levels[0])\n with pytest.raises(RuntimeError, match=\"set_names\"):\n new.index.name = \"bar\"\n\n assert pd.Index._no_setting_name is False\n assert pd.Int64Index._no_setting_name is False\n assert pd.RangeIndex._no_setting_name is False\n\n\[email protected](\"func\", [\"rename\", \"set_names\"])\[email protected](\n \"rename_dict, exp_names\",\n [\n ({\"x\": \"z\"}, [\"z\", \"y\", \"z\"]),\n ({\"x\": \"z\", \"y\": \"x\"}, [\"z\", \"x\", \"z\"]),\n ({\"y\": \"z\"}, [\"x\", \"z\", \"x\"]),\n ({}, [\"x\", \"y\", \"x\"]),\n ({\"z\": \"a\"}, [\"x\", \"y\", \"x\"]),\n ({\"y\": \"z\", \"a\": \"b\"}, [\"x\", \"z\", \"x\"]),\n ],\n)\ndef test_name_mi_with_dict_like_duplicate_names(func, rename_dict, exp_names):\n # GH#20421\n mi = MultiIndex.from_arrays([[1, 2], [3, 4], [5, 6]], names=[\"x\", \"y\", \"x\"])\n result = getattr(mi, func)(rename_dict)\n expected = MultiIndex.from_arrays([[1, 2], [3, 4], [5, 6]], names=exp_names)\n tm.assert_index_equal(result, expected)\n\n\[email protected](\"func\", [\"rename\", \"set_names\"])\[email protected](\n \"rename_dict, exp_names\",\n [\n ({\"x\": \"z\"}, [\"z\", \"y\"]),\n ({\"x\": \"z\", \"y\": \"x\"}, [\"z\", \"x\"]),\n ({\"a\": \"z\"}, [\"x\", \"y\"]),\n ({}, [\"x\", \"y\"]),\n ],\n)\ndef test_name_mi_with_dict_like(func, rename_dict, exp_names):\n # GH#20421\n mi = MultiIndex.from_arrays([[1, 2], [3, 4]], names=[\"x\", \"y\"])\n result = getattr(mi, func)(rename_dict)\n expected = MultiIndex.from_arrays([[1, 2], [3, 4]], names=exp_names)\n tm.assert_index_equal(result, expected)\n\n\ndef test_index_name_with_dict_like_raising():\n # GH#20421\n ix = pd.Index([1, 2])\n msg = \"Can only pass dict-like as `names` for MultiIndex.\"\n with pytest.raises(TypeError, match=msg):\n ix.set_names({\"x\": \"z\"})\n\n\ndef test_multiindex_name_and_level_raising():\n # GH#20421\n mi = MultiIndex.from_arrays([[1, 2], [3, 4]], names=[\"x\", \"y\"])\n with pytest.raises(TypeError, match=\"Can not pass level for dictlike `names`.\"):\n mi.set_names(names={\"x\": \"z\"}, level={\"x\": \"z\"})\n",
"import numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.tests.extension.base.base import BaseExtensionTests\n\n\nclass BaseSetitemTests(BaseExtensionTests):\n @pytest.fixture(\n params=[\n lambda x: x.index,\n lambda x: list(x.index),\n lambda x: slice(None),\n lambda x: slice(0, len(x)),\n lambda x: range(len(x)),\n lambda x: list(range(len(x))),\n lambda x: np.ones(len(x), dtype=bool),\n ],\n ids=[\n \"index\",\n \"list[index]\",\n \"null_slice\",\n \"full_slice\",\n \"range\",\n \"list(range)\",\n \"mask\",\n ],\n )\n def full_indexer(self, request):\n \"\"\"\n Fixture for an indexer to pass to obj.loc to get/set the full length of the\n object.\n\n In some cases, assumes that obj.index is the default RangeIndex.\n \"\"\"\n return request.param\n\n def test_setitem_scalar_series(self, data, box_in_series):\n if box_in_series:\n data = pd.Series(data)\n data[0] = data[1]\n assert data[0] == data[1]\n\n def test_setitem_sequence(self, data, box_in_series):\n if box_in_series:\n data = pd.Series(data)\n original = data.copy()\n\n data[[0, 1]] = [data[1], data[0]]\n assert data[0] == original[1]\n assert data[1] == original[0]\n\n def test_setitem_sequence_mismatched_length_raises(self, data, as_array):\n ser = pd.Series(data)\n original = ser.copy()\n value = [data[0]]\n if as_array:\n value = data._from_sequence(value)\n\n xpr = \"cannot set using a {} indexer with a different length\"\n with pytest.raises(ValueError, match=xpr.format(\"list-like\")):\n ser[[0, 1]] = value\n # Ensure no modifications made before the exception\n self.assert_series_equal(ser, original)\n\n with pytest.raises(ValueError, match=xpr.format(\"slice\")):\n ser[slice(3)] = value\n self.assert_series_equal(ser, original)\n\n def test_setitem_empty_indexer(self, data, box_in_series):\n if box_in_series:\n data = pd.Series(data)\n original = data.copy()\n data[np.array([], dtype=int)] = []\n self.assert_equal(data, original)\n\n def test_setitem_sequence_broadcasts(self, data, box_in_series):\n if box_in_series:\n data = pd.Series(data)\n data[[0, 1]] = data[2]\n assert data[0] == data[2]\n assert data[1] == data[2]\n\n @pytest.mark.parametrize(\"setter\", [\"loc\", \"iloc\"])\n def test_setitem_scalar(self, data, setter):\n arr = pd.Series(data)\n setter = getattr(arr, setter)\n setter[0] = data[1]\n assert arr[0] == data[1]\n\n def test_setitem_loc_scalar_mixed(self, data):\n df = pd.DataFrame({\"A\": np.arange(len(data)), \"B\": data})\n df.loc[0, \"B\"] = data[1]\n assert df.loc[0, \"B\"] == data[1]\n\n def test_setitem_loc_scalar_single(self, data):\n df = pd.DataFrame({\"B\": data})\n df.loc[10, \"B\"] = data[1]\n assert df.loc[10, \"B\"] == data[1]\n\n def test_setitem_loc_scalar_multiple_homogoneous(self, data):\n df = pd.DataFrame({\"A\": data, \"B\": data})\n df.loc[10, \"B\"] = data[1]\n assert df.loc[10, \"B\"] == data[1]\n\n def test_setitem_iloc_scalar_mixed(self, data):\n df = pd.DataFrame({\"A\": np.arange(len(data)), \"B\": data})\n df.iloc[0, 1] = data[1]\n assert df.loc[0, \"B\"] == data[1]\n\n def test_setitem_iloc_scalar_single(self, data):\n df = pd.DataFrame({\"B\": data})\n df.iloc[10, 0] = data[1]\n assert df.loc[10, \"B\"] == data[1]\n\n def test_setitem_iloc_scalar_multiple_homogoneous(self, data):\n df = pd.DataFrame({\"A\": data, \"B\": data})\n df.iloc[10, 1] = data[1]\n assert df.loc[10, \"B\"] == data[1]\n\n @pytest.mark.parametrize(\n \"mask\",\n [\n np.array([True, True, True, False, False]),\n pd.array([True, True, True, False, False], 
dtype=\"boolean\"),\n pd.array([True, True, True, pd.NA, pd.NA], dtype=\"boolean\"),\n ],\n ids=[\"numpy-array\", \"boolean-array\", \"boolean-array-na\"],\n )\n def test_setitem_mask(self, data, mask, box_in_series):\n arr = data[:5].copy()\n expected = arr.take([0, 0, 0, 3, 4])\n if box_in_series:\n arr = pd.Series(arr)\n expected = pd.Series(expected)\n arr[mask] = data[0]\n self.assert_equal(expected, arr)\n\n def test_setitem_mask_raises(self, data, box_in_series):\n # wrong length\n mask = np.array([True, False])\n\n if box_in_series:\n data = pd.Series(data)\n\n with pytest.raises(IndexError, match=\"wrong length\"):\n data[mask] = data[0]\n\n mask = pd.array(mask, dtype=\"boolean\")\n with pytest.raises(IndexError, match=\"wrong length\"):\n data[mask] = data[0]\n\n def test_setitem_mask_boolean_array_with_na(self, data, box_in_series):\n mask = pd.array(np.zeros(data.shape, dtype=\"bool\"), dtype=\"boolean\")\n mask[:3] = True\n mask[3:5] = pd.NA\n\n if box_in_series:\n data = pd.Series(data)\n\n data[mask] = data[0]\n\n assert (data[:3] == data[0]).all()\n\n @pytest.mark.parametrize(\n \"idx\",\n [[0, 1, 2], pd.array([0, 1, 2], dtype=\"Int64\"), np.array([0, 1, 2])],\n ids=[\"list\", \"integer-array\", \"numpy-array\"],\n )\n def test_setitem_integer_array(self, data, idx, box_in_series):\n arr = data[:5].copy()\n expected = data.take([0, 0, 0, 3, 4])\n\n if box_in_series:\n arr = pd.Series(arr)\n expected = pd.Series(expected)\n\n arr[idx] = arr[0]\n self.assert_equal(arr, expected)\n\n @pytest.mark.parametrize(\n \"idx, box_in_series\",\n [\n ([0, 1, 2, pd.NA], False),\n pytest.param(\n [0, 1, 2, pd.NA], True, marks=pytest.mark.xfail(reason=\"GH-31948\")\n ),\n (pd.array([0, 1, 2, pd.NA], dtype=\"Int64\"), False),\n (pd.array([0, 1, 2, pd.NA], dtype=\"Int64\"), False),\n ],\n ids=[\"list-False\", \"list-True\", \"integer-array-False\", \"integer-array-True\"],\n )\n def test_setitem_integer_with_missing_raises(self, data, idx, box_in_series):\n arr = data.copy()\n\n # TODO(xfail) this raises KeyError about labels not found (it tries label-based)\n # for list of labels with Series\n if box_in_series:\n arr = pd.Series(data, index=[tm.rands(4) for _ in range(len(data))])\n\n msg = \"Cannot index with an integer indexer containing NA values\"\n with pytest.raises(ValueError, match=msg):\n arr[idx] = arr[0]\n\n @pytest.mark.parametrize(\"as_callable\", [True, False])\n @pytest.mark.parametrize(\"setter\", [\"loc\", None])\n def test_setitem_mask_aligned(self, data, as_callable, setter):\n ser = pd.Series(data)\n mask = np.zeros(len(data), dtype=bool)\n mask[:2] = True\n\n if as_callable:\n mask2 = lambda x: mask\n else:\n mask2 = mask\n\n if setter:\n # loc\n target = getattr(ser, setter)\n else:\n # Series.__setitem__\n target = ser\n\n target[mask2] = data[5:7]\n\n ser[mask2] = data[5:7]\n assert ser[0] == data[5]\n assert ser[1] == data[6]\n\n @pytest.mark.parametrize(\"setter\", [\"loc\", None])\n def test_setitem_mask_broadcast(self, data, setter):\n ser = pd.Series(data)\n mask = np.zeros(len(data), dtype=bool)\n mask[:2] = True\n\n if setter: # loc\n target = getattr(ser, setter)\n else: # __setitem__\n target = ser\n\n target[mask] = data[10]\n assert ser[0] == data[10]\n assert ser[1] == data[10]\n\n def test_setitem_expand_columns(self, data):\n df = pd.DataFrame({\"A\": data})\n result = df.copy()\n result[\"B\"] = 1\n expected = pd.DataFrame({\"A\": data, \"B\": [1] * len(data)})\n self.assert_frame_equal(result, expected)\n\n result = df.copy()\n result.loc[:, \"B\"] = 
1\n self.assert_frame_equal(result, expected)\n\n # overwrite with new type\n result[\"B\"] = data\n expected = pd.DataFrame({\"A\": data, \"B\": data})\n self.assert_frame_equal(result, expected)\n\n def test_setitem_expand_with_extension(self, data):\n df = pd.DataFrame({\"A\": [1] * len(data)})\n result = df.copy()\n result[\"B\"] = data\n expected = pd.DataFrame({\"A\": [1] * len(data), \"B\": data})\n self.assert_frame_equal(result, expected)\n\n result = df.copy()\n result.loc[:, \"B\"] = data\n self.assert_frame_equal(result, expected)\n\n def test_setitem_frame_invalid_length(self, data):\n df = pd.DataFrame({\"A\": [1] * len(data)})\n xpr = (\n rf\"Length of values \\({len(data[:5])}\\) \"\n rf\"does not match length of index \\({len(df)}\\)\"\n )\n with pytest.raises(ValueError, match=xpr):\n df[\"B\"] = data[:5]\n\n def test_setitem_tuple_index(self, data):\n ser = pd.Series(data[:2], index=[(0, 0), (0, 1)])\n expected = pd.Series(data.take([1, 1]), index=ser.index)\n ser[(0, 0)] = data[1]\n self.assert_series_equal(ser, expected)\n\n def test_setitem_slice(self, data, box_in_series):\n arr = data[:5].copy()\n expected = data.take([0, 0, 0, 3, 4])\n if box_in_series:\n arr = pd.Series(arr)\n expected = pd.Series(expected)\n\n arr[:3] = data[0]\n self.assert_equal(arr, expected)\n\n def test_setitem_loc_iloc_slice(self, data):\n arr = data[:5].copy()\n s = pd.Series(arr, index=[\"a\", \"b\", \"c\", \"d\", \"e\"])\n expected = pd.Series(data.take([0, 0, 0, 3, 4]), index=s.index)\n\n result = s.copy()\n result.iloc[:3] = data[0]\n self.assert_equal(result, expected)\n\n result = s.copy()\n result.loc[:\"c\"] = data[0]\n self.assert_equal(result, expected)\n\n def test_setitem_slice_mismatch_length_raises(self, data):\n arr = data[:5]\n with pytest.raises(ValueError):\n arr[:1] = arr[:2]\n\n def test_setitem_slice_array(self, data):\n arr = data[:5].copy()\n arr[:5] = data[-5:]\n self.assert_extension_array_equal(arr, data[-5:])\n\n def test_setitem_scalar_key_sequence_raise(self, data):\n arr = data[:5].copy()\n with pytest.raises(ValueError):\n arr[0] = arr[[0, 1]]\n\n def test_setitem_preserves_views(self, data):\n # GH#28150 setitem shouldn't swap the underlying data\n view1 = data.view()\n view2 = data[:]\n\n data[0] = data[1]\n assert view1[0] == data[1]\n assert view2[0] == data[1]\n\n def test_setitem_with_expansion_dataframe_column(self, data, full_indexer):\n # https://github.com/pandas-dev/pandas/issues/32395\n df = expected = pd.DataFrame({\"data\": pd.Series(data)})\n result = pd.DataFrame(index=df.index)\n\n key = full_indexer(df)\n result.loc[key, \"data\"] = df[\"data\"]\n\n self.assert_frame_equal(result, expected)\n\n def test_setitem_series(self, data, full_indexer):\n # https://github.com/pandas-dev/pandas/issues/32395\n ser = pd.Series(data, name=\"data\")\n result = pd.Series(index=ser.index, dtype=object, name=\"data\")\n\n # because result has object dtype, the attempt to do setting inplace\n # is successful, and object dtype is retained\n key = full_indexer(ser)\n result.loc[key] = ser\n\n expected = pd.Series(\n data.astype(object), index=ser.index, name=\"data\", dtype=object\n )\n self.assert_series_equal(result, expected)\n\n def test_delitem_series(self, data):\n # GH#40763\n ser = pd.Series(data, name=\"data\")\n\n taker = np.arange(len(ser))\n taker = np.delete(taker, 1)\n\n expected = ser[taker]\n del ser[1]\n self.assert_series_equal(ser, expected)\n",
"\"\"\"\ntest date_range, bdate_range construction from the convenience range functions\n\"\"\"\n\nfrom datetime import (\n datetime,\n time,\n timedelta,\n)\n\nimport numpy as np\nimport pytest\nimport pytz\nfrom pytz import timezone\n\nfrom pandas._libs.tslibs import timezones\nfrom pandas._libs.tslibs.offsets import (\n BDay,\n CDay,\n DateOffset,\n MonthEnd,\n prefix_mapping,\n)\nfrom pandas.errors import OutOfBoundsDatetime\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import (\n DatetimeIndex,\n Timedelta,\n Timestamp,\n bdate_range,\n date_range,\n offsets,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays.datetimes import generate_range\n\nSTART, END = datetime(2009, 1, 1), datetime(2010, 1, 1)\n\n\nclass TestTimestampEquivDateRange:\n # Older tests in TestTimeSeries constructed their `stamp` objects\n # using `date_range` instead of the `Timestamp` constructor.\n # TestTimestampEquivDateRange checks that these are equivalent in the\n # pertinent cases.\n\n def test_date_range_timestamp_equiv(self):\n rng = date_range(\"20090415\", \"20090519\", tz=\"US/Eastern\")\n stamp = rng[0]\n\n ts = Timestamp(\"20090415\", tz=\"US/Eastern\")\n assert ts == stamp\n\n def test_date_range_timestamp_equiv_dateutil(self):\n rng = date_range(\"20090415\", \"20090519\", tz=\"dateutil/US/Eastern\")\n stamp = rng[0]\n\n ts = Timestamp(\"20090415\", tz=\"dateutil/US/Eastern\")\n assert ts == stamp\n\n def test_date_range_timestamp_equiv_explicit_pytz(self):\n rng = date_range(\"20090415\", \"20090519\", tz=pytz.timezone(\"US/Eastern\"))\n stamp = rng[0]\n\n ts = Timestamp(\"20090415\", tz=pytz.timezone(\"US/Eastern\"))\n assert ts == stamp\n\n @td.skip_if_windows_python_3\n def test_date_range_timestamp_equiv_explicit_dateutil(self):\n from pandas._libs.tslibs.timezones import dateutil_gettz as gettz\n\n rng = date_range(\"20090415\", \"20090519\", tz=gettz(\"US/Eastern\"))\n stamp = rng[0]\n\n ts = Timestamp(\"20090415\", tz=gettz(\"US/Eastern\"))\n assert ts == stamp\n\n def test_date_range_timestamp_equiv_from_datetime_instance(self):\n datetime_instance = datetime(2014, 3, 4)\n # build a timestamp with a frequency, since then it supports\n # addition/subtraction of integers\n timestamp_instance = date_range(datetime_instance, periods=1, freq=\"D\")[0]\n\n ts = Timestamp(datetime_instance)\n assert ts == timestamp_instance\n\n def test_date_range_timestamp_equiv_preserve_frequency(self):\n timestamp_instance = date_range(\"2014-03-05\", periods=1, freq=\"D\")[0]\n ts = Timestamp(\"2014-03-05\")\n\n assert timestamp_instance == ts\n\n\nclass TestDateRanges:\n def test_date_range_near_implementation_bound(self):\n # GH#???\n freq = Timedelta(1)\n\n with pytest.raises(OutOfBoundsDatetime, match=\"Cannot generate range with\"):\n date_range(end=Timestamp.min, periods=2, freq=freq)\n\n def test_date_range_nat(self):\n # GH#11587\n msg = \"Neither `start` nor `end` can be NaT\"\n with pytest.raises(ValueError, match=msg):\n date_range(start=\"2016-01-01\", end=pd.NaT, freq=\"D\")\n with pytest.raises(ValueError, match=msg):\n date_range(start=pd.NaT, end=\"2016-01-01\", freq=\"D\")\n\n def test_date_range_multiplication_overflow(self):\n # GH#24255\n # check that overflows in calculating `addend = periods * stride`\n # are caught\n with tm.assert_produces_warning(None):\n # we should _not_ be seeing a overflow RuntimeWarning\n dti = date_range(start=\"1677-09-22\", periods=213503, freq=\"D\")\n\n assert dti[0] == Timestamp(\"1677-09-22\")\n assert len(dti) == 
213503\n\n msg = \"Cannot generate range with\"\n with pytest.raises(OutOfBoundsDatetime, match=msg):\n date_range(\"1969-05-04\", periods=200000000, freq=\"30000D\")\n\n def test_date_range_unsigned_overflow_handling(self):\n # GH#24255\n # case where `addend = periods * stride` overflows int64 bounds\n # but not uint64 bounds\n dti = date_range(start=\"1677-09-22\", end=\"2262-04-11\", freq=\"D\")\n\n dti2 = date_range(start=dti[0], periods=len(dti), freq=\"D\")\n assert dti2.equals(dti)\n\n dti3 = date_range(end=dti[-1], periods=len(dti), freq=\"D\")\n assert dti3.equals(dti)\n\n def test_date_range_int64_overflow_non_recoverable(self):\n # GH#24255\n # case with start later than 1970-01-01, overflow int64 but not uint64\n msg = \"Cannot generate range with\"\n with pytest.raises(OutOfBoundsDatetime, match=msg):\n date_range(start=\"1970-02-01\", periods=106752 * 24, freq=\"H\")\n\n # case with end before 1970-01-01, overflow int64 but not uint64\n with pytest.raises(OutOfBoundsDatetime, match=msg):\n date_range(end=\"1969-11-14\", periods=106752 * 24, freq=\"H\")\n\n @pytest.mark.slow\n def test_date_range_int64_overflow_stride_endpoint_different_signs(self):\n # cases where stride * periods overflow int64 and stride/endpoint\n # have different signs\n start = Timestamp(\"2262-02-23\")\n end = Timestamp(\"1969-11-14\")\n\n expected = date_range(start=start, end=end, freq=\"-1H\")\n assert expected[0] == start\n assert expected[-1] == end\n\n dti = date_range(end=end, periods=len(expected), freq=\"-1H\")\n tm.assert_index_equal(dti, expected)\n\n start2 = Timestamp(\"1970-02-01\")\n end2 = Timestamp(\"1677-10-22\")\n\n expected2 = date_range(start=start2, end=end2, freq=\"-1H\")\n assert expected2[0] == start2\n assert expected2[-1] == end2\n\n dti2 = date_range(start=start2, periods=len(expected2), freq=\"-1H\")\n tm.assert_index_equal(dti2, expected2)\n\n def test_date_range_out_of_bounds(self):\n # GH#14187\n msg = \"Cannot generate range\"\n with pytest.raises(OutOfBoundsDatetime, match=msg):\n date_range(\"2016-01-01\", periods=100000, freq=\"D\")\n with pytest.raises(OutOfBoundsDatetime, match=msg):\n date_range(end=\"1763-10-12\", periods=100000, freq=\"D\")\n\n def test_date_range_gen_error(self):\n rng = date_range(\"1/1/2000 00:00\", \"1/1/2000 00:18\", freq=\"5min\")\n assert len(rng) == 4\n\n @pytest.mark.parametrize(\"freq\", [\"AS\", \"YS\"])\n def test_begin_year_alias(self, freq):\n # see gh-9313\n rng = date_range(\"1/1/2013\", \"7/1/2017\", freq=freq)\n exp = DatetimeIndex(\n [\"2013-01-01\", \"2014-01-01\", \"2015-01-01\", \"2016-01-01\", \"2017-01-01\"],\n freq=freq,\n )\n tm.assert_index_equal(rng, exp)\n\n @pytest.mark.parametrize(\"freq\", [\"A\", \"Y\"])\n def test_end_year_alias(self, freq):\n # see gh-9313\n rng = date_range(\"1/1/2013\", \"7/1/2017\", freq=freq)\n exp = DatetimeIndex(\n [\"2013-12-31\", \"2014-12-31\", \"2015-12-31\", \"2016-12-31\"], freq=freq\n )\n tm.assert_index_equal(rng, exp)\n\n @pytest.mark.parametrize(\"freq\", [\"BA\", \"BY\"])\n def test_business_end_year_alias(self, freq):\n # see gh-9313\n rng = date_range(\"1/1/2013\", \"7/1/2017\", freq=freq)\n exp = DatetimeIndex(\n [\"2013-12-31\", \"2014-12-31\", \"2015-12-31\", \"2016-12-30\"], freq=freq\n )\n tm.assert_index_equal(rng, exp)\n\n def test_date_range_negative_freq(self):\n # GH 11018\n rng = date_range(\"2011-12-31\", freq=\"-2A\", periods=3)\n exp = DatetimeIndex([\"2011-12-31\", \"2009-12-31\", \"2007-12-31\"], freq=\"-2A\")\n tm.assert_index_equal(rng, exp)\n assert 
rng.freq == \"-2A\"\n\n rng = date_range(\"2011-01-31\", freq=\"-2M\", periods=3)\n exp = DatetimeIndex([\"2011-01-31\", \"2010-11-30\", \"2010-09-30\"], freq=\"-2M\")\n tm.assert_index_equal(rng, exp)\n assert rng.freq == \"-2M\"\n\n def test_date_range_bms_bug(self):\n # #1645\n rng = date_range(\"1/1/2000\", periods=10, freq=\"BMS\")\n\n ex_first = Timestamp(\"2000-01-03\")\n assert rng[0] == ex_first\n\n def test_date_range_normalize(self):\n snap = datetime.today()\n n = 50\n\n rng = date_range(snap, periods=n, normalize=False, freq=\"2D\")\n\n offset = timedelta(2)\n values = DatetimeIndex([snap + i * offset for i in range(n)], freq=offset)\n\n tm.assert_index_equal(rng, values)\n\n rng = date_range(\"1/1/2000 08:15\", periods=n, normalize=False, freq=\"B\")\n the_time = time(8, 15)\n for val in rng:\n assert val.time() == the_time\n\n def test_date_range_fy5252(self):\n dr = date_range(\n start=\"2013-01-01\",\n periods=2,\n freq=offsets.FY5253(startingMonth=1, weekday=3, variation=\"nearest\"),\n )\n assert dr[0] == Timestamp(\"2013-01-31\")\n assert dr[1] == Timestamp(\"2014-01-30\")\n\n def test_date_range_ambiguous_arguments(self):\n # #2538\n start = datetime(2011, 1, 1, 5, 3, 40)\n end = datetime(2011, 1, 1, 8, 9, 40)\n\n msg = (\n \"Of the four parameters: start, end, periods, and \"\n \"freq, exactly three must be specified\"\n )\n with pytest.raises(ValueError, match=msg):\n date_range(start, end, periods=10, freq=\"s\")\n\n def test_date_range_convenience_periods(self):\n # GH 20808\n result = date_range(\"2018-04-24\", \"2018-04-27\", periods=3)\n expected = DatetimeIndex(\n [\"2018-04-24 00:00:00\", \"2018-04-25 12:00:00\", \"2018-04-27 00:00:00\"],\n freq=None,\n )\n\n tm.assert_index_equal(result, expected)\n\n # Test if spacing remains linear if tz changes to dst in range\n result = date_range(\n \"2018-04-01 01:00:00\",\n \"2018-04-01 04:00:00\",\n tz=\"Australia/Sydney\",\n periods=3,\n )\n expected = DatetimeIndex(\n [\n Timestamp(\"2018-04-01 01:00:00+1100\", tz=\"Australia/Sydney\"),\n Timestamp(\"2018-04-01 02:00:00+1000\", tz=\"Australia/Sydney\"),\n Timestamp(\"2018-04-01 04:00:00+1000\", tz=\"Australia/Sydney\"),\n ]\n )\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"start,end,result_tz\",\n [\n [\"20180101\", \"20180103\", \"US/Eastern\"],\n [datetime(2018, 1, 1), datetime(2018, 1, 3), \"US/Eastern\"],\n [Timestamp(\"20180101\"), Timestamp(\"20180103\"), \"US/Eastern\"],\n [\n Timestamp(\"20180101\", tz=\"US/Eastern\"),\n Timestamp(\"20180103\", tz=\"US/Eastern\"),\n \"US/Eastern\",\n ],\n [\n Timestamp(\"20180101\", tz=\"US/Eastern\"),\n Timestamp(\"20180103\", tz=\"US/Eastern\"),\n None,\n ],\n ],\n )\n def test_date_range_linspacing_tz(self, start, end, result_tz):\n # GH 20983\n result = date_range(start, end, periods=3, tz=result_tz)\n expected = date_range(\"20180101\", periods=3, freq=\"D\", tz=\"US/Eastern\")\n tm.assert_index_equal(result, expected)\n\n def test_date_range_businesshour(self):\n idx = DatetimeIndex(\n [\n \"2014-07-04 09:00\",\n \"2014-07-04 10:00\",\n \"2014-07-04 11:00\",\n \"2014-07-04 12:00\",\n \"2014-07-04 13:00\",\n \"2014-07-04 14:00\",\n \"2014-07-04 15:00\",\n \"2014-07-04 16:00\",\n ],\n freq=\"BH\",\n )\n rng = date_range(\"2014-07-04 09:00\", \"2014-07-04 16:00\", freq=\"BH\")\n tm.assert_index_equal(idx, rng)\n\n idx = DatetimeIndex([\"2014-07-04 16:00\", \"2014-07-07 09:00\"], freq=\"BH\")\n rng = date_range(\"2014-07-04 16:00\", \"2014-07-07 09:00\", freq=\"BH\")\n 
tm.assert_index_equal(idx, rng)\n\n idx = DatetimeIndex(\n [\n \"2014-07-04 09:00\",\n \"2014-07-04 10:00\",\n \"2014-07-04 11:00\",\n \"2014-07-04 12:00\",\n \"2014-07-04 13:00\",\n \"2014-07-04 14:00\",\n \"2014-07-04 15:00\",\n \"2014-07-04 16:00\",\n \"2014-07-07 09:00\",\n \"2014-07-07 10:00\",\n \"2014-07-07 11:00\",\n \"2014-07-07 12:00\",\n \"2014-07-07 13:00\",\n \"2014-07-07 14:00\",\n \"2014-07-07 15:00\",\n \"2014-07-07 16:00\",\n \"2014-07-08 09:00\",\n \"2014-07-08 10:00\",\n \"2014-07-08 11:00\",\n \"2014-07-08 12:00\",\n \"2014-07-08 13:00\",\n \"2014-07-08 14:00\",\n \"2014-07-08 15:00\",\n \"2014-07-08 16:00\",\n ],\n freq=\"BH\",\n )\n rng = date_range(\"2014-07-04 09:00\", \"2014-07-08 16:00\", freq=\"BH\")\n tm.assert_index_equal(idx, rng)\n\n def test_range_misspecified(self):\n # GH #1095\n msg = (\n \"Of the four parameters: start, end, periods, and \"\n \"freq, exactly three must be specified\"\n )\n\n with pytest.raises(ValueError, match=msg):\n date_range(start=\"1/1/2000\")\n\n with pytest.raises(ValueError, match=msg):\n date_range(end=\"1/1/2000\")\n\n with pytest.raises(ValueError, match=msg):\n date_range(periods=10)\n\n with pytest.raises(ValueError, match=msg):\n date_range(start=\"1/1/2000\", freq=\"H\")\n\n with pytest.raises(ValueError, match=msg):\n date_range(end=\"1/1/2000\", freq=\"H\")\n\n with pytest.raises(ValueError, match=msg):\n date_range(periods=10, freq=\"H\")\n\n with pytest.raises(ValueError, match=msg):\n date_range()\n\n def test_compat_replace(self):\n # https://github.com/statsmodels/statsmodels/issues/3349\n # replace should take ints/longs for compat\n result = date_range(Timestamp(\"1960-04-01 00:00:00\"), periods=76, freq=\"QS-JAN\")\n assert len(result) == 76\n\n def test_catch_infinite_loop(self):\n offset = offsets.DateOffset(minute=5)\n # blow up, don't loop forever\n msg = \"Offset <DateOffset: minute=5> did not increment date\"\n with pytest.raises(ValueError, match=msg):\n date_range(datetime(2011, 11, 11), datetime(2011, 11, 12), freq=offset)\n\n @pytest.mark.parametrize(\"periods\", (1, 2))\n def test_wom_len(self, periods):\n # https://github.com/pandas-dev/pandas/issues/20517\n res = date_range(start=\"20110101\", periods=periods, freq=\"WOM-1MON\")\n assert len(res) == periods\n\n def test_construct_over_dst(self):\n # GH 20854\n pre_dst = Timestamp(\"2010-11-07 01:00:00\").tz_localize(\n \"US/Pacific\", ambiguous=True\n )\n pst_dst = Timestamp(\"2010-11-07 01:00:00\").tz_localize(\n \"US/Pacific\", ambiguous=False\n )\n expect_data = [\n Timestamp(\"2010-11-07 00:00:00\", tz=\"US/Pacific\"),\n pre_dst,\n pst_dst,\n ]\n expected = DatetimeIndex(expect_data, freq=\"H\")\n result = date_range(start=\"2010-11-7\", periods=3, freq=\"H\", tz=\"US/Pacific\")\n tm.assert_index_equal(result, expected)\n\n def test_construct_with_different_start_end_string_format(self):\n # GH 12064\n result = date_range(\n \"2013-01-01 00:00:00+09:00\", \"2013/01/01 02:00:00+09:00\", freq=\"H\"\n )\n expected = DatetimeIndex(\n [\n Timestamp(\"2013-01-01 00:00:00+09:00\"),\n Timestamp(\"2013-01-01 01:00:00+09:00\"),\n Timestamp(\"2013-01-01 02:00:00+09:00\"),\n ],\n freq=\"H\",\n )\n tm.assert_index_equal(result, expected)\n\n def test_error_with_zero_monthends(self):\n msg = r\"Offset <0 \\* MonthEnds> did not increment date\"\n with pytest.raises(ValueError, match=msg):\n date_range(\"1/1/2000\", \"1/1/2001\", freq=MonthEnd(0))\n\n def test_range_bug(self):\n # GH #770\n offset = DateOffset(months=3)\n result = date_range(\"2011-1-1\", 
\"2012-1-31\", freq=offset)\n\n start = datetime(2011, 1, 1)\n expected = DatetimeIndex([start + i * offset for i in range(5)], freq=offset)\n tm.assert_index_equal(result, expected)\n\n def test_range_tz_pytz(self):\n # see gh-2906\n tz = timezone(\"US/Eastern\")\n start = tz.localize(datetime(2011, 1, 1))\n end = tz.localize(datetime(2011, 1, 3))\n\n dr = date_range(start=start, periods=3)\n assert dr.tz.zone == tz.zone\n assert dr[0] == start\n assert dr[2] == end\n\n dr = date_range(end=end, periods=3)\n assert dr.tz.zone == tz.zone\n assert dr[0] == start\n assert dr[2] == end\n\n dr = date_range(start=start, end=end)\n assert dr.tz.zone == tz.zone\n assert dr[0] == start\n assert dr[2] == end\n\n @pytest.mark.parametrize(\n \"start, end\",\n [\n [\n Timestamp(datetime(2014, 3, 6), tz=\"US/Eastern\"),\n Timestamp(datetime(2014, 3, 12), tz=\"US/Eastern\"),\n ],\n [\n Timestamp(datetime(2013, 11, 1), tz=\"US/Eastern\"),\n Timestamp(datetime(2013, 11, 6), tz=\"US/Eastern\"),\n ],\n ],\n )\n def test_range_tz_dst_straddle_pytz(self, start, end):\n dr = date_range(start, end, freq=\"D\")\n assert dr[0] == start\n assert dr[-1] == end\n assert np.all(dr.hour == 0)\n\n dr = date_range(start, end, freq=\"D\", tz=\"US/Eastern\")\n assert dr[0] == start\n assert dr[-1] == end\n assert np.all(dr.hour == 0)\n\n dr = date_range(\n start.replace(tzinfo=None),\n end.replace(tzinfo=None),\n freq=\"D\",\n tz=\"US/Eastern\",\n )\n assert dr[0] == start\n assert dr[-1] == end\n assert np.all(dr.hour == 0)\n\n def test_range_tz_dateutil(self):\n # see gh-2906\n\n # Use maybe_get_tz to fix filename in tz under dateutil.\n from pandas._libs.tslibs.timezones import maybe_get_tz\n\n tz = lambda x: maybe_get_tz(\"dateutil/\" + x)\n\n start = datetime(2011, 1, 1, tzinfo=tz(\"US/Eastern\"))\n end = datetime(2011, 1, 3, tzinfo=tz(\"US/Eastern\"))\n\n dr = date_range(start=start, periods=3)\n assert dr.tz == tz(\"US/Eastern\")\n assert dr[0] == start\n assert dr[2] == end\n\n dr = date_range(end=end, periods=3)\n assert dr.tz == tz(\"US/Eastern\")\n assert dr[0] == start\n assert dr[2] == end\n\n dr = date_range(start=start, end=end)\n assert dr.tz == tz(\"US/Eastern\")\n assert dr[0] == start\n assert dr[2] == end\n\n @pytest.mark.parametrize(\"freq\", [\"1D\", \"3D\", \"2M\", \"7W\", \"3H\", \"A\"])\n def test_range_closed(self, freq):\n begin = datetime(2011, 1, 1)\n end = datetime(2014, 1, 1)\n\n closed = date_range(begin, end, closed=None, freq=freq)\n left = date_range(begin, end, closed=\"left\", freq=freq)\n right = date_range(begin, end, closed=\"right\", freq=freq)\n expected_left = left\n expected_right = right\n\n if end == closed[-1]:\n expected_left = closed[:-1]\n if begin == closed[0]:\n expected_right = closed[1:]\n\n tm.assert_index_equal(expected_left, left)\n tm.assert_index_equal(expected_right, right)\n\n def test_range_closed_with_tz_aware_start_end(self):\n # GH12409, GH12684\n begin = Timestamp(\"2011/1/1\", tz=\"US/Eastern\")\n end = Timestamp(\"2014/1/1\", tz=\"US/Eastern\")\n\n for freq in [\"1D\", \"3D\", \"2M\", \"7W\", \"3H\", \"A\"]:\n closed = date_range(begin, end, closed=None, freq=freq)\n left = date_range(begin, end, closed=\"left\", freq=freq)\n right = date_range(begin, end, closed=\"right\", freq=freq)\n expected_left = left\n expected_right = right\n\n if end == closed[-1]:\n expected_left = closed[:-1]\n if begin == closed[0]:\n expected_right = closed[1:]\n\n tm.assert_index_equal(expected_left, left)\n tm.assert_index_equal(expected_right, right)\n\n begin = 
Timestamp(\"2011/1/1\")\n end = Timestamp(\"2014/1/1\")\n begintz = Timestamp(\"2011/1/1\", tz=\"US/Eastern\")\n endtz = Timestamp(\"2014/1/1\", tz=\"US/Eastern\")\n\n for freq in [\"1D\", \"3D\", \"2M\", \"7W\", \"3H\", \"A\"]:\n closed = date_range(begin, end, closed=None, freq=freq, tz=\"US/Eastern\")\n left = date_range(begin, end, closed=\"left\", freq=freq, tz=\"US/Eastern\")\n right = date_range(begin, end, closed=\"right\", freq=freq, tz=\"US/Eastern\")\n expected_left = left\n expected_right = right\n\n if endtz == closed[-1]:\n expected_left = closed[:-1]\n if begintz == closed[0]:\n expected_right = closed[1:]\n\n tm.assert_index_equal(expected_left, left)\n tm.assert_index_equal(expected_right, right)\n\n @pytest.mark.parametrize(\"closed\", [\"right\", \"left\", None])\n def test_range_closed_boundary(self, closed):\n # GH#11804\n right_boundary = date_range(\n \"2015-09-12\", \"2015-12-01\", freq=\"QS-MAR\", closed=closed\n )\n left_boundary = date_range(\n \"2015-09-01\", \"2015-09-12\", freq=\"QS-MAR\", closed=closed\n )\n both_boundary = date_range(\n \"2015-09-01\", \"2015-12-01\", freq=\"QS-MAR\", closed=closed\n )\n expected_right = expected_left = expected_both = both_boundary\n\n if closed == \"right\":\n expected_left = both_boundary[1:]\n if closed == \"left\":\n expected_right = both_boundary[:-1]\n if closed is None:\n expected_right = both_boundary[1:]\n expected_left = both_boundary[:-1]\n\n tm.assert_index_equal(right_boundary, expected_right)\n tm.assert_index_equal(left_boundary, expected_left)\n tm.assert_index_equal(both_boundary, expected_both)\n\n def test_years_only(self):\n # GH 6961\n dr = date_range(\"2014\", \"2015\", freq=\"M\")\n assert dr[0] == datetime(2014, 1, 31)\n assert dr[-1] == datetime(2014, 12, 31)\n\n def test_freq_divides_end_in_nanos(self):\n # GH 10885\n result_1 = date_range(\"2005-01-12 10:00\", \"2005-01-12 16:00\", freq=\"345min\")\n result_2 = date_range(\"2005-01-13 10:00\", \"2005-01-13 16:00\", freq=\"345min\")\n expected_1 = DatetimeIndex(\n [\"2005-01-12 10:00:00\", \"2005-01-12 15:45:00\"],\n dtype=\"datetime64[ns]\",\n freq=\"345T\",\n tz=None,\n )\n expected_2 = DatetimeIndex(\n [\"2005-01-13 10:00:00\", \"2005-01-13 15:45:00\"],\n dtype=\"datetime64[ns]\",\n freq=\"345T\",\n tz=None,\n )\n tm.assert_index_equal(result_1, expected_1)\n tm.assert_index_equal(result_2, expected_2)\n\n def test_cached_range_bug(self):\n rng = date_range(\"2010-09-01 05:00:00\", periods=50, freq=DateOffset(hours=6))\n assert len(rng) == 50\n assert rng[0] == datetime(2010, 9, 1, 5)\n\n def test_timezone_comparaison_bug(self):\n # smoke test\n start = Timestamp(\"20130220 10:00\", tz=\"US/Eastern\")\n result = date_range(start, periods=2, tz=\"US/Eastern\")\n assert len(result) == 2\n\n def test_timezone_comparaison_assert(self):\n start = Timestamp(\"20130220 10:00\", tz=\"US/Eastern\")\n msg = \"Inferred time zone not equal to passed time zone\"\n with pytest.raises(AssertionError, match=msg):\n date_range(start, periods=2, tz=\"Europe/Berlin\")\n\n def test_negative_non_tick_frequency_descending_dates(self, tz_aware_fixture):\n # GH 23270\n tz = tz_aware_fixture\n result = date_range(start=\"2011-06-01\", end=\"2011-01-01\", freq=\"-1MS\", tz=tz)\n expected = date_range(end=\"2011-06-01\", start=\"2011-01-01\", freq=\"1MS\", tz=tz)[\n ::-1\n ]\n tm.assert_index_equal(result, expected)\n\n\nclass TestDateRangeTZ:\n \"\"\"Tests for date_range with timezones\"\"\"\n\n def test_hongkong_tz_convert(self):\n # GH#1673 smoke test\n dr = 
date_range(\"2012-01-01\", \"2012-01-10\", freq=\"D\", tz=\"Hongkong\")\n\n # it works!\n dr.hour\n\n @pytest.mark.parametrize(\"tzstr\", [\"US/Eastern\", \"dateutil/US/Eastern\"])\n def test_date_range_span_dst_transition(self, tzstr):\n # GH#1778\n\n # Standard -> Daylight Savings Time\n dr = date_range(\"03/06/2012 00:00\", periods=200, freq=\"W-FRI\", tz=\"US/Eastern\")\n\n assert (dr.hour == 0).all()\n\n dr = date_range(\"2012-11-02\", periods=10, tz=tzstr)\n result = dr.hour\n expected = pd.Index([0] * 10)\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\"tzstr\", [\"US/Eastern\", \"dateutil/US/Eastern\"])\n def test_date_range_timezone_str_argument(self, tzstr):\n tz = timezones.maybe_get_tz(tzstr)\n result = date_range(\"1/1/2000\", periods=10, tz=tzstr)\n expected = date_range(\"1/1/2000\", periods=10, tz=tz)\n\n tm.assert_index_equal(result, expected)\n\n def test_date_range_with_fixedoffset_noname(self):\n from pandas.tests.indexes.datetimes.test_timezones import fixed_off_no_name\n\n off = fixed_off_no_name\n start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off)\n end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off)\n rng = date_range(start=start, end=end)\n assert off == rng.tz\n\n idx = pd.Index([start, end])\n assert off == idx.tz\n\n @pytest.mark.parametrize(\"tzstr\", [\"US/Eastern\", \"dateutil/US/Eastern\"])\n def test_date_range_with_tz(self, tzstr):\n stamp = Timestamp(\"3/11/2012 05:00\", tz=tzstr)\n assert stamp.hour == 5\n\n rng = date_range(\"3/11/2012 04:00\", periods=10, freq=\"H\", tz=tzstr)\n\n assert stamp == rng[1]\n\n\nclass TestGenRangeGeneration:\n def test_generate(self):\n rng1 = list(generate_range(START, END, offset=BDay()))\n rng2 = list(generate_range(START, END, offset=\"B\"))\n assert rng1 == rng2\n\n def test_generate_cday(self):\n rng1 = list(generate_range(START, END, offset=CDay()))\n rng2 = list(generate_range(START, END, offset=\"C\"))\n assert rng1 == rng2\n\n def test_1(self):\n rng = list(generate_range(start=datetime(2009, 3, 25), periods=2))\n expected = [datetime(2009, 3, 25), datetime(2009, 3, 26)]\n assert rng == expected\n\n def test_2(self):\n rng = list(generate_range(start=datetime(2008, 1, 1), end=datetime(2008, 1, 3)))\n expected = [datetime(2008, 1, 1), datetime(2008, 1, 2), datetime(2008, 1, 3)]\n assert rng == expected\n\n def test_3(self):\n rng = list(generate_range(start=datetime(2008, 1, 5), end=datetime(2008, 1, 6)))\n expected = []\n assert rng == expected\n\n def test_precision_finer_than_offset(self):\n # GH#9907\n result1 = date_range(\n start=\"2015-04-15 00:00:03\", end=\"2016-04-22 00:00:00\", freq=\"Q\"\n )\n result2 = date_range(\n start=\"2015-04-15 00:00:03\", end=\"2015-06-22 00:00:04\", freq=\"W\"\n )\n expected1_list = [\n \"2015-06-30 00:00:03\",\n \"2015-09-30 00:00:03\",\n \"2015-12-31 00:00:03\",\n \"2016-03-31 00:00:03\",\n ]\n expected2_list = [\n \"2015-04-19 00:00:03\",\n \"2015-04-26 00:00:03\",\n \"2015-05-03 00:00:03\",\n \"2015-05-10 00:00:03\",\n \"2015-05-17 00:00:03\",\n \"2015-05-24 00:00:03\",\n \"2015-05-31 00:00:03\",\n \"2015-06-07 00:00:03\",\n \"2015-06-14 00:00:03\",\n \"2015-06-21 00:00:03\",\n ]\n expected1 = DatetimeIndex(\n expected1_list, dtype=\"datetime64[ns]\", freq=\"Q-DEC\", tz=None\n )\n expected2 = DatetimeIndex(\n expected2_list, dtype=\"datetime64[ns]\", freq=\"W-SUN\", tz=None\n )\n tm.assert_index_equal(result1, expected1)\n tm.assert_index_equal(result2, expected2)\n\n dt1, dt2 = \"2017-01-01\", \"2017-01-01\"\n tz1, tz2 = \"US/Eastern\", 
\"Europe/London\"\n\n @pytest.mark.parametrize(\n \"start,end\",\n [\n (Timestamp(dt1, tz=tz1), Timestamp(dt2)),\n (Timestamp(dt1), Timestamp(dt2, tz=tz2)),\n (Timestamp(dt1, tz=tz1), Timestamp(dt2, tz=tz2)),\n (Timestamp(dt1, tz=tz2), Timestamp(dt2, tz=tz1)),\n ],\n )\n def test_mismatching_tz_raises_err(self, start, end):\n # issue 18488\n msg = \"Start and end cannot both be tz-aware with different timezones\"\n with pytest.raises(TypeError, match=msg):\n date_range(start, end)\n with pytest.raises(TypeError, match=msg):\n date_range(start, end, freq=BDay())\n\n\nclass TestBusinessDateRange:\n def test_constructor(self):\n bdate_range(START, END, freq=BDay())\n bdate_range(START, periods=20, freq=BDay())\n bdate_range(end=START, periods=20, freq=BDay())\n\n msg = \"periods must be a number, got B\"\n with pytest.raises(TypeError, match=msg):\n date_range(\"2011-1-1\", \"2012-1-1\", \"B\")\n\n with pytest.raises(TypeError, match=msg):\n bdate_range(\"2011-1-1\", \"2012-1-1\", \"B\")\n\n msg = \"freq must be specified for bdate_range; use date_range instead\"\n with pytest.raises(TypeError, match=msg):\n bdate_range(START, END, periods=10, freq=None)\n\n def test_misc(self):\n end = datetime(2009, 5, 13)\n dr = bdate_range(end=end, periods=20)\n firstDate = end - 19 * BDay()\n\n assert len(dr) == 20\n assert dr[0] == firstDate\n assert dr[-1] == end\n\n def test_date_parse_failure(self):\n badly_formed_date = \"2007/100/1\"\n\n msg = \"could not convert string to Timestamp\"\n with pytest.raises(ValueError, match=msg):\n Timestamp(badly_formed_date)\n\n with pytest.raises(ValueError, match=msg):\n bdate_range(start=badly_formed_date, periods=10)\n\n with pytest.raises(ValueError, match=msg):\n bdate_range(end=badly_formed_date, periods=10)\n\n with pytest.raises(ValueError, match=msg):\n bdate_range(badly_formed_date, badly_formed_date)\n\n def test_daterange_bug_456(self):\n # GH #456\n rng1 = bdate_range(\"12/5/2011\", \"12/5/2011\")\n rng2 = bdate_range(\"12/2/2011\", \"12/5/2011\")\n assert rng2._data.freq == BDay()\n\n result = rng1.union(rng2)\n assert isinstance(result, DatetimeIndex)\n\n @pytest.mark.parametrize(\"closed\", [\"left\", \"right\"])\n def test_bdays_and_open_boundaries(self, closed):\n # GH 6673\n start = \"2018-07-21\" # Saturday\n end = \"2018-07-29\" # Sunday\n result = date_range(start, end, freq=\"B\", closed=closed)\n\n bday_start = \"2018-07-23\" # Monday\n bday_end = \"2018-07-27\" # Friday\n expected = date_range(bday_start, bday_end, freq=\"D\")\n tm.assert_index_equal(result, expected)\n # Note: we do _not_ expect the freqs to match here\n\n def test_bday_near_overflow(self):\n # GH#24252 avoid doing unnecessary addition that _would_ overflow\n start = Timestamp.max.floor(\"D\").to_pydatetime()\n rng = date_range(start, end=None, periods=1, freq=\"B\")\n expected = DatetimeIndex([start], freq=\"B\")\n tm.assert_index_equal(rng, expected)\n\n def test_bday_overflow_error(self):\n # GH#24252 check that we get OutOfBoundsDatetime and not OverflowError\n msg = \"Out of bounds nanosecond timestamp\"\n start = Timestamp.max.floor(\"D\").to_pydatetime()\n with pytest.raises(OutOfBoundsDatetime, match=msg):\n date_range(start, periods=2, freq=\"B\")\n\n\nclass TestCustomDateRange:\n def test_constructor(self):\n bdate_range(START, END, freq=CDay())\n bdate_range(START, periods=20, freq=CDay())\n bdate_range(end=START, periods=20, freq=CDay())\n\n msg = \"periods must be a number, got C\"\n with pytest.raises(TypeError, match=msg):\n date_range(\"2011-1-1\", 
\"2012-1-1\", \"C\")\n\n with pytest.raises(TypeError, match=msg):\n bdate_range(\"2011-1-1\", \"2012-1-1\", \"C\")\n\n def test_misc(self):\n end = datetime(2009, 5, 13)\n dr = bdate_range(end=end, periods=20, freq=\"C\")\n firstDate = end - 19 * CDay()\n\n assert len(dr) == 20\n assert dr[0] == firstDate\n assert dr[-1] == end\n\n def test_daterange_bug_456(self):\n # GH #456\n rng1 = bdate_range(\"12/5/2011\", \"12/5/2011\", freq=\"C\")\n rng2 = bdate_range(\"12/2/2011\", \"12/5/2011\", freq=\"C\")\n assert rng2._data.freq == CDay()\n\n result = rng1.union(rng2)\n assert isinstance(result, DatetimeIndex)\n\n def test_cdaterange(self):\n result = bdate_range(\"2013-05-01\", periods=3, freq=\"C\")\n expected = DatetimeIndex([\"2013-05-01\", \"2013-05-02\", \"2013-05-03\"], freq=\"C\")\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n def test_cdaterange_weekmask(self):\n result = bdate_range(\n \"2013-05-01\", periods=3, freq=\"C\", weekmask=\"Sun Mon Tue Wed Thu\"\n )\n expected = DatetimeIndex(\n [\"2013-05-01\", \"2013-05-02\", \"2013-05-05\"], freq=result.freq\n )\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n # raise with non-custom freq\n msg = (\n \"a custom frequency string is required when holidays or \"\n \"weekmask are passed, got frequency B\"\n )\n with pytest.raises(ValueError, match=msg):\n bdate_range(\"2013-05-01\", periods=3, weekmask=\"Sun Mon Tue Wed Thu\")\n\n def test_cdaterange_holidays(self):\n result = bdate_range(\"2013-05-01\", periods=3, freq=\"C\", holidays=[\"2013-05-01\"])\n expected = DatetimeIndex(\n [\"2013-05-02\", \"2013-05-03\", \"2013-05-06\"], freq=result.freq\n )\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n # raise with non-custom freq\n msg = (\n \"a custom frequency string is required when holidays or \"\n \"weekmask are passed, got frequency B\"\n )\n with pytest.raises(ValueError, match=msg):\n bdate_range(\"2013-05-01\", periods=3, holidays=[\"2013-05-01\"])\n\n def test_cdaterange_weekmask_and_holidays(self):\n result = bdate_range(\n \"2013-05-01\",\n periods=3,\n freq=\"C\",\n weekmask=\"Sun Mon Tue Wed Thu\",\n holidays=[\"2013-05-01\"],\n )\n expected = DatetimeIndex(\n [\"2013-05-02\", \"2013-05-05\", \"2013-05-06\"], freq=result.freq\n )\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n # raise with non-custom freq\n msg = (\n \"a custom frequency string is required when holidays or \"\n \"weekmask are passed, got frequency B\"\n )\n with pytest.raises(ValueError, match=msg):\n bdate_range(\n \"2013-05-01\",\n periods=3,\n weekmask=\"Sun Mon Tue Wed Thu\",\n holidays=[\"2013-05-01\"],\n )\n\n @pytest.mark.parametrize(\n \"freq\", [freq for freq in prefix_mapping if freq.startswith(\"C\")]\n )\n def test_all_custom_freq(self, freq):\n # should not raise\n bdate_range(\n START, END, freq=freq, weekmask=\"Mon Wed Fri\", holidays=[\"2009-03-14\"]\n )\n\n bad_freq = freq + \"FOO\"\n msg = f\"invalid custom frequency string: {bad_freq}\"\n with pytest.raises(ValueError, match=msg):\n bdate_range(START, END, freq=bad_freq)\n\n @pytest.mark.parametrize(\n \"start_end\",\n [\n (\"2018-01-01T00:00:01.000Z\", \"2018-01-03T00:00:01.000Z\"),\n (\"2018-01-01T00:00:00.010Z\", \"2018-01-03T00:00:00.010Z\"),\n (\"2001-01-01T00:00:00.010Z\", \"2001-01-03T00:00:00.010Z\"),\n ],\n )\n def test_range_with_millisecond_resolution(self, start_end):\n # https://github.com/pandas-dev/pandas/issues/24110\n start, end = 
start_end\n result = date_range(start=start, end=end, periods=2, closed=\"left\")\n expected = DatetimeIndex([start])\n tm.assert_index_equal(result, expected)\n\n\ndef test_date_range_with_custom_holidays():\n # GH 30593\n freq = offsets.CustomBusinessHour(start=\"15:00\", holidays=[\"2020-11-26\"])\n result = date_range(start=\"2020-11-25 15:00\", periods=4, freq=freq)\n expected = DatetimeIndex(\n [\n \"2020-11-25 15:00:00\",\n \"2020-11-25 16:00:00\",\n \"2020-11-27 15:00:00\",\n \"2020-11-27 16:00:00\",\n ],\n freq=freq,\n )\n tm.assert_index_equal(result, expected)\n",
"import numpy as np\nimport numpy.typing as npt\n\nnd: npt.NDArray[np.int_] = np.array([[1, 2], [3, 4]])\n\n# item\nreveal_type(nd.item()) # E: int\nreveal_type(nd.item(1)) # E: int\nreveal_type(nd.item(0, 1)) # E: int\nreveal_type(nd.item((0, 1))) # E: int\n\n# tolist\nreveal_type(nd.tolist()) # E: Any\n\n# itemset does not return a value\n# tostring is pretty simple\n# tobytes is pretty simple\n# tofile does not return a value\n# dump does not return a value\n# dumps is pretty simple\n\n# astype\nreveal_type(nd.astype(\"float\")) # E: numpy.ndarray[Any, numpy.dtype[Any]]\nreveal_type(nd.astype(float)) # E: numpy.ndarray[Any, numpy.dtype[Any]]\nreveal_type(nd.astype(np.float64)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]\nreveal_type(nd.astype(np.float64, \"K\")) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]\nreveal_type(nd.astype(np.float64, \"K\", \"unsafe\")) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]\nreveal_type(nd.astype(np.float64, \"K\", \"unsafe\", True)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]\nreveal_type(nd.astype(np.float64, \"K\", \"unsafe\", True, True)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]\n\n# byteswap\nreveal_type(nd.byteswap()) # E: numpy.ndarray[Any, numpy.dtype[{int_}]]\nreveal_type(nd.byteswap(True)) # E: numpy.ndarray[Any, numpy.dtype[{int_}]]\n\n# copy\nreveal_type(nd.copy()) # E: numpy.ndarray[Any, numpy.dtype[{int_}]]\nreveal_type(nd.copy(\"C\")) # E: numpy.ndarray[Any, numpy.dtype[{int_}]]\n\nreveal_type(nd.view()) # E: numpy.ndarray[Any, numpy.dtype[{int_}]]\nreveal_type(nd.view(np.float64)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]\nreveal_type(nd.view(float)) # E: numpy.ndarray[Any, numpy.dtype[Any]]\nreveal_type(nd.view(np.float64, np.matrix)) # E: numpy.matrix[Any, Any]\n\n# getfield\nreveal_type(nd.getfield(\"float\")) # E: numpy.ndarray[Any, numpy.dtype[Any]]\nreveal_type(nd.getfield(float)) # E: numpy.ndarray[Any, numpy.dtype[Any]]\nreveal_type(nd.getfield(np.float64)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]\nreveal_type(nd.getfield(np.float64, 8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]\n\n# setflags does not return a value\n# fill does not return a value\n",
"from __future__ import annotations\n\nfrom datetime import (\n date,\n datetime,\n time,\n timedelta,\n tzinfo,\n)\nimport operator\nfrom typing import (\n TYPE_CHECKING,\n Hashable,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import (\n NaT,\n Period,\n Timestamp,\n index as libindex,\n lib,\n)\nfrom pandas._libs.tslibs import (\n Resolution,\n parsing,\n timezones,\n to_offset,\n)\nfrom pandas._libs.tslibs.offsets import prefix_mapping\nfrom pandas._typing import (\n Dtype,\n DtypeObj,\n)\nfrom pandas.errors import InvalidIndexError\nfrom pandas.util._decorators import (\n cache_readonly,\n doc,\n)\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.common import (\n DT64NS_DTYPE,\n is_datetime64_dtype,\n is_datetime64tz_dtype,\n is_scalar,\n)\nfrom pandas.core.dtypes.missing import is_valid_na_for_dtype\n\nfrom pandas.core.arrays.datetimes import (\n DatetimeArray,\n tz_to_dtype,\n)\nimport pandas.core.common as com\nfrom pandas.core.indexes.base import (\n Index,\n get_unanimous_names,\n maybe_extract_name,\n)\nfrom pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin\nfrom pandas.core.indexes.extension import inherit_names\nfrom pandas.core.tools.times import to_time\n\nif TYPE_CHECKING:\n from pandas import (\n DataFrame,\n Float64Index,\n PeriodIndex,\n TimedeltaIndex,\n )\n\n\ndef _new_DatetimeIndex(cls, d):\n \"\"\"\n This is called upon unpickling, rather than the default which doesn't\n have arguments and breaks __new__\n \"\"\"\n if \"data\" in d and not isinstance(d[\"data\"], DatetimeIndex):\n # Avoid need to verify integrity by calling simple_new directly\n data = d.pop(\"data\")\n if not isinstance(data, DatetimeArray):\n # For backward compat with older pickles, we may need to construct\n # a DatetimeArray to adapt to the newer _simple_new signature\n tz = d.pop(\"tz\")\n freq = d.pop(\"freq\")\n dta = DatetimeArray._simple_new(data, dtype=tz_to_dtype(tz), freq=freq)\n else:\n dta = data\n for key in [\"tz\", \"freq\"]:\n # These are already stored in our DatetimeArray; if they are\n # also in the pickle and don't match, we have a problem.\n if key in d:\n assert d[key] == getattr(dta, key)\n d.pop(key)\n result = cls._simple_new(dta, **d)\n else:\n with warnings.catch_warnings():\n # TODO: If we knew what was going in to **d, we might be able to\n # go through _simple_new instead\n warnings.simplefilter(\"ignore\")\n result = cls.__new__(cls, **d)\n\n return result\n\n\n@inherit_names(\n DatetimeArray._field_ops\n + [\n method\n for method in DatetimeArray._datetimelike_methods\n if method not in (\"tz_localize\", \"tz_convert\")\n ],\n DatetimeArray,\n wrap=True,\n)\n@inherit_names([\"is_normalized\", \"_resolution_obj\"], DatetimeArray, cache=True)\n@inherit_names(\n [\n \"_bool_ops\",\n \"_object_ops\",\n \"_field_ops\",\n \"_datetimelike_ops\",\n \"_datetimelike_methods\",\n \"tz\",\n \"tzinfo\",\n \"dtype\",\n \"to_pydatetime\",\n \"_has_same_tz\",\n \"_format_native_types\",\n \"date\",\n \"time\",\n \"timetz\",\n \"std\",\n ]\n + DatetimeArray._bool_ops,\n DatetimeArray,\n)\nclass DatetimeIndex(DatetimeTimedeltaMixin):\n \"\"\"\n Immutable ndarray-like of datetime64 data.\n\n Represented internally as int64, and which can be boxed to Timestamp objects\n that are subclasses of datetime and carry metadata.\n\n Parameters\n ----------\n data : array-like (1-dimensional), optional\n Optional datetime-like data to construct index with.\n freq : str or pandas offset object, optional\n One of pandas date offset 
strings or corresponding objects. The string\n 'infer' can be passed in order to set the frequency of the index as the\n inferred frequency upon creation.\n tz : pytz.timezone or dateutil.tz.tzfile or datetime.tzinfo or str\n Set the Timezone of the data.\n normalize : bool, default False\n Normalize start/end dates to midnight before generating date range.\n closed : {'left', 'right'}, optional\n Set whether to include `start` and `end` that are on the\n boundary. The default includes boundary points on either end.\n ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'\n When clocks moved backward due to DST, ambiguous times may arise.\n For example in Central European Time (UTC+01), when going from 03:00\n DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC\n and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter\n dictates how ambiguous times should be handled.\n\n - 'infer' will attempt to infer fall dst-transition hours based on\n order\n - bool-ndarray where True signifies a DST time, False signifies a\n non-DST time (note that this flag is only applicable for ambiguous\n times)\n - 'NaT' will return NaT where there are ambiguous times\n - 'raise' will raise an AmbiguousTimeError if there are ambiguous times.\n dayfirst : bool, default False\n If True, parse dates in `data` with the day first order.\n yearfirst : bool, default False\n If True parse dates in `data` with the year first order.\n dtype : numpy.dtype or DatetimeTZDtype or str, default None\n Note that the only NumPy dtype allowed is ‘datetime64[ns]’.\n copy : bool, default False\n Make a copy of input ndarray.\n name : label, default None\n Name to be stored in the index.\n\n Attributes\n ----------\n year\n month\n day\n hour\n minute\n second\n microsecond\n nanosecond\n date\n time\n timetz\n dayofyear\n day_of_year\n weekofyear\n week\n dayofweek\n day_of_week\n weekday\n quarter\n tz\n freq\n freqstr\n is_month_start\n is_month_end\n is_quarter_start\n is_quarter_end\n is_year_start\n is_year_end\n is_leap_year\n inferred_freq\n\n Methods\n -------\n normalize\n strftime\n snap\n tz_convert\n tz_localize\n round\n floor\n ceil\n to_period\n to_perioddelta\n to_pydatetime\n to_series\n to_frame\n month_name\n day_name\n mean\n std\n\n See Also\n --------\n Index : The base pandas Index type.\n TimedeltaIndex : Index of timedelta64 data.\n PeriodIndex : Index of Period data.\n to_datetime : Convert argument to datetime.\n date_range : Create a fixed-frequency DatetimeIndex.\n\n Notes\n -----\n To learn more about the frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n \"\"\"\n\n _typ = \"datetimeindex\"\n\n _data_cls = DatetimeArray\n _engine_type = libindex.DatetimeEngine\n _supports_partial_string_indexing = True\n\n _data: DatetimeArray\n inferred_freq: str | None\n tz: tzinfo | None\n\n # --------------------------------------------------------------------\n # methods that dispatch to DatetimeArray and wrap result\n\n @doc(DatetimeArray.strftime)\n def strftime(self, date_format) -> Index:\n arr = self._data.strftime(date_format)\n return Index(arr, name=self.name)\n\n @doc(DatetimeArray.tz_convert)\n def tz_convert(self, tz) -> DatetimeIndex:\n arr = self._data.tz_convert(tz)\n return type(self)._simple_new(arr, name=self.name)\n\n @doc(DatetimeArray.tz_localize)\n def tz_localize(self, tz, ambiguous=\"raise\", nonexistent=\"raise\") -> DatetimeIndex:\n arr = self._data.tz_localize(tz, 
ambiguous, nonexistent)\n return type(self)._simple_new(arr, name=self.name)\n\n @doc(DatetimeArray.to_period)\n def to_period(self, freq=None) -> PeriodIndex:\n from pandas.core.indexes.api import PeriodIndex\n\n arr = self._data.to_period(freq)\n return PeriodIndex._simple_new(arr, name=self.name)\n\n @doc(DatetimeArray.to_perioddelta)\n def to_perioddelta(self, freq) -> TimedeltaIndex:\n from pandas.core.indexes.api import TimedeltaIndex\n\n arr = self._data.to_perioddelta(freq)\n return TimedeltaIndex._simple_new(arr, name=self.name)\n\n @doc(DatetimeArray.to_julian_date)\n def to_julian_date(self) -> Float64Index:\n from pandas.core.indexes.api import Float64Index\n\n arr = self._data.to_julian_date()\n return Float64Index._simple_new(arr, name=self.name)\n\n @doc(DatetimeArray.isocalendar)\n def isocalendar(self) -> DataFrame:\n df = self._data.isocalendar()\n return df.set_index(self)\n\n # --------------------------------------------------------------------\n # Constructors\n\n def __new__(\n cls,\n data=None,\n freq=lib.no_default,\n tz=None,\n normalize: bool = False,\n closed=None,\n ambiguous=\"raise\",\n dayfirst: bool = False,\n yearfirst: bool = False,\n dtype: Dtype | None = None,\n copy: bool = False,\n name: Hashable = None,\n ) -> DatetimeIndex:\n\n if is_scalar(data):\n raise cls._scalar_data_error(data)\n\n # - Cases checked above all return/raise before reaching here - #\n\n name = maybe_extract_name(name, data, cls)\n\n dtarr = DatetimeArray._from_sequence_not_strict(\n data,\n dtype=dtype,\n copy=copy,\n tz=tz,\n freq=freq,\n dayfirst=dayfirst,\n yearfirst=yearfirst,\n ambiguous=ambiguous,\n )\n\n subarr = cls._simple_new(dtarr, name=name)\n return subarr\n\n # --------------------------------------------------------------------\n\n @cache_readonly\n def _is_dates_only(self) -> bool:\n \"\"\"\n Return a boolean if we are only dates (and don't have a timezone)\n\n Returns\n -------\n bool\n \"\"\"\n from pandas.io.formats.format import is_dates_only\n\n # error: Argument 1 to \"is_dates_only\" has incompatible type\n # \"Union[ExtensionArray, ndarray]\"; expected \"Union[ndarray,\n # DatetimeArray, Index, DatetimeIndex]\"\n return self.tz is None and is_dates_only(self._values) # type: ignore[arg-type]\n\n def __reduce__(self):\n\n # we use a special reduce here because we need\n # to simply set the .tz (and not reinterpret it)\n\n d = {\"data\": self._data}\n d.update(self._get_attributes_dict())\n return _new_DatetimeIndex, (type(self), d), None\n\n def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:\n \"\"\"\n Can we compare values of the given dtype to our own?\n \"\"\"\n if self.tz is not None:\n # If we have tz, we can compare to tzaware\n return is_datetime64tz_dtype(dtype)\n # if we dont have tz, we can only compare to tznaive\n return is_datetime64_dtype(dtype)\n\n # --------------------------------------------------------------------\n # Rendering Methods\n\n @property\n def _formatter_func(self):\n from pandas.io.formats.format import get_format_datetime64\n\n formatter = get_format_datetime64(is_dates_only=self._is_dates_only)\n return lambda x: f\"'{formatter(x)}'\"\n\n # --------------------------------------------------------------------\n # Set Operation Methods\n\n def union_many(self, others):\n \"\"\"\n A bit of a hack to accelerate unioning a collection of indexes.\n \"\"\"\n this = self\n\n for other in others:\n if not isinstance(this, DatetimeIndex):\n this = Index.union(this, other)\n continue\n\n if not isinstance(other, 
DatetimeIndex):\n try:\n other = DatetimeIndex(other)\n except TypeError:\n pass\n\n this, other = this._maybe_utc_convert(other)\n\n if this._can_fast_union(other):\n this = this._fast_union(other)\n else:\n this = Index.union(this, other)\n\n res_name = get_unanimous_names(self, *others)[0]\n if this.name != res_name:\n return this.rename(res_name)\n return this\n\n def _maybe_utc_convert(self, other: Index) -> tuple[DatetimeIndex, Index]:\n this = self\n\n if isinstance(other, DatetimeIndex):\n if (self.tz is None) ^ (other.tz is None):\n raise TypeError(\"Cannot join tz-naive with tz-aware DatetimeIndex\")\n\n if not timezones.tz_compare(self.tz, other.tz):\n this = self.tz_convert(\"UTC\")\n other = other.tz_convert(\"UTC\")\n return this, other\n\n # --------------------------------------------------------------------\n\n def _get_time_micros(self) -> np.ndarray:\n \"\"\"\n Return the number of microseconds since midnight.\n\n Returns\n -------\n ndarray[int64_t]\n \"\"\"\n values = self._data._local_timestamps()\n\n nanos = values % (24 * 3600 * 1_000_000_000)\n micros = nanos // 1000\n\n micros[self._isnan] = -1\n return micros\n\n def to_series(self, keep_tz=lib.no_default, index=None, name=None):\n \"\"\"\n Create a Series with both index and values equal to the index keys\n useful with map for returning an indexer based on an index.\n\n Parameters\n ----------\n keep_tz : optional, defaults True\n Return the data keeping the timezone.\n\n If keep_tz is True:\n\n If the timezone is not set, the resulting\n Series will have a datetime64[ns] dtype.\n\n Otherwise the Series will have an datetime64[ns, tz] dtype; the\n tz will be preserved.\n\n If keep_tz is False:\n\n Series will have a datetime64[ns] dtype. TZ aware\n objects will have the tz removed.\n\n .. versionchanged:: 1.0.0\n The default value is now True. In a future version,\n this keyword will be removed entirely. Stop passing the\n argument to obtain the future behavior and silence the warning.\n\n index : Index, optional\n Index of resulting Series. If None, defaults to original index.\n name : str, optional\n Name of resulting Series. If None, defaults to name of original\n index.\n\n Returns\n -------\n Series\n \"\"\"\n from pandas import Series\n\n if index is None:\n index = self._view()\n if name is None:\n name = self.name\n\n if keep_tz is not lib.no_default:\n if keep_tz:\n warnings.warn(\n \"The 'keep_tz' keyword in DatetimeIndex.to_series \"\n \"is deprecated and will be removed in a future version. \"\n \"You can stop passing 'keep_tz' to silence this warning.\",\n FutureWarning,\n stacklevel=2,\n )\n else:\n warnings.warn(\n \"Specifying 'keep_tz=False' is deprecated and this \"\n \"option will be removed in a future release. 
If \"\n \"you want to remove the timezone information, you \"\n \"can do 'idx.tz_convert(None)' before calling \"\n \"'to_series'.\",\n FutureWarning,\n stacklevel=2,\n )\n else:\n keep_tz = True\n\n if keep_tz and self.tz is not None:\n # preserve the tz & copy\n values = self.copy(deep=True)\n else:\n # error: Incompatible types in assignment (expression has type\n # \"Union[ExtensionArray, ndarray]\", variable has type \"DatetimeIndex\")\n values = self._values.view(\"M8[ns]\").copy() # type: ignore[assignment]\n\n return Series(values, index=index, name=name)\n\n def snap(self, freq=\"S\") -> DatetimeIndex:\n \"\"\"\n Snap time stamps to nearest occurring frequency.\n\n Returns\n -------\n DatetimeIndex\n \"\"\"\n # Superdumb, punting on any optimizing\n freq = to_offset(freq)\n\n snapped = np.empty(len(self), dtype=DT64NS_DTYPE)\n\n for i, v in enumerate(self):\n s = v\n if not freq.is_on_offset(s):\n t0 = freq.rollback(s)\n t1 = freq.rollforward(s)\n if abs(s - t0) < abs(t1 - s):\n s = t0\n else:\n s = t1\n snapped[i] = s\n\n dta = DatetimeArray(snapped, dtype=self.dtype)\n return DatetimeIndex._simple_new(dta, name=self.name)\n\n # --------------------------------------------------------------------\n # Indexing Methods\n\n def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):\n \"\"\"\n Calculate datetime bounds for parsed time string and its resolution.\n\n Parameters\n ----------\n reso : str\n Resolution provided by parsed string.\n parsed : datetime\n Datetime from parsed string.\n\n Returns\n -------\n lower, upper: pd.Timestamp\n \"\"\"\n assert isinstance(reso, Resolution), (type(reso), reso)\n valid_resos = {\n \"year\",\n \"month\",\n \"quarter\",\n \"day\",\n \"hour\",\n \"minute\",\n \"second\",\n \"millisecond\",\n \"microsecond\",\n }\n if reso.attrname not in valid_resos:\n raise KeyError\n\n grp = reso.freq_group\n per = Period(parsed, freq=grp.value)\n start, end = per.start_time, per.end_time\n\n # GH 24076\n # If an incoming date string contained a UTC offset, need to localize\n # the parsed date to this offset first before aligning with the index's\n # timezone\n if parsed.tzinfo is not None:\n if self.tz is None:\n raise ValueError(\n \"The index must be timezone aware when indexing \"\n \"with a date string with a UTC offset\"\n )\n start = start.tz_localize(parsed.tzinfo).tz_convert(self.tz)\n end = end.tz_localize(parsed.tzinfo).tz_convert(self.tz)\n elif self.tz is not None:\n start = start.tz_localize(self.tz)\n end = end.tz_localize(self.tz)\n return start, end\n\n def _validate_partial_date_slice(self, reso: Resolution):\n assert isinstance(reso, Resolution), (type(reso), reso)\n if (\n self.is_monotonic\n and reso.attrname in [\"day\", \"hour\", \"minute\", \"second\"]\n and self._resolution_obj >= reso\n ):\n # These resolution/monotonicity validations came from GH3931,\n # GH3452 and GH2369.\n\n # See also GH14826\n raise KeyError\n\n if reso.attrname == \"microsecond\":\n # _partial_date_slice doesn't allow microsecond resolution, but\n # _parsed_string_to_bounds allows it.\n raise KeyError\n\n def _deprecate_mismatched_indexing(self, key) -> None:\n # GH#36148\n # we get here with isinstance(key, self._data._recognized_scalars)\n try:\n self._data._assert_tzawareness_compat(key)\n except TypeError:\n if self.tz is None:\n msg = (\n \"Indexing a timezone-naive DatetimeIndex with a \"\n \"timezone-aware datetime is deprecated and will \"\n \"raise KeyError in a future version. 
\"\n \"Use a timezone-naive object instead.\"\n )\n else:\n msg = (\n \"Indexing a timezone-aware DatetimeIndex with a \"\n \"timezone-naive datetime is deprecated and will \"\n \"raise KeyError in a future version. \"\n \"Use a timezone-aware object instead.\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())\n\n def get_loc(self, key, method=None, tolerance=None):\n \"\"\"\n Get integer location for requested label\n\n Returns\n -------\n loc : int\n \"\"\"\n if not is_scalar(key):\n raise InvalidIndexError(key)\n\n orig_key = key\n if is_valid_na_for_dtype(key, self.dtype):\n key = NaT\n\n if isinstance(key, self._data._recognized_scalars):\n # needed to localize naive datetimes\n self._deprecate_mismatched_indexing(key)\n key = self._maybe_cast_for_get_loc(key)\n\n elif isinstance(key, str):\n try:\n return self._get_string_slice(key)\n except (TypeError, KeyError, ValueError, OverflowError):\n pass\n\n try:\n key = self._maybe_cast_for_get_loc(key)\n except ValueError as err:\n raise KeyError(key) from err\n\n elif isinstance(key, timedelta):\n # GH#20464\n raise TypeError(\n f\"Cannot index {type(self).__name__} with {type(key).__name__}\"\n )\n\n elif isinstance(key, time):\n if method is not None:\n raise NotImplementedError(\n \"cannot yet lookup inexact labels when key is a time object\"\n )\n return self.indexer_at_time(key)\n\n else:\n # unrecognized type\n raise KeyError(key)\n\n try:\n return Index.get_loc(self, key, method, tolerance)\n except KeyError as err:\n raise KeyError(orig_key) from err\n\n def _maybe_cast_for_get_loc(self, key) -> Timestamp:\n # needed to localize naive datetimes or dates (GH 35690)\n key = Timestamp(key)\n if key.tzinfo is None:\n key = key.tz_localize(self.tz)\n else:\n key = key.tz_convert(self.tz)\n return key\n\n def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default):\n \"\"\"\n If label is a string, cast it to datetime according to resolution.\n\n Parameters\n ----------\n label : object\n side : {'left', 'right'}\n kind : {'loc', 'getitem'} or None\n\n Returns\n -------\n label : object\n\n Notes\n -----\n Value of `side` parameter should be validated in caller.\n \"\"\"\n assert kind in [\"loc\", \"getitem\", None, lib.no_default]\n self._deprecated_arg(kind, \"kind\", \"_maybe_cast_slice_bound\")\n\n if isinstance(label, str):\n freq = getattr(self, \"freqstr\", getattr(self, \"inferred_freq\", None))\n try:\n parsed, reso_str = parsing.parse_time_string(label, freq)\n except parsing.DateParseError as err:\n raise self._invalid_indexer(\"slice\", label) from err\n\n reso = Resolution.from_attrname(reso_str)\n lower, upper = self._parsed_string_to_bounds(reso, parsed)\n # lower, upper form the half-open interval:\n # [parsed, parsed + 1 freq)\n # because label may be passed to searchsorted\n # the bounds need swapped if index is reverse sorted and has a\n # length > 1 (is_monotonic_decreasing gives True for empty\n # and length 1 index)\n if self._is_strictly_monotonic_decreasing and len(self) > 1:\n return upper if side == \"left\" else lower\n return lower if side == \"left\" else upper\n elif isinstance(label, (self._data._recognized_scalars, date)):\n self._deprecate_mismatched_indexing(label)\n else:\n raise self._invalid_indexer(\"slice\", label)\n\n return self._maybe_cast_for_get_loc(label)\n\n def _get_string_slice(self, key: str):\n freq = getattr(self, \"freqstr\", getattr(self, \"inferred_freq\", None))\n parsed, reso_str = parsing.parse_time_string(key, freq)\n reso = 
Resolution.from_attrname(reso_str)\n return self._partial_date_slice(reso, parsed)\n\n def slice_indexer(self, start=None, end=None, step=None, kind=None):\n \"\"\"\n Return indexer for specified label slice.\n Index.slice_indexer, customized to handle time slicing.\n\n In addition to functionality provided by Index.slice_indexer, does the\n following:\n\n - if both `start` and `end` are instances of `datetime.time`, it\n invokes `indexer_between_time`\n - if `start` and `end` are both either string or None perform\n value-based selection in non-monotonic cases.\n\n \"\"\"\n # For historical reasons DatetimeIndex supports slices between two\n # instances of datetime.time as if it were applying a slice mask to\n # an array of (self.hour, self.minute, self.seconds, self.microsecond).\n if isinstance(start, time) and isinstance(end, time):\n if step is not None and step != 1:\n raise ValueError(\"Must have step size of 1 with time slices\")\n return self.indexer_between_time(start, end)\n\n if isinstance(start, time) or isinstance(end, time):\n raise KeyError(\"Cannot mix time and non-time slice keys\")\n\n # Pandas supports slicing with dates, treated as datetimes at midnight.\n # https://github.com/pandas-dev/pandas/issues/31501\n if isinstance(start, date) and not isinstance(start, datetime):\n start = datetime.combine(start, time(0, 0))\n if isinstance(end, date) and not isinstance(end, datetime):\n end = datetime.combine(end, time(0, 0))\n\n def check_str_or_none(point):\n return point is not None and not isinstance(point, str)\n\n # GH#33146 if start and end are combinations of str and None and Index is not\n # monotonic, we can not use Index.slice_indexer because it does not honor the\n # actual elements, is only searching for start and end\n if (\n check_str_or_none(start)\n or check_str_or_none(end)\n or self.is_monotonic_increasing\n ):\n return Index.slice_indexer(self, start, end, step, kind=kind)\n\n mask = np.array(True)\n deprecation_mask = np.array(True)\n if start is not None:\n start_casted = self._maybe_cast_slice_bound(start, \"left\")\n mask = start_casted <= self\n deprecation_mask = start_casted == self\n\n if end is not None:\n end_casted = self._maybe_cast_slice_bound(end, \"right\")\n mask = (self <= end_casted) & mask\n deprecation_mask = (end_casted == self) | deprecation_mask\n\n if not deprecation_mask.any():\n warnings.warn(\n \"Value based partial slicing on non-monotonic DatetimeIndexes \"\n \"with non-existing keys is deprecated and will raise a \"\n \"KeyError in a future Version.\",\n FutureWarning,\n stacklevel=5,\n )\n indexer = mask.nonzero()[0][::step]\n if len(indexer) == len(self):\n return slice(None)\n else:\n return indexer\n\n # --------------------------------------------------------------------\n\n @property\n def inferred_type(self) -> str:\n # b/c datetime is represented as microseconds since the epoch, make\n # sure we can't have ambiguous indexing\n return \"datetime64\"\n\n def indexer_at_time(self, time, asof: bool = False) -> np.ndarray:\n \"\"\"\n Return index locations of values at particular time of day\n (e.g. 
9:30AM).\n\n Parameters\n ----------\n time : datetime.time or str\n Time passed in either as object (datetime.time) or as string in\n appropriate format (\"%H:%M\", \"%H%M\", \"%I:%M%p\", \"%I%M%p\",\n \"%H:%M:%S\", \"%H%M%S\", \"%I:%M:%S%p\", \"%I%M%S%p\").\n\n Returns\n -------\n np.ndarray[np.intp]\n\n See Also\n --------\n indexer_between_time : Get index locations of values between particular\n times of day.\n DataFrame.at_time : Select values at particular time of day.\n \"\"\"\n if asof:\n raise NotImplementedError(\"'asof' argument is not supported\")\n\n if isinstance(time, str):\n from dateutil.parser import parse\n\n time = parse(time).time()\n\n if time.tzinfo:\n if self.tz is None:\n raise ValueError(\"Index must be timezone aware.\")\n time_micros = self.tz_convert(time.tzinfo)._get_time_micros()\n else:\n time_micros = self._get_time_micros()\n micros = _time_to_micros(time)\n return (time_micros == micros).nonzero()[0]\n\n def indexer_between_time(\n self, start_time, end_time, include_start: bool = True, include_end: bool = True\n ) -> np.ndarray:\n \"\"\"\n Return index locations of values between particular times of day\n (e.g., 9:00-9:30AM).\n\n Parameters\n ----------\n start_time, end_time : datetime.time, str\n Time passed either as object (datetime.time) or as string in\n appropriate format (\"%H:%M\", \"%H%M\", \"%I:%M%p\", \"%I%M%p\",\n \"%H:%M:%S\", \"%H%M%S\", \"%I:%M:%S%p\",\"%I%M%S%p\").\n include_start : bool, default True\n include_end : bool, default True\n\n Returns\n -------\n np.ndarray[np.intp]\n\n See Also\n --------\n indexer_at_time : Get index locations of values at particular time of day.\n DataFrame.between_time : Select values between particular times of day.\n \"\"\"\n start_time = to_time(start_time)\n end_time = to_time(end_time)\n time_micros = self._get_time_micros()\n start_micros = _time_to_micros(start_time)\n end_micros = _time_to_micros(end_time)\n\n if include_start and include_end:\n lop = rop = operator.le\n elif include_start:\n lop = operator.le\n rop = operator.lt\n elif include_end:\n lop = operator.lt\n rop = operator.le\n else:\n lop = rop = operator.lt\n\n if start_time <= end_time:\n join_op = operator.and_\n else:\n join_op = operator.or_\n\n mask = join_op(lop(start_micros, time_micros), rop(time_micros, end_micros))\n\n return mask.nonzero()[0]\n\n\ndef date_range(\n start=None,\n end=None,\n periods=None,\n freq=None,\n tz=None,\n normalize: bool = False,\n name: Hashable = None,\n closed=None,\n **kwargs,\n) -> DatetimeIndex:\n \"\"\"\n Return a fixed frequency DatetimeIndex.\n\n Returns the range of equally spaced time points (where the difference between any\n two adjacent points is specified by the given frequency) such that they all\n satisfy `start <[=] x <[=] end`, where the first one and the last one are, resp.,\n the first and last time points in that range that fall on the boundary of ``freq``\n (if given as a frequency string) or that are valid for ``freq`` (if given as a\n :class:`pandas.tseries.offsets.DateOffset`). (If exactly one of ``start``,\n ``end``, or ``freq`` is *not* specified, this missing parameter can be computed\n given ``periods``, the number of timesteps in the range. 
See the note below.)\n\n Parameters\n ----------\n start : str or datetime-like, optional\n Left bound for generating dates.\n end : str or datetime-like, optional\n Right bound for generating dates.\n periods : int, optional\n Number of periods to generate.\n freq : str or DateOffset, default 'D'\n Frequency strings can have multiples, e.g. '5H'. See\n :ref:`here <timeseries.offset_aliases>` for a list of\n frequency aliases.\n tz : str or tzinfo, optional\n Time zone name for returning localized DatetimeIndex, for example\n 'Asia/Hong_Kong'. By default, the resulting DatetimeIndex is\n timezone-naive.\n normalize : bool, default False\n Normalize start/end dates to midnight before generating date range.\n name : str, default None\n Name of the resulting DatetimeIndex.\n closed : {None, 'left', 'right'}, optional\n Make the interval closed with respect to the given frequency to\n the 'left', 'right', or both sides (None, the default).\n **kwargs\n For compatibility. Has no effect on the result.\n\n Returns\n -------\n rng : DatetimeIndex\n\n See Also\n --------\n DatetimeIndex : An immutable container for datetimes.\n timedelta_range : Return a fixed frequency TimedeltaIndex.\n period_range : Return a fixed frequency PeriodIndex.\n interval_range : Return a fixed frequency IntervalIndex.\n\n Notes\n -----\n Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,\n exactly three must be specified. If ``freq`` is omitted, the resulting\n ``DatetimeIndex`` will have ``periods`` linearly spaced elements between\n ``start`` and ``end`` (closed on both sides).\n\n To learn more about the frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n **Specifying the values**\n\n The next four examples generate the same `DatetimeIndex`, but vary\n the combination of `start`, `end` and `periods`.\n\n Specify `start` and `end`, with the default daily frequency.\n\n >>> pd.date_range(start='1/1/2018', end='1/08/2018')\n DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',\n '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],\n dtype='datetime64[ns]', freq='D')\n\n Specify `start` and `periods`, the number of periods (days).\n\n >>> pd.date_range(start='1/1/2018', periods=8)\n DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',\n '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],\n dtype='datetime64[ns]', freq='D')\n\n Specify `end` and `periods`, the number of periods (days).\n\n >>> pd.date_range(end='1/1/2018', periods=8)\n DatetimeIndex(['2017-12-25', '2017-12-26', '2017-12-27', '2017-12-28',\n '2017-12-29', '2017-12-30', '2017-12-31', '2018-01-01'],\n dtype='datetime64[ns]', freq='D')\n\n Specify `start`, `end`, and `periods`; the frequency is generated\n automatically (linearly spaced).\n\n >>> pd.date_range(start='2018-04-24', end='2018-04-27', periods=3)\n DatetimeIndex(['2018-04-24 00:00:00', '2018-04-25 12:00:00',\n '2018-04-27 00:00:00'],\n dtype='datetime64[ns]', freq=None)\n\n **Other Parameters**\n\n Changed the `freq` (frequency) to ``'M'`` (month end frequency).\n\n >>> pd.date_range(start='1/1/2018', periods=5, freq='M')\n DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30',\n '2018-05-31'],\n dtype='datetime64[ns]', freq='M')\n\n Multiples are allowed\n\n >>> pd.date_range(start='1/1/2018', periods=5, freq='3M')\n DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',\n '2019-01-31'],\n 
dtype='datetime64[ns]', freq='3M')\n\n `freq` can also be specified as an Offset object.\n\n >>> pd.date_range(start='1/1/2018', periods=5, freq=pd.offsets.MonthEnd(3))\n DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',\n '2019-01-31'],\n dtype='datetime64[ns]', freq='3M')\n\n Specify `tz` to set the timezone.\n\n >>> pd.date_range(start='1/1/2018', periods=5, tz='Asia/Tokyo')\n DatetimeIndex(['2018-01-01 00:00:00+09:00', '2018-01-02 00:00:00+09:00',\n '2018-01-03 00:00:00+09:00', '2018-01-04 00:00:00+09:00',\n '2018-01-05 00:00:00+09:00'],\n dtype='datetime64[ns, Asia/Tokyo]', freq='D')\n\n `closed` controls whether to include `start` and `end` that are on the\n boundary. The default includes boundary points on either end.\n\n >>> pd.date_range(start='2017-01-01', end='2017-01-04', closed=None)\n DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04'],\n dtype='datetime64[ns]', freq='D')\n\n Use ``closed='left'`` to exclude `end` if it falls on the boundary.\n\n >>> pd.date_range(start='2017-01-01', end='2017-01-04', closed='left')\n DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03'],\n dtype='datetime64[ns]', freq='D')\n\n Use ``closed='right'`` to exclude `start` if it falls on the boundary.\n\n >>> pd.date_range(start='2017-01-01', end='2017-01-04', closed='right')\n DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'],\n dtype='datetime64[ns]', freq='D')\n \"\"\"\n if freq is None and com.any_none(periods, start, end):\n freq = \"D\"\n\n dtarr = DatetimeArray._generate_range(\n start=start,\n end=end,\n periods=periods,\n freq=freq,\n tz=tz,\n normalize=normalize,\n closed=closed,\n **kwargs,\n )\n return DatetimeIndex._simple_new(dtarr, name=name)\n\n\ndef bdate_range(\n start=None,\n end=None,\n periods: int | None = None,\n freq=\"B\",\n tz=None,\n normalize: bool = True,\n name: Hashable = None,\n weekmask=None,\n holidays=None,\n closed=None,\n **kwargs,\n) -> DatetimeIndex:\n \"\"\"\n Return a fixed frequency DatetimeIndex, with business day as the default\n frequency.\n\n Parameters\n ----------\n start : str or datetime-like, default None\n Left bound for generating dates.\n end : str or datetime-like, default None\n Right bound for generating dates.\n periods : int, default None\n Number of periods to generate.\n freq : str or DateOffset, default 'B' (business daily)\n Frequency strings can have multiples, e.g. '5H'.\n tz : str or None\n Time zone name for returning localized DatetimeIndex, for example\n Asia/Beijing.\n normalize : bool, default False\n Normalize start/end dates to midnight before generating date range.\n name : str, default None\n Name of the resulting DatetimeIndex.\n weekmask : str or None, default None\n Weekmask of valid business days, passed to ``numpy.busdaycalendar``,\n only used when custom frequency strings are passed. The default\n value None is equivalent to 'Mon Tue Wed Thu Fri'.\n holidays : list-like or None, default None\n Dates to exclude from the set of valid business days, passed to\n ``numpy.busdaycalendar``, only used when custom frequency strings\n are passed.\n closed : str, default None\n Make the interval closed with respect to the given frequency to\n the 'left', 'right', or both sides (None).\n **kwargs\n For compatibility. Has no effect on the result.\n\n Returns\n -------\n DatetimeIndex\n\n Notes\n -----\n Of the four parameters: ``start``, ``end``, ``periods``, and ``freq``,\n exactly three must be specified. Specifying ``freq`` is a requirement\n for ``bdate_range``. 
Use ``date_range`` if specifying ``freq`` is not\n desired.\n\n To learn more about the frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n Note how the two weekend days are skipped in the result.\n\n >>> pd.bdate_range(start='1/1/2018', end='1/08/2018')\n DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',\n '2018-01-05', '2018-01-08'],\n dtype='datetime64[ns]', freq='B')\n \"\"\"\n if freq is None:\n msg = \"freq must be specified for bdate_range; use date_range instead\"\n raise TypeError(msg)\n\n if isinstance(freq, str) and freq.startswith(\"C\"):\n try:\n weekmask = weekmask or \"Mon Tue Wed Thu Fri\"\n freq = prefix_mapping[freq](holidays=holidays, weekmask=weekmask)\n except (KeyError, TypeError) as err:\n msg = f\"invalid custom frequency string: {freq}\"\n raise ValueError(msg) from err\n elif holidays or weekmask:\n msg = (\n \"a custom frequency string is required when holidays or \"\n f\"weekmask are passed, got frequency {freq}\"\n )\n raise ValueError(msg)\n\n return date_range(\n start=start,\n end=end,\n periods=periods,\n freq=freq,\n tz=tz,\n normalize=normalize,\n name=name,\n closed=closed,\n **kwargs,\n )\n\n\ndef _time_to_micros(time_obj: time) -> int:\n seconds = time_obj.hour * 60 * 60 + 60 * time_obj.minute + time_obj.second\n return 1_000_000 * seconds + time_obj.microsecond\n",
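The docstrings in the entry above (date_range, indexer_between_time, DatetimeIndex.snap) describe the public behaviour being implemented; a short usage sketch, assuming a pandas release close to the vendored source (1.3.x):

import pandas as pd

# Of start/end/periods/freq, exactly three must be given; freq defaults to daily.
idx = pd.date_range(start="2018-01-01", periods=8)
print(idx[0], idx[-1])          # 2018-01-01 00:00:00 2018-01-08 00:00:00

# Integer positions of stamps between two wall-clock times of day.
rng = pd.date_range("2018-01-01", periods=24, freq="H")
print(rng.indexer_between_time("09:00", "11:00"))   # [ 9 10 11]

# snap() moves each stamp to the nearest point on the given frequency.
irregular = pd.DatetimeIndex(["2018-01-01 11:59", "2018-01-02 00:01"])
print(irregular.snap(freq="D"))  # both stamps land on the nearest midnight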
"\"\"\"\nTests column conversion functionality during parsing\nfor all of the parsers defined in parsers.py\n\"\"\"\nfrom io import StringIO\n\nfrom dateutil.parser import parse\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n)\nimport pandas._testing as tm\n\n\ndef test_converters_type_must_be_dict(all_parsers):\n parser = all_parsers\n data = \"\"\"index,A,B,C,D\nfoo,2,3,4,5\n\"\"\"\n\n with pytest.raises(TypeError, match=\"Type converters.+\"):\n parser.read_csv(StringIO(data), converters=0)\n\n\[email protected](\"column\", [3, \"D\"])\[email protected](\n \"converter\", [parse, lambda x: int(x.split(\"/\")[2])] # Produce integer.\n)\ndef test_converters(all_parsers, column, converter):\n parser = all_parsers\n data = \"\"\"A,B,C,D\na,1,2,01/01/2009\nb,3,4,01/02/2009\nc,4,5,01/03/2009\n\"\"\"\n result = parser.read_csv(StringIO(data), converters={column: converter})\n\n expected = parser.read_csv(StringIO(data))\n expected[\"D\"] = expected[\"D\"].map(converter)\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_converters_no_implicit_conv(all_parsers):\n # see gh-2184\n parser = all_parsers\n data = \"\"\"000102,1.2,A\\n001245,2,B\"\"\"\n\n converters = {0: lambda x: x.strip()}\n result = parser.read_csv(StringIO(data), header=None, converters=converters)\n\n # Column 0 should not be casted to numeric and should remain as object.\n expected = DataFrame([[\"000102\", 1.2, \"A\"], [\"001245\", 2, \"B\"]])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_converters_euro_decimal_format(all_parsers):\n # see gh-583\n converters = {}\n parser = all_parsers\n\n data = \"\"\"Id;Number1;Number2;Text1;Text2;Number3\n1;1521,1541;187101,9543;ABC;poi;4,7387\n2;121,12;14897,76;DEF;uyt;0,3773\n3;878,158;108013,434;GHI;rez;2,7356\"\"\"\n converters[\"Number1\"] = converters[\"Number2\"] = converters[\n \"Number3\"\n ] = lambda x: float(x.replace(\",\", \".\"))\n\n result = parser.read_csv(StringIO(data), sep=\";\", converters=converters)\n expected = DataFrame(\n [\n [1, 1521.1541, 187101.9543, \"ABC\", \"poi\", 4.7387],\n [2, 121.12, 14897.76, \"DEF\", \"uyt\", 0.3773],\n [3, 878.158, 108013.434, \"GHI\", \"rez\", 2.7356],\n ],\n columns=[\"Id\", \"Number1\", \"Number2\", \"Text1\", \"Text2\", \"Number3\"],\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_converters_corner_with_nans(all_parsers):\n parser = all_parsers\n data = \"\"\"id,score,days\n1,2,12\n2,2-5,\n3,,14+\n4,6-12,2\"\"\"\n\n # Example converters.\n def convert_days(x):\n x = x.strip()\n\n if not x:\n return np.nan\n\n is_plus = x.endswith(\"+\")\n\n if is_plus:\n x = int(x[:-1]) + 1\n else:\n x = int(x)\n\n return x\n\n def convert_days_sentinel(x):\n x = x.strip()\n\n if not x:\n return np.nan\n\n is_plus = x.endswith(\"+\")\n\n if is_plus:\n x = int(x[:-1]) + 1\n else:\n x = int(x)\n\n return x\n\n def convert_score(x):\n x = x.strip()\n\n if not x:\n return np.nan\n\n if x.find(\"-\") > 0:\n val_min, val_max = map(int, x.split(\"-\"))\n val = 0.5 * (val_min + val_max)\n else:\n val = float(x)\n\n return val\n\n results = []\n\n for day_converter in [convert_days, convert_days_sentinel]:\n result = parser.read_csv(\n StringIO(data),\n converters={\"score\": convert_score, \"days\": day_converter},\n na_values=[\"\", None],\n )\n assert pd.isna(result[\"days\"][1])\n results.append(result)\n\n tm.assert_frame_equal(results[0], results[1])\n\n\ndef test_converter_index_col_bug(all_parsers):\n # see gh-1835\n parser = all_parsers\n data = 
\"A;B\\n1;2\\n3;4\"\n\n rs = parser.read_csv(\n StringIO(data), sep=\";\", index_col=\"A\", converters={\"A\": lambda x: x}\n )\n\n xp = DataFrame({\"B\": [2, 4]}, index=Index([1, 3], name=\"A\"))\n tm.assert_frame_equal(rs, xp)\n",
"from itertools import (\n chain,\n product,\n)\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.algos import (\n Infinity,\n NegInfinity,\n)\nimport pandas.util._test_decorators as td\n\nfrom pandas import (\n NaT,\n Series,\n Timestamp,\n date_range,\n)\nimport pandas._testing as tm\nfrom pandas.api.types import CategoricalDtype\n\n\nclass TestSeriesRank:\n s = Series([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3])\n\n results = {\n \"average\": np.array([1.5, 5.5, 7.0, 3.5, np.nan, 3.5, 1.5, 8.0, np.nan, 5.5]),\n \"min\": np.array([1, 5, 7, 3, np.nan, 3, 1, 8, np.nan, 5]),\n \"max\": np.array([2, 6, 7, 4, np.nan, 4, 2, 8, np.nan, 6]),\n \"first\": np.array([1, 5, 7, 3, np.nan, 4, 2, 8, np.nan, 6]),\n \"dense\": np.array([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3]),\n }\n\n def test_rank(self, datetime_series):\n pytest.importorskip(\"scipy.stats.special\")\n rankdata = pytest.importorskip(\"scipy.stats.rankdata\")\n\n datetime_series[::2] = np.nan\n datetime_series[:10][::3] = 4.0\n\n ranks = datetime_series.rank()\n oranks = datetime_series.astype(\"O\").rank()\n\n tm.assert_series_equal(ranks, oranks)\n\n mask = np.isnan(datetime_series)\n filled = datetime_series.fillna(np.inf)\n\n # rankdata returns a ndarray\n exp = Series(rankdata(filled), index=filled.index, name=\"ts\")\n exp[mask] = np.nan\n\n tm.assert_series_equal(ranks, exp)\n\n iseries = Series(np.arange(5).repeat(2))\n\n iranks = iseries.rank()\n exp = iseries.astype(float).rank()\n tm.assert_series_equal(iranks, exp)\n iseries = Series(np.arange(5)) + 1.0\n exp = iseries / 5.0\n iranks = iseries.rank(pct=True)\n\n tm.assert_series_equal(iranks, exp)\n\n iseries = Series(np.repeat(1, 100))\n exp = Series(np.repeat(0.505, 100))\n iranks = iseries.rank(pct=True)\n tm.assert_series_equal(iranks, exp)\n\n iseries[1] = np.nan\n exp = Series(np.repeat(50.0 / 99.0, 100))\n exp[1] = np.nan\n iranks = iseries.rank(pct=True)\n tm.assert_series_equal(iranks, exp)\n\n iseries = Series(np.arange(5)) + 1.0\n iseries[4] = np.nan\n exp = iseries / 4.0\n iranks = iseries.rank(pct=True)\n tm.assert_series_equal(iranks, exp)\n\n iseries = Series(np.repeat(np.nan, 100))\n exp = iseries.copy()\n iranks = iseries.rank(pct=True)\n tm.assert_series_equal(iranks, exp)\n\n iseries = Series(np.arange(5)) + 1\n iseries[4] = np.nan\n exp = iseries / 4.0\n iranks = iseries.rank(pct=True)\n tm.assert_series_equal(iranks, exp)\n\n rng = date_range(\"1/1/1990\", periods=5)\n iseries = Series(np.arange(5), rng) + 1\n iseries.iloc[4] = np.nan\n exp = iseries / 4.0\n iranks = iseries.rank(pct=True)\n tm.assert_series_equal(iranks, exp)\n\n iseries = Series([1e-50, 1e-100, 1e-20, 1e-2, 1e-20 + 1e-30, 1e-1])\n exp = Series([2, 1, 3, 5, 4, 6.0])\n iranks = iseries.rank()\n tm.assert_series_equal(iranks, exp)\n\n # GH 5968\n iseries = Series([\"3 day\", \"1 day 10m\", \"-2 day\", NaT], dtype=\"m8[ns]\")\n exp = Series([3, 2, 1, np.nan])\n iranks = iseries.rank()\n tm.assert_series_equal(iranks, exp)\n\n values = np.array(\n [-50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40],\n dtype=\"float64\",\n )\n random_order = np.random.permutation(len(values))\n iseries = Series(values[random_order])\n exp = Series(random_order + 1.0, dtype=\"float64\")\n iranks = iseries.rank()\n tm.assert_series_equal(iranks, exp)\n\n def test_rank_categorical(self):\n # GH issue #15420 rank incorrectly orders ordered categories\n\n # Test ascending/descending ranking for ordered categoricals\n exp = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])\n exp_desc = Series([6.0, 5.0, 
4.0, 3.0, 2.0, 1.0])\n ordered = Series(\n [\"first\", \"second\", \"third\", \"fourth\", \"fifth\", \"sixth\"]\n ).astype(\n CategoricalDtype(\n categories=[\"first\", \"second\", \"third\", \"fourth\", \"fifth\", \"sixth\"],\n ordered=True,\n )\n )\n tm.assert_series_equal(ordered.rank(), exp)\n tm.assert_series_equal(ordered.rank(ascending=False), exp_desc)\n\n # Unordered categoricals should be ranked as objects\n unordered = Series(\n [\"first\", \"second\", \"third\", \"fourth\", \"fifth\", \"sixth\"]\n ).astype(\n CategoricalDtype(\n categories=[\"first\", \"second\", \"third\", \"fourth\", \"fifth\", \"sixth\"],\n ordered=False,\n )\n )\n exp_unordered = Series([2.0, 4.0, 6.0, 3.0, 1.0, 5.0])\n res = unordered.rank()\n tm.assert_series_equal(res, exp_unordered)\n\n unordered1 = Series([1, 2, 3, 4, 5, 6]).astype(\n CategoricalDtype([1, 2, 3, 4, 5, 6], False)\n )\n exp_unordered1 = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])\n res1 = unordered1.rank()\n tm.assert_series_equal(res1, exp_unordered1)\n\n # Test na_option for rank data\n na_ser = Series(\n [\"first\", \"second\", \"third\", \"fourth\", \"fifth\", \"sixth\", np.NaN]\n ).astype(\n CategoricalDtype(\n [\"first\", \"second\", \"third\", \"fourth\", \"fifth\", \"sixth\", \"seventh\"],\n True,\n )\n )\n\n exp_top = Series([2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 1.0])\n exp_bot = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])\n exp_keep = Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, np.NaN])\n\n tm.assert_series_equal(na_ser.rank(na_option=\"top\"), exp_top)\n tm.assert_series_equal(na_ser.rank(na_option=\"bottom\"), exp_bot)\n tm.assert_series_equal(na_ser.rank(na_option=\"keep\"), exp_keep)\n\n # Test na_option for rank data with ascending False\n exp_top = Series([7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0])\n exp_bot = Series([6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 7.0])\n exp_keep = Series([6.0, 5.0, 4.0, 3.0, 2.0, 1.0, np.NaN])\n\n tm.assert_series_equal(na_ser.rank(na_option=\"top\", ascending=False), exp_top)\n tm.assert_series_equal(\n na_ser.rank(na_option=\"bottom\", ascending=False), exp_bot\n )\n tm.assert_series_equal(na_ser.rank(na_option=\"keep\", ascending=False), exp_keep)\n\n # Test invalid values for na_option\n msg = \"na_option must be one of 'keep', 'top', or 'bottom'\"\n\n with pytest.raises(ValueError, match=msg):\n na_ser.rank(na_option=\"bad\", ascending=False)\n\n # invalid type\n with pytest.raises(ValueError, match=msg):\n na_ser.rank(na_option=True, ascending=False)\n\n # Test with pct=True\n na_ser = Series([\"first\", \"second\", \"third\", \"fourth\", np.NaN]).astype(\n CategoricalDtype([\"first\", \"second\", \"third\", \"fourth\"], True)\n )\n exp_top = Series([0.4, 0.6, 0.8, 1.0, 0.2])\n exp_bot = Series([0.2, 0.4, 0.6, 0.8, 1.0])\n exp_keep = Series([0.25, 0.5, 0.75, 1.0, np.NaN])\n\n tm.assert_series_equal(na_ser.rank(na_option=\"top\", pct=True), exp_top)\n tm.assert_series_equal(na_ser.rank(na_option=\"bottom\", pct=True), exp_bot)\n tm.assert_series_equal(na_ser.rank(na_option=\"keep\", pct=True), exp_keep)\n\n def test_rank_signature(self):\n s = Series([0, 1])\n s.rank(method=\"average\")\n msg = \"No axis named average for object type Series\"\n with pytest.raises(ValueError, match=msg):\n s.rank(\"average\")\n\n def test_rank_tie_methods(self):\n s = self.s\n\n def _check(s, expected, method=\"average\"):\n result = s.rank(method=method)\n tm.assert_series_equal(result, Series(expected))\n\n dtypes = [None, object]\n disabled = {(object, \"first\")}\n results = self.results\n\n for method, dtype in product(results, 
dtypes):\n if (dtype, method) in disabled:\n continue\n series = s if dtype is None else s.astype(dtype)\n _check(series, results[method], method=method)\n\n @td.skip_if_no_scipy\n @pytest.mark.parametrize(\"ascending\", [True, False])\n @pytest.mark.parametrize(\"method\", [\"average\", \"min\", \"max\", \"first\", \"dense\"])\n @pytest.mark.parametrize(\"na_option\", [\"top\", \"bottom\", \"keep\"])\n def test_rank_tie_methods_on_infs_nans(self, method, na_option, ascending):\n dtypes = [\n (\"object\", None, Infinity(), NegInfinity()),\n (\"float64\", np.nan, np.inf, -np.inf),\n ]\n chunk = 3\n disabled = {(\"object\", \"first\")}\n\n def _check(s, method, na_option, ascending):\n exp_ranks = {\n \"average\": ([2, 2, 2], [5, 5, 5], [8, 8, 8]),\n \"min\": ([1, 1, 1], [4, 4, 4], [7, 7, 7]),\n \"max\": ([3, 3, 3], [6, 6, 6], [9, 9, 9]),\n \"first\": ([1, 2, 3], [4, 5, 6], [7, 8, 9]),\n \"dense\": ([1, 1, 1], [2, 2, 2], [3, 3, 3]),\n }\n ranks = exp_ranks[method]\n if na_option == \"top\":\n order = [ranks[1], ranks[0], ranks[2]]\n elif na_option == \"bottom\":\n order = [ranks[0], ranks[2], ranks[1]]\n else:\n order = [ranks[0], [np.nan] * chunk, ranks[1]]\n expected = order if ascending else order[::-1]\n expected = list(chain.from_iterable(expected))\n result = s.rank(method=method, na_option=na_option, ascending=ascending)\n tm.assert_series_equal(result, Series(expected, dtype=\"float64\"))\n\n for dtype, na_value, pos_inf, neg_inf in dtypes:\n in_arr = [neg_inf] * chunk + [na_value] * chunk + [pos_inf] * chunk\n iseries = Series(in_arr, dtype=dtype)\n if (dtype, method) in disabled:\n continue\n _check(iseries, method, na_option, ascending)\n\n def test_rank_desc_mix_nans_infs(self):\n # GH 19538\n # check descending ranking when mix nans and infs\n iseries = Series([1, np.nan, np.inf, -np.inf, 25])\n result = iseries.rank(ascending=False)\n exp = Series([3, np.nan, 1, 4, 2], dtype=\"float64\")\n tm.assert_series_equal(result, exp)\n\n def test_rank_methods_series(self):\n pytest.importorskip(\"scipy.stats.special\")\n rankdata = pytest.importorskip(\"scipy.stats.rankdata\")\n\n xs = np.random.randn(9)\n xs = np.concatenate([xs[i:] for i in range(0, 9, 2)]) # add duplicates\n np.random.shuffle(xs)\n\n index = [chr(ord(\"a\") + i) for i in range(len(xs))]\n\n for vals in [xs, xs + 1e6, xs * 1e-6]:\n ts = Series(vals, index=index)\n\n for m in [\"average\", \"min\", \"max\", \"first\", \"dense\"]:\n result = ts.rank(method=m)\n sprank = rankdata(vals, m if m != \"first\" else \"ordinal\")\n expected = Series(sprank, index=index).astype(\"float64\")\n tm.assert_series_equal(result, expected)\n\n def test_rank_dense_method(self):\n dtypes = [\"O\", \"f8\", \"i8\"]\n in_out = [\n ([1], [1]),\n ([2], [1]),\n ([0], [1]),\n ([2, 2], [1, 1]),\n ([1, 2, 3], [1, 2, 3]),\n ([4, 2, 1], [3, 2, 1]),\n ([1, 1, 5, 5, 3], [1, 1, 3, 3, 2]),\n ([-5, -4, -3, -2, -1], [1, 2, 3, 4, 5]),\n ]\n\n for ser, exp in in_out:\n for dtype in dtypes:\n s = Series(ser).astype(dtype)\n result = s.rank(method=\"dense\")\n expected = Series(exp).astype(result.dtype)\n tm.assert_series_equal(result, expected)\n\n def test_rank_descending(self):\n dtypes = [\"O\", \"f8\", \"i8\"]\n\n for dtype, method in product(dtypes, self.results):\n if \"i\" in dtype:\n s = self.s.dropna()\n else:\n s = self.s.astype(dtype)\n\n res = s.rank(ascending=False)\n expected = (s.max() - s).rank()\n tm.assert_series_equal(res, expected)\n\n if method == \"first\" and dtype == \"O\":\n continue\n\n expected = (s.max() - 
s).rank(method=method)\n res2 = s.rank(method=method, ascending=False)\n tm.assert_series_equal(res2, expected)\n\n def test_rank_int(self):\n s = self.s.dropna().astype(\"i8\")\n\n for method, res in self.results.items():\n result = s.rank(method=method)\n expected = Series(res).dropna()\n expected.index = result.index\n tm.assert_series_equal(result, expected)\n\n def test_rank_object_bug(self):\n # GH 13445\n\n # smoke tests\n Series([np.nan] * 32).astype(object).rank(ascending=True)\n Series([np.nan] * 32).astype(object).rank(ascending=False)\n\n def test_rank_modify_inplace(self):\n # GH 18521\n # Check rank does not mutate series\n s = Series([Timestamp(\"2017-01-05 10:20:27.569000\"), NaT])\n expected = s.copy()\n\n s.rank()\n result = s\n tm.assert_series_equal(result, expected)\n\n\n# GH15630, pct should be on 100% basis when method='dense'\n\n\[email protected](\"dtype\", [\"O\", \"f8\", \"i8\"])\[email protected](\n \"ser, exp\",\n [\n ([1], [1.0]),\n ([1, 2], [1.0 / 2, 2.0 / 2]),\n ([2, 2], [1.0, 1.0]),\n ([1, 2, 3], [1.0 / 3, 2.0 / 3, 3.0 / 3]),\n ([1, 2, 2], [1.0 / 2, 2.0 / 2, 2.0 / 2]),\n ([4, 2, 1], [3.0 / 3, 2.0 / 3, 1.0 / 3]),\n ([1, 1, 5, 5, 3], [1.0 / 3, 1.0 / 3, 3.0 / 3, 3.0 / 3, 2.0 / 3]),\n ([1, 1, 3, 3, 5, 5], [1.0 / 3, 1.0 / 3, 2.0 / 3, 2.0 / 3, 3.0 / 3, 3.0 / 3]),\n ([-5, -4, -3, -2, -1], [1.0 / 5, 2.0 / 5, 3.0 / 5, 4.0 / 5, 5.0 / 5]),\n ],\n)\ndef test_rank_dense_pct(dtype, ser, exp):\n s = Series(ser).astype(dtype)\n result = s.rank(method=\"dense\", pct=True)\n expected = Series(exp).astype(result.dtype)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"dtype\", [\"O\", \"f8\", \"i8\"])\[email protected](\n \"ser, exp\",\n [\n ([1], [1.0]),\n ([1, 2], [1.0 / 2, 2.0 / 2]),\n ([2, 2], [1.0 / 2, 1.0 / 2]),\n ([1, 2, 3], [1.0 / 3, 2.0 / 3, 3.0 / 3]),\n ([1, 2, 2], [1.0 / 3, 2.0 / 3, 2.0 / 3]),\n ([4, 2, 1], [3.0 / 3, 2.0 / 3, 1.0 / 3]),\n ([1, 1, 5, 5, 3], [1.0 / 5, 1.0 / 5, 4.0 / 5, 4.0 / 5, 3.0 / 5]),\n ([1, 1, 3, 3, 5, 5], [1.0 / 6, 1.0 / 6, 3.0 / 6, 3.0 / 6, 5.0 / 6, 5.0 / 6]),\n ([-5, -4, -3, -2, -1], [1.0 / 5, 2.0 / 5, 3.0 / 5, 4.0 / 5, 5.0 / 5]),\n ],\n)\ndef test_rank_min_pct(dtype, ser, exp):\n s = Series(ser).astype(dtype)\n result = s.rank(method=\"min\", pct=True)\n expected = Series(exp).astype(result.dtype)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"dtype\", [\"O\", \"f8\", \"i8\"])\[email protected](\n \"ser, exp\",\n [\n ([1], [1.0]),\n ([1, 2], [1.0 / 2, 2.0 / 2]),\n ([2, 2], [1.0, 1.0]),\n ([1, 2, 3], [1.0 / 3, 2.0 / 3, 3.0 / 3]),\n ([1, 2, 2], [1.0 / 3, 3.0 / 3, 3.0 / 3]),\n ([4, 2, 1], [3.0 / 3, 2.0 / 3, 1.0 / 3]),\n ([1, 1, 5, 5, 3], [2.0 / 5, 2.0 / 5, 5.0 / 5, 5.0 / 5, 3.0 / 5]),\n ([1, 1, 3, 3, 5, 5], [2.0 / 6, 2.0 / 6, 4.0 / 6, 4.0 / 6, 6.0 / 6, 6.0 / 6]),\n ([-5, -4, -3, -2, -1], [1.0 / 5, 2.0 / 5, 3.0 / 5, 4.0 / 5, 5.0 / 5]),\n ],\n)\ndef test_rank_max_pct(dtype, ser, exp):\n s = Series(ser).astype(dtype)\n result = s.rank(method=\"max\", pct=True)\n expected = Series(exp).astype(result.dtype)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"dtype\", [\"O\", \"f8\", \"i8\"])\[email protected](\n \"ser, exp\",\n [\n ([1], [1.0]),\n ([1, 2], [1.0 / 2, 2.0 / 2]),\n ([2, 2], [1.5 / 2, 1.5 / 2]),\n ([1, 2, 3], [1.0 / 3, 2.0 / 3, 3.0 / 3]),\n ([1, 2, 2], [1.0 / 3, 2.5 / 3, 2.5 / 3]),\n ([4, 2, 1], [3.0 / 3, 2.0 / 3, 1.0 / 3]),\n ([1, 1, 5, 5, 3], [1.5 / 5, 1.5 / 5, 4.5 / 5, 4.5 / 5, 3.0 / 5]),\n ([1, 1, 3, 3, 5, 5], [1.5 / 6, 1.5 / 6, 3.5 / 6, 3.5 / 6, 5.5 / 6, 5.5 / 6]),\n ([-5, -4, -3, 
-2, -1], [1.0 / 5, 2.0 / 5, 3.0 / 5, 4.0 / 5, 5.0 / 5]),\n ],\n)\ndef test_rank_average_pct(dtype, ser, exp):\n s = Series(ser).astype(dtype)\n result = s.rank(method=\"average\", pct=True)\n expected = Series(exp).astype(result.dtype)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"dtype\", [\"f8\", \"i8\"])\[email protected](\n \"ser, exp\",\n [\n ([1], [1.0]),\n ([1, 2], [1.0 / 2, 2.0 / 2]),\n ([2, 2], [1.0 / 2, 2.0 / 2.0]),\n ([1, 2, 3], [1.0 / 3, 2.0 / 3, 3.0 / 3]),\n ([1, 2, 2], [1.0 / 3, 2.0 / 3, 3.0 / 3]),\n ([4, 2, 1], [3.0 / 3, 2.0 / 3, 1.0 / 3]),\n ([1, 1, 5, 5, 3], [1.0 / 5, 2.0 / 5, 4.0 / 5, 5.0 / 5, 3.0 / 5]),\n ([1, 1, 3, 3, 5, 5], [1.0 / 6, 2.0 / 6, 3.0 / 6, 4.0 / 6, 5.0 / 6, 6.0 / 6]),\n ([-5, -4, -3, -2, -1], [1.0 / 5, 2.0 / 5, 3.0 / 5, 4.0 / 5, 5.0 / 5]),\n ],\n)\ndef test_rank_first_pct(dtype, ser, exp):\n s = Series(ser).astype(dtype)\n result = s.rank(method=\"first\", pct=True)\n expected = Series(exp).astype(result.dtype)\n tm.assert_series_equal(result, expected)\n\n\[email protected]\[email protected]_memory\ndef test_pct_max_many_rows():\n # GH 18271\n s = Series(np.arange(2 ** 24 + 1))\n result = s.rank(pct=True).max()\n assert result == 1\n",
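The rank tests above pin down the tie-breaking methods, na_option handling and pct scaling; their core expectations can be reproduced in a few lines, reusing the same Series as TestSeriesRank:

import numpy as np
import pandas as pd

s = pd.Series([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3])

# Ties share the average of their positions; NaN stays NaN by default.
print(s.rank(method="average").tolist())
# [1.5, 5.5, 7.0, 3.5, nan, 3.5, 1.5, 8.0, nan, 5.5]

# With method="dense", pct=True divides by the number of distinct values (GH15630).
print(s.rank(method="dense", pct=True).tolist())

# na_option="top" assigns the lowest ranks to the missing values.
print(s.rank(na_option="top").tolist())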
"from contextlib import contextmanager\nimport tracemalloc\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs import hashtable as ht\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.core.algorithms import isin\n\n\n@contextmanager\ndef activated_tracemalloc():\n tracemalloc.start()\n try:\n yield\n finally:\n tracemalloc.stop()\n\n\ndef get_allocated_khash_memory():\n snapshot = tracemalloc.take_snapshot()\n snapshot = snapshot.filter_traces(\n (tracemalloc.DomainFilter(True, ht.get_hashtable_trace_domain()),)\n )\n return sum(map(lambda x: x.size, snapshot.traces))\n\n\[email protected](\n \"table_type, dtype\",\n [\n (ht.PyObjectHashTable, np.object_),\n (ht.Complex128HashTable, np.complex128),\n (ht.Int64HashTable, np.int64),\n (ht.UInt64HashTable, np.uint64),\n (ht.Float64HashTable, np.float64),\n (ht.Complex64HashTable, np.complex64),\n (ht.Int32HashTable, np.int32),\n (ht.UInt32HashTable, np.uint32),\n (ht.Float32HashTable, np.float32),\n (ht.Int16HashTable, np.int16),\n (ht.UInt16HashTable, np.uint16),\n (ht.Int8HashTable, np.int8),\n (ht.UInt8HashTable, np.uint8),\n ],\n)\nclass TestHashTable:\n def test_get_set_contains_len(self, table_type, dtype):\n index = 5\n table = table_type(55)\n assert len(table) == 0\n assert index not in table\n\n table.set_item(index, 42)\n assert len(table) == 1\n assert index in table\n assert table.get_item(index) == 42\n\n table.set_item(index + 1, 41)\n assert index in table\n assert index + 1 in table\n assert len(table) == 2\n assert table.get_item(index) == 42\n assert table.get_item(index + 1) == 41\n\n table.set_item(index, 21)\n assert index in table\n assert index + 1 in table\n assert len(table) == 2\n assert table.get_item(index) == 21\n assert table.get_item(index + 1) == 41\n assert index + 2 not in table\n\n with pytest.raises(KeyError, match=str(index + 2)):\n table.get_item(index + 2)\n\n def test_map(self, table_type, dtype, writable):\n # PyObjectHashTable has no map-method\n if table_type != ht.PyObjectHashTable:\n N = 77\n table = table_type()\n keys = np.arange(N).astype(dtype)\n vals = np.arange(N).astype(np.int64) + N\n keys.flags.writeable = writable\n vals.flags.writeable = writable\n table.map(keys, vals)\n for i in range(N):\n assert table.get_item(keys[i]) == i + N\n\n def test_map_locations(self, table_type, dtype, writable):\n N = 8\n table = table_type()\n keys = (np.arange(N) + N).astype(dtype)\n keys.flags.writeable = writable\n table.map_locations(keys)\n for i in range(N):\n assert table.get_item(keys[i]) == i\n\n def test_lookup(self, table_type, dtype, writable):\n N = 3\n table = table_type()\n keys = (np.arange(N) + N).astype(dtype)\n keys.flags.writeable = writable\n table.map_locations(keys)\n result = table.lookup(keys)\n expected = np.arange(N)\n tm.assert_numpy_array_equal(result.astype(np.int64), expected.astype(np.int64))\n\n def test_lookup_wrong(self, table_type, dtype):\n if dtype in (np.int8, np.uint8):\n N = 100\n else:\n N = 512\n table = table_type()\n keys = (np.arange(N) + N).astype(dtype)\n table.map_locations(keys)\n wrong_keys = np.arange(N).astype(dtype)\n result = table.lookup(wrong_keys)\n assert np.all(result == -1)\n\n def test_unique(self, table_type, dtype, writable):\n if dtype in (np.int8, np.uint8):\n N = 88\n else:\n N = 1000\n table = table_type()\n expected = (np.arange(N) + N).astype(dtype)\n keys = np.repeat(expected, 5)\n keys.flags.writeable = writable\n unique = table.unique(keys)\n tm.assert_numpy_array_equal(unique, expected)\n\n def 
test_tracemalloc_works(self, table_type, dtype):\n if dtype in (np.int8, np.uint8):\n N = 256\n else:\n N = 30000\n keys = np.arange(N).astype(dtype)\n with activated_tracemalloc():\n table = table_type()\n table.map_locations(keys)\n used = get_allocated_khash_memory()\n my_size = table.sizeof()\n assert used == my_size\n del table\n assert get_allocated_khash_memory() == 0\n\n def test_tracemalloc_for_empty(self, table_type, dtype):\n with activated_tracemalloc():\n table = table_type()\n used = get_allocated_khash_memory()\n my_size = table.sizeof()\n assert used == my_size\n del table\n assert get_allocated_khash_memory() == 0\n\n def test_get_state(self, table_type, dtype):\n table = table_type(1000)\n state = table.get_state()\n assert state[\"size\"] == 0\n assert state[\"n_occupied\"] == 0\n assert \"n_buckets\" in state\n assert \"upper_bound\" in state\n\n def test_no_reallocation(self, table_type, dtype):\n for N in range(1, 110):\n keys = np.arange(N).astype(dtype)\n preallocated_table = table_type(N)\n n_buckets_start = preallocated_table.get_state()[\"n_buckets\"]\n preallocated_table.map_locations(keys)\n n_buckets_end = preallocated_table.get_state()[\"n_buckets\"]\n # original number of buckets was enough:\n assert n_buckets_start == n_buckets_end\n # check with clean table (not too much preallocated)\n clean_table = table_type()\n clean_table.map_locations(keys)\n assert n_buckets_start == clean_table.get_state()[\"n_buckets\"]\n\n\nclass TestPyObjectHashTableWithNans:\n def test_nan_float(self):\n nan1 = float(\"nan\")\n nan2 = float(\"nan\")\n assert nan1 is not nan2\n table = ht.PyObjectHashTable()\n table.set_item(nan1, 42)\n assert table.get_item(nan2) == 42\n\n def test_nan_complex_both(self):\n nan1 = complex(float(\"nan\"), float(\"nan\"))\n nan2 = complex(float(\"nan\"), float(\"nan\"))\n assert nan1 is not nan2\n table = ht.PyObjectHashTable()\n table.set_item(nan1, 42)\n assert table.get_item(nan2) == 42\n\n def test_nan_complex_real(self):\n nan1 = complex(float(\"nan\"), 1)\n nan2 = complex(float(\"nan\"), 1)\n other = complex(float(\"nan\"), 2)\n assert nan1 is not nan2\n table = ht.PyObjectHashTable()\n table.set_item(nan1, 42)\n assert table.get_item(nan2) == 42\n with pytest.raises(KeyError, match=None) as error:\n table.get_item(other)\n assert str(error.value) == str(other)\n\n def test_nan_complex_imag(self):\n nan1 = complex(1, float(\"nan\"))\n nan2 = complex(1, float(\"nan\"))\n other = complex(2, float(\"nan\"))\n assert nan1 is not nan2\n table = ht.PyObjectHashTable()\n table.set_item(nan1, 42)\n assert table.get_item(nan2) == 42\n with pytest.raises(KeyError, match=None) as error:\n table.get_item(other)\n assert str(error.value) == str(other)\n\n def test_nan_in_tuple(self):\n nan1 = (float(\"nan\"),)\n nan2 = (float(\"nan\"),)\n assert nan1[0] is not nan2[0]\n table = ht.PyObjectHashTable()\n table.set_item(nan1, 42)\n assert table.get_item(nan2) == 42\n\n def test_nan_in_nested_tuple(self):\n nan1 = (1, (2, (float(\"nan\"),)))\n nan2 = (1, (2, (float(\"nan\"),)))\n other = (1, 2)\n table = ht.PyObjectHashTable()\n table.set_item(nan1, 42)\n assert table.get_item(nan2) == 42\n with pytest.raises(KeyError, match=None) as error:\n table.get_item(other)\n assert str(error.value) == str(other)\n\n\ndef test_hash_equal_tuple_with_nans():\n a = (float(\"nan\"), (float(\"nan\"), float(\"nan\")))\n b = (float(\"nan\"), (float(\"nan\"), float(\"nan\")))\n assert ht.object_hash(a) == ht.object_hash(b)\n assert ht.objects_are_equal(a, b)\n\n\ndef 
test_get_labels_groupby_for_Int64(writable):\n table = ht.Int64HashTable()\n vals = np.array([1, 2, -1, 2, 1, -1], dtype=np.int64)\n vals.flags.writeable = writable\n arr, unique = table.get_labels_groupby(vals)\n expected_arr = np.array([0, 1, -1, 1, 0, -1], dtype=np.int64)\n expected_unique = np.array([1, 2], dtype=np.int64)\n tm.assert_numpy_array_equal(arr.astype(np.int64), expected_arr)\n tm.assert_numpy_array_equal(unique, expected_unique)\n\n\ndef test_tracemalloc_works_for_StringHashTable():\n N = 1000\n keys = np.arange(N).astype(np.compat.unicode).astype(np.object_)\n with activated_tracemalloc():\n table = ht.StringHashTable()\n table.map_locations(keys)\n used = get_allocated_khash_memory()\n my_size = table.sizeof()\n assert used == my_size\n del table\n assert get_allocated_khash_memory() == 0\n\n\ndef test_tracemalloc_for_empty_StringHashTable():\n with activated_tracemalloc():\n table = ht.StringHashTable()\n used = get_allocated_khash_memory()\n my_size = table.sizeof()\n assert used == my_size\n del table\n assert get_allocated_khash_memory() == 0\n\n\ndef test_no_reallocation_StringHashTable():\n for N in range(1, 110):\n keys = np.arange(N).astype(np.compat.unicode).astype(np.object_)\n preallocated_table = ht.StringHashTable(N)\n n_buckets_start = preallocated_table.get_state()[\"n_buckets\"]\n preallocated_table.map_locations(keys)\n n_buckets_end = preallocated_table.get_state()[\"n_buckets\"]\n # original number of buckets was enough:\n assert n_buckets_start == n_buckets_end\n # check with clean table (not too much preallocated)\n clean_table = ht.StringHashTable()\n clean_table.map_locations(keys)\n assert n_buckets_start == clean_table.get_state()[\"n_buckets\"]\n\n\[email protected](\n \"table_type, dtype\",\n [\n (ht.Float64HashTable, np.float64),\n (ht.Float32HashTable, np.float32),\n (ht.Complex128HashTable, np.complex128),\n (ht.Complex64HashTable, np.complex64),\n ],\n)\nclass TestHashTableWithNans:\n def test_get_set_contains_len(self, table_type, dtype):\n index = float(\"nan\")\n table = table_type()\n assert index not in table\n\n table.set_item(index, 42)\n assert len(table) == 1\n assert index in table\n assert table.get_item(index) == 42\n\n table.set_item(index, 41)\n assert len(table) == 1\n assert index in table\n assert table.get_item(index) == 41\n\n def test_map(self, table_type, dtype):\n N = 332\n table = table_type()\n keys = np.full(N, np.nan, dtype=dtype)\n vals = (np.arange(N) + N).astype(np.int64)\n table.map(keys, vals)\n assert len(table) == 1\n assert table.get_item(np.nan) == 2 * N - 1\n\n def test_map_locations(self, table_type, dtype):\n N = 10\n table = table_type()\n keys = np.full(N, np.nan, dtype=dtype)\n table.map_locations(keys)\n assert len(table) == 1\n assert table.get_item(np.nan) == N - 1\n\n def test_unique(self, table_type, dtype):\n N = 1020\n table = table_type()\n keys = np.full(N, np.nan, dtype=dtype)\n unique = table.unique(keys)\n assert np.all(np.isnan(unique)) and len(unique) == 1\n\n\ndef test_unique_for_nan_objects_floats():\n table = ht.PyObjectHashTable()\n keys = np.array([float(\"nan\") for i in range(50)], dtype=np.object_)\n unique = table.unique(keys)\n assert len(unique) == 1\n\n\ndef test_unique_for_nan_objects_complex():\n table = ht.PyObjectHashTable()\n keys = np.array([complex(float(\"nan\"), 1.0) for i in range(50)], dtype=np.object_)\n unique = table.unique(keys)\n assert len(unique) == 1\n\n\ndef test_unique_for_nan_objects_tuple():\n table = ht.PyObjectHashTable()\n keys = np.array(\n [1] + 
[(1.0, (float(\"nan\"), 1.0)) for i in range(50)], dtype=np.object_\n )\n unique = table.unique(keys)\n assert len(unique) == 2\n\n\ndef get_ht_function(fun_name, type_suffix):\n return getattr(ht, fun_name)\n\n\[email protected](\n \"dtype, type_suffix\",\n [\n (np.object_, \"object\"),\n (np.complex128, \"complex128\"),\n (np.int64, \"int64\"),\n (np.uint64, \"uint64\"),\n (np.float64, \"float64\"),\n (np.complex64, \"complex64\"),\n (np.int32, \"int32\"),\n (np.uint32, \"uint32\"),\n (np.float32, \"float32\"),\n (np.int16, \"int16\"),\n (np.uint16, \"uint16\"),\n (np.int8, \"int8\"),\n (np.uint8, \"uint8\"),\n ],\n)\nclass TestHelpFunctions:\n def test_value_count(self, dtype, type_suffix, writable):\n N = 43\n value_count = get_ht_function(\"value_count\", type_suffix)\n expected = (np.arange(N) + N).astype(dtype)\n values = np.repeat(expected, 5)\n values.flags.writeable = writable\n keys, counts = value_count(values, False)\n tm.assert_numpy_array_equal(np.sort(keys), expected)\n assert np.all(counts == 5)\n\n def test_value_count_stable(self, dtype, type_suffix, writable):\n # GH12679\n value_count = get_ht_function(\"value_count\", type_suffix)\n values = np.array([2, 1, 5, 22, 3, -1, 8]).astype(dtype)\n values.flags.writeable = writable\n keys, counts = value_count(values, False)\n tm.assert_numpy_array_equal(keys, values)\n assert np.all(counts == 1)\n\n def test_duplicated_first(self, dtype, type_suffix, writable):\n N = 100\n duplicated = get_ht_function(\"duplicated\", type_suffix)\n values = np.repeat(np.arange(N).astype(dtype), 5)\n values.flags.writeable = writable\n result = duplicated(values)\n expected = np.ones_like(values, dtype=np.bool_)\n expected[::5] = False\n tm.assert_numpy_array_equal(result, expected)\n\n def test_ismember_yes(self, dtype, type_suffix, writable):\n N = 127\n ismember = get_ht_function(\"ismember\", type_suffix)\n arr = np.arange(N).astype(dtype)\n values = np.arange(N).astype(dtype)\n arr.flags.writeable = writable\n values.flags.writeable = writable\n result = ismember(arr, values)\n expected = np.ones_like(values, dtype=np.bool_)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_ismember_no(self, dtype, type_suffix):\n N = 17\n ismember = get_ht_function(\"ismember\", type_suffix)\n arr = np.arange(N).astype(dtype)\n values = (np.arange(N) + N).astype(dtype)\n result = ismember(arr, values)\n expected = np.zeros_like(values, dtype=np.bool_)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_mode(self, dtype, type_suffix, writable):\n if dtype in (np.int8, np.uint8):\n N = 53\n else:\n N = 11111\n mode = get_ht_function(\"mode\", type_suffix)\n values = np.repeat(np.arange(N).astype(dtype), 5)\n values[0] = 42\n values.flags.writeable = writable\n result = mode(values, False)\n assert result == 42\n\n def test_mode_stable(self, dtype, type_suffix, writable):\n mode = get_ht_function(\"mode\", type_suffix)\n values = np.array([2, 1, 5, 22, 3, -1, 8]).astype(dtype)\n values.flags.writeable = writable\n keys = mode(values, False)\n tm.assert_numpy_array_equal(keys, values)\n\n\ndef test_modes_with_nans():\n # GH39007\n values = np.array([True, pd.NA, np.nan], dtype=np.object_)\n # pd.Na and np.nan will have the same representative: np.nan\n # thus we have 2 nans and 1 True\n modes = ht.mode(values, False)\n assert modes.size == 1\n assert np.isnan(modes[0])\n\n\[email protected](\n \"dtype, type_suffix\",\n [\n (np.float64, \"float64\"),\n (np.float32, \"float32\"),\n (np.complex128, \"complex128\"),\n (np.complex64, 
\"complex64\"),\n ],\n)\nclass TestHelpFunctionsWithNans:\n def test_value_count(self, dtype, type_suffix):\n value_count = get_ht_function(\"value_count\", type_suffix)\n values = np.array([np.nan, np.nan, np.nan], dtype=dtype)\n keys, counts = value_count(values, True)\n assert len(keys) == 0\n keys, counts = value_count(values, False)\n assert len(keys) == 1 and np.all(np.isnan(keys))\n assert counts[0] == 3\n\n def test_duplicated_first(self, dtype, type_suffix):\n duplicated = get_ht_function(\"duplicated\", type_suffix)\n values = np.array([np.nan, np.nan, np.nan], dtype=dtype)\n result = duplicated(values)\n expected = np.array([False, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n def test_ismember_yes(self, dtype, type_suffix):\n ismember = get_ht_function(\"ismember\", type_suffix)\n arr = np.array([np.nan, np.nan, np.nan], dtype=dtype)\n values = np.array([np.nan, np.nan], dtype=dtype)\n result = ismember(arr, values)\n expected = np.array([True, True, True], dtype=np.bool_)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_ismember_no(self, dtype, type_suffix):\n ismember = get_ht_function(\"ismember\", type_suffix)\n arr = np.array([np.nan, np.nan, np.nan], dtype=dtype)\n values = np.array([1], dtype=dtype)\n result = ismember(arr, values)\n expected = np.array([False, False, False], dtype=np.bool_)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_mode(self, dtype, type_suffix):\n mode = get_ht_function(\"mode\", type_suffix)\n values = np.array([42, np.nan, np.nan, np.nan], dtype=dtype)\n assert mode(values, True) == 42\n assert np.isnan(mode(values, False))\n\n\ndef test_ismember_tuple_with_nans():\n # GH-41836\n values = [(\"a\", float(\"nan\")), (\"b\", 1)]\n comps = [(\"a\", float(\"nan\"))]\n result = isin(values, comps)\n expected = np.array([True, False], dtype=np.bool_)\n tm.assert_numpy_array_equal(result, expected)\n\n\ndef test_float_complex_int_are_equal_as_objects():\n values = [\"a\", 5, 5.0, 5.0 + 0j]\n comps = list(range(129))\n result = isin(values, comps)\n expected = np.array([False, True, True, True], dtype=np.bool_)\n tm.assert_numpy_array_equal(result, expected)\n",
"from datetime import (\n datetime,\n timedelta,\n)\nimport operator\n\nimport numpy as np\nimport pytest\nimport pytz\n\nfrom pandas._libs.tslibs import iNaT\nimport pandas.compat as compat\n\nfrom pandas.core.dtypes.common import is_datetime64_any_dtype\n\nfrom pandas import (\n DatetimeIndex,\n DatetimeTZDtype,\n Index,\n NaT,\n Period,\n Series,\n Timedelta,\n TimedeltaIndex,\n Timestamp,\n isna,\n offsets,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays import (\n DatetimeArray,\n PeriodArray,\n TimedeltaArray,\n)\nfrom pandas.core.ops import roperator\n\n\[email protected](\n \"nat,idx\",\n [\n (Timestamp(\"NaT\"), DatetimeIndex),\n (Timedelta(\"NaT\"), TimedeltaIndex),\n (Period(\"NaT\", freq=\"M\"), PeriodArray),\n ],\n)\ndef test_nat_fields(nat, idx):\n\n for field in idx._field_ops:\n # weekday is a property of DTI, but a method\n # on NaT/Timestamp for compat with datetime\n if field == \"weekday\":\n continue\n\n result = getattr(NaT, field)\n assert np.isnan(result)\n\n result = getattr(nat, field)\n assert np.isnan(result)\n\n for field in idx._bool_ops:\n\n result = getattr(NaT, field)\n assert result is False\n\n result = getattr(nat, field)\n assert result is False\n\n\ndef test_nat_vector_field_access():\n idx = DatetimeIndex([\"1/1/2000\", None, None, \"1/4/2000\"])\n\n for field in DatetimeIndex._field_ops:\n # weekday is a property of DTI, but a method\n # on NaT/Timestamp for compat with datetime\n if field == \"weekday\":\n continue\n if field in [\"week\", \"weekofyear\"]:\n # GH#33595 Deprecate week and weekofyear\n continue\n\n result = getattr(idx, field)\n expected = Index([getattr(x, field) for x in idx])\n tm.assert_index_equal(result, expected)\n\n ser = Series(idx)\n\n for field in DatetimeIndex._field_ops:\n # weekday is a property of DTI, but a method\n # on NaT/Timestamp for compat with datetime\n if field == \"weekday\":\n continue\n if field in [\"week\", \"weekofyear\"]:\n # GH#33595 Deprecate week and weekofyear\n continue\n\n result = getattr(ser.dt, field)\n expected = [getattr(x, field) for x in idx]\n tm.assert_series_equal(result, Series(expected))\n\n for field in DatetimeIndex._bool_ops:\n result = getattr(ser.dt, field)\n expected = [getattr(x, field) for x in idx]\n tm.assert_series_equal(result, Series(expected))\n\n\[email protected](\"klass\", [Timestamp, Timedelta, Period])\[email protected](\"value\", [None, np.nan, iNaT, float(\"nan\"), NaT, \"NaT\", \"nat\"])\ndef test_identity(klass, value):\n assert klass(value) is NaT\n\n\[email protected](\"klass\", [Timestamp, Timedelta, Period])\[email protected](\"value\", [\"\", \"nat\", \"NAT\", None, np.nan])\ndef test_equality(klass, value):\n if klass is Period and value == \"\":\n pytest.skip(\"Period cannot parse empty string\")\n\n assert klass(value).value == iNaT\n\n\[email protected](\"klass\", [Timestamp, Timedelta])\[email protected](\"method\", [\"round\", \"floor\", \"ceil\"])\[email protected](\"freq\", [\"s\", \"5s\", \"min\", \"5min\", \"h\", \"5h\"])\ndef test_round_nat(klass, method, freq):\n # see gh-14940\n ts = klass(\"nat\")\n\n round_method = getattr(ts, method)\n assert round_method(freq) is ts\n\n\[email protected](\n \"method\",\n [\n \"astimezone\",\n \"combine\",\n \"ctime\",\n \"dst\",\n \"fromordinal\",\n \"fromtimestamp\",\n pytest.param(\n \"fromisocalendar\",\n marks=pytest.mark.skipif(\n not compat.PY38,\n reason=\"'fromisocalendar' was added in stdlib datetime in python 3.8\",\n ),\n ),\n \"isocalendar\",\n \"strftime\",\n \"strptime\",\n 
\"time\",\n \"timestamp\",\n \"timetuple\",\n \"timetz\",\n \"toordinal\",\n \"tzname\",\n \"utcfromtimestamp\",\n \"utcnow\",\n \"utcoffset\",\n \"utctimetuple\",\n \"timestamp\",\n ],\n)\ndef test_nat_methods_raise(method):\n # see gh-9513, gh-17329\n msg = f\"NaTType does not support {method}\"\n\n with pytest.raises(ValueError, match=msg):\n getattr(NaT, method)()\n\n\[email protected](\"method\", [\"weekday\", \"isoweekday\"])\ndef test_nat_methods_nan(method):\n # see gh-9513, gh-17329\n assert np.isnan(getattr(NaT, method)())\n\n\[email protected](\n \"method\", [\"date\", \"now\", \"replace\", \"today\", \"tz_convert\", \"tz_localize\"]\n)\ndef test_nat_methods_nat(method):\n # see gh-8254, gh-9513, gh-17329\n assert getattr(NaT, method)() is NaT\n\n\[email protected](\n \"get_nat\", [lambda x: NaT, lambda x: Timedelta(x), lambda x: Timestamp(x)]\n)\ndef test_nat_iso_format(get_nat):\n # see gh-12300\n assert get_nat(\"NaT\").isoformat() == \"NaT\"\n\n\[email protected](\n \"klass,expected\",\n [\n (Timestamp, [\"freqstr\", \"normalize\", \"to_julian_date\", \"to_period\", \"tz\"]),\n (\n Timedelta,\n [\n \"components\",\n \"delta\",\n \"is_populated\",\n \"resolution_string\",\n \"to_pytimedelta\",\n \"to_timedelta64\",\n \"view\",\n ],\n ),\n ],\n)\ndef test_missing_public_nat_methods(klass, expected):\n # see gh-17327\n #\n # NaT should have *most* of the Timestamp and Timedelta methods.\n # Here, we check which public methods NaT does not have. We\n # ignore any missing private methods.\n nat_names = dir(NaT)\n klass_names = dir(klass)\n\n missing = [x for x in klass_names if x not in nat_names and not x.startswith(\"_\")]\n missing.sort()\n\n assert missing == expected\n\n\ndef _get_overlap_public_nat_methods(klass, as_tuple=False):\n \"\"\"\n Get overlapping public methods between NaT and another class.\n\n Parameters\n ----------\n klass : type\n The class to compare with NaT\n as_tuple : bool, default False\n Whether to return a list of tuples of the form (klass, method).\n\n Returns\n -------\n overlap : list\n \"\"\"\n nat_names = dir(NaT)\n klass_names = dir(klass)\n\n overlap = [\n x\n for x in nat_names\n if x in klass_names and not x.startswith(\"_\") and callable(getattr(klass, x))\n ]\n\n # Timestamp takes precedence over Timedelta in terms of overlap.\n if klass is Timedelta:\n ts_names = dir(Timestamp)\n overlap = [x for x in overlap if x not in ts_names]\n\n if as_tuple:\n overlap = [(klass, method) for method in overlap]\n\n overlap.sort()\n return overlap\n\n\[email protected](\n \"klass,expected\",\n [\n (\n Timestamp,\n [\n \"astimezone\",\n \"ceil\",\n \"combine\",\n \"ctime\",\n \"date\",\n \"day_name\",\n \"dst\",\n \"floor\",\n \"fromisocalendar\",\n \"fromisoformat\",\n \"fromordinal\",\n \"fromtimestamp\",\n \"isocalendar\",\n \"isoformat\",\n \"isoweekday\",\n \"month_name\",\n \"now\",\n \"replace\",\n \"round\",\n \"strftime\",\n \"strptime\",\n \"time\",\n \"timestamp\",\n \"timetuple\",\n \"timetz\",\n \"to_datetime64\",\n \"to_numpy\",\n \"to_pydatetime\",\n \"today\",\n \"toordinal\",\n \"tz_convert\",\n \"tz_localize\",\n \"tzname\",\n \"utcfromtimestamp\",\n \"utcnow\",\n \"utcoffset\",\n \"utctimetuple\",\n \"weekday\",\n ],\n ),\n (Timedelta, [\"total_seconds\"]),\n ],\n)\ndef test_overlap_public_nat_methods(klass, expected):\n # see gh-17327\n #\n # NaT should have *most* of the Timestamp and Timedelta methods.\n # In case when Timestamp, Timedelta, and NaT are overlap, the overlap\n # is considered to be with Timestamp and NaT, not 
Timedelta.\n\n # \"fromisocalendar\" was introduced in 3.8\n if klass is Timestamp and not compat.PY38:\n expected.remove(\"fromisocalendar\")\n\n assert _get_overlap_public_nat_methods(klass) == expected\n\n\[email protected](\n \"compare\",\n (\n _get_overlap_public_nat_methods(Timestamp, True)\n + _get_overlap_public_nat_methods(Timedelta, True)\n ),\n)\ndef test_nat_doc_strings(compare):\n # see gh-17327\n #\n # The docstrings for overlapping methods should match.\n klass, method = compare\n klass_doc = getattr(klass, method).__doc__\n\n nat_doc = getattr(NaT, method).__doc__\n assert klass_doc == nat_doc\n\n\n_ops = {\n \"left_plus_right\": lambda a, b: a + b,\n \"right_plus_left\": lambda a, b: b + a,\n \"left_minus_right\": lambda a, b: a - b,\n \"right_minus_left\": lambda a, b: b - a,\n \"left_times_right\": lambda a, b: a * b,\n \"right_times_left\": lambda a, b: b * a,\n \"left_div_right\": lambda a, b: a / b,\n \"right_div_left\": lambda a, b: b / a,\n}\n\n\[email protected](\"op_name\", list(_ops.keys()))\[email protected](\n \"value,val_type\",\n [\n (2, \"scalar\"),\n (1.5, \"floating\"),\n (np.nan, \"floating\"),\n (\"foo\", \"str\"),\n (timedelta(3600), \"timedelta\"),\n (Timedelta(\"5s\"), \"timedelta\"),\n (datetime(2014, 1, 1), \"timestamp\"),\n (Timestamp(\"2014-01-01\"), \"timestamp\"),\n (Timestamp(\"2014-01-01\", tz=\"UTC\"), \"timestamp\"),\n (Timestamp(\"2014-01-01\", tz=\"US/Eastern\"), \"timestamp\"),\n (pytz.timezone(\"Asia/Tokyo\").localize(datetime(2014, 1, 1)), \"timestamp\"),\n ],\n)\ndef test_nat_arithmetic_scalar(op_name, value, val_type):\n # see gh-6873\n invalid_ops = {\n \"scalar\": {\"right_div_left\"},\n \"floating\": {\n \"right_div_left\",\n \"left_minus_right\",\n \"right_minus_left\",\n \"left_plus_right\",\n \"right_plus_left\",\n },\n \"str\": set(_ops.keys()),\n \"timedelta\": {\"left_times_right\", \"right_times_left\"},\n \"timestamp\": {\n \"left_times_right\",\n \"right_times_left\",\n \"left_div_right\",\n \"right_div_left\",\n },\n }\n\n op = _ops[op_name]\n\n if op_name in invalid_ops.get(val_type, set()):\n if (\n val_type == \"timedelta\"\n and \"times\" in op_name\n and isinstance(value, Timedelta)\n ):\n typs = \"(Timedelta|NaTType)\"\n msg = rf\"unsupported operand type\\(s\\) for \\*: '{typs}' and '{typs}'\"\n elif val_type == \"str\":\n # un-specific check here because the message comes from str\n # and varies by method\n msg = \"|\".join(\n [\n \"can only concatenate str\",\n \"unsupported operand type\",\n \"can't multiply sequence\",\n \"Can't convert 'NaTType'\",\n \"must be str, not NaTType\",\n ]\n )\n else:\n msg = \"unsupported operand type\"\n\n with pytest.raises(TypeError, match=msg):\n op(NaT, value)\n else:\n if val_type == \"timedelta\" and \"div\" in op_name:\n expected = np.nan\n else:\n expected = NaT\n\n assert op(NaT, value) is expected\n\n\[email protected](\n \"val,expected\", [(np.nan, NaT), (NaT, np.nan), (np.timedelta64(\"NaT\"), np.nan)]\n)\ndef test_nat_rfloordiv_timedelta(val, expected):\n # see gh-#18846\n #\n # See also test_timedelta.TestTimedeltaArithmetic.test_floordiv\n td = Timedelta(hours=3, minutes=4)\n assert td // val is expected\n\n\[email protected](\n \"op_name\",\n [\"left_plus_right\", \"right_plus_left\", \"left_minus_right\", \"right_minus_left\"],\n)\[email protected](\n \"value\",\n [\n DatetimeIndex([\"2011-01-01\", \"2011-01-02\"], name=\"x\"),\n DatetimeIndex([\"2011-01-01\", \"2011-01-02\"], tz=\"US/Eastern\", name=\"x\"),\n DatetimeArray._from_sequence([\"2011-01-01\", 
\"2011-01-02\"]),\n DatetimeArray._from_sequence(\n [\"2011-01-01\", \"2011-01-02\"], dtype=DatetimeTZDtype(tz=\"US/Pacific\")\n ),\n TimedeltaIndex([\"1 day\", \"2 day\"], name=\"x\"),\n ],\n)\ndef test_nat_arithmetic_index(op_name, value):\n # see gh-11718\n exp_name = \"x\"\n exp_data = [NaT] * 2\n\n if is_datetime64_any_dtype(value.dtype) and \"plus\" in op_name:\n expected = DatetimeIndex(exp_data, tz=value.tz, name=exp_name)\n else:\n expected = TimedeltaIndex(exp_data, name=exp_name)\n\n if not isinstance(value, Index):\n expected = expected.array\n\n op = _ops[op_name]\n result = op(NaT, value)\n tm.assert_equal(result, expected)\n\n\[email protected](\n \"op_name\",\n [\"left_plus_right\", \"right_plus_left\", \"left_minus_right\", \"right_minus_left\"],\n)\[email protected](\"box\", [TimedeltaIndex, Series, TimedeltaArray._from_sequence])\ndef test_nat_arithmetic_td64_vector(op_name, box):\n # see gh-19124\n vec = box([\"1 day\", \"2 day\"], dtype=\"timedelta64[ns]\")\n box_nat = box([NaT, NaT], dtype=\"timedelta64[ns]\")\n tm.assert_equal(_ops[op_name](vec, NaT), box_nat)\n\n\[email protected](\n \"dtype,op,out_dtype\",\n [\n (\"datetime64[ns]\", operator.add, \"datetime64[ns]\"),\n (\"datetime64[ns]\", roperator.radd, \"datetime64[ns]\"),\n (\"datetime64[ns]\", operator.sub, \"timedelta64[ns]\"),\n (\"datetime64[ns]\", roperator.rsub, \"timedelta64[ns]\"),\n (\"timedelta64[ns]\", operator.add, \"datetime64[ns]\"),\n (\"timedelta64[ns]\", roperator.radd, \"datetime64[ns]\"),\n (\"timedelta64[ns]\", operator.sub, \"datetime64[ns]\"),\n (\"timedelta64[ns]\", roperator.rsub, \"timedelta64[ns]\"),\n ],\n)\ndef test_nat_arithmetic_ndarray(dtype, op, out_dtype):\n other = np.arange(10).astype(dtype)\n result = op(NaT, other)\n\n expected = np.empty(other.shape, dtype=out_dtype)\n expected.fill(\"NaT\")\n tm.assert_numpy_array_equal(result, expected)\n\n\ndef test_nat_pinned_docstrings():\n # see gh-17327\n assert NaT.ctime.__doc__ == datetime.ctime.__doc__\n\n\ndef test_to_numpy_alias():\n # GH 24653: alias .to_numpy() for scalars\n expected = NaT.to_datetime64()\n result = NaT.to_numpy()\n\n assert isna(expected) and isna(result)\n\n\[email protected](\n \"other\",\n [\n Timedelta(0),\n Timedelta(0).to_pytimedelta(),\n pytest.param(\n Timedelta(0).to_timedelta64(),\n marks=pytest.mark.xfail(\n reason=\"td64 doesn't return NotImplemented, see numpy#17017\"\n ),\n ),\n Timestamp(0),\n Timestamp(0).to_pydatetime(),\n pytest.param(\n Timestamp(0).to_datetime64(),\n marks=pytest.mark.xfail(\n reason=\"dt64 doesn't return NotImplemented, see numpy#17017\"\n ),\n ),\n Timestamp(0).tz_localize(\"UTC\"),\n NaT,\n ],\n)\ndef test_nat_comparisons(compare_operators_no_eq_ne, other):\n # GH 26039\n opname = compare_operators_no_eq_ne\n\n assert getattr(NaT, opname)(other) is False\n\n op = getattr(operator, opname.strip(\"_\"))\n assert op(NaT, other) is False\n assert op(other, NaT) is False\n\n\[email protected](\"other\", [np.timedelta64(0, \"ns\"), np.datetime64(\"now\", \"ns\")])\ndef test_nat_comparisons_numpy(other):\n # Once numpy#17017 is fixed and the xfailed cases in test_nat_comparisons\n # pass, this test can be removed\n assert not NaT == other\n assert NaT != other\n assert not NaT < other\n assert not NaT > other\n assert not NaT <= other\n assert not NaT >= other\n\n\[email protected](\"other_and_type\", [(\"foo\", \"str\"), (2, \"int\"), (2.0, \"float\")])\[email protected](\n \"symbol_and_op\",\n [(\"<=\", operator.le), (\"<\", operator.lt), (\">=\", operator.ge), (\">\", 
operator.gt)],\n)\ndef test_nat_comparisons_invalid(other_and_type, symbol_and_op):\n # GH#35585\n other, other_type = other_and_type\n symbol, op = symbol_and_op\n\n assert not NaT == other\n assert not other == NaT\n\n assert NaT != other\n assert other != NaT\n\n msg = f\"'{symbol}' not supported between instances of 'NaTType' and '{other_type}'\"\n with pytest.raises(TypeError, match=msg):\n op(NaT, other)\n\n msg = f\"'{symbol}' not supported between instances of '{other_type}' and 'NaTType'\"\n with pytest.raises(TypeError, match=msg):\n op(other, NaT)\n\n\[email protected](\n \"other\",\n [\n np.array([\"foo\"] * 2, dtype=object),\n np.array([2, 3], dtype=\"int64\"),\n np.array([2.0, 3.5], dtype=\"float64\"),\n ],\n ids=[\"str\", \"int\", \"float\"],\n)\ndef test_nat_comparisons_invalid_ndarray(other):\n # GH#40722\n expected = np.array([False, False])\n result = NaT == other\n tm.assert_numpy_array_equal(result, expected)\n result = other == NaT\n tm.assert_numpy_array_equal(result, expected)\n\n expected = np.array([True, True])\n result = NaT != other\n tm.assert_numpy_array_equal(result, expected)\n result = other != NaT\n tm.assert_numpy_array_equal(result, expected)\n\n for symbol, op in [\n (\"<=\", operator.le),\n (\"<\", operator.lt),\n (\">=\", operator.ge),\n (\">\", operator.gt),\n ]:\n msg = f\"'{symbol}' not supported between\"\n\n with pytest.raises(TypeError, match=msg):\n op(NaT, other)\n\n if other.dtype == np.dtype(\"object\"):\n # uses the reverse operator, so symbol changes\n msg = None\n with pytest.raises(TypeError, match=msg):\n op(other, NaT)\n\n\ndef test_compare_date():\n # GH#39151 comparing NaT with date object is deprecated\n # See also: tests.scalar.timestamps.test_comparisons::test_compare_date\n\n dt = Timestamp.now().to_pydatetime().date()\n\n for left, right in [(NaT, dt), (dt, NaT)]:\n assert not left == right\n assert left != right\n\n with tm.assert_produces_warning(FutureWarning):\n assert not left < right\n with tm.assert_produces_warning(FutureWarning):\n assert not left <= right\n with tm.assert_produces_warning(FutureWarning):\n assert not left > right\n with tm.assert_produces_warning(FutureWarning):\n assert not left >= right\n\n # Once the deprecation is enforced, the following assertions\n # can be enabled:\n # assert not left == right\n # assert left != right\n #\n # with pytest.raises(TypeError):\n # left < right\n # with pytest.raises(TypeError):\n # left <= right\n # with pytest.raises(TypeError):\n # left > right\n # with pytest.raises(TypeError):\n # left >= right\n\n\[email protected](\n \"obj\",\n [\n offsets.YearEnd(2),\n offsets.YearBegin(2),\n offsets.MonthBegin(1),\n offsets.MonthEnd(2),\n offsets.MonthEnd(12),\n offsets.Day(2),\n offsets.Day(5),\n offsets.Hour(24),\n offsets.Hour(3),\n offsets.Minute(),\n np.timedelta64(3, \"h\"),\n np.timedelta64(4, \"h\"),\n np.timedelta64(3200, \"s\"),\n np.timedelta64(3600, \"s\"),\n np.timedelta64(3600 * 24, \"s\"),\n np.timedelta64(2, \"D\"),\n np.timedelta64(365, \"D\"),\n timedelta(-2),\n timedelta(365),\n timedelta(minutes=120),\n timedelta(days=4, minutes=180),\n timedelta(hours=23),\n timedelta(hours=23, minutes=30),\n timedelta(hours=48),\n ],\n)\ndef test_nat_addsub_tdlike_scalar(obj):\n assert NaT + obj is NaT\n assert obj + NaT is NaT\n assert NaT - obj is NaT\n\n\ndef test_pickle():\n # GH#4606\n p = tm.round_trip_pickle(NaT)\n assert p is NaT\n",
"from __future__ import annotations\n\nfrom datetime import (\n datetime,\n timedelta,\n)\nimport operator\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Sequence,\n TypeVar,\n Union,\n cast,\n overload,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import (\n algos,\n lib,\n)\nfrom pandas._libs.tslibs import (\n BaseOffset,\n IncompatibleFrequency,\n NaT,\n NaTType,\n Period,\n Resolution,\n Tick,\n Timestamp,\n delta_to_nanoseconds,\n iNaT,\n to_offset,\n)\nfrom pandas._libs.tslibs.fields import (\n RoundTo,\n round_nsint64,\n)\nfrom pandas._libs.tslibs.timestamps import integer_op_not_supported\nfrom pandas._typing import (\n ArrayLike,\n DatetimeLikeScalar,\n Dtype,\n DtypeObj,\n NpDtype,\n PositionalIndexer2D,\n)\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import (\n AbstractMethodError,\n NullFrequencyError,\n PerformanceWarning,\n)\nfrom pandas.util._decorators import (\n Appender,\n Substitution,\n cache_readonly,\n)\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.common import (\n is_categorical_dtype,\n is_datetime64_any_dtype,\n is_datetime64_dtype,\n is_datetime64tz_dtype,\n is_datetime_or_timedelta_dtype,\n is_dtype_equal,\n is_extension_array_dtype,\n is_float_dtype,\n is_integer_dtype,\n is_list_like,\n is_object_dtype,\n is_period_dtype,\n is_string_dtype,\n is_timedelta64_dtype,\n is_unsigned_integer_dtype,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.dtypes import (\n DatetimeTZDtype,\n PeriodDtype,\n)\nfrom pandas.core.dtypes.missing import (\n is_valid_na_for_dtype,\n isna,\n)\n\nfrom pandas.core import (\n nanops,\n ops,\n)\nfrom pandas.core.algorithms import (\n checked_add_with_arr,\n isin,\n unique1d,\n)\nfrom pandas.core.arraylike import OpsMixin\nfrom pandas.core.arrays._mixins import (\n NDArrayBackedExtensionArray,\n ravel_compat,\n)\nimport pandas.core.common as com\nfrom pandas.core.construction import (\n array as pd_array,\n extract_array,\n)\nfrom pandas.core.indexers import (\n check_array_indexer,\n check_setitem_lengths,\n)\nfrom pandas.core.ops.common import unpack_zerodim_and_defer\nfrom pandas.core.ops.invalid import (\n invalid_comparison,\n make_invalid_op,\n)\n\nfrom pandas.tseries import frequencies\n\nif TYPE_CHECKING:\n from typing import Literal\n\n from pandas.core.arrays import (\n DatetimeArray,\n TimedeltaArray,\n )\n\nDTScalarOrNaT = Union[DatetimeLikeScalar, NaTType]\nDatetimeLikeArrayT = TypeVar(\"DatetimeLikeArrayT\", bound=\"DatetimeLikeArrayMixin\")\n\n\nclass InvalidComparison(Exception):\n \"\"\"\n Raised by _validate_comparison_value to indicate to caller it should\n return invalid_comparison.\n \"\"\"\n\n pass\n\n\nclass DatetimeLikeArrayMixin(OpsMixin, NDArrayBackedExtensionArray):\n \"\"\"\n Shared Base/Mixin class for DatetimeArray, TimedeltaArray, PeriodArray\n\n Assumes that __new__/__init__ defines:\n _data\n _freq\n\n and that the inheriting class has methods:\n _generate_range\n \"\"\"\n\n # _infer_matches -> which infer_dtype strings are close enough to our own\n _infer_matches: tuple[str, ...]\n _is_recognized_dtype: Callable[[DtypeObj], bool]\n _recognized_scalars: tuple[type, ...]\n _ndarray: np.ndarray\n\n @cache_readonly\n def _can_hold_na(self) -> bool:\n return True\n\n def __init__(self, data, dtype: Dtype | None = None, freq=None, copy=False):\n raise AbstractMethodError(self)\n\n @property\n def _scalar_type(self) -> type[DatetimeLikeScalar]:\n \"\"\"\n The scalar associated with this datelike\n\n * PeriodArray : Period\n * 
DatetimeArray : Timestamp\n * TimedeltaArray : Timedelta\n \"\"\"\n raise AbstractMethodError(self)\n\n def _scalar_from_string(self, value: str) -> DTScalarOrNaT:\n \"\"\"\n Construct a scalar type from a string.\n\n Parameters\n ----------\n value : str\n\n Returns\n -------\n Period, Timestamp, or Timedelta, or NaT\n Whatever the type of ``self._scalar_type`` is.\n\n Notes\n -----\n This should call ``self._check_compatible_with`` before\n unboxing the result.\n \"\"\"\n raise AbstractMethodError(self)\n\n def _unbox_scalar(\n self, value: DTScalarOrNaT, setitem: bool = False\n ) -> np.int64 | np.datetime64 | np.timedelta64:\n \"\"\"\n Unbox the integer value of a scalar `value`.\n\n Parameters\n ----------\n value : Period, Timestamp, Timedelta, or NaT\n Depending on subclass.\n setitem : bool, default False\n Whether to check compatibility with setitem strictness.\n\n Returns\n -------\n int\n\n Examples\n --------\n >>> self._unbox_scalar(Timedelta(\"10s\")) # doctest: +SKIP\n 10000000000\n \"\"\"\n raise AbstractMethodError(self)\n\n def _check_compatible_with(\n self, other: DTScalarOrNaT, setitem: bool = False\n ) -> None:\n \"\"\"\n Verify that `self` and `other` are compatible.\n\n * DatetimeArray verifies that the timezones (if any) match\n * PeriodArray verifies that the freq matches\n * Timedelta has no verification\n\n In each case, NaT is considered compatible.\n\n Parameters\n ----------\n other\n setitem : bool, default False\n For __setitem__ we may have stricter compatibility restrictions than\n for comparisons.\n\n Raises\n ------\n Exception\n \"\"\"\n raise AbstractMethodError(self)\n\n # ------------------------------------------------------------------\n # NDArrayBackedExtensionArray compat\n\n @cache_readonly\n def _data(self) -> np.ndarray:\n return self._ndarray\n\n # ------------------------------------------------------------------\n\n def _box_func(self, x):\n \"\"\"\n box function to get object from internal representation\n \"\"\"\n raise AbstractMethodError(self)\n\n def _box_values(self, values) -> np.ndarray:\n \"\"\"\n apply box func to passed values\n \"\"\"\n return lib.map_infer(values, self._box_func, convert=False)\n\n def __iter__(self):\n if self.ndim > 1:\n return (self[n] for n in range(len(self)))\n else:\n return (self._box_func(v) for v in self.asi8)\n\n @property\n def asi8(self) -> np.ndarray:\n \"\"\"\n Integer representation of the values.\n\n Returns\n -------\n ndarray\n An ndarray with int64 dtype.\n \"\"\"\n # do not cache or you'll create a memory leak\n return self._ndarray.view(\"i8\")\n\n # ----------------------------------------------------------------\n # Rendering Methods\n\n def _format_native_types(self, na_rep=\"NaT\", date_format=None):\n \"\"\"\n Helper method for astype when converting to strings.\n\n Returns\n -------\n ndarray[str]\n \"\"\"\n raise AbstractMethodError(self)\n\n def _formatter(self, boxed: bool = False):\n # TODO: Remove Datetime & DatetimeTZ formatters.\n return \"'{}'\".format\n\n # ----------------------------------------------------------------\n # Array-Like / EA-Interface Methods\n\n def __array__(self, dtype: NpDtype | None = None) -> np.ndarray:\n # used for Timedelta/DatetimeArray, overwritten by PeriodArray\n if is_object_dtype(dtype):\n return np.array(list(self), dtype=object)\n return self._ndarray\n\n def __getitem__(\n self, key: PositionalIndexer2D\n ) -> DatetimeLikeArrayMixin | DTScalarOrNaT:\n \"\"\"\n This getitem defers to the underlying array, which by-definition can\n only handle 
list-likes, slices, and integer scalars\n \"\"\"\n result = super().__getitem__(key)\n if lib.is_scalar(result):\n return result\n\n result._freq = self._get_getitem_freq(key)\n return result\n\n def _get_getitem_freq(self, key) -> BaseOffset | None:\n \"\"\"\n Find the `freq` attribute to assign to the result of a __getitem__ lookup.\n \"\"\"\n is_period = is_period_dtype(self.dtype)\n if is_period:\n freq = self.freq\n elif self.ndim != 1:\n freq = None\n else:\n key = check_array_indexer(self, key) # maybe ndarray[bool] -> slice\n freq = None\n if isinstance(key, slice):\n if self.freq is not None and key.step is not None:\n freq = key.step * self.freq\n else:\n freq = self.freq\n elif key is Ellipsis:\n # GH#21282 indexing with Ellipsis is similar to a full slice,\n # should preserve `freq` attribute\n freq = self.freq\n elif com.is_bool_indexer(key):\n new_key = lib.maybe_booleans_to_slice(key.view(np.uint8))\n if isinstance(new_key, slice):\n return self._get_getitem_freq(new_key)\n return freq\n\n # error: Argument 1 of \"__setitem__\" is incompatible with supertype\n # \"ExtensionArray\"; supertype defines the argument type as \"Union[int,\n # ndarray]\"\n def __setitem__( # type: ignore[override]\n self,\n key: int | Sequence[int] | Sequence[bool] | slice,\n value: NaTType | Any | Sequence[Any],\n ) -> None:\n # I'm fudging the types a bit here. \"Any\" above really depends\n # on type(self). For PeriodArray, it's Period (or stuff coercible\n # to a period in from_sequence). For DatetimeArray, it's Timestamp...\n # I don't know if mypy can do that, possibly with Generics.\n # https://mypy.readthedocs.io/en/latest/generics.html\n no_op = check_setitem_lengths(key, value, self)\n if no_op:\n return\n\n super().__setitem__(key, value)\n self._maybe_clear_freq()\n\n def _maybe_clear_freq(self):\n # inplace operations like __setitem__ may invalidate the freq of\n # DatetimeArray and TimedeltaArray\n pass\n\n def astype(self, dtype, copy: bool = True):\n # Some notes on cases we don't have to handle here in the base class:\n # 1. PeriodArray.astype handles period -> period\n # 2. DatetimeArray.astype handles conversion between tz.\n # 3. DatetimeArray.astype handles datetime -> period\n dtype = pandas_dtype(dtype)\n\n if is_object_dtype(dtype):\n return self._box_values(self.asi8.ravel()).reshape(self.shape)\n elif is_string_dtype(dtype) and not is_categorical_dtype(dtype):\n if is_extension_array_dtype(dtype):\n arr_cls = dtype.construct_array_type()\n return arr_cls._from_sequence(self, dtype=dtype, copy=copy)\n else:\n return self._format_native_types()\n elif is_integer_dtype(dtype):\n # we deliberately ignore int32 vs. int64 here.\n # See https://github.com/pandas-dev/pandas/issues/24381 for more.\n level = find_stack_level()\n warnings.warn(\n f\"casting {self.dtype} values to int64 with .astype(...) is \"\n \"deprecated and will raise in a future version. \"\n \"Use .view(...) instead.\",\n FutureWarning,\n stacklevel=level,\n )\n\n values = self.asi8\n\n if is_unsigned_integer_dtype(dtype):\n # Again, we ignore int32 vs. 
int64\n values = values.view(\"uint64\")\n\n if copy:\n values = values.copy()\n return values\n elif (\n is_datetime_or_timedelta_dtype(dtype)\n and not is_dtype_equal(self.dtype, dtype)\n ) or is_float_dtype(dtype):\n # disallow conversion between datetime/timedelta,\n # and conversions for any datetimelike to float\n msg = f\"Cannot cast {type(self).__name__} to dtype {dtype}\"\n raise TypeError(msg)\n elif is_categorical_dtype(dtype):\n arr_cls = dtype.construct_array_type()\n return arr_cls(self, dtype=dtype)\n else:\n return np.asarray(self, dtype=dtype)\n\n @overload\n def view(self: DatetimeLikeArrayT) -> DatetimeLikeArrayT:\n ...\n\n @overload\n def view(self, dtype: Literal[\"M8[ns]\"]) -> DatetimeArray:\n ...\n\n @overload\n def view(self, dtype: Literal[\"m8[ns]\"]) -> TimedeltaArray:\n ...\n\n @overload\n def view(self, dtype: Dtype | None = ...) -> ArrayLike:\n ...\n\n def view(self, dtype: Dtype | None = None) -> ArrayLike:\n # We handle datetime64, datetime64tz, timedelta64, and period\n # dtypes here. Everything else we pass through to the underlying\n # ndarray.\n if dtype is None or dtype is self.dtype:\n return type(self)(self._ndarray, dtype=self.dtype)\n\n if isinstance(dtype, type):\n # we sometimes pass non-dtype objects, e.g np.ndarray;\n # pass those through to the underlying ndarray\n return self._ndarray.view(dtype)\n\n dtype = pandas_dtype(dtype)\n if isinstance(dtype, (PeriodDtype, DatetimeTZDtype)):\n cls = dtype.construct_array_type()\n return cls(self.asi8, dtype=dtype)\n elif dtype == \"M8[ns]\":\n from pandas.core.arrays import DatetimeArray\n\n return DatetimeArray(self.asi8, dtype=dtype)\n elif dtype == \"m8[ns]\":\n from pandas.core.arrays import TimedeltaArray\n\n return TimedeltaArray(self.asi8, dtype=dtype)\n # error: Incompatible return value type (got \"ndarray\", expected\n # \"ExtensionArray\")\n # error: Argument \"dtype\" to \"view\" of \"_ArrayOrScalarCommon\" has incompatible\n # type \"Union[ExtensionDtype, dtype[Any]]\"; expected \"Union[dtype[Any], None,\n # type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, Union[int,\n # Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]\"\n return self._ndarray.view(dtype=dtype) # type: ignore[return-value,arg-type]\n\n # ------------------------------------------------------------------\n # ExtensionArray Interface\n\n @classmethod\n def _concat_same_type(\n cls: type[DatetimeLikeArrayT],\n to_concat: Sequence[DatetimeLikeArrayT],\n axis: int = 0,\n ) -> DatetimeLikeArrayT:\n new_obj = super()._concat_same_type(to_concat, axis)\n\n obj = to_concat[0]\n dtype = obj.dtype\n\n new_freq = None\n if is_period_dtype(dtype):\n new_freq = obj.freq\n elif axis == 0:\n # GH 3232: If the concat result is evenly spaced, we can retain the\n # original frequency\n to_concat = [x for x in to_concat if len(x)]\n\n if obj.freq is not None and all(x.freq == obj.freq for x in to_concat):\n pairs = zip(to_concat[:-1], to_concat[1:])\n if all(pair[0][-1] + obj.freq == pair[1][0] for pair in pairs):\n new_freq = obj.freq\n\n new_obj._freq = new_freq\n return new_obj\n\n def copy(self: DatetimeLikeArrayT) -> DatetimeLikeArrayT:\n new_obj = super().copy()\n new_obj._freq = self.freq\n return new_obj\n\n def _values_for_factorize(self):\n # int64 instead of int ensures we have a \"view\" method\n return self._ndarray, np.int64(iNaT)\n\n @classmethod\n def _from_factorized(\n cls: type[DatetimeLikeArrayT], values, original: DatetimeLikeArrayT\n ) -> DatetimeLikeArrayT:\n return cls(values, 
dtype=original.dtype)\n\n # ------------------------------------------------------------------\n # Validation Methods\n # TODO: try to de-duplicate these, ensure identical behavior\n\n def _validate_comparison_value(self, other):\n if isinstance(other, str):\n try:\n # GH#18435 strings get a pass from tzawareness compat\n other = self._scalar_from_string(other)\n except (ValueError, IncompatibleFrequency):\n # failed to parse as Timestamp/Timedelta/Period\n raise InvalidComparison(other)\n\n if isinstance(other, self._recognized_scalars) or other is NaT:\n other = self._scalar_type(other)\n try:\n self._check_compatible_with(other)\n except (TypeError, IncompatibleFrequency) as err:\n # e.g. tzawareness mismatch\n raise InvalidComparison(other) from err\n\n elif not is_list_like(other):\n raise InvalidComparison(other)\n\n elif len(other) != len(self):\n raise ValueError(\"Lengths must match\")\n\n else:\n try:\n other = self._validate_listlike(other, allow_object=True)\n self._check_compatible_with(other)\n except (TypeError, IncompatibleFrequency) as err:\n if is_object_dtype(getattr(other, \"dtype\", None)):\n # We will have to operate element-wise\n pass\n else:\n raise InvalidComparison(other) from err\n\n return other\n\n def _validate_shift_value(self, fill_value):\n # TODO(2.0): once this deprecation is enforced, use _validate_scalar\n if is_valid_na_for_dtype(fill_value, self.dtype):\n fill_value = NaT\n elif isinstance(fill_value, self._recognized_scalars):\n fill_value = self._scalar_type(fill_value)\n else:\n new_fill: DatetimeLikeScalar\n\n # only warn if we're not going to raise\n if self._scalar_type is Period and lib.is_integer(fill_value):\n # kludge for #31971 since Period(integer) tries to cast to str\n new_fill = Period._from_ordinal(fill_value, freq=self.freq)\n else:\n new_fill = self._scalar_type(fill_value)\n\n # stacklevel here is chosen to be correct when called from\n # DataFrame.shift or Series.shift\n warnings.warn(\n f\"Passing {type(fill_value)} to shift is deprecated and \"\n \"will raise in a future version, pass \"\n f\"{self._scalar_type.__name__} instead.\",\n FutureWarning,\n # There is no way to hard-code the level since this might be\n # reached directly or called from the Index or Block method\n stacklevel=find_stack_level(),\n )\n fill_value = new_fill\n\n return self._unbox(fill_value, setitem=True)\n\n def _validate_scalar(\n self,\n value,\n *,\n allow_listlike: bool = False,\n setitem: bool = True,\n unbox: bool = True,\n ):\n \"\"\"\n Validate that the input value can be cast to our scalar_type.\n\n Parameters\n ----------\n value : object\n allow_listlike: bool, default False\n When raising an exception, whether the message should say\n listlike inputs are allowed.\n setitem : bool, default True\n Whether to check compatibility with setitem strictness.\n unbox : bool, default True\n Whether to unbox the result before returning. 
Note: unbox=False\n skips the setitem compatibility check.\n\n Returns\n -------\n self._scalar_type or NaT\n \"\"\"\n if isinstance(value, str):\n # NB: Careful about tzawareness\n try:\n value = self._scalar_from_string(value)\n except ValueError as err:\n msg = self._validation_error_message(value, allow_listlike)\n raise TypeError(msg) from err\n\n elif is_valid_na_for_dtype(value, self.dtype):\n # GH#18295\n value = NaT\n\n elif isna(value):\n # if we are dt64tz and value is dt64(\"NaT\"), dont cast to NaT,\n # or else we'll fail to raise in _unbox_scalar\n msg = self._validation_error_message(value, allow_listlike)\n raise TypeError(msg)\n\n elif isinstance(value, self._recognized_scalars):\n value = self._scalar_type(value)\n\n else:\n msg = self._validation_error_message(value, allow_listlike)\n raise TypeError(msg)\n\n if not unbox:\n # NB: In general NDArrayBackedExtensionArray will unbox here;\n # this option exists to prevent a performance hit in\n # TimedeltaIndex.get_loc\n return value\n return self._unbox_scalar(value, setitem=setitem)\n\n def _validation_error_message(self, value, allow_listlike: bool = False) -> str:\n \"\"\"\n Construct an exception message on validation error.\n\n Some methods allow only scalar inputs, while others allow either scalar\n or listlike.\n\n Parameters\n ----------\n allow_listlike: bool, default False\n\n Returns\n -------\n str\n \"\"\"\n if allow_listlike:\n msg = (\n f\"value should be a '{self._scalar_type.__name__}', 'NaT', \"\n f\"or array of those. Got '{type(value).__name__}' instead.\"\n )\n else:\n msg = (\n f\"value should be a '{self._scalar_type.__name__}' or 'NaT'. \"\n f\"Got '{type(value).__name__}' instead.\"\n )\n return msg\n\n def _validate_listlike(self, value, allow_object: bool = False):\n if isinstance(value, type(self)):\n return value\n\n if isinstance(value, list) and len(value) == 0:\n # We treat empty list as our own dtype.\n return type(self)._from_sequence([], dtype=self.dtype)\n\n if hasattr(value, \"dtype\") and value.dtype == object:\n # `array` below won't do inference if value is an Index or Series.\n # so do so here. in the Index case, inferred_type may be cached.\n if lib.infer_dtype(value) in self._infer_matches:\n try:\n value = type(self)._from_sequence(value)\n except (ValueError, TypeError):\n if allow_object:\n return value\n msg = self._validation_error_message(value, True)\n raise TypeError(msg)\n\n # Do type inference if necessary up front\n # e.g. we passed PeriodIndex.values and got an ndarray of Periods\n value = pd_array(value)\n value = extract_array(value, extract_numpy=True)\n\n if is_dtype_equal(value.dtype, \"string\"):\n # We got a StringArray\n try:\n # TODO: Could use from_sequence_of_strings if implemented\n # Note: passing dtype is necessary for PeriodArray tests\n value = type(self)._from_sequence(value, dtype=self.dtype)\n except ValueError:\n pass\n\n if is_categorical_dtype(value.dtype):\n # e.g. 
we have a Categorical holding self.dtype\n if is_dtype_equal(value.categories.dtype, self.dtype):\n # TODO: do we need equal dtype or just comparable?\n value = value._internal_get_values()\n value = extract_array(value, extract_numpy=True)\n\n if allow_object and is_object_dtype(value.dtype):\n pass\n\n elif not type(self)._is_recognized_dtype(value.dtype):\n msg = self._validation_error_message(value, True)\n raise TypeError(msg)\n\n return value\n\n def _validate_searchsorted_value(self, value):\n if not is_list_like(value):\n return self._validate_scalar(value, allow_listlike=True, setitem=False)\n else:\n value = self._validate_listlike(value)\n\n return self._unbox(value)\n\n def _validate_setitem_value(self, value):\n if is_list_like(value):\n value = self._validate_listlike(value)\n else:\n return self._validate_scalar(value, allow_listlike=True)\n\n return self._unbox(value, setitem=True)\n\n def _unbox(\n self, other, setitem: bool = False\n ) -> np.int64 | np.datetime64 | np.timedelta64 | np.ndarray:\n \"\"\"\n Unbox either a scalar with _unbox_scalar or an instance of our own type.\n \"\"\"\n if lib.is_scalar(other):\n other = self._unbox_scalar(other, setitem=setitem)\n else:\n # same type as self\n self._check_compatible_with(other, setitem=setitem)\n other = other._ndarray\n return other\n\n # ------------------------------------------------------------------\n # Additional array methods\n # These are not part of the EA API, but we implement them because\n # pandas assumes they're there.\n\n @ravel_compat\n def map(self, mapper):\n # TODO(GH-23179): Add ExtensionArray.map\n # Need to figure out if we want ExtensionArray.map first.\n # If so, then we can refactor IndexOpsMixin._map_values to\n # a standalone function and call from here..\n # Else, just rewrite _map_infer_values to do the right thing.\n from pandas import Index\n\n return Index(self).map(mapper).array\n\n def isin(self, values) -> np.ndarray:\n \"\"\"\n Compute boolean array of whether each value is found in the\n passed set of values.\n\n Parameters\n ----------\n values : set or sequence of values\n\n Returns\n -------\n ndarray[bool]\n \"\"\"\n if not hasattr(values, \"dtype\"):\n values = np.asarray(values)\n\n if values.dtype.kind in [\"f\", \"i\", \"u\", \"c\"]:\n # TODO: de-duplicate with equals, validate_comparison_value\n return np.zeros(self.shape, dtype=bool)\n\n if not isinstance(values, type(self)):\n inferable = [\n \"timedelta\",\n \"timedelta64\",\n \"datetime\",\n \"datetime64\",\n \"date\",\n \"period\",\n ]\n if values.dtype == object:\n inferred = lib.infer_dtype(values, skipna=False)\n if inferred not in inferable:\n if inferred == \"string\":\n pass\n\n elif \"mixed\" in inferred:\n return isin(self.astype(object), values)\n else:\n return np.zeros(self.shape, dtype=bool)\n\n try:\n values = type(self)._from_sequence(values)\n except ValueError:\n return isin(self.astype(object), values)\n\n try:\n self._check_compatible_with(values)\n except (TypeError, ValueError):\n # Includes tzawareness mismatch and IncompatibleFrequencyError\n return np.zeros(self.shape, dtype=bool)\n\n return isin(self.asi8, values.asi8)\n\n # ------------------------------------------------------------------\n # Null Handling\n\n def isna(self) -> np.ndarray:\n return self._isnan\n\n @property # NB: override with cache_readonly in immutable subclasses\n def _isnan(self) -> np.ndarray:\n \"\"\"\n return if each value is nan\n \"\"\"\n return self.asi8 == iNaT\n\n @property # NB: override with cache_readonly in 
immutable subclasses\n def _hasnans(self) -> bool:\n \"\"\"\n return if I have any nans; enables various perf speedups\n \"\"\"\n return bool(self._isnan.any())\n\n def _maybe_mask_results(\n self, result: np.ndarray, fill_value=iNaT, convert=None\n ) -> np.ndarray:\n \"\"\"\n Parameters\n ----------\n result : np.ndarray\n fill_value : object, default iNaT\n convert : str, dtype or None\n\n Returns\n -------\n result : ndarray with values replace by the fill_value\n\n mask the result if needed, convert to the provided dtype if its not\n None\n\n This is an internal routine.\n \"\"\"\n if self._hasnans:\n if convert:\n result = result.astype(convert)\n if fill_value is None:\n fill_value = np.nan\n np.putmask(result, self._isnan, fill_value)\n return result\n\n # ------------------------------------------------------------------\n # Frequency Properties/Methods\n\n @property\n def freq(self):\n \"\"\"\n Return the frequency object if it is set, otherwise None.\n \"\"\"\n return self._freq\n\n @freq.setter\n def freq(self, value):\n if value is not None:\n value = to_offset(value)\n self._validate_frequency(self, value)\n\n if self.ndim > 1:\n raise ValueError(\"Cannot set freq with ndim > 1\")\n\n self._freq = value\n\n @property\n def freqstr(self) -> str | None:\n \"\"\"\n Return the frequency object as a string if its set, otherwise None.\n \"\"\"\n if self.freq is None:\n return None\n return self.freq.freqstr\n\n @property # NB: override with cache_readonly in immutable subclasses\n def inferred_freq(self) -> str | None:\n \"\"\"\n Tries to return a string representing a frequency guess,\n generated by infer_freq. Returns None if it can't autodetect the\n frequency.\n \"\"\"\n if self.ndim != 1:\n return None\n try:\n return frequencies.infer_freq(self)\n except ValueError:\n return None\n\n @property # NB: override with cache_readonly in immutable subclasses\n def _resolution_obj(self) -> Resolution | None:\n freqstr = self.freqstr\n if freqstr is None:\n return None\n try:\n return Resolution.get_reso_from_freq(freqstr)\n except KeyError:\n return None\n\n @property # NB: override with cache_readonly in immutable subclasses\n def resolution(self) -> str:\n \"\"\"\n Returns day, hour, minute, second, millisecond or microsecond\n \"\"\"\n # error: Item \"None\" of \"Optional[Any]\" has no attribute \"attrname\"\n return self._resolution_obj.attrname # type: ignore[union-attr]\n\n @classmethod\n def _validate_frequency(cls, index, freq, **kwargs):\n \"\"\"\n Validate that a frequency is compatible with the values of a given\n Datetime Array/Index or Timedelta Array/Index\n\n Parameters\n ----------\n index : DatetimeIndex or TimedeltaIndex\n The index on which to determine if the given frequency is valid\n freq : DateOffset\n The frequency to validate\n \"\"\"\n # TODO: this is not applicable to PeriodArray, move to correct Mixin\n inferred = index.inferred_freq\n if index.size == 0 or inferred == freq.freqstr:\n return None\n\n try:\n on_freq = cls._generate_range(\n start=index[0], end=None, periods=len(index), freq=freq, **kwargs\n )\n if not np.array_equal(index.asi8, on_freq.asi8):\n raise ValueError\n except ValueError as e:\n if \"non-fixed\" in str(e):\n # non-fixed frequencies are not meaningful for timedelta64;\n # we retain that error message\n raise e\n # GH#11587 the main way this is reached is if the `np.array_equal`\n # check above is False. 
This can also be reached if index[0]\n # is `NaT`, in which case the call to `cls._generate_range` will\n # raise a ValueError, which we re-raise with a more targeted\n # message.\n raise ValueError(\n f\"Inferred frequency {inferred} from passed values \"\n f\"does not conform to passed frequency {freq.freqstr}\"\n ) from e\n\n @classmethod\n def _generate_range(\n cls: type[DatetimeLikeArrayT], start, end, periods, freq, *args, **kwargs\n ) -> DatetimeLikeArrayT:\n raise AbstractMethodError(cls)\n\n # monotonicity/uniqueness properties are called via frequencies.infer_freq,\n # see GH#23789\n\n @property\n def _is_monotonic_increasing(self) -> bool:\n return algos.is_monotonic(self.asi8, timelike=True)[0]\n\n @property\n def _is_monotonic_decreasing(self) -> bool:\n return algos.is_monotonic(self.asi8, timelike=True)[1]\n\n @property\n def _is_unique(self) -> bool:\n return len(unique1d(self.asi8.ravel(\"K\"))) == self.size\n\n # ------------------------------------------------------------------\n # Arithmetic Methods\n\n def _cmp_method(self, other, op):\n if self.ndim > 1 and getattr(other, \"shape\", None) == self.shape:\n # TODO: handle 2D-like listlikes\n return op(self.ravel(), other.ravel()).reshape(self.shape)\n\n try:\n other = self._validate_comparison_value(other)\n except InvalidComparison:\n return invalid_comparison(self, other, op)\n\n dtype = getattr(other, \"dtype\", None)\n if is_object_dtype(dtype):\n # We have to use comp_method_OBJECT_ARRAY instead of numpy\n # comparison otherwise it would fail to raise when\n # comparing tz-aware and tz-naive\n with np.errstate(all=\"ignore\"):\n result = ops.comp_method_OBJECT_ARRAY(\n op, np.asarray(self.astype(object)), other\n )\n return result\n\n other_vals = self._unbox(other)\n # GH#37462 comparison on i8 values is almost 2x faster than M8/m8\n result = op(self._ndarray.view(\"i8\"), other_vals.view(\"i8\"))\n\n o_mask = isna(other)\n mask = self._isnan | o_mask\n if mask.any():\n nat_result = op is operator.ne\n np.putmask(result, mask, nat_result)\n\n return result\n\n # pow is invalid for all three subclasses; TimedeltaArray will override\n # the multiplication and division ops\n __pow__ = make_invalid_op(\"__pow__\")\n __rpow__ = make_invalid_op(\"__rpow__\")\n __mul__ = make_invalid_op(\"__mul__\")\n __rmul__ = make_invalid_op(\"__rmul__\")\n __truediv__ = make_invalid_op(\"__truediv__\")\n __rtruediv__ = make_invalid_op(\"__rtruediv__\")\n __floordiv__ = make_invalid_op(\"__floordiv__\")\n __rfloordiv__ = make_invalid_op(\"__rfloordiv__\")\n __mod__ = make_invalid_op(\"__mod__\")\n __rmod__ = make_invalid_op(\"__rmod__\")\n __divmod__ = make_invalid_op(\"__divmod__\")\n __rdivmod__ = make_invalid_op(\"__rdivmod__\")\n\n def _add_datetimelike_scalar(self, other):\n # Overridden by TimedeltaArray\n raise TypeError(f\"cannot add {type(self).__name__} and {type(other).__name__}\")\n\n _add_datetime_arraylike = _add_datetimelike_scalar\n\n def _sub_datetimelike_scalar(self, other):\n # Overridden by DatetimeArray\n assert other is not NaT\n raise TypeError(f\"cannot subtract a datelike from a {type(self).__name__}\")\n\n _sub_datetime_arraylike = _sub_datetimelike_scalar\n\n def _sub_period(self, other):\n # Overridden by PeriodArray\n raise TypeError(f\"cannot subtract Period from a {type(self).__name__}\")\n\n def _add_period(self, other: Period):\n # Overridden by TimedeltaArray\n raise TypeError(f\"cannot add Period to a {type(self).__name__}\")\n\n def _add_offset(self, offset):\n raise AbstractMethodError(self)\n\n 
def _add_timedeltalike_scalar(self, other):\n \"\"\"\n Add a delta of a timedeltalike\n\n Returns\n -------\n Same type as self\n \"\"\"\n if isna(other):\n # i.e np.timedelta64(\"NaT\"), not recognized by delta_to_nanoseconds\n new_values = np.empty(self.shape, dtype=\"i8\")\n new_values.fill(iNaT)\n return type(self)(new_values, dtype=self.dtype)\n\n inc = delta_to_nanoseconds(other)\n new_values = checked_add_with_arr(self.asi8, inc, arr_mask=self._isnan)\n new_values = new_values.view(\"i8\")\n new_values = self._maybe_mask_results(new_values)\n new_values = new_values.view(self._ndarray.dtype)\n\n new_freq = None\n if isinstance(self.freq, Tick) or is_period_dtype(self.dtype):\n # adding a scalar preserves freq\n new_freq = self.freq\n\n # error: Unexpected keyword argument \"freq\" for \"_simple_new\" of \"NDArrayBacked\"\n return type(self)._simple_new( # type: ignore[call-arg]\n new_values, dtype=self.dtype, freq=new_freq\n )\n\n def _add_timedelta_arraylike(self, other):\n \"\"\"\n Add a delta of a TimedeltaIndex\n\n Returns\n -------\n Same type as self\n \"\"\"\n # overridden by PeriodArray\n\n if len(self) != len(other):\n raise ValueError(\"cannot add indices of unequal length\")\n\n if isinstance(other, np.ndarray):\n # ndarray[timedelta64]; wrap in TimedeltaIndex for op\n from pandas.core.arrays import TimedeltaArray\n\n other = TimedeltaArray._from_sequence(other)\n\n self_i8 = self.asi8\n other_i8 = other.asi8\n new_values = checked_add_with_arr(\n self_i8, other_i8, arr_mask=self._isnan, b_mask=other._isnan\n )\n if self._hasnans or other._hasnans:\n mask = self._isnan | other._isnan\n np.putmask(new_values, mask, iNaT)\n\n return type(self)(new_values, dtype=self.dtype)\n\n def _add_nat(self):\n \"\"\"\n Add pd.NaT to self\n \"\"\"\n if is_period_dtype(self.dtype):\n raise TypeError(\n f\"Cannot add {type(self).__name__} and {type(NaT).__name__}\"\n )\n\n # GH#19124 pd.NaT is treated like a timedelta for both timedelta\n # and datetime dtypes\n result = np.empty(self.shape, dtype=np.int64)\n result.fill(iNaT)\n return type(self)(result, dtype=self.dtype, freq=None)\n\n def _sub_nat(self):\n \"\"\"\n Subtract pd.NaT from self\n \"\"\"\n # GH#19124 Timedelta - datetime is not in general well-defined.\n # We make an exception for pd.NaT, which in this case quacks\n # like a timedelta.\n # For datetime64 dtypes by convention we treat NaT as a datetime, so\n # this subtraction returns a timedelta64 dtype.\n # For period dtype, timedelta64 is a close-enough return dtype.\n result = np.empty(self.shape, dtype=np.int64)\n result.fill(iNaT)\n return result.view(\"timedelta64[ns]\")\n\n def _sub_period_array(self, other):\n # Overridden by PeriodArray\n raise TypeError(\n f\"cannot subtract {other.dtype}-dtype from {type(self).__name__}\"\n )\n\n def _addsub_object_array(self, other: np.ndarray, op):\n \"\"\"\n Add or subtract array-like of DateOffset objects\n\n Parameters\n ----------\n other : np.ndarray[object]\n op : {operator.add, operator.sub}\n\n Returns\n -------\n result : same class as self\n \"\"\"\n assert op in [operator.add, operator.sub]\n if len(other) == 1 and self.ndim == 1:\n # If both 1D then broadcasting is unambiguous\n return op(self, other[0])\n\n warnings.warn(\n \"Adding/subtracting object-dtype array to \"\n f\"{type(self).__name__} not vectorized\",\n PerformanceWarning,\n )\n\n # Caller is responsible for broadcasting if necessary\n assert self.shape == other.shape, (self.shape, other.shape)\n\n with warnings.catch_warnings():\n # filter out warnings 
about Timestamp.freq\n warnings.filterwarnings(\"ignore\", category=FutureWarning)\n res_values = op(self.astype(\"O\"), np.asarray(other))\n\n result = pd_array(res_values.ravel())\n # error: Item \"ExtensionArray\" of \"Union[Any, ExtensionArray]\" has no attribute\n # \"reshape\"\n result = extract_array(\n result, extract_numpy=True\n ).reshape( # type: ignore[union-attr]\n self.shape\n )\n return result\n\n def _time_shift(self, periods: int, freq=None):\n \"\"\"\n Shift each value by `periods`.\n\n Note this is different from ExtensionArray.shift, which\n shifts the *position* of each element, padding the end with\n missing values.\n\n Parameters\n ----------\n periods : int\n Number of periods to shift by.\n freq : pandas.DateOffset, pandas.Timedelta, or str\n Frequency increment to shift by.\n \"\"\"\n if freq is not None and freq != self.freq:\n if isinstance(freq, str):\n freq = to_offset(freq)\n offset = periods * freq\n return self + offset\n\n if periods == 0 or len(self) == 0:\n # GH#14811 empty case\n return self.copy()\n\n if self.freq is None:\n raise NullFrequencyError(\"Cannot shift with no freq\")\n\n start = self[0] + periods * self.freq\n end = self[-1] + periods * self.freq\n\n # Note: in the DatetimeTZ case, _generate_range will infer the\n # appropriate timezone from `start` and `end`, so tz does not need\n # to be passed explicitly.\n return self._generate_range(start=start, end=end, periods=None, freq=self.freq)\n\n @unpack_zerodim_and_defer(\"__add__\")\n def __add__(self, other):\n other_dtype = getattr(other, \"dtype\", None)\n\n # scalar others\n if other is NaT:\n result = self._add_nat()\n elif isinstance(other, (Tick, timedelta, np.timedelta64)):\n result = self._add_timedeltalike_scalar(other)\n elif isinstance(other, BaseOffset):\n # specifically _not_ a Tick\n result = self._add_offset(other)\n elif isinstance(other, (datetime, np.datetime64)):\n result = self._add_datetimelike_scalar(other)\n elif isinstance(other, Period) and is_timedelta64_dtype(self.dtype):\n result = self._add_period(other)\n elif lib.is_integer(other):\n # This check must come after the check for np.timedelta64\n # as is_integer returns True for these\n if not is_period_dtype(self.dtype):\n raise integer_op_not_supported(self)\n result = self._time_shift(other)\n\n # array-like others\n elif is_timedelta64_dtype(other_dtype):\n # TimedeltaIndex, ndarray[timedelta64]\n result = self._add_timedelta_arraylike(other)\n elif is_object_dtype(other_dtype):\n # e.g. Array/Index of DateOffset objects\n result = self._addsub_object_array(other, operator.add)\n elif is_datetime64_dtype(other_dtype) or is_datetime64tz_dtype(other_dtype):\n # DatetimeIndex, ndarray[datetime64]\n return self._add_datetime_arraylike(other)\n elif is_integer_dtype(other_dtype):\n if not is_period_dtype(self.dtype):\n raise integer_op_not_supported(self)\n result = self._addsub_int_array(other, operator.add)\n else:\n # Includes Categorical, other ExtensionArrays\n # For PeriodDtype, if self is a TimedeltaArray and other is a\n # PeriodArray with a timedelta-like (i.e. Tick) freq, this\n # operation is valid. 
Defer to the PeriodArray implementation.\n # In remaining cases, this will end up raising TypeError.\n return NotImplemented\n\n if isinstance(result, np.ndarray) and is_timedelta64_dtype(result.dtype):\n from pandas.core.arrays import TimedeltaArray\n\n return TimedeltaArray(result)\n return result\n\n def __radd__(self, other):\n # alias for __add__\n return self.__add__(other)\n\n @unpack_zerodim_and_defer(\"__sub__\")\n def __sub__(self, other):\n\n other_dtype = getattr(other, \"dtype\", None)\n\n # scalar others\n if other is NaT:\n result = self._sub_nat()\n elif isinstance(other, (Tick, timedelta, np.timedelta64)):\n result = self._add_timedeltalike_scalar(-other)\n elif isinstance(other, BaseOffset):\n # specifically _not_ a Tick\n result = self._add_offset(-other)\n elif isinstance(other, (datetime, np.datetime64)):\n result = self._sub_datetimelike_scalar(other)\n elif lib.is_integer(other):\n # This check must come after the check for np.timedelta64\n # as is_integer returns True for these\n if not is_period_dtype(self.dtype):\n raise integer_op_not_supported(self)\n result = self._time_shift(-other)\n\n elif isinstance(other, Period):\n result = self._sub_period(other)\n\n # array-like others\n elif is_timedelta64_dtype(other_dtype):\n # TimedeltaIndex, ndarray[timedelta64]\n result = self._add_timedelta_arraylike(-other)\n elif is_object_dtype(other_dtype):\n # e.g. Array/Index of DateOffset objects\n result = self._addsub_object_array(other, operator.sub)\n elif is_datetime64_dtype(other_dtype) or is_datetime64tz_dtype(other_dtype):\n # DatetimeIndex, ndarray[datetime64]\n result = self._sub_datetime_arraylike(other)\n elif is_period_dtype(other_dtype):\n # PeriodIndex\n result = self._sub_period_array(other)\n elif is_integer_dtype(other_dtype):\n if not is_period_dtype(self.dtype):\n raise integer_op_not_supported(self)\n result = self._addsub_int_array(other, operator.sub)\n else:\n # Includes ExtensionArrays, float_dtype\n return NotImplemented\n\n if isinstance(result, np.ndarray) and is_timedelta64_dtype(result.dtype):\n from pandas.core.arrays import TimedeltaArray\n\n return TimedeltaArray(result)\n return result\n\n def __rsub__(self, other):\n other_dtype = getattr(other, \"dtype\", None)\n\n if is_datetime64_any_dtype(other_dtype) and is_timedelta64_dtype(self.dtype):\n # ndarray[datetime64] cannot be subtracted from self, so\n # we need to wrap in DatetimeArray/Index and flip the operation\n if lib.is_scalar(other):\n # i.e. np.datetime64 object\n return Timestamp(other) - self\n if not isinstance(other, DatetimeLikeArrayMixin):\n # Avoid down-casting DatetimeIndex\n from pandas.core.arrays import DatetimeArray\n\n other = DatetimeArray(other)\n return other - self\n elif (\n is_datetime64_any_dtype(self.dtype)\n and hasattr(other, \"dtype\")\n and not is_datetime64_any_dtype(other.dtype)\n ):\n # GH#19959 datetime - datetime is well-defined as timedelta,\n # but any other type - datetime is not well-defined.\n raise TypeError(\n f\"cannot subtract {type(self).__name__} from {type(other).__name__}\"\n )\n elif is_period_dtype(self.dtype) and is_timedelta64_dtype(other_dtype):\n # TODO: Can we simplify/generalize these cases at all?\n raise TypeError(f\"cannot subtract {type(self).__name__} from {other.dtype}\")\n elif is_timedelta64_dtype(self.dtype):\n self = cast(\"TimedeltaArray\", self)\n return (-self) + other\n\n # We get here with e.g. 
datetime objects\n return -(self - other)\n\n def __iadd__(self, other):\n result = self + other\n self[:] = result[:]\n\n if not is_period_dtype(self.dtype):\n # restore freq, which is invalidated by setitem\n self._freq = result._freq\n return self\n\n def __isub__(self, other):\n result = self - other\n self[:] = result[:]\n\n if not is_period_dtype(self.dtype):\n # restore freq, which is invalidated by setitem\n self._freq = result._freq\n return self\n\n # --------------------------------------------------------------\n # Reductions\n\n def min(self, *, axis: int | None = None, skipna: bool = True, **kwargs):\n \"\"\"\n Return the minimum value of the Array or minimum along\n an axis.\n\n See Also\n --------\n numpy.ndarray.min\n Index.min : Return the minimum value in an Index.\n Series.min : Return the minimum value in a Series.\n \"\"\"\n nv.validate_min((), kwargs)\n nv.validate_minmax_axis(axis, self.ndim)\n\n if is_period_dtype(self.dtype):\n # pass datetime64 values to nanops to get correct NaT semantics\n result = nanops.nanmin(\n self._ndarray.view(\"M8[ns]\"), axis=axis, skipna=skipna\n )\n if result is NaT:\n return NaT\n result = result.view(\"i8\")\n if axis is None or self.ndim == 1:\n return self._box_func(result)\n return self._from_backing_data(result)\n\n result = nanops.nanmin(self._ndarray, axis=axis, skipna=skipna)\n return self._wrap_reduction_result(axis, result)\n\n def max(self, *, axis: int | None = None, skipna: bool = True, **kwargs):\n \"\"\"\n Return the maximum value of the Array or maximum along\n an axis.\n\n See Also\n --------\n numpy.ndarray.max\n Index.max : Return the maximum value in an Index.\n Series.max : Return the maximum value in a Series.\n \"\"\"\n # TODO: skipna is broken with max.\n # See https://github.com/pandas-dev/pandas/issues/24265\n nv.validate_max((), kwargs)\n nv.validate_minmax_axis(axis, self.ndim)\n\n if is_period_dtype(self.dtype):\n # pass datetime64 values to nanops to get correct NaT semantics\n result = nanops.nanmax(\n self._ndarray.view(\"M8[ns]\"), axis=axis, skipna=skipna\n )\n if result is NaT:\n return result\n result = result.view(\"i8\")\n if axis is None or self.ndim == 1:\n return self._box_func(result)\n return self._from_backing_data(result)\n\n result = nanops.nanmax(self._ndarray, axis=axis, skipna=skipna)\n return self._wrap_reduction_result(axis, result)\n\n def mean(self, *, skipna: bool = True, axis: int | None = 0):\n \"\"\"\n Return the mean value of the Array.\n\n .. versionadded:: 0.25.0\n\n Parameters\n ----------\n skipna : bool, default True\n Whether to ignore any NaT elements.\n axis : int, optional, default 0\n\n Returns\n -------\n scalar\n Timestamp or Timedelta.\n\n See Also\n --------\n numpy.ndarray.mean : Returns the average of array elements along a given axis.\n Series.mean : Return the mean value in a Series.\n\n Notes\n -----\n mean is only defined for Datetime and Timedelta dtypes, not for Period.\n \"\"\"\n if is_period_dtype(self.dtype):\n # See discussion in GH#24757\n raise TypeError(\n f\"mean is not implemented for {type(self).__name__} since the \"\n \"meaning is ambiguous. 
An alternative is \"\n \"obj.to_timestamp(how='start').mean()\"\n )\n\n result = nanops.nanmean(\n self._ndarray, axis=axis, skipna=skipna, mask=self.isna()\n )\n return self._wrap_reduction_result(axis, result)\n\n def median(self, *, axis: int | None = None, skipna: bool = True, **kwargs):\n nv.validate_median((), kwargs)\n\n if axis is not None and abs(axis) >= self.ndim:\n raise ValueError(\"abs(axis) must be less than ndim\")\n\n if is_period_dtype(self.dtype):\n # pass datetime64 values to nanops to get correct NaT semantics\n result = nanops.nanmedian(\n self._ndarray.view(\"M8[ns]\"), axis=axis, skipna=skipna\n )\n result = result.view(\"i8\")\n if axis is None or self.ndim == 1:\n return self._box_func(result)\n return self._from_backing_data(result)\n\n result = nanops.nanmedian(self._ndarray, axis=axis, skipna=skipna)\n return self._wrap_reduction_result(axis, result)\n\n\nclass DatelikeOps(DatetimeLikeArrayMixin):\n \"\"\"\n Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex.\n \"\"\"\n\n @Substitution(\n URL=\"https://docs.python.org/3/library/datetime.html\"\n \"#strftime-and-strptime-behavior\"\n )\n def strftime(self, date_format: str) -> np.ndarray:\n \"\"\"\n Convert to Index using specified date_format.\n\n Return an Index of formatted strings specified by date_format, which\n supports the same string format as the python standard library. Details\n of the string format can be found in `python string format\n doc <%(URL)s>`__.\n\n Parameters\n ----------\n date_format : str\n Date format string (e.g. \"%%Y-%%m-%%d\").\n\n Returns\n -------\n ndarray\n NumPy ndarray of formatted strings.\n\n See Also\n --------\n to_datetime : Convert the given argument to datetime.\n DatetimeIndex.normalize : Return DatetimeIndex with times to midnight.\n DatetimeIndex.round : Round the DatetimeIndex to the specified freq.\n DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq.\n\n Examples\n --------\n >>> rng = pd.date_range(pd.Timestamp(\"2018-03-10 09:00\"),\n ... periods=3, freq='s')\n >>> rng.strftime('%%B %%d, %%Y, %%r')\n Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM',\n 'March 10, 2018, 09:00:02 AM'],\n dtype='object')\n \"\"\"\n result = self._format_native_types(date_format=date_format, na_rep=np.nan)\n return result.astype(object)\n\n\n_round_doc = \"\"\"\n Perform {op} operation on the data to the specified `freq`.\n\n Parameters\n ----------\n freq : str or Offset\n The frequency level to {op} the index to. Must be a fixed\n frequency like 'S' (second) not 'ME' (month end). 
See\n :ref:`frequency aliases <timeseries.offset_aliases>` for\n a list of possible `freq` values.\n ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'\n Only relevant for DatetimeIndex:\n\n - 'infer' will attempt to infer fall dst-transition hours based on\n order\n - bool-ndarray where True signifies a DST time, False designates\n a non-DST time (note that this flag is only applicable for\n ambiguous times)\n - 'NaT' will return NaT where there are ambiguous times\n - 'raise' will raise an AmbiguousTimeError if there are ambiguous\n times.\n\n nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, default 'raise'\n A nonexistent time does not exist in a particular timezone\n where clocks moved forward due to DST.\n\n - 'shift_forward' will shift the nonexistent time forward to the\n closest existing time\n - 'shift_backward' will shift the nonexistent time backward to the\n closest existing time\n - 'NaT' will return NaT where there are nonexistent times\n - timedelta objects will shift nonexistent times by the timedelta\n - 'raise' will raise an NonExistentTimeError if there are\n nonexistent times.\n\n Returns\n -------\n DatetimeIndex, TimedeltaIndex, or Series\n Index of the same type for a DatetimeIndex or TimedeltaIndex,\n or a Series with the same index for a Series.\n\n Raises\n ------\n ValueError if the `freq` cannot be converted.\n\n Examples\n --------\n **DatetimeIndex**\n\n >>> rng = pd.date_range('1/1/2018 11:59:00', periods=3, freq='min')\n >>> rng\n DatetimeIndex(['2018-01-01 11:59:00', '2018-01-01 12:00:00',\n '2018-01-01 12:01:00'],\n dtype='datetime64[ns]', freq='T')\n \"\"\"\n\n_round_example = \"\"\">>> rng.round('H')\n DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',\n '2018-01-01 12:00:00'],\n dtype='datetime64[ns]', freq=None)\n\n **Series**\n\n >>> pd.Series(rng).dt.round(\"H\")\n 0 2018-01-01 12:00:00\n 1 2018-01-01 12:00:00\n 2 2018-01-01 12:00:00\n dtype: datetime64[ns]\n \"\"\"\n\n_floor_example = \"\"\">>> rng.floor('H')\n DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00',\n '2018-01-01 12:00:00'],\n dtype='datetime64[ns]', freq=None)\n\n **Series**\n\n >>> pd.Series(rng).dt.floor(\"H\")\n 0 2018-01-01 11:00:00\n 1 2018-01-01 12:00:00\n 2 2018-01-01 12:00:00\n dtype: datetime64[ns]\n \"\"\"\n\n_ceil_example = \"\"\">>> rng.ceil('H')\n DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',\n '2018-01-01 13:00:00'],\n dtype='datetime64[ns]', freq=None)\n\n **Series**\n\n >>> pd.Series(rng).dt.ceil(\"H\")\n 0 2018-01-01 12:00:00\n 1 2018-01-01 12:00:00\n 2 2018-01-01 13:00:00\n dtype: datetime64[ns]\n \"\"\"\n\n\nTimelikeOpsT = TypeVar(\"TimelikeOpsT\", bound=\"TimelikeOps\")\n\n\nclass TimelikeOps(DatetimeLikeArrayMixin):\n \"\"\"\n Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex.\n \"\"\"\n\n def _round(self, freq, mode, ambiguous, nonexistent):\n # round the local times\n if is_datetime64tz_dtype(self.dtype):\n # operate on naive timestamps, then convert back to aware\n self = cast(\"DatetimeArray\", self)\n naive = self.tz_localize(None)\n result = naive._round(freq, mode, ambiguous, nonexistent)\n return result.tz_localize(\n self.tz, ambiguous=ambiguous, nonexistent=nonexistent\n )\n\n values = self.view(\"i8\")\n values = cast(np.ndarray, values)\n nanos = to_offset(freq).nanos\n result_i8 = round_nsint64(values, mode, nanos)\n result = self._maybe_mask_results(result_i8, fill_value=iNaT)\n result = result.view(self._ndarray.dtype)\n return self._simple_new(result, dtype=self.dtype)\n\n 
@Appender((_round_doc + _round_example).format(op=\"round\"))\n def round(self, freq, ambiguous=\"raise\", nonexistent=\"raise\"):\n return self._round(freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent)\n\n @Appender((_round_doc + _floor_example).format(op=\"floor\"))\n def floor(self, freq, ambiguous=\"raise\", nonexistent=\"raise\"):\n return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent)\n\n @Appender((_round_doc + _ceil_example).format(op=\"ceil\"))\n def ceil(self, freq, ambiguous=\"raise\", nonexistent=\"raise\"):\n return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent)\n\n # --------------------------------------------------------------\n # Reductions\n\n def any(self, *, axis: int | None = None, skipna: bool = True):\n # GH#34479 discussion of desired behavior long-term\n return nanops.nanany(self._ndarray, axis=axis, skipna=skipna, mask=self.isna())\n\n def all(self, *, axis: int | None = None, skipna: bool = True):\n # GH#34479 discussion of desired behavior long-term\n return nanops.nanall(self._ndarray, axis=axis, skipna=skipna, mask=self.isna())\n\n # --------------------------------------------------------------\n # Frequency Methods\n\n def _maybe_clear_freq(self) -> None:\n self._freq = None\n\n def _with_freq(self, freq):\n \"\"\"\n Helper to get a view on the same data, with a new freq.\n\n Parameters\n ----------\n freq : DateOffset, None, or \"infer\"\n\n Returns\n -------\n Same type as self\n \"\"\"\n # GH#29843\n if freq is None:\n # Always valid\n pass\n elif len(self) == 0 and isinstance(freq, BaseOffset):\n # Always valid. In the TimedeltaArray case, we assume this\n # is a Tick offset.\n pass\n else:\n # As an internal method, we can ensure this assertion always holds\n assert freq == \"infer\"\n freq = to_offset(self.inferred_freq)\n\n arr = self.view()\n arr._freq = freq\n return arr\n\n # --------------------------------------------------------------\n\n def factorize(self, na_sentinel=-1, sort: bool = False):\n if self.freq is not None:\n # We must be unique, so can short-circuit (and retain freq)\n codes = np.arange(len(self), dtype=np.intp)\n uniques = self.copy() # TODO: copy or view?\n if sort and self.freq.n < 0:\n codes = codes[::-1]\n # TODO: overload __getitem__, a slice indexer returns same type as self\n # error: Incompatible types in assignment (expression has type\n # \"Union[DatetimeLikeArrayMixin, Union[Any, Any]]\", variable\n # has type \"TimelikeOps\")\n uniques = uniques[::-1] # type: ignore[assignment]\n return codes, uniques\n # FIXME: shouldn't get here; we are ignoring sort\n return super().factorize(na_sentinel=na_sentinel)\n\n\n# -------------------------------------------------------------------\n# Shared Constructor Helpers\n\n\ndef validate_periods(periods):\n \"\"\"\n If a `periods` argument is passed to the Datetime/Timedelta Array/Index\n constructor, cast it to an integer.\n\n Parameters\n ----------\n periods : None, float, int\n\n Returns\n -------\n periods : None or int\n\n Raises\n ------\n TypeError\n if periods is None, float, or int\n \"\"\"\n if periods is not None:\n if lib.is_float(periods):\n periods = int(periods)\n elif not lib.is_integer(periods):\n raise TypeError(f\"periods must be a number, got {periods}\")\n return periods\n\n\ndef validate_endpoints(closed):\n \"\"\"\n Check that the `closed` argument is among [None, \"left\", \"right\"]\n\n Parameters\n ----------\n closed : {None, \"left\", \"right\"}\n\n Returns\n -------\n left_closed : bool\n right_closed : bool\n\n 
Raises\n ------\n ValueError : if argument is not among valid values\n \"\"\"\n left_closed = False\n right_closed = False\n\n if closed is None:\n left_closed = True\n right_closed = True\n elif closed == \"left\":\n left_closed = True\n elif closed == \"right\":\n right_closed = True\n else:\n raise ValueError(\"Closed has to be either 'left', 'right' or None\")\n\n return left_closed, right_closed\n\n\ndef validate_inferred_freq(freq, inferred_freq, freq_infer):\n \"\"\"\n If the user passes a freq and another freq is inferred from passed data,\n require that they match.\n\n Parameters\n ----------\n freq : DateOffset or None\n inferred_freq : DateOffset or None\n freq_infer : bool\n\n Returns\n -------\n freq : DateOffset or None\n freq_infer : bool\n\n Notes\n -----\n We assume at this point that `maybe_infer_freq` has been called, so\n `freq` is either a DateOffset object or None.\n \"\"\"\n if inferred_freq is not None:\n if freq is not None and freq != inferred_freq:\n raise ValueError(\n f\"Inferred frequency {inferred_freq} from passed \"\n \"values does not conform to passed frequency \"\n f\"{freq.freqstr}\"\n )\n elif freq is None:\n freq = inferred_freq\n freq_infer = False\n\n return freq, freq_infer\n\n\ndef maybe_infer_freq(freq):\n \"\"\"\n Comparing a DateOffset to the string \"infer\" raises, so we need to\n be careful about comparisons. Make a dummy variable `freq_infer` to\n signify the case where the given freq is \"infer\" and set freq to None\n to avoid comparison trouble later on.\n\n Parameters\n ----------\n freq : {DateOffset, None, str}\n\n Returns\n -------\n freq : {DateOffset, None}\n freq_infer : bool\n Whether we should inherit the freq of passed data.\n \"\"\"\n freq_infer = False\n if not isinstance(freq, BaseOffset):\n # if a passed freq is None, don't infer automatically\n if freq != \"infer\":\n freq = to_offset(freq)\n else:\n freq_infer = True\n freq = None\n return freq, freq_infer\n",
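The chunk above records pandas' `DatetimeLikeArrayMixin` source verbatim. The sketch below is not part of the stored file; it is a minimal illustration, assuming a pandas installation matching this source, of how the `__add__`, `TimelikeOps._round`, and `mean` paths defined there surface through the public `DatetimeIndex` API.

```python
# Illustrative only -- none of this comes from the dataset itself.
import pandas as pd

# Same values as the _round_doc example embedded in the source above.
dti = pd.date_range("2018-01-01 11:59:00", periods=3, freq="min")

# Adding a timedelta-like scalar routes through _add_timedeltalike_scalar;
# because the index freq is a Tick, the result keeps that frequency.
shifted = dti + pd.Timedelta(minutes=5)
print(shifted.freq)      # <Minute>

# TimelikeOps.round/floor/ceil snap each value to a fixed frequency.
print(dti.round("H"))    # all three timestamps land on 2018-01-01 12:00:00

# mean() is defined for datetime64/timedelta64 dtypes; for PeriodDtype the
# mixin raises TypeError, as its mean() docstring above notes.
print(dti.mean())        # Timestamp('2018-01-01 12:00:00')
```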
"\"\"\"\nTests of pandas.tseries.offsets\n\"\"\"\nfrom __future__ import annotations\n\nfrom datetime import (\n datetime,\n timedelta,\n)\nfrom typing import (\n Dict,\n List,\n Tuple,\n)\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs import (\n NaT,\n Timestamp,\n conversion,\n timezones,\n)\nimport pandas._libs.tslibs.offsets as liboffsets\nfrom pandas._libs.tslibs.offsets import (\n _get_offset,\n _offset_map,\n)\nfrom pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG\nfrom pandas.compat import np_datetime64_compat\nfrom pandas.errors import PerformanceWarning\n\nfrom pandas import DatetimeIndex\nimport pandas._testing as tm\nfrom pandas.tests.tseries.offsets.common import (\n Base,\n WeekDay,\n assert_offset_equal,\n)\n\nimport pandas.tseries.offsets as offsets\nfrom pandas.tseries.offsets import (\n FY5253,\n BaseOffset,\n BDay,\n BMonthEnd,\n BusinessHour,\n CustomBusinessDay,\n CustomBusinessHour,\n CustomBusinessMonthBegin,\n CustomBusinessMonthEnd,\n DateOffset,\n Day,\n Easter,\n FY5253Quarter,\n LastWeekOfMonth,\n MonthBegin,\n Nano,\n Tick,\n Week,\n WeekOfMonth,\n)\n\n_ApplyCases = List[Tuple[BaseOffset, Dict[datetime, datetime]]]\n\n\nclass TestCommon(Base):\n # executed value created by Base._get_offset\n # are applied to 2011/01/01 09:00 (Saturday)\n # used for .apply and .rollforward\n expecteds = {\n \"Day\": Timestamp(\"2011-01-02 09:00:00\"),\n \"DateOffset\": Timestamp(\"2011-01-02 09:00:00\"),\n \"BusinessDay\": Timestamp(\"2011-01-03 09:00:00\"),\n \"CustomBusinessDay\": Timestamp(\"2011-01-03 09:00:00\"),\n \"CustomBusinessMonthEnd\": Timestamp(\"2011-01-31 09:00:00\"),\n \"CustomBusinessMonthBegin\": Timestamp(\"2011-01-03 09:00:00\"),\n \"MonthBegin\": Timestamp(\"2011-02-01 09:00:00\"),\n \"BusinessMonthBegin\": Timestamp(\"2011-01-03 09:00:00\"),\n \"MonthEnd\": Timestamp(\"2011-01-31 09:00:00\"),\n \"SemiMonthEnd\": Timestamp(\"2011-01-15 09:00:00\"),\n \"SemiMonthBegin\": Timestamp(\"2011-01-15 09:00:00\"),\n \"BusinessMonthEnd\": Timestamp(\"2011-01-31 09:00:00\"),\n \"YearBegin\": Timestamp(\"2012-01-01 09:00:00\"),\n \"BYearBegin\": Timestamp(\"2011-01-03 09:00:00\"),\n \"YearEnd\": Timestamp(\"2011-12-31 09:00:00\"),\n \"BYearEnd\": Timestamp(\"2011-12-30 09:00:00\"),\n \"QuarterBegin\": Timestamp(\"2011-03-01 09:00:00\"),\n \"BQuarterBegin\": Timestamp(\"2011-03-01 09:00:00\"),\n \"QuarterEnd\": Timestamp(\"2011-03-31 09:00:00\"),\n \"BQuarterEnd\": Timestamp(\"2011-03-31 09:00:00\"),\n \"BusinessHour\": Timestamp(\"2011-01-03 10:00:00\"),\n \"CustomBusinessHour\": Timestamp(\"2011-01-03 10:00:00\"),\n \"WeekOfMonth\": Timestamp(\"2011-01-08 09:00:00\"),\n \"LastWeekOfMonth\": Timestamp(\"2011-01-29 09:00:00\"),\n \"FY5253Quarter\": Timestamp(\"2011-01-25 09:00:00\"),\n \"FY5253\": Timestamp(\"2011-01-25 09:00:00\"),\n \"Week\": Timestamp(\"2011-01-08 09:00:00\"),\n \"Easter\": Timestamp(\"2011-04-24 09:00:00\"),\n \"Hour\": Timestamp(\"2011-01-01 10:00:00\"),\n \"Minute\": Timestamp(\"2011-01-01 09:01:00\"),\n \"Second\": Timestamp(\"2011-01-01 09:00:01\"),\n \"Milli\": Timestamp(\"2011-01-01 09:00:00.001000\"),\n \"Micro\": Timestamp(\"2011-01-01 09:00:00.000001\"),\n \"Nano\": Timestamp(np_datetime64_compat(\"2011-01-01T09:00:00.000000001Z\")),\n }\n\n def test_immutable(self, offset_types):\n # GH#21341 check that __setattr__ raises\n offset = self._get_offset(offset_types)\n msg = \"objects is not writable|DateOffset objects are immutable\"\n with pytest.raises(AttributeError, match=msg):\n offset.normalize = True\n with 
pytest.raises(AttributeError, match=msg):\n offset.n = 91\n\n def test_return_type(self, offset_types):\n offset = self._get_offset(offset_types)\n\n # make sure that we are returning a Timestamp\n result = Timestamp(\"20080101\") + offset\n assert isinstance(result, Timestamp)\n\n # make sure that we are returning NaT\n assert NaT + offset is NaT\n assert offset + NaT is NaT\n\n assert NaT - offset is NaT\n assert (-offset).apply(NaT) is NaT\n\n def test_offset_n(self, offset_types):\n offset = self._get_offset(offset_types)\n assert offset.n == 1\n\n neg_offset = offset * -1\n assert neg_offset.n == -1\n\n mul_offset = offset * 3\n assert mul_offset.n == 3\n\n def test_offset_timedelta64_arg(self, offset_types):\n # check that offset._validate_n raises TypeError on a timedelt64\n # object\n off = self._get_offset(offset_types)\n\n td64 = np.timedelta64(4567, \"s\")\n with pytest.raises(TypeError, match=\"argument must be an integer\"):\n type(off)(n=td64, **off.kwds)\n\n def test_offset_mul_ndarray(self, offset_types):\n off = self._get_offset(offset_types)\n\n expected = np.array([[off, off * 2], [off * 3, off * 4]])\n\n result = np.array([[1, 2], [3, 4]]) * off\n tm.assert_numpy_array_equal(result, expected)\n\n result = off * np.array([[1, 2], [3, 4]])\n tm.assert_numpy_array_equal(result, expected)\n\n def test_offset_freqstr(self, offset_types):\n offset = self._get_offset(offset_types)\n\n freqstr = offset.freqstr\n if freqstr not in (\"<Easter>\", \"<DateOffset: days=1>\", \"LWOM-SAT\"):\n code = _get_offset(freqstr)\n assert offset.rule_code == code\n\n def _check_offsetfunc_works(self, offset, funcname, dt, expected, normalize=False):\n\n if normalize and issubclass(offset, Tick):\n # normalize=True disallowed for Tick subclasses GH#21427\n return\n\n offset_s = self._get_offset(offset, normalize=normalize)\n func = getattr(offset_s, funcname)\n\n result = func(dt)\n assert isinstance(result, Timestamp)\n assert result == expected\n\n result = func(Timestamp(dt))\n assert isinstance(result, Timestamp)\n assert result == expected\n\n # see gh-14101\n exp_warning = None\n ts = Timestamp(dt) + Nano(5)\n\n if (\n type(offset_s).__name__ == \"DateOffset\"\n and (funcname == \"apply\" or normalize)\n and ts.nanosecond > 0\n ):\n exp_warning = UserWarning\n\n # test nanosecond is preserved\n with tm.assert_produces_warning(exp_warning):\n result = func(ts)\n assert isinstance(result, Timestamp)\n if normalize is False:\n assert result == expected + Nano(5)\n else:\n assert result == expected\n\n if isinstance(dt, np.datetime64):\n # test tz when input is datetime or Timestamp\n return\n\n for tz in self.timezones:\n expected_localize = expected.tz_localize(tz)\n tz_obj = timezones.maybe_get_tz(tz)\n dt_tz = conversion.localize_pydatetime(dt, tz_obj)\n\n result = func(dt_tz)\n assert isinstance(result, Timestamp)\n assert result == expected_localize\n\n result = func(Timestamp(dt, tz=tz))\n assert isinstance(result, Timestamp)\n assert result == expected_localize\n\n # see gh-14101\n exp_warning = None\n ts = Timestamp(dt, tz=tz) + Nano(5)\n\n if (\n type(offset_s).__name__ == \"DateOffset\"\n and (funcname == \"apply\" or normalize)\n and ts.nanosecond > 0\n ):\n exp_warning = UserWarning\n\n # test nanosecond is preserved\n with tm.assert_produces_warning(exp_warning):\n result = func(ts)\n assert isinstance(result, Timestamp)\n if normalize is False:\n assert result == expected_localize + Nano(5)\n else:\n assert result == expected_localize\n\n def test_apply(self, offset_types):\n 
sdt = datetime(2011, 1, 1, 9, 0)\n ndt = np_datetime64_compat(\"2011-01-01 09:00Z\")\n\n for dt in [sdt, ndt]:\n expected = self.expecteds[offset_types.__name__]\n self._check_offsetfunc_works(offset_types, \"apply\", dt, expected)\n\n expected = Timestamp(expected.date())\n self._check_offsetfunc_works(\n offset_types, \"apply\", dt, expected, normalize=True\n )\n\n def test_rollforward(self, offset_types):\n expecteds = self.expecteds.copy()\n\n # result will not be changed if the target is on the offset\n no_changes = [\n \"Day\",\n \"MonthBegin\",\n \"SemiMonthBegin\",\n \"YearBegin\",\n \"Week\",\n \"Hour\",\n \"Minute\",\n \"Second\",\n \"Milli\",\n \"Micro\",\n \"Nano\",\n \"DateOffset\",\n ]\n for n in no_changes:\n expecteds[n] = Timestamp(\"2011/01/01 09:00\")\n\n expecteds[\"BusinessHour\"] = Timestamp(\"2011-01-03 09:00:00\")\n expecteds[\"CustomBusinessHour\"] = Timestamp(\"2011-01-03 09:00:00\")\n\n # but be changed when normalize=True\n norm_expected = expecteds.copy()\n for k in norm_expected:\n norm_expected[k] = Timestamp(norm_expected[k].date())\n\n normalized = {\n \"Day\": Timestamp(\"2011-01-02 00:00:00\"),\n \"DateOffset\": Timestamp(\"2011-01-02 00:00:00\"),\n \"MonthBegin\": Timestamp(\"2011-02-01 00:00:00\"),\n \"SemiMonthBegin\": Timestamp(\"2011-01-15 00:00:00\"),\n \"YearBegin\": Timestamp(\"2012-01-01 00:00:00\"),\n \"Week\": Timestamp(\"2011-01-08 00:00:00\"),\n \"Hour\": Timestamp(\"2011-01-01 00:00:00\"),\n \"Minute\": Timestamp(\"2011-01-01 00:00:00\"),\n \"Second\": Timestamp(\"2011-01-01 00:00:00\"),\n \"Milli\": Timestamp(\"2011-01-01 00:00:00\"),\n \"Micro\": Timestamp(\"2011-01-01 00:00:00\"),\n }\n norm_expected.update(normalized)\n\n sdt = datetime(2011, 1, 1, 9, 0)\n ndt = np_datetime64_compat(\"2011-01-01 09:00Z\")\n\n for dt in [sdt, ndt]:\n expected = expecteds[offset_types.__name__]\n self._check_offsetfunc_works(offset_types, \"rollforward\", dt, expected)\n expected = norm_expected[offset_types.__name__]\n self._check_offsetfunc_works(\n offset_types, \"rollforward\", dt, expected, normalize=True\n )\n\n def test_rollback(self, offset_types):\n expecteds = {\n \"BusinessDay\": Timestamp(\"2010-12-31 09:00:00\"),\n \"CustomBusinessDay\": Timestamp(\"2010-12-31 09:00:00\"),\n \"CustomBusinessMonthEnd\": Timestamp(\"2010-12-31 09:00:00\"),\n \"CustomBusinessMonthBegin\": Timestamp(\"2010-12-01 09:00:00\"),\n \"BusinessMonthBegin\": Timestamp(\"2010-12-01 09:00:00\"),\n \"MonthEnd\": Timestamp(\"2010-12-31 09:00:00\"),\n \"SemiMonthEnd\": Timestamp(\"2010-12-31 09:00:00\"),\n \"BusinessMonthEnd\": Timestamp(\"2010-12-31 09:00:00\"),\n \"BYearBegin\": Timestamp(\"2010-01-01 09:00:00\"),\n \"YearEnd\": Timestamp(\"2010-12-31 09:00:00\"),\n \"BYearEnd\": Timestamp(\"2010-12-31 09:00:00\"),\n \"QuarterBegin\": Timestamp(\"2010-12-01 09:00:00\"),\n \"BQuarterBegin\": Timestamp(\"2010-12-01 09:00:00\"),\n \"QuarterEnd\": Timestamp(\"2010-12-31 09:00:00\"),\n \"BQuarterEnd\": Timestamp(\"2010-12-31 09:00:00\"),\n \"BusinessHour\": Timestamp(\"2010-12-31 17:00:00\"),\n \"CustomBusinessHour\": Timestamp(\"2010-12-31 17:00:00\"),\n \"WeekOfMonth\": Timestamp(\"2010-12-11 09:00:00\"),\n \"LastWeekOfMonth\": Timestamp(\"2010-12-25 09:00:00\"),\n \"FY5253Quarter\": Timestamp(\"2010-10-26 09:00:00\"),\n \"FY5253\": Timestamp(\"2010-01-26 09:00:00\"),\n \"Easter\": Timestamp(\"2010-04-04 09:00:00\"),\n }\n\n # result will not be changed if the target is on the offset\n for n in [\n \"Day\",\n \"MonthBegin\",\n \"SemiMonthBegin\",\n \"YearBegin\",\n \"Week\",\n 
\"Hour\",\n \"Minute\",\n \"Second\",\n \"Milli\",\n \"Micro\",\n \"Nano\",\n \"DateOffset\",\n ]:\n expecteds[n] = Timestamp(\"2011/01/01 09:00\")\n\n # but be changed when normalize=True\n norm_expected = expecteds.copy()\n for k in norm_expected:\n norm_expected[k] = Timestamp(norm_expected[k].date())\n\n normalized = {\n \"Day\": Timestamp(\"2010-12-31 00:00:00\"),\n \"DateOffset\": Timestamp(\"2010-12-31 00:00:00\"),\n \"MonthBegin\": Timestamp(\"2010-12-01 00:00:00\"),\n \"SemiMonthBegin\": Timestamp(\"2010-12-15 00:00:00\"),\n \"YearBegin\": Timestamp(\"2010-01-01 00:00:00\"),\n \"Week\": Timestamp(\"2010-12-25 00:00:00\"),\n \"Hour\": Timestamp(\"2011-01-01 00:00:00\"),\n \"Minute\": Timestamp(\"2011-01-01 00:00:00\"),\n \"Second\": Timestamp(\"2011-01-01 00:00:00\"),\n \"Milli\": Timestamp(\"2011-01-01 00:00:00\"),\n \"Micro\": Timestamp(\"2011-01-01 00:00:00\"),\n }\n norm_expected.update(normalized)\n\n sdt = datetime(2011, 1, 1, 9, 0)\n ndt = np_datetime64_compat(\"2011-01-01 09:00Z\")\n\n for dt in [sdt, ndt]:\n expected = expecteds[offset_types.__name__]\n self._check_offsetfunc_works(offset_types, \"rollback\", dt, expected)\n\n expected = norm_expected[offset_types.__name__]\n self._check_offsetfunc_works(\n offset_types, \"rollback\", dt, expected, normalize=True\n )\n\n def test_is_on_offset(self, offset_types):\n dt = self.expecteds[offset_types.__name__]\n offset_s = self._get_offset(offset_types)\n assert offset_s.is_on_offset(dt)\n\n # when normalize=True, is_on_offset checks time is 00:00:00\n if issubclass(offset_types, Tick):\n # normalize=True disallowed for Tick subclasses GH#21427\n return\n offset_n = self._get_offset(offset_types, normalize=True)\n assert not offset_n.is_on_offset(dt)\n\n if offset_types in (BusinessHour, CustomBusinessHour):\n # In default BusinessHour (9:00-17:00), normalized time\n # cannot be in business hour range\n return\n date = datetime(dt.year, dt.month, dt.day)\n assert offset_n.is_on_offset(date)\n\n def test_add(self, offset_types, tz_naive_fixture):\n tz = tz_naive_fixture\n dt = datetime(2011, 1, 1, 9, 0)\n\n offset_s = self._get_offset(offset_types)\n expected = self.expecteds[offset_types.__name__]\n\n result_dt = dt + offset_s\n result_ts = Timestamp(dt) + offset_s\n for result in [result_dt, result_ts]:\n assert isinstance(result, Timestamp)\n assert result == expected\n\n expected_localize = expected.tz_localize(tz)\n result = Timestamp(dt, tz=tz) + offset_s\n assert isinstance(result, Timestamp)\n assert result == expected_localize\n\n # normalize=True, disallowed for Tick subclasses GH#21427\n if issubclass(offset_types, Tick):\n return\n offset_s = self._get_offset(offset_types, normalize=True)\n expected = Timestamp(expected.date())\n\n result_dt = dt + offset_s\n result_ts = Timestamp(dt) + offset_s\n for result in [result_dt, result_ts]:\n assert isinstance(result, Timestamp)\n assert result == expected\n\n expected_localize = expected.tz_localize(tz)\n result = Timestamp(dt, tz=tz) + offset_s\n assert isinstance(result, Timestamp)\n assert result == expected_localize\n\n def test_add_empty_datetimeindex(self, offset_types, tz_naive_fixture):\n # GH#12724, GH#30336\n offset_s = self._get_offset(offset_types)\n\n dti = DatetimeIndex([], tz=tz_naive_fixture)\n\n warn = None\n if isinstance(\n offset_s,\n (\n Easter,\n WeekOfMonth,\n LastWeekOfMonth,\n CustomBusinessDay,\n BusinessHour,\n CustomBusinessHour,\n CustomBusinessMonthBegin,\n CustomBusinessMonthEnd,\n FY5253,\n FY5253Quarter,\n ),\n ):\n # We don't have an 
optimized apply_index\n warn = PerformanceWarning\n\n with tm.assert_produces_warning(warn):\n result = dti + offset_s\n tm.assert_index_equal(result, dti)\n with tm.assert_produces_warning(warn):\n result = offset_s + dti\n tm.assert_index_equal(result, dti)\n\n dta = dti._data\n with tm.assert_produces_warning(warn):\n result = dta + offset_s\n tm.assert_equal(result, dta)\n with tm.assert_produces_warning(warn):\n result = offset_s + dta\n tm.assert_equal(result, dta)\n\n def test_pickle_roundtrip(self, offset_types):\n off = self._get_offset(offset_types)\n res = tm.round_trip_pickle(off)\n assert off == res\n if type(off) is not DateOffset:\n for attr in off._attributes:\n if attr == \"calendar\":\n # np.busdaycalendar __eq__ will return False;\n # we check holidays and weekmask attrs so are OK\n continue\n # Make sure nothings got lost from _params (which __eq__) is based on\n assert getattr(off, attr) == getattr(res, attr)\n\n def test_pickle_dateoffset_odd_inputs(self):\n # GH#34511\n off = DateOffset(months=12)\n res = tm.round_trip_pickle(off)\n assert off == res\n\n base_dt = datetime(2020, 1, 1)\n assert base_dt + off == base_dt + res\n\n def test_onOffset_deprecated(self, offset_types):\n # GH#30340 use idiomatic naming\n off = self._get_offset(offset_types)\n\n ts = Timestamp.now()\n with tm.assert_produces_warning(FutureWarning):\n result = off.onOffset(ts)\n\n expected = off.is_on_offset(ts)\n assert result == expected\n\n def test_isAnchored_deprecated(self, offset_types):\n # GH#30340 use idiomatic naming\n off = self._get_offset(offset_types)\n\n with tm.assert_produces_warning(FutureWarning):\n result = off.isAnchored()\n\n expected = off.is_anchored()\n assert result == expected\n\n def test_offsets_hashable(self, offset_types):\n # GH: 37267\n off = self._get_offset(offset_types)\n assert hash(off) is not None\n\n\nclass TestDateOffset(Base):\n def setup_method(self, method):\n self.d = Timestamp(datetime(2008, 1, 2))\n _offset_map.clear()\n\n def test_repr(self):\n repr(DateOffset())\n repr(DateOffset(2))\n repr(2 * DateOffset())\n repr(2 * DateOffset(months=2))\n\n def test_mul(self):\n assert DateOffset(2) == 2 * DateOffset(1)\n assert DateOffset(2) == DateOffset(1) * 2\n\n def test_constructor(self):\n\n assert (self.d + DateOffset(months=2)) == datetime(2008, 3, 2)\n assert (self.d - DateOffset(months=2)) == datetime(2007, 11, 2)\n\n assert (self.d + DateOffset(2)) == datetime(2008, 1, 4)\n\n assert not DateOffset(2).is_anchored()\n assert DateOffset(1).is_anchored()\n\n d = datetime(2008, 1, 31)\n assert (d + DateOffset(months=1)) == datetime(2008, 2, 29)\n\n def test_copy(self):\n assert DateOffset(months=2).copy() == DateOffset(months=2)\n\n def test_eq(self):\n offset1 = DateOffset(days=1)\n offset2 = DateOffset(days=365)\n\n assert offset1 != offset2\n\n\ndef test_Easter():\n assert_offset_equal(Easter(), datetime(2010, 1, 1), datetime(2010, 4, 4))\n assert_offset_equal(Easter(), datetime(2010, 4, 5), datetime(2011, 4, 24))\n assert_offset_equal(Easter(2), datetime(2010, 1, 1), datetime(2011, 4, 24))\n\n assert_offset_equal(Easter(), datetime(2010, 4, 4), datetime(2011, 4, 24))\n assert_offset_equal(Easter(2), datetime(2010, 4, 4), datetime(2012, 4, 8))\n\n assert_offset_equal(-Easter(), datetime(2011, 1, 1), datetime(2010, 4, 4))\n assert_offset_equal(-Easter(), datetime(2010, 4, 5), datetime(2010, 4, 4))\n assert_offset_equal(-Easter(2), datetime(2011, 1, 1), datetime(2009, 4, 12))\n\n assert_offset_equal(-Easter(), datetime(2010, 4, 4), datetime(2009, 4, 
12))\n assert_offset_equal(-Easter(2), datetime(2010, 4, 4), datetime(2008, 3, 23))\n\n\nclass TestOffsetNames:\n def test_get_offset_name(self):\n assert BDay().freqstr == \"B\"\n assert BDay(2).freqstr == \"2B\"\n assert BMonthEnd().freqstr == \"BM\"\n assert Week(weekday=0).freqstr == \"W-MON\"\n assert Week(weekday=1).freqstr == \"W-TUE\"\n assert Week(weekday=2).freqstr == \"W-WED\"\n assert Week(weekday=3).freqstr == \"W-THU\"\n assert Week(weekday=4).freqstr == \"W-FRI\"\n\n assert LastWeekOfMonth(weekday=WeekDay.SUN).freqstr == \"LWOM-SUN\"\n\n\ndef test_get_offset():\n with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):\n _get_offset(\"gibberish\")\n with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):\n _get_offset(\"QS-JAN-B\")\n\n pairs = [\n (\"B\", BDay()),\n (\"b\", BDay()),\n (\"bm\", BMonthEnd()),\n (\"Bm\", BMonthEnd()),\n (\"W-MON\", Week(weekday=0)),\n (\"W-TUE\", Week(weekday=1)),\n (\"W-WED\", Week(weekday=2)),\n (\"W-THU\", Week(weekday=3)),\n (\"W-FRI\", Week(weekday=4)),\n ]\n\n for name, expected in pairs:\n offset = _get_offset(name)\n assert offset == expected, (\n f\"Expected {repr(name)} to yield {repr(expected)} \"\n f\"(actual: {repr(offset)})\"\n )\n\n\ndef test_get_offset_legacy():\n pairs = [(\"w@Sat\", Week(weekday=5))]\n for name, expected in pairs:\n with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):\n _get_offset(name)\n\n\nclass TestOffsetAliases:\n def setup_method(self, method):\n _offset_map.clear()\n\n def test_alias_equality(self):\n for k, v in _offset_map.items():\n if v is None:\n continue\n assert k == v.copy()\n\n def test_rule_code(self):\n lst = [\"M\", \"MS\", \"BM\", \"BMS\", \"D\", \"B\", \"H\", \"T\", \"S\", \"L\", \"U\"]\n for k in lst:\n assert k == _get_offset(k).rule_code\n # should be cached - this is kind of an internals test...\n assert k in _offset_map\n assert k == (_get_offset(k) * 3).rule_code\n\n suffix_lst = [\"MON\", \"TUE\", \"WED\", \"THU\", \"FRI\", \"SAT\", \"SUN\"]\n base = \"W\"\n for v in suffix_lst:\n alias = \"-\".join([base, v])\n assert alias == _get_offset(alias).rule_code\n assert alias == (_get_offset(alias) * 5).rule_code\n\n suffix_lst = [\n \"JAN\",\n \"FEB\",\n \"MAR\",\n \"APR\",\n \"MAY\",\n \"JUN\",\n \"JUL\",\n \"AUG\",\n \"SEP\",\n \"OCT\",\n \"NOV\",\n \"DEC\",\n ]\n base_lst = [\"A\", \"AS\", \"BA\", \"BAS\", \"Q\", \"QS\", \"BQ\", \"BQS\"]\n for base in base_lst:\n for v in suffix_lst:\n alias = \"-\".join([base, v])\n assert alias == _get_offset(alias).rule_code\n assert alias == (_get_offset(alias) * 5).rule_code\n\n\ndef test_dateoffset_misc():\n oset = offsets.DateOffset(months=2, days=4)\n # it works\n oset.freqstr\n\n assert not offsets.DateOffset(months=2) == 2\n\n\ndef test_freq_offsets():\n off = BDay(1, offset=timedelta(0, 1800))\n assert off.freqstr == \"B+30Min\"\n\n off = BDay(1, offset=timedelta(0, -1800))\n assert off.freqstr == \"B-30Min\"\n\n\nclass TestReprNames:\n def test_str_for_named_is_name(self):\n # look at all the amazing combinations!\n month_prefixes = [\"A\", \"AS\", \"BA\", \"BAS\", \"Q\", \"BQ\", \"BQS\", \"QS\"]\n names = [\n prefix + \"-\" + month\n for prefix in month_prefixes\n for month in [\n \"JAN\",\n \"FEB\",\n \"MAR\",\n \"APR\",\n \"MAY\",\n \"JUN\",\n \"JUL\",\n \"AUG\",\n \"SEP\",\n \"OCT\",\n \"NOV\",\n \"DEC\",\n ]\n ]\n days = [\"MON\", \"TUE\", \"WED\", \"THU\", \"FRI\", \"SAT\", \"SUN\"]\n names += [\"W-\" + day for day in days]\n names += [\"WOM-\" + week + day for week in (\"1\", \"2\", \"3\", \"4\") for day in days]\n 
_offset_map.clear()\n for name in names:\n offset = _get_offset(name)\n assert offset.freqstr == name\n\n\ndef get_utc_offset_hours(ts):\n # take a Timestamp and compute total hours of utc offset\n o = ts.utcoffset()\n return (o.days * 24 * 3600 + o.seconds) / 3600.0\n\n\n# ---------------------------------------------------------------------\n\n\ndef test_valid_default_arguments(offset_types):\n # GH#19142 check that the calling the constructors without passing\n # any keyword arguments produce valid offsets\n cls = offset_types\n cls()\n\n\[email protected](\"kwd\", sorted(liboffsets._relativedelta_kwds))\ndef test_valid_month_attributes(kwd, month_classes):\n # GH#18226\n cls = month_classes\n # check that we cannot create e.g. MonthEnd(weeks=3)\n msg = rf\"__init__\\(\\) got an unexpected keyword argument '{kwd}'\"\n with pytest.raises(TypeError, match=msg):\n cls(**{kwd: 3})\n\n\ndef test_month_offset_name(month_classes):\n # GH#33757 off.name with n != 1 should not raise AttributeError\n obj = month_classes(1)\n obj2 = month_classes(2)\n assert obj2.name == obj.name\n\n\[email protected](\"kwd\", sorted(liboffsets._relativedelta_kwds))\ndef test_valid_relativedelta_kwargs(kwd):\n # Check that all the arguments specified in liboffsets._relativedelta_kwds\n # are in fact valid relativedelta keyword args\n DateOffset(**{kwd: 1})\n\n\[email protected](\"kwd\", sorted(liboffsets._relativedelta_kwds))\ndef test_valid_tick_attributes(kwd, tick_classes):\n # GH#18226\n cls = tick_classes\n # check that we cannot create e.g. Hour(weeks=3)\n msg = rf\"__init__\\(\\) got an unexpected keyword argument '{kwd}'\"\n with pytest.raises(TypeError, match=msg):\n cls(**{kwd: 3})\n\n\ndef test_validate_n_error():\n with pytest.raises(TypeError, match=\"argument must be an integer\"):\n DateOffset(n=\"Doh!\")\n\n with pytest.raises(TypeError, match=\"argument must be an integer\"):\n MonthBegin(n=timedelta(1))\n\n with pytest.raises(TypeError, match=\"argument must be an integer\"):\n BDay(n=np.array([1, 2], dtype=np.int64))\n\n\ndef test_require_integers(offset_types):\n cls = offset_types\n with pytest.raises(ValueError, match=\"argument must be an integer\"):\n cls(n=1.5)\n\n\ndef test_tick_normalize_raises(tick_classes):\n # check that trying to create a Tick object with normalize=True raises\n # GH#21427\n cls = tick_classes\n msg = \"Tick offset with `normalize=True` are not allowed.\"\n with pytest.raises(ValueError, match=msg):\n cls(n=3, normalize=True)\n\n\ndef test_weeks_onoffset():\n # GH#18510 Week with weekday = None, normalize = False should always\n # be is_on_offset\n offset = Week(n=2, weekday=None)\n ts = Timestamp(\"1862-01-13 09:03:34.873477378+0210\", tz=\"Africa/Lusaka\")\n fast = offset.is_on_offset(ts)\n slow = (ts + offset) - offset == ts\n assert fast == slow\n\n # negative n\n offset = Week(n=2, weekday=None)\n ts = Timestamp(\"1856-10-24 16:18:36.556360110-0717\", tz=\"Pacific/Easter\")\n fast = offset.is_on_offset(ts)\n slow = (ts + offset) - offset == ts\n assert fast == slow\n\n\ndef test_weekofmonth_onoffset():\n # GH#18864\n # Make sure that nanoseconds don't trip up is_on_offset (and with it apply)\n offset = WeekOfMonth(n=2, week=2, weekday=0)\n ts = Timestamp(\"1916-05-15 01:14:49.583410462+0422\", tz=\"Asia/Qyzylorda\")\n fast = offset.is_on_offset(ts)\n slow = (ts + offset) - offset == ts\n assert fast == slow\n\n # negative n\n offset = WeekOfMonth(n=-3, week=1, weekday=0)\n ts = Timestamp(\"1980-12-08 03:38:52.878321185+0500\", tz=\"Asia/Oral\")\n fast = 
offset.is_on_offset(ts)\n slow = (ts + offset) - offset == ts\n assert fast == slow\n\n\ndef test_last_week_of_month_on_offset():\n # GH#19036, GH#18977 _adjust_dst was incorrect for LastWeekOfMonth\n offset = LastWeekOfMonth(n=4, weekday=6)\n ts = Timestamp(\"1917-05-27 20:55:27.084284178+0200\", tz=\"Europe/Warsaw\")\n slow = (ts + offset) - offset == ts\n fast = offset.is_on_offset(ts)\n assert fast == slow\n\n # negative n\n offset = LastWeekOfMonth(n=-4, weekday=5)\n ts = Timestamp(\"2005-08-27 05:01:42.799392561-0500\", tz=\"America/Rainy_River\")\n slow = (ts + offset) - offset == ts\n fast = offset.is_on_offset(ts)\n assert fast == slow\n\n\ndef test_week_add_invalid():\n # Week with weekday should raise TypeError and _not_ AttributeError\n # when adding invalid offset\n offset = Week(weekday=1)\n other = Day()\n with pytest.raises(TypeError, match=\"Cannot add\"):\n offset + other\n\n\[email protected](\n \"attribute\",\n [\n \"hours\",\n \"days\",\n \"weeks\",\n \"months\",\n \"years\",\n ],\n)\ndef test_dateoffset_immutable(attribute):\n offset = DateOffset(**{attribute: 0})\n msg = \"DateOffset objects are immutable\"\n with pytest.raises(AttributeError, match=msg):\n setattr(offset, attribute, 5)\n\n\[email protected](\n \"weekmask, expected_time, mult\",\n [\n [\"Mon Tue Wed Thu Fri Sat\", \"2018-11-10 09:00:00\", 10],\n [\"Tue Wed Thu Fri Sat\", \"2018-11-13 08:00:00\", 18],\n ],\n)\ndef test_custom_businesshour_weekmask_and_holidays(weekmask, expected_time, mult):\n # GH 23542\n holidays = [\"2018-11-09\"]\n bh = CustomBusinessHour(\n start=\"08:00\", end=\"17:00\", weekmask=weekmask, holidays=holidays\n )\n result = Timestamp(\"2018-11-08 08:00\") + mult * bh\n expected = Timestamp(expected_time)\n assert result == expected\n",
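The offsets test module above is stored as-is. The following sketch is illustrative only (not part of the recorded test file); it mirrors, with public `pandas.tseries.offsets` calls, the `expecteds` values that `TestCommon.test_apply`, `test_rollback`, and `TestOffsetNames` assert.

```python
# Illustrative only; values mirror the expecteds tables in TestCommon above.
from datetime import datetime

from pandas.tseries.offsets import BDay, MonthEnd, Week

anchor = datetime(2011, 1, 1, 9, 0)   # the Saturday 09:00 anchor used in the tests

print(anchor + BDay())              # 2011-01-03 09:00:00, next business day
print(anchor + MonthEnd())          # 2011-01-31 09:00:00
print(MonthEnd().rollback(anchor))  # 2010-12-31 09:00:00, as in test_rollback
print(Week(weekday=0).freqstr)      # 'W-MON', the rule code TestOffsetNames checks
```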
"import numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.common import is_integer_dtype\n\nimport pandas as pd\nimport pandas._testing as tm\n\narrays = [pd.array([1, 2, 3, None], dtype=dtype) for dtype in tm.ALL_EA_INT_DTYPES]\narrays += [\n pd.array([0.141, -0.268, 5.895, None], dtype=dtype) for dtype in tm.FLOAT_EA_DTYPES\n]\n\n\[email protected](params=arrays, ids=[a.dtype.name for a in arrays])\ndef data(request):\n return request.param\n\n\[email protected]()\ndef numpy_dtype(data):\n # For integer dtype, the numpy conversion must be done to float\n if is_integer_dtype(data):\n numpy_dtype = float\n else:\n numpy_dtype = data.dtype.type\n return numpy_dtype\n\n\ndef test_round(data, numpy_dtype):\n # No arguments\n result = data.round()\n expected = pd.array(\n np.round(data.to_numpy(dtype=numpy_dtype, na_value=None)), dtype=data.dtype\n )\n tm.assert_extension_array_equal(result, expected)\n\n # Decimals argument\n result = data.round(decimals=2)\n expected = pd.array(\n np.round(data.to_numpy(dtype=numpy_dtype, na_value=None), decimals=2),\n dtype=data.dtype,\n )\n tm.assert_extension_array_equal(result, expected)\n",
"from pathlib import Path\nimport re\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs import Timestamp\nfrom pandas.compat import is_platform_windows\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n HDFStore,\n Index,\n Series,\n _testing as tm,\n read_hdf,\n)\nfrom pandas.tests.io.pytables.common import (\n _maybe_remove,\n ensure_clean_path,\n ensure_clean_store,\n)\nfrom pandas.util import _test_decorators as td\n\nfrom pandas.io.pytables import TableIterator\n\npytestmark = pytest.mark.single\n\n\ndef test_read_missing_key_close_store(setup_path):\n # GH 25766\n with ensure_clean_path(setup_path) as path:\n df = DataFrame({\"a\": range(2), \"b\": range(2)})\n df.to_hdf(path, \"k1\")\n\n with pytest.raises(KeyError, match=\"'No object named k2 in the file'\"):\n read_hdf(path, \"k2\")\n\n # smoke test to test that file is properly closed after\n # read with KeyError before another write\n df.to_hdf(path, \"k2\")\n\n\ndef test_read_missing_key_opened_store(setup_path):\n # GH 28699\n with ensure_clean_path(setup_path) as path:\n df = DataFrame({\"a\": range(2), \"b\": range(2)})\n df.to_hdf(path, \"k1\")\n\n with HDFStore(path, \"r\") as store:\n\n with pytest.raises(KeyError, match=\"'No object named k2 in the file'\"):\n read_hdf(store, \"k2\")\n\n # Test that the file is still open after a KeyError and that we can\n # still read from it.\n read_hdf(store, \"k1\")\n\n\ndef test_read_column(setup_path):\n\n df = tm.makeTimeDataFrame()\n\n with ensure_clean_store(setup_path) as store:\n _maybe_remove(store, \"df\")\n\n # GH 17912\n # HDFStore.select_column should raise a KeyError\n # exception if the key is not a valid store\n with pytest.raises(KeyError, match=\"No object named df in the file\"):\n store.select_column(\"df\", \"index\")\n\n store.append(\"df\", df)\n # error\n with pytest.raises(\n KeyError, match=re.escape(\"'column [foo] not found in the table'\")\n ):\n store.select_column(\"df\", \"foo\")\n\n msg = re.escape(\"select_column() got an unexpected keyword argument 'where'\")\n with pytest.raises(TypeError, match=msg):\n store.select_column(\"df\", \"index\", where=[\"index>5\"])\n\n # valid\n result = store.select_column(\"df\", \"index\")\n tm.assert_almost_equal(result.values, Series(df.index).values)\n assert isinstance(result, Series)\n\n # not a data indexable column\n msg = re.escape(\n \"column [values_block_0] can not be extracted individually; \"\n \"it is not data indexable\"\n )\n with pytest.raises(ValueError, match=msg):\n store.select_column(\"df\", \"values_block_0\")\n\n # a data column\n df2 = df.copy()\n df2[\"string\"] = \"foo\"\n store.append(\"df2\", df2, data_columns=[\"string\"])\n result = store.select_column(\"df2\", \"string\")\n tm.assert_almost_equal(result.values, df2[\"string\"].values)\n\n # a data column with NaNs, result excludes the NaNs\n df3 = df.copy()\n df3[\"string\"] = \"foo\"\n df3.loc[df3.index[4:6], \"string\"] = np.nan\n store.append(\"df3\", df3, data_columns=[\"string\"])\n result = store.select_column(\"df3\", \"string\")\n tm.assert_almost_equal(result.values, df3[\"string\"].values)\n\n # start/stop\n result = store.select_column(\"df3\", \"string\", start=2)\n tm.assert_almost_equal(result.values, df3[\"string\"].values[2:])\n\n result = store.select_column(\"df3\", \"string\", start=-2)\n tm.assert_almost_equal(result.values, df3[\"string\"].values[-2:])\n\n result = store.select_column(\"df3\", \"string\", stop=2)\n tm.assert_almost_equal(result.values, df3[\"string\"].values[:2])\n\n result 
= store.select_column(\"df3\", \"string\", stop=-2)\n tm.assert_almost_equal(result.values, df3[\"string\"].values[:-2])\n\n result = store.select_column(\"df3\", \"string\", start=2, stop=-2)\n tm.assert_almost_equal(result.values, df3[\"string\"].values[2:-2])\n\n result = store.select_column(\"df3\", \"string\", start=-2, stop=2)\n tm.assert_almost_equal(result.values, df3[\"string\"].values[-2:2])\n\n # GH 10392 - make sure column name is preserved\n df4 = DataFrame({\"A\": np.random.randn(10), \"B\": \"foo\"})\n store.append(\"df4\", df4, data_columns=True)\n expected = df4[\"B\"]\n result = store.select_column(\"df4\", \"B\")\n tm.assert_series_equal(result, expected)\n\n\ndef test_pytables_native_read(datapath, setup_path):\n with ensure_clean_store(\n datapath(\"io\", \"data\", \"legacy_hdf/pytables_native.h5\"), mode=\"r\"\n ) as store:\n d2 = store[\"detector/readout\"]\n assert isinstance(d2, DataFrame)\n\n\[email protected](is_platform_windows(), reason=\"native2 read fails oddly on windows\")\ndef test_pytables_native2_read(datapath, setup_path):\n with ensure_clean_store(\n datapath(\"io\", \"data\", \"legacy_hdf\", \"pytables_native2.h5\"), mode=\"r\"\n ) as store:\n str(store)\n d1 = store[\"detector\"]\n assert isinstance(d1, DataFrame)\n\n\ndef test_legacy_table_fixed_format_read_py2(datapath, setup_path):\n # GH 24510\n # legacy table with fixed format written in Python 2\n with ensure_clean_store(\n datapath(\"io\", \"data\", \"legacy_hdf\", \"legacy_table_fixed_py2.h5\"), mode=\"r\"\n ) as store:\n result = store.select(\"df\")\n expected = DataFrame(\n [[1, 2, 3, \"D\"]],\n columns=[\"A\", \"B\", \"C\", \"D\"],\n index=Index([\"ABC\"], name=\"INDEX_NAME\"),\n )\n tm.assert_frame_equal(expected, result)\n\n\ndef test_legacy_table_fixed_format_read_datetime_py2(datapath, setup_path):\n # GH 31750\n # legacy table with fixed format and datetime64 column written in Python 2\n with ensure_clean_store(\n datapath(\"io\", \"data\", \"legacy_hdf\", \"legacy_table_fixed_datetime_py2.h5\"),\n mode=\"r\",\n ) as store:\n result = store.select(\"df\")\n expected = DataFrame(\n [[Timestamp(\"2020-02-06T18:00\")]],\n columns=[\"A\"],\n index=Index([\"date\"]),\n )\n tm.assert_frame_equal(expected, result)\n\n\ndef test_legacy_table_read_py2(datapath, setup_path):\n # issue: 24925\n # legacy table written in Python 2\n with ensure_clean_store(\n datapath(\"io\", \"data\", \"legacy_hdf\", \"legacy_table_py2.h5\"), mode=\"r\"\n ) as store:\n result = store.select(\"table\")\n\n expected = DataFrame({\"a\": [\"a\", \"b\"], \"b\": [2, 3]})\n tm.assert_frame_equal(expected, result)\n\n\ndef test_read_hdf_open_store(setup_path):\n # GH10330\n # No check for non-string path_or-buf, and no test of open store\n df = DataFrame(np.random.rand(4, 5), index=list(\"abcd\"), columns=list(\"ABCDE\"))\n df.index.name = \"letters\"\n df = df.set_index(keys=\"E\", append=True)\n\n with ensure_clean_path(setup_path) as path:\n df.to_hdf(path, \"df\", mode=\"w\")\n direct = read_hdf(path, \"df\")\n store = HDFStore(path, mode=\"r\")\n indirect = read_hdf(store, \"df\")\n tm.assert_frame_equal(direct, indirect)\n assert store.is_open\n store.close()\n\n\ndef test_read_hdf_iterator(setup_path):\n df = DataFrame(np.random.rand(4, 5), index=list(\"abcd\"), columns=list(\"ABCDE\"))\n df.index.name = \"letters\"\n df = df.set_index(keys=\"E\", append=True)\n\n with ensure_clean_path(setup_path) as path:\n df.to_hdf(path, \"df\", mode=\"w\", format=\"t\")\n direct = read_hdf(path, \"df\")\n iterator = 
read_hdf(path, \"df\", iterator=True)\n assert isinstance(iterator, TableIterator)\n indirect = next(iterator.__iter__())\n tm.assert_frame_equal(direct, indirect)\n iterator.store.close()\n\n\ndef test_read_nokey(setup_path):\n # GH10443\n df = DataFrame(np.random.rand(4, 5), index=list(\"abcd\"), columns=list(\"ABCDE\"))\n\n # Categorical dtype not supported for \"fixed\" format. So no need\n # to test with that dtype in the dataframe here.\n with ensure_clean_path(setup_path) as path:\n df.to_hdf(path, \"df\", mode=\"a\")\n reread = read_hdf(path)\n tm.assert_frame_equal(df, reread)\n df.to_hdf(path, \"df2\", mode=\"a\")\n\n msg = \"key must be provided when HDF5 file contains multiple datasets.\"\n with pytest.raises(ValueError, match=msg):\n read_hdf(path)\n\n\ndef test_read_nokey_table(setup_path):\n # GH13231\n df = DataFrame({\"i\": range(5), \"c\": Series(list(\"abacd\"), dtype=\"category\")})\n\n with ensure_clean_path(setup_path) as path:\n df.to_hdf(path, \"df\", mode=\"a\", format=\"table\")\n reread = read_hdf(path)\n tm.assert_frame_equal(df, reread)\n df.to_hdf(path, \"df2\", mode=\"a\", format=\"table\")\n\n msg = \"key must be provided when HDF5 file contains multiple datasets.\"\n with pytest.raises(ValueError, match=msg):\n read_hdf(path)\n\n\ndef test_read_nokey_empty(setup_path):\n with ensure_clean_path(setup_path) as path:\n store = HDFStore(path)\n store.close()\n msg = re.escape(\n \"Dataset(s) incompatible with Pandas data types, not table, or no \"\n \"datasets found in HDF5 file.\"\n )\n with pytest.raises(ValueError, match=msg):\n read_hdf(path)\n\n\ndef test_read_from_pathlib_path(setup_path):\n\n # GH11773\n expected = DataFrame(\n np.random.rand(4, 5), index=list(\"abcd\"), columns=list(\"ABCDE\")\n )\n with ensure_clean_path(setup_path) as filename:\n path_obj = Path(filename)\n\n expected.to_hdf(path_obj, \"df\", mode=\"a\")\n actual = read_hdf(path_obj, \"df\")\n\n tm.assert_frame_equal(expected, actual)\n\n\[email protected]_if_no(\"py.path\")\ndef test_read_from_py_localpath(setup_path):\n\n # GH11773\n from py.path import local as LocalPath\n\n expected = DataFrame(\n np.random.rand(4, 5), index=list(\"abcd\"), columns=list(\"ABCDE\")\n )\n with ensure_clean_path(setup_path) as filename:\n path_obj = LocalPath(filename)\n\n expected.to_hdf(path_obj, \"df\", mode=\"a\")\n actual = read_hdf(path_obj, \"df\")\n\n tm.assert_frame_equal(expected, actual)\n\n\[email protected](\"format\", [\"fixed\", \"table\"])\ndef test_read_hdf_series_mode_r(format, setup_path):\n # GH 16583\n # Tests that reading a Series saved to an HDF file\n # still works if a mode='r' argument is supplied\n series = tm.makeFloatSeries()\n with ensure_clean_path(setup_path) as path:\n series.to_hdf(path, key=\"data\", format=format)\n result = read_hdf(path, key=\"data\", mode=\"r\")\n tm.assert_series_equal(result, series)\n\n\ndef test_read_py2_hdf_file_in_py3(datapath):\n # GH 16781\n\n # tests reading a PeriodIndex DataFrame written in Python2 in Python3\n\n # the file was generated in Python 2.7 like so:\n #\n # df = DataFrame([1.,2,3], index=pd.PeriodIndex(\n # ['2015-01-01', '2015-01-02', '2015-01-05'], freq='B'))\n # df.to_hdf('periodindex_0.20.1_x86_64_darwin_2.7.13.h5', 'p')\n\n expected = DataFrame(\n [1.0, 2, 3],\n index=pd.PeriodIndex([\"2015-01-01\", \"2015-01-02\", \"2015-01-05\"], freq=\"B\"),\n )\n\n with ensure_clean_store(\n datapath(\n \"io\", \"data\", \"legacy_hdf\", \"periodindex_0.20.1_x86_64_darwin_2.7.13.h5\"\n ),\n mode=\"r\",\n ) as store:\n result = 
store[\"p\"]\n tm.assert_frame_equal(result, expected)\n",
"import operator\nimport re\nimport warnings\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.sparse import IntIndex\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import isna\nimport pandas._testing as tm\nfrom pandas.core.arrays.sparse import (\n SparseArray,\n SparseDtype,\n)\n\n\nclass TestSparseArray:\n def setup_method(self, method):\n self.arr_data = np.array([np.nan, np.nan, 1, 2, 3, np.nan, 4, 5, np.nan, 6])\n self.arr = SparseArray(self.arr_data)\n self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)\n\n def test_constructor_dtype(self):\n arr = SparseArray([np.nan, 1, 2, np.nan])\n assert arr.dtype == SparseDtype(np.float64, np.nan)\n assert arr.dtype.subtype == np.float64\n assert np.isnan(arr.fill_value)\n\n arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0)\n assert arr.dtype == SparseDtype(np.float64, 0)\n assert arr.fill_value == 0\n\n arr = SparseArray([0, 1, 2, 4], dtype=np.float64)\n assert arr.dtype == SparseDtype(np.float64, np.nan)\n assert np.isnan(arr.fill_value)\n\n arr = SparseArray([0, 1, 2, 4], dtype=np.int64)\n assert arr.dtype == SparseDtype(np.int64, 0)\n assert arr.fill_value == 0\n\n arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64)\n assert arr.dtype == SparseDtype(np.int64, 0)\n assert arr.fill_value == 0\n\n arr = SparseArray([0, 1, 2, 4], dtype=None)\n assert arr.dtype == SparseDtype(np.int64, 0)\n assert arr.fill_value == 0\n\n arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None)\n assert arr.dtype == SparseDtype(np.int64, 0)\n assert arr.fill_value == 0\n\n def test_constructor_dtype_str(self):\n result = SparseArray([1, 2, 3], dtype=\"int\")\n expected = SparseArray([1, 2, 3], dtype=int)\n tm.assert_sp_array_equal(result, expected)\n\n def test_constructor_sparse_dtype(self):\n result = SparseArray([1, 0, 0, 1], dtype=SparseDtype(\"int64\", -1))\n expected = SparseArray([1, 0, 0, 1], fill_value=-1, dtype=np.int64)\n tm.assert_sp_array_equal(result, expected)\n assert result.sp_values.dtype == np.dtype(\"int64\")\n\n def test_constructor_sparse_dtype_str(self):\n result = SparseArray([1, 0, 0, 1], dtype=\"Sparse[int32]\")\n expected = SparseArray([1, 0, 0, 1], dtype=np.int32)\n tm.assert_sp_array_equal(result, expected)\n assert result.sp_values.dtype == np.dtype(\"int32\")\n\n def test_constructor_object_dtype(self):\n # GH 11856\n arr = SparseArray([\"A\", \"A\", np.nan, \"B\"], dtype=object)\n assert arr.dtype == SparseDtype(object)\n assert np.isnan(arr.fill_value)\n\n arr = SparseArray([\"A\", \"A\", np.nan, \"B\"], dtype=object, fill_value=\"A\")\n assert arr.dtype == SparseDtype(object, \"A\")\n assert arr.fill_value == \"A\"\n\n # GH 17574\n data = [False, 0, 100.0, 0.0]\n arr = SparseArray(data, dtype=object, fill_value=False)\n assert arr.dtype == SparseDtype(object, False)\n assert arr.fill_value is False\n arr_expected = np.array(data, dtype=object)\n it = (type(x) == type(y) and x == y for x, y in zip(arr, arr_expected))\n assert np.fromiter(it, dtype=np.bool_).all()\n\n @pytest.mark.parametrize(\"dtype\", [SparseDtype(int, 0), int])\n def test_constructor_na_dtype(self, dtype):\n with pytest.raises(ValueError, match=\"Cannot convert\"):\n SparseArray([0, 1, np.nan], dtype=dtype)\n\n def test_constructor_warns_when_losing_timezone(self):\n # GH#32501 warn when losing timezone information\n dti = pd.date_range(\"2016-01-01\", periods=3, tz=\"US/Pacific\")\n\n expected = SparseArray(np.asarray(dti, dtype=\"datetime64[ns]\"))\n\n with 
tm.assert_produces_warning(UserWarning):\n result = SparseArray(dti)\n\n tm.assert_sp_array_equal(result, expected)\n\n with tm.assert_produces_warning(UserWarning):\n result = SparseArray(pd.Series(dti))\n\n tm.assert_sp_array_equal(result, expected)\n\n def test_constructor_spindex_dtype(self):\n arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]))\n # XXX: Behavior change: specifying SparseIndex no longer changes the\n # fill_value\n expected = SparseArray([0, 1, 2, 0], kind=\"integer\")\n tm.assert_sp_array_equal(arr, expected)\n assert arr.dtype == SparseDtype(np.int64)\n assert arr.fill_value == 0\n\n arr = SparseArray(\n data=[1, 2, 3],\n sparse_index=IntIndex(4, [1, 2, 3]),\n dtype=np.int64,\n fill_value=0,\n )\n exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0)\n tm.assert_sp_array_equal(arr, exp)\n assert arr.dtype == SparseDtype(np.int64)\n assert arr.fill_value == 0\n\n arr = SparseArray(\n data=[1, 2], sparse_index=IntIndex(4, [1, 2]), fill_value=0, dtype=np.int64\n )\n exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64)\n tm.assert_sp_array_equal(arr, exp)\n assert arr.dtype == SparseDtype(np.int64)\n assert arr.fill_value == 0\n\n arr = SparseArray(\n data=[1, 2, 3],\n sparse_index=IntIndex(4, [1, 2, 3]),\n dtype=None,\n fill_value=0,\n )\n exp = SparseArray([0, 1, 2, 3], dtype=None)\n tm.assert_sp_array_equal(arr, exp)\n assert arr.dtype == SparseDtype(np.int64)\n assert arr.fill_value == 0\n\n @pytest.mark.parametrize(\"sparse_index\", [None, IntIndex(1, [0])])\n def test_constructor_spindex_dtype_scalar(self, sparse_index):\n # scalar input\n arr = SparseArray(data=1, sparse_index=sparse_index, dtype=None)\n exp = SparseArray([1], dtype=None)\n tm.assert_sp_array_equal(arr, exp)\n assert arr.dtype == SparseDtype(np.int64)\n assert arr.fill_value == 0\n\n arr = SparseArray(data=1, sparse_index=IntIndex(1, [0]), dtype=None)\n exp = SparseArray([1], dtype=None)\n tm.assert_sp_array_equal(arr, exp)\n assert arr.dtype == SparseDtype(np.int64)\n assert arr.fill_value == 0\n\n def test_constructor_spindex_dtype_scalar_broadcasts(self):\n arr = SparseArray(\n data=[1, 2], sparse_index=IntIndex(4, [1, 2]), fill_value=0, dtype=None\n )\n exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None)\n tm.assert_sp_array_equal(arr, exp)\n assert arr.dtype == SparseDtype(np.int64)\n assert arr.fill_value == 0\n\n @pytest.mark.parametrize(\n \"data, fill_value\",\n [\n (np.array([1, 2]), 0),\n (np.array([1.0, 2.0]), np.nan),\n ([True, False], False),\n ([pd.Timestamp(\"2017-01-01\")], pd.NaT),\n ],\n )\n def test_constructor_inferred_fill_value(self, data, fill_value):\n result = SparseArray(data).fill_value\n\n if isna(fill_value):\n assert isna(result)\n else:\n assert result == fill_value\n\n @pytest.mark.parametrize(\"format\", [\"coo\", \"csc\", \"csr\"])\n @pytest.mark.parametrize(\"size\", [0, 10])\n @td.skip_if_no_scipy\n def test_from_spmatrix(self, size, format):\n import scipy.sparse\n\n mat = scipy.sparse.random(size, 1, density=0.5, format=format)\n result = SparseArray.from_spmatrix(mat)\n\n result = np.asarray(result)\n expected = mat.toarray().ravel()\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize(\"format\", [\"coo\", \"csc\", \"csr\"])\n @td.skip_if_no_scipy\n def test_from_spmatrix_including_explicit_zero(self, format):\n import scipy.sparse\n\n mat = scipy.sparse.random(10, 1, density=0.5, format=format)\n mat.data[0] = 0\n result = SparseArray.from_spmatrix(mat)\n\n result = np.asarray(result)\n expected = 
mat.toarray().ravel()\n tm.assert_numpy_array_equal(result, expected)\n\n @td.skip_if_no_scipy\n def test_from_spmatrix_raises(self):\n import scipy.sparse\n\n mat = scipy.sparse.eye(5, 4, format=\"csc\")\n\n with pytest.raises(ValueError, match=\"not '4'\"):\n SparseArray.from_spmatrix(mat)\n\n @pytest.mark.parametrize(\n \"scalar,dtype\",\n [\n (False, SparseDtype(bool, False)),\n (0.0, SparseDtype(\"float64\", 0)),\n (1, SparseDtype(\"int64\", 1)),\n (\"z\", SparseDtype(\"object\", \"z\")),\n ],\n )\n def test_scalar_with_index_infer_dtype(self, scalar, dtype):\n # GH 19163\n arr = SparseArray(scalar, index=[1, 2, 3], fill_value=scalar)\n exp = SparseArray([scalar, scalar, scalar], fill_value=scalar)\n\n tm.assert_sp_array_equal(arr, exp)\n\n assert arr.dtype == dtype\n assert exp.dtype == dtype\n\n def test_get_item(self):\n\n assert np.isnan(self.arr[1])\n assert self.arr[2] == 1\n assert self.arr[7] == 5\n\n assert self.zarr[0] == 0\n assert self.zarr[2] == 1\n assert self.zarr[7] == 5\n\n errmsg = re.compile(\"bounds\")\n\n with pytest.raises(IndexError, match=errmsg):\n self.arr[11]\n\n with pytest.raises(IndexError, match=errmsg):\n self.arr[-11]\n\n assert self.arr[-1] == self.arr[len(self.arr) - 1]\n\n def test_take_scalar_raises(self):\n msg = \"'indices' must be an array, not a scalar '2'.\"\n with pytest.raises(ValueError, match=msg):\n self.arr.take(2)\n\n def test_take(self):\n exp = SparseArray(np.take(self.arr_data, [2, 3]))\n tm.assert_sp_array_equal(self.arr.take([2, 3]), exp)\n\n exp = SparseArray(np.take(self.arr_data, [0, 1, 2]))\n tm.assert_sp_array_equal(self.arr.take([0, 1, 2]), exp)\n\n def test_take_all_empty(self):\n a = pd.array([0, 0], dtype=SparseDtype(\"int64\"))\n result = a.take([0, 1], allow_fill=True, fill_value=np.nan)\n tm.assert_sp_array_equal(a, result)\n\n def test_take_fill_value(self):\n data = np.array([1, np.nan, 0, 3, 0])\n sparse = SparseArray(data, fill_value=0)\n\n exp = SparseArray(np.take(data, [0]), fill_value=0)\n tm.assert_sp_array_equal(sparse.take([0]), exp)\n\n exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0)\n tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp)\n\n def test_take_negative(self):\n exp = SparseArray(np.take(self.arr_data, [-1]))\n tm.assert_sp_array_equal(self.arr.take([-1]), exp)\n\n exp = SparseArray(np.take(self.arr_data, [-4, -3, -2]))\n tm.assert_sp_array_equal(self.arr.take([-4, -3, -2]), exp)\n\n @pytest.mark.parametrize(\"fill_value\", [0, None, np.nan])\n def test_shift_fill_value(self, fill_value):\n # GH #24128\n sparse = SparseArray(np.array([1, 0, 0, 3, 0]), fill_value=8.0)\n res = sparse.shift(1, fill_value=fill_value)\n if isna(fill_value):\n fill_value = res.dtype.na_value\n exp = SparseArray(np.array([fill_value, 1, 0, 0, 3]), fill_value=8.0)\n tm.assert_sp_array_equal(res, exp)\n\n def test_bad_take(self):\n with pytest.raises(IndexError, match=\"bounds\"):\n self.arr.take([11])\n\n def test_take_filling(self):\n # similar tests as GH 12631\n sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4])\n result = sparse.take(np.array([1, 0, -1]))\n expected = SparseArray([np.nan, np.nan, 4])\n tm.assert_sp_array_equal(result, expected)\n\n # XXX: test change: fill_value=True -> allow_fill=True\n result = sparse.take(np.array([1, 0, -1]), allow_fill=True)\n expected = SparseArray([np.nan, np.nan, np.nan])\n tm.assert_sp_array_equal(result, expected)\n\n # allow_fill=False\n result = sparse.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)\n expected = SparseArray([np.nan, np.nan, 4])\n 
tm.assert_sp_array_equal(result, expected)\n\n msg = \"Invalid value in 'indices'\"\n with pytest.raises(ValueError, match=msg):\n sparse.take(np.array([1, 0, -2]), allow_fill=True)\n\n with pytest.raises(ValueError, match=msg):\n sparse.take(np.array([1, 0, -5]), allow_fill=True)\n\n msg = \"out of bounds value in 'indices'\"\n with pytest.raises(IndexError, match=msg):\n sparse.take(np.array([1, -6]))\n with pytest.raises(IndexError, match=msg):\n sparse.take(np.array([1, 5]))\n with pytest.raises(IndexError, match=msg):\n sparse.take(np.array([1, 5]), allow_fill=True)\n\n def test_take_filling_fill_value(self):\n # same tests as GH 12631\n sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0)\n result = sparse.take(np.array([1, 0, -1]))\n expected = SparseArray([0, np.nan, 4], fill_value=0)\n tm.assert_sp_array_equal(result, expected)\n\n # fill_value\n result = sparse.take(np.array([1, 0, -1]), allow_fill=True)\n # XXX: behavior change.\n # the old way of filling self.fill_value doesn't follow EA rules.\n # It's supposed to be self.dtype.na_value (nan in this case)\n expected = SparseArray([0, np.nan, np.nan], fill_value=0)\n tm.assert_sp_array_equal(result, expected)\n\n # allow_fill=False\n result = sparse.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)\n expected = SparseArray([0, np.nan, 4], fill_value=0)\n tm.assert_sp_array_equal(result, expected)\n\n msg = \"Invalid value in 'indices'.\"\n with pytest.raises(ValueError, match=msg):\n sparse.take(np.array([1, 0, -2]), allow_fill=True)\n with pytest.raises(ValueError, match=msg):\n sparse.take(np.array([1, 0, -5]), allow_fill=True)\n\n msg = \"out of bounds value in 'indices'\"\n with pytest.raises(IndexError, match=msg):\n sparse.take(np.array([1, -6]))\n with pytest.raises(IndexError, match=msg):\n sparse.take(np.array([1, 5]))\n with pytest.raises(IndexError, match=msg):\n sparse.take(np.array([1, 5]), fill_value=True)\n\n def test_take_filling_all_nan(self):\n sparse = SparseArray([np.nan, np.nan, np.nan, np.nan, np.nan])\n # XXX: did the default kind from take change?\n result = sparse.take(np.array([1, 0, -1]))\n expected = SparseArray([np.nan, np.nan, np.nan], kind=\"block\")\n tm.assert_sp_array_equal(result, expected)\n\n result = sparse.take(np.array([1, 0, -1]), fill_value=True)\n expected = SparseArray([np.nan, np.nan, np.nan], kind=\"block\")\n tm.assert_sp_array_equal(result, expected)\n\n msg = \"out of bounds value in 'indices'\"\n with pytest.raises(IndexError, match=msg):\n sparse.take(np.array([1, -6]))\n with pytest.raises(IndexError, match=msg):\n sparse.take(np.array([1, 5]))\n with pytest.raises(IndexError, match=msg):\n sparse.take(np.array([1, 5]), fill_value=True)\n\n def test_set_item(self):\n def setitem():\n self.arr[5] = 3\n\n def setslice():\n self.arr[1:5] = 2\n\n with pytest.raises(TypeError, match=\"assignment via setitem\"):\n setitem()\n\n with pytest.raises(TypeError, match=\"assignment via setitem\"):\n setslice()\n\n def test_constructor_from_too_large_array(self):\n with pytest.raises(TypeError, match=\"expected dimension <= 1 data\"):\n SparseArray(np.arange(10).reshape((2, 5)))\n\n def test_constructor_from_sparse(self):\n res = SparseArray(self.zarr)\n assert res.fill_value == 0\n tm.assert_almost_equal(res.sp_values, self.zarr.sp_values)\n\n def test_constructor_copy(self):\n cp = SparseArray(self.arr, copy=True)\n cp.sp_values[:3] = 0\n assert not (self.arr.sp_values[:3] == 0).any()\n\n not_copy = SparseArray(self.arr)\n not_copy.sp_values[:3] = 0\n assert 
(self.arr.sp_values[:3] == 0).all()\n\n def test_constructor_bool(self):\n # GH 10648\n data = np.array([False, False, True, True, False, False])\n arr = SparseArray(data, fill_value=False, dtype=bool)\n\n assert arr.dtype == SparseDtype(bool)\n tm.assert_numpy_array_equal(arr.sp_values, np.array([True, True]))\n # Behavior change: np.asarray densifies.\n # tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))\n tm.assert_numpy_array_equal(arr.sp_index.indices, np.array([2, 3], np.int32))\n\n dense = arr.to_dense()\n assert dense.dtype == bool\n tm.assert_numpy_array_equal(dense, data)\n\n def test_constructor_bool_fill_value(self):\n arr = SparseArray([True, False, True], dtype=None)\n assert arr.dtype == SparseDtype(np.bool_)\n assert not arr.fill_value\n\n arr = SparseArray([True, False, True], dtype=np.bool_)\n assert arr.dtype == SparseDtype(np.bool_)\n assert not arr.fill_value\n\n arr = SparseArray([True, False, True], dtype=np.bool_, fill_value=True)\n assert arr.dtype == SparseDtype(np.bool_, True)\n assert arr.fill_value\n\n def test_constructor_float32(self):\n # GH 10648\n data = np.array([1.0, np.nan, 3], dtype=np.float32)\n arr = SparseArray(data, dtype=np.float32)\n\n assert arr.dtype == SparseDtype(np.float32)\n tm.assert_numpy_array_equal(arr.sp_values, np.array([1, 3], dtype=np.float32))\n # Behavior change: np.asarray densifies.\n # tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))\n tm.assert_numpy_array_equal(\n arr.sp_index.indices, np.array([0, 2], dtype=np.int32)\n )\n\n dense = arr.to_dense()\n assert dense.dtype == np.float32\n tm.assert_numpy_array_equal(dense, data)\n\n def test_astype(self):\n # float -> float\n arr = SparseArray([None, None, 0, 2])\n result = arr.astype(\"Sparse[float32]\")\n expected = SparseArray([None, None, 0, 2], dtype=np.dtype(\"float32\"))\n tm.assert_sp_array_equal(result, expected)\n\n dtype = SparseDtype(\"float64\", fill_value=0)\n result = arr.astype(dtype)\n expected = SparseArray._simple_new(\n np.array([0.0, 2.0], dtype=dtype.subtype), IntIndex(4, [2, 3]), dtype\n )\n tm.assert_sp_array_equal(result, expected)\n\n dtype = SparseDtype(\"int64\", 0)\n result = arr.astype(dtype)\n expected = SparseArray._simple_new(\n np.array([0, 2], dtype=np.int64), IntIndex(4, [2, 3]), dtype\n )\n tm.assert_sp_array_equal(result, expected)\n\n arr = SparseArray([0, np.nan, 0, 1], fill_value=0)\n with pytest.raises(ValueError, match=\"NA\"):\n arr.astype(\"Sparse[i8]\")\n\n def test_astype_bool(self):\n a = SparseArray([1, 0, 0, 1], dtype=SparseDtype(int, 0))\n result = a.astype(bool)\n expected = SparseArray([True, 0, 0, True], dtype=SparseDtype(bool, 0))\n tm.assert_sp_array_equal(result, expected)\n\n # update fill value\n result = a.astype(SparseDtype(bool, False))\n expected = SparseArray(\n [True, False, False, True], dtype=SparseDtype(bool, False)\n )\n tm.assert_sp_array_equal(result, expected)\n\n def test_astype_all(self, any_real_dtype):\n vals = np.array([1, 2, 3])\n arr = SparseArray(vals, fill_value=1)\n typ = np.dtype(any_real_dtype)\n res = arr.astype(typ)\n assert res.dtype == SparseDtype(typ, 1)\n assert res.sp_values.dtype == typ\n\n tm.assert_numpy_array_equal(np.asarray(res.to_dense()), vals.astype(typ))\n\n @pytest.mark.parametrize(\n \"arr, dtype, expected\",\n [\n (\n SparseArray([0, 1]),\n \"float\",\n SparseArray([0.0, 1.0], dtype=SparseDtype(float, 0.0)),\n ),\n (SparseArray([0, 1]), bool, SparseArray([False, True])),\n (\n SparseArray([0, 1], fill_value=1),\n bool,\n SparseArray([False, True], 
dtype=SparseDtype(bool, True)),\n ),\n pytest.param(\n SparseArray([0, 1]),\n \"datetime64[ns]\",\n SparseArray(\n np.array([0, 1], dtype=\"datetime64[ns]\"),\n dtype=SparseDtype(\"datetime64[ns]\", pd.Timestamp(\"1970\")),\n ),\n marks=[pytest.mark.xfail(reason=\"NumPy-7619\")],\n ),\n (\n SparseArray([0, 1, 10]),\n str,\n SparseArray([\"0\", \"1\", \"10\"], dtype=SparseDtype(str, \"0\")),\n ),\n (SparseArray([\"10\", \"20\"]), float, SparseArray([10.0, 20.0])),\n (\n SparseArray([0, 1, 0]),\n object,\n SparseArray([0, 1, 0], dtype=SparseDtype(object, 0)),\n ),\n ],\n )\n def test_astype_more(self, arr, dtype, expected):\n result = arr.astype(dtype)\n tm.assert_sp_array_equal(result, expected)\n\n def test_astype_nan_raises(self):\n arr = SparseArray([1.0, np.nan])\n with pytest.raises(ValueError, match=\"Cannot convert non-finite\"):\n arr.astype(int)\n\n def test_astype_copy_false(self):\n # GH#34456 bug caused by using .view instead of .astype in astype_nansafe\n arr = SparseArray([1, 2, 3])\n\n result = arr.astype(float, copy=False)\n expected = SparseArray([1.0, 2.0, 3.0], fill_value=0.0)\n tm.assert_sp_array_equal(result, expected)\n\n def test_set_fill_value(self):\n arr = SparseArray([1.0, np.nan, 2.0], fill_value=np.nan)\n arr.fill_value = 2\n assert arr.fill_value == 2\n\n arr = SparseArray([1, 0, 2], fill_value=0, dtype=np.int64)\n arr.fill_value = 2\n assert arr.fill_value == 2\n\n # XXX: this seems fine? You can construct an integer\n # sparsearray with NaN fill value, why not update one?\n # coerces to int\n # msg = \"unable to set fill_value 3\\\\.1 to int64 dtype\"\n # with pytest.raises(ValueError, match=msg):\n arr.fill_value = 3.1\n assert arr.fill_value == 3.1\n\n # msg = \"unable to set fill_value nan to int64 dtype\"\n # with pytest.raises(ValueError, match=msg):\n arr.fill_value = np.nan\n assert np.isnan(arr.fill_value)\n\n arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool_)\n arr.fill_value = True\n assert arr.fill_value\n\n # coerces to bool\n # msg = \"unable to set fill_value 0 to bool dtype\"\n # with pytest.raises(ValueError, match=msg):\n arr.fill_value = 0\n assert arr.fill_value == 0\n\n # msg = \"unable to set fill_value nan to bool dtype\"\n # with pytest.raises(ValueError, match=msg):\n arr.fill_value = np.nan\n assert np.isnan(arr.fill_value)\n\n @pytest.mark.parametrize(\"val\", [[1, 2, 3], np.array([1, 2]), (1, 2, 3)])\n def test_set_fill_invalid_non_scalar(self, val):\n arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool_)\n msg = \"fill_value must be a scalar\"\n\n with pytest.raises(ValueError, match=msg):\n arr.fill_value = val\n\n def test_copy(self):\n arr2 = self.arr.copy()\n assert arr2.sp_values is not self.arr.sp_values\n assert arr2.sp_index is self.arr.sp_index\n\n def test_values_asarray(self):\n tm.assert_almost_equal(self.arr.to_dense(), self.arr_data)\n\n @pytest.mark.parametrize(\n \"data,shape,dtype\",\n [\n ([0, 0, 0, 0, 0], (5,), None),\n ([], (0,), None),\n ([0], (1,), None),\n ([\"A\", \"A\", np.nan, \"B\"], (4,), object),\n ],\n )\n def test_shape(self, data, shape, dtype):\n # GH 21126\n out = SparseArray(data, dtype=dtype)\n assert out.shape == shape\n\n @pytest.mark.parametrize(\n \"vals\",\n [\n [np.nan, np.nan, np.nan, np.nan, np.nan],\n [1, np.nan, np.nan, 3, np.nan],\n [1, np.nan, 0, 3, 0],\n ],\n )\n @pytest.mark.parametrize(\"fill_value\", [None, 0])\n def test_dense_repr(self, vals, fill_value):\n vals = np.array(vals)\n arr = SparseArray(vals, fill_value=fill_value)\n\n res = 
arr.to_dense()\n tm.assert_numpy_array_equal(res, vals)\n\n res2 = arr._internal_get_values()\n\n tm.assert_numpy_array_equal(res2, vals)\n\n def test_getitem(self):\n def _checkit(i):\n tm.assert_almost_equal(self.arr[i], self.arr.to_dense()[i])\n\n for i in range(len(self.arr)):\n _checkit(i)\n _checkit(-i)\n\n def test_getitem_arraylike_mask(self):\n arr = SparseArray([0, 1, 2])\n result = arr[[True, False, True]]\n expected = SparseArray([0, 2])\n tm.assert_sp_array_equal(result, expected)\n\n def test_getslice(self):\n result = self.arr[:-3]\n exp = SparseArray(self.arr.to_dense()[:-3])\n tm.assert_sp_array_equal(result, exp)\n\n result = self.arr[-4:]\n exp = SparseArray(self.arr.to_dense()[-4:])\n tm.assert_sp_array_equal(result, exp)\n\n # two corner cases from Series\n result = self.arr[-12:]\n exp = SparseArray(self.arr)\n tm.assert_sp_array_equal(result, exp)\n\n result = self.arr[:-12]\n exp = SparseArray(self.arr.to_dense()[:0])\n tm.assert_sp_array_equal(result, exp)\n\n def test_getslice_tuple(self):\n dense = np.array([np.nan, 0, 3, 4, 0, 5, np.nan, np.nan, 0])\n\n sparse = SparseArray(dense)\n res = sparse[(slice(4, None),)]\n exp = SparseArray(dense[4:])\n tm.assert_sp_array_equal(res, exp)\n\n sparse = SparseArray(dense, fill_value=0)\n res = sparse[(slice(4, None),)]\n exp = SparseArray(dense[4:], fill_value=0)\n tm.assert_sp_array_equal(res, exp)\n\n msg = \"too many indices for array\"\n with pytest.raises(IndexError, match=msg):\n sparse[4:, :]\n\n with pytest.raises(IndexError, match=msg):\n # check numpy compat\n dense[4:, :]\n\n def test_boolean_slice_empty(self):\n arr = SparseArray([0, 1, 2])\n res = arr[[False, False, False]]\n assert res.dtype == arr.dtype\n\n @pytest.mark.parametrize(\"op\", [\"add\", \"sub\", \"mul\", \"truediv\", \"floordiv\", \"pow\"])\n def test_binary_operators(self, op):\n op = getattr(operator, op)\n data1 = np.random.randn(20)\n data2 = np.random.randn(20)\n\n data1[::2] = np.nan\n data2[::3] = np.nan\n\n arr1 = SparseArray(data1)\n arr2 = SparseArray(data2)\n\n data1[::2] = 3\n data2[::3] = 3\n farr1 = SparseArray(data1, fill_value=3)\n farr2 = SparseArray(data2, fill_value=3)\n\n def _check_op(op, first, second):\n res = op(first, second)\n exp = SparseArray(\n op(first.to_dense(), second.to_dense()), fill_value=first.fill_value\n )\n assert isinstance(res, SparseArray)\n tm.assert_almost_equal(res.to_dense(), exp.to_dense())\n\n res2 = op(first, second.to_dense())\n assert isinstance(res2, SparseArray)\n tm.assert_sp_array_equal(res, res2)\n\n res3 = op(first.to_dense(), second)\n assert isinstance(res3, SparseArray)\n tm.assert_sp_array_equal(res, res3)\n\n res4 = op(first, 4)\n assert isinstance(res4, SparseArray)\n\n # Ignore this if the actual op raises (e.g. 
pow).\n try:\n exp = op(first.to_dense(), 4)\n exp_fv = op(first.fill_value, 4)\n except ValueError:\n pass\n else:\n tm.assert_almost_equal(res4.fill_value, exp_fv)\n tm.assert_almost_equal(res4.to_dense(), exp)\n\n with np.errstate(all=\"ignore\"):\n for first_arr, second_arr in [(arr1, arr2), (farr1, farr2)]:\n _check_op(op, first_arr, second_arr)\n\n def test_pickle(self):\n def _check_roundtrip(obj):\n unpickled = tm.round_trip_pickle(obj)\n tm.assert_sp_array_equal(unpickled, obj)\n\n _check_roundtrip(self.arr)\n _check_roundtrip(self.zarr)\n\n def test_generator_warnings(self):\n sp_arr = SparseArray([1, 2, 3])\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings(action=\"always\", category=DeprecationWarning)\n warnings.filterwarnings(action=\"always\", category=PendingDeprecationWarning)\n for _ in sp_arr:\n pass\n assert len(w) == 0\n\n def test_fillna(self):\n s = SparseArray([1, np.nan, np.nan, 3, np.nan])\n res = s.fillna(-1)\n exp = SparseArray([1, -1, -1, 3, -1], fill_value=-1, dtype=np.float64)\n tm.assert_sp_array_equal(res, exp)\n\n s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0)\n res = s.fillna(-1)\n exp = SparseArray([1, -1, -1, 3, -1], fill_value=0, dtype=np.float64)\n tm.assert_sp_array_equal(res, exp)\n\n s = SparseArray([1, np.nan, 0, 3, 0])\n res = s.fillna(-1)\n exp = SparseArray([1, -1, 0, 3, 0], fill_value=-1, dtype=np.float64)\n tm.assert_sp_array_equal(res, exp)\n\n s = SparseArray([1, np.nan, 0, 3, 0], fill_value=0)\n res = s.fillna(-1)\n exp = SparseArray([1, -1, 0, 3, 0], fill_value=0, dtype=np.float64)\n tm.assert_sp_array_equal(res, exp)\n\n s = SparseArray([np.nan, np.nan, np.nan, np.nan])\n res = s.fillna(-1)\n exp = SparseArray([-1, -1, -1, -1], fill_value=-1, dtype=np.float64)\n tm.assert_sp_array_equal(res, exp)\n\n s = SparseArray([np.nan, np.nan, np.nan, np.nan], fill_value=0)\n res = s.fillna(-1)\n exp = SparseArray([-1, -1, -1, -1], fill_value=0, dtype=np.float64)\n tm.assert_sp_array_equal(res, exp)\n\n # float dtype's fill_value is np.nan, replaced by -1\n s = SparseArray([0.0, 0.0, 0.0, 0.0])\n res = s.fillna(-1)\n exp = SparseArray([0.0, 0.0, 0.0, 0.0], fill_value=-1)\n tm.assert_sp_array_equal(res, exp)\n\n # int dtype shouldn't have missing. No changes.\n s = SparseArray([0, 0, 0, 0])\n assert s.dtype == SparseDtype(np.int64)\n assert s.fill_value == 0\n res = s.fillna(-1)\n tm.assert_sp_array_equal(res, s)\n\n s = SparseArray([0, 0, 0, 0], fill_value=0)\n assert s.dtype == SparseDtype(np.int64)\n assert s.fill_value == 0\n res = s.fillna(-1)\n exp = SparseArray([0, 0, 0, 0], fill_value=0)\n tm.assert_sp_array_equal(res, exp)\n\n # fill_value can be nan if there is no missing hole.\n # only fill_value will be changed\n s = SparseArray([0, 0, 0, 0], fill_value=np.nan)\n assert s.dtype == SparseDtype(np.int64, fill_value=np.nan)\n assert np.isnan(s.fill_value)\n res = s.fillna(-1)\n exp = SparseArray([0, 0, 0, 0], fill_value=-1)\n tm.assert_sp_array_equal(res, exp)\n\n def test_fillna_overlap(self):\n s = SparseArray([1, np.nan, np.nan, 3, np.nan])\n # filling with existing value doesn't replace existing value with\n # fill_value, i.e. 
existing 3 remains in sp_values\n res = s.fillna(3)\n exp = np.array([1, 3, 3, 3, 3], dtype=np.float64)\n tm.assert_numpy_array_equal(res.to_dense(), exp)\n\n s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0)\n res = s.fillna(3)\n exp = SparseArray([1, 3, 3, 3, 3], fill_value=0, dtype=np.float64)\n tm.assert_sp_array_equal(res, exp)\n\n def test_nonzero(self):\n # Tests regression #21172.\n sa = SparseArray([float(\"nan\"), float(\"nan\"), 1, 0, 0, 2, 0, 0, 0, 3, 0, 0])\n expected = np.array([2, 5, 9], dtype=np.int32)\n (result,) = sa.nonzero()\n tm.assert_numpy_array_equal(expected, result)\n\n sa = SparseArray([0, 0, 1, 0, 0, 2, 0, 0, 0, 3, 0, 0])\n (result,) = sa.nonzero()\n tm.assert_numpy_array_equal(expected, result)\n\n\nclass TestSparseArrayAnalytics:\n @pytest.mark.parametrize(\n \"data,pos,neg\",\n [\n ([True, True, True], True, False),\n ([1, 2, 1], 1, 0),\n ([1.0, 2.0, 1.0], 1.0, 0.0),\n ],\n )\n def test_all(self, data, pos, neg):\n # GH 17570\n out = SparseArray(data).all()\n assert out\n\n out = SparseArray(data, fill_value=pos).all()\n assert out\n\n data[1] = neg\n out = SparseArray(data).all()\n assert not out\n\n out = SparseArray(data, fill_value=pos).all()\n assert not out\n\n @pytest.mark.parametrize(\n \"data,pos,neg\",\n [\n ([True, True, True], True, False),\n ([1, 2, 1], 1, 0),\n ([1.0, 2.0, 1.0], 1.0, 0.0),\n ],\n )\n def test_numpy_all(self, data, pos, neg):\n # GH 17570\n out = np.all(SparseArray(data))\n assert out\n\n out = np.all(SparseArray(data, fill_value=pos))\n assert out\n\n data[1] = neg\n out = np.all(SparseArray(data))\n assert not out\n\n out = np.all(SparseArray(data, fill_value=pos))\n assert not out\n\n # raises with a different message on py2.\n msg = \"the 'out' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n np.all(SparseArray(data), out=np.array([]))\n\n @pytest.mark.parametrize(\n \"data,pos,neg\",\n [\n ([False, True, False], True, False),\n ([0, 2, 0], 2, 0),\n ([0.0, 2.0, 0.0], 2.0, 0.0),\n ],\n )\n def test_any(self, data, pos, neg):\n # GH 17570\n out = SparseArray(data).any()\n assert out\n\n out = SparseArray(data, fill_value=pos).any()\n assert out\n\n data[1] = neg\n out = SparseArray(data).any()\n assert not out\n\n out = SparseArray(data, fill_value=pos).any()\n assert not out\n\n @pytest.mark.parametrize(\n \"data,pos,neg\",\n [\n ([False, True, False], True, False),\n ([0, 2, 0], 2, 0),\n ([0.0, 2.0, 0.0], 2.0, 0.0),\n ],\n )\n def test_numpy_any(self, data, pos, neg):\n # GH 17570\n out = np.any(SparseArray(data))\n assert out\n\n out = np.any(SparseArray(data, fill_value=pos))\n assert out\n\n data[1] = neg\n out = np.any(SparseArray(data))\n assert not out\n\n out = np.any(SparseArray(data, fill_value=pos))\n assert not out\n\n msg = \"the 'out' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n np.any(SparseArray(data), out=out)\n\n def test_sum(self):\n data = np.arange(10).astype(float)\n out = SparseArray(data).sum()\n assert out == 45.0\n\n data[5] = np.nan\n out = SparseArray(data, fill_value=2).sum()\n assert out == 40.0\n\n out = SparseArray(data, fill_value=np.nan).sum()\n assert out == 40.0\n\n @pytest.mark.parametrize(\n \"arr\",\n [\n np.array([0, 1, np.nan, 1]),\n np.array([0, 1, 1]),\n np.array([True, True, False]),\n ],\n )\n @pytest.mark.parametrize(\"fill_value\", [0, 1, np.nan, True, False])\n @pytest.mark.parametrize(\"min_count, expected\", [(3, 2), (4, np.nan)])\n def test_sum_min_count(self, arr, fill_value, min_count, expected):\n # 
https://github.com/pandas-dev/pandas/issues/25777\n sparray = SparseArray(arr, fill_value=fill_value)\n result = sparray.sum(min_count=min_count)\n if np.isnan(expected):\n assert np.isnan(result)\n else:\n assert result == expected\n\n def test_numpy_sum(self):\n data = np.arange(10).astype(float)\n out = np.sum(SparseArray(data))\n assert out == 45.0\n\n data[5] = np.nan\n out = np.sum(SparseArray(data, fill_value=2))\n assert out == 40.0\n\n out = np.sum(SparseArray(data, fill_value=np.nan))\n assert out == 40.0\n\n msg = \"the 'dtype' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n np.sum(SparseArray(data), dtype=np.int64)\n\n msg = \"the 'out' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n np.sum(SparseArray(data), out=out)\n\n @pytest.mark.parametrize(\n \"data,expected\",\n [\n (\n np.array([1, 2, 3, 4, 5], dtype=float), # non-null data\n SparseArray(np.array([1.0, 3.0, 6.0, 10.0, 15.0])),\n ),\n (\n np.array([1, 2, np.nan, 4, 5], dtype=float), # null data\n SparseArray(np.array([1.0, 3.0, np.nan, 7.0, 12.0])),\n ),\n ],\n )\n @pytest.mark.parametrize(\"numpy\", [True, False])\n def test_cumsum(self, data, expected, numpy):\n cumsum = np.cumsum if numpy else lambda s: s.cumsum()\n\n out = cumsum(SparseArray(data))\n tm.assert_sp_array_equal(out, expected)\n\n out = cumsum(SparseArray(data, fill_value=np.nan))\n tm.assert_sp_array_equal(out, expected)\n\n out = cumsum(SparseArray(data, fill_value=2))\n tm.assert_sp_array_equal(out, expected)\n\n if numpy: # numpy compatibility checks.\n msg = \"the 'dtype' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n np.cumsum(SparseArray(data), dtype=np.int64)\n\n msg = \"the 'out' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n np.cumsum(SparseArray(data), out=out)\n else:\n axis = 1 # SparseArray currently 1-D, so only axis = 0 is valid.\n msg = re.escape(f\"axis(={axis}) out of bounds\")\n with pytest.raises(ValueError, match=msg):\n SparseArray(data).cumsum(axis=axis)\n\n def test_mean(self):\n data = np.arange(10).astype(float)\n out = SparseArray(data).mean()\n assert out == 4.5\n\n data[5] = np.nan\n out = SparseArray(data).mean()\n assert out == 40.0 / 9\n\n def test_numpy_mean(self):\n data = np.arange(10).astype(float)\n out = np.mean(SparseArray(data))\n assert out == 4.5\n\n data[5] = np.nan\n out = np.mean(SparseArray(data))\n assert out == 40.0 / 9\n\n msg = \"the 'dtype' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n np.mean(SparseArray(data), dtype=np.int64)\n\n msg = \"the 'out' parameter is not supported\"\n with pytest.raises(ValueError, match=msg):\n np.mean(SparseArray(data), out=out)\n\n def test_ufunc(self):\n # GH 13853 make sure ufunc is applied to fill_value\n sparse = SparseArray([1, np.nan, 2, np.nan, -2])\n result = SparseArray([1, np.nan, 2, np.nan, 2])\n tm.assert_sp_array_equal(abs(sparse), result)\n tm.assert_sp_array_equal(np.abs(sparse), result)\n\n sparse = SparseArray([1, -1, 2, -2], fill_value=1)\n result = SparseArray([1, 2, 2], sparse_index=sparse.sp_index, fill_value=1)\n tm.assert_sp_array_equal(abs(sparse), result)\n tm.assert_sp_array_equal(np.abs(sparse), result)\n\n sparse = SparseArray([1, -1, 2, -2], fill_value=-1)\n result = SparseArray([1, 2, 2], sparse_index=sparse.sp_index, fill_value=1)\n tm.assert_sp_array_equal(abs(sparse), result)\n tm.assert_sp_array_equal(np.abs(sparse), result)\n\n sparse = SparseArray([1, np.nan, 2, np.nan, -2])\n result = 
SparseArray(np.sin([1, np.nan, 2, np.nan, -2]))\n tm.assert_sp_array_equal(np.sin(sparse), result)\n\n sparse = SparseArray([1, -1, 2, -2], fill_value=1)\n result = SparseArray(np.sin([1, -1, 2, -2]), fill_value=np.sin(1))\n tm.assert_sp_array_equal(np.sin(sparse), result)\n\n sparse = SparseArray([1, -1, 0, -2], fill_value=0)\n result = SparseArray(np.sin([1, -1, 0, -2]), fill_value=np.sin(0))\n tm.assert_sp_array_equal(np.sin(sparse), result)\n\n def test_ufunc_args(self):\n # GH 13853 make sure ufunc is applied to fill_value, including its arg\n sparse = SparseArray([1, np.nan, 2, np.nan, -2])\n result = SparseArray([2, np.nan, 3, np.nan, -1])\n tm.assert_sp_array_equal(np.add(sparse, 1), result)\n\n sparse = SparseArray([1, -1, 2, -2], fill_value=1)\n result = SparseArray([2, 0, 3, -1], fill_value=2)\n tm.assert_sp_array_equal(np.add(sparse, 1), result)\n\n sparse = SparseArray([1, -1, 0, -2], fill_value=0)\n result = SparseArray([2, 0, 1, -1], fill_value=1)\n tm.assert_sp_array_equal(np.add(sparse, 1), result)\n\n @pytest.mark.parametrize(\"fill_value\", [0.0, np.nan])\n def test_modf(self, fill_value):\n # https://github.com/pandas-dev/pandas/issues/26946\n sparse = SparseArray([fill_value] * 10 + [1.1, 2.2], fill_value=fill_value)\n r1, r2 = np.modf(sparse)\n e1, e2 = np.modf(np.asarray(sparse))\n tm.assert_sp_array_equal(r1, SparseArray(e1, fill_value=fill_value))\n tm.assert_sp_array_equal(r2, SparseArray(e2, fill_value=fill_value))\n\n def test_nbytes_integer(self):\n arr = SparseArray([1, 0, 0, 0, 2], kind=\"integer\")\n result = arr.nbytes\n # (2 * 8) + 2 * 4\n assert result == 24\n\n def test_nbytes_block(self):\n arr = SparseArray([1, 2, 0, 0, 0], kind=\"block\")\n result = arr.nbytes\n # (2 * 8) + 4 + 4\n # sp_values, blocs, blengths\n assert result == 24\n\n def test_asarray_datetime64(self):\n s = SparseArray(pd.to_datetime([\"2012\", None, None, \"2013\"]))\n np.asarray(s)\n\n def test_density(self):\n arr = SparseArray([0, 1])\n assert arr.density == 0.5\n\n def test_npoints(self):\n arr = SparseArray([0, 1])\n assert arr.npoints == 1\n\n\nclass TestAccessor:\n @pytest.mark.parametrize(\"attr\", [\"npoints\", \"density\", \"fill_value\", \"sp_values\"])\n def test_get_attributes(self, attr):\n arr = SparseArray([0, 1])\n ser = pd.Series(arr)\n\n result = getattr(ser.sparse, attr)\n expected = getattr(arr, attr)\n assert result == expected\n\n @td.skip_if_no_scipy\n def test_from_coo(self):\n import scipy.sparse\n\n row = [0, 3, 1, 0]\n col = [0, 3, 1, 2]\n data = [4, 5, 7, 9]\n # TODO: Remove dtype when scipy is fixed\n # https://github.com/scipy/scipy/issues/13585\n sp_array = scipy.sparse.coo_matrix((data, (row, col)), dtype=\"int\")\n result = pd.Series.sparse.from_coo(sp_array)\n\n index = pd.MultiIndex.from_arrays([[0, 0, 1, 3], [0, 2, 1, 3]])\n expected = pd.Series([4, 9, 7, 5], index=index, dtype=\"Sparse[int]\")\n tm.assert_series_equal(result, expected)\n\n @td.skip_if_no_scipy\n def test_to_coo(self):\n import scipy.sparse\n\n ser = pd.Series(\n [1, 2, 3],\n index=pd.MultiIndex.from_product([[0], [1, 2, 3]], names=[\"a\", \"b\"]),\n dtype=\"Sparse[int]\",\n )\n A, _, _ = ser.sparse.to_coo()\n assert isinstance(A, scipy.sparse.coo.coo_matrix)\n\n def test_non_sparse_raises(self):\n ser = pd.Series([1, 2, 3])\n with pytest.raises(AttributeError, match=\".sparse\"):\n ser.sparse.density\n\n\ndef test_setting_fill_value_fillna_still_works():\n # This is why letting users update fill_value / dtype is bad\n # astype has the same problem.\n arr = SparseArray([1.0, 
np.nan, 1.0], fill_value=0.0)\n arr.fill_value = np.nan\n result = arr.isna()\n # Can't do direct comparison, since the sp_index will be different\n # So let's convert to ndarray and check there.\n result = np.asarray(result)\n\n expected = np.array([False, True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n\ndef test_setting_fill_value_updates():\n arr = SparseArray([0.0, np.nan], fill_value=0)\n arr.fill_value = np.nan\n # use private constructor to get the index right\n # otherwise both nans would be un-stored.\n expected = SparseArray._simple_new(\n sparse_array=np.array([np.nan]),\n sparse_index=IntIndex(2, [1]),\n dtype=SparseDtype(float, np.nan),\n )\n tm.assert_sp_array_equal(arr, expected)\n\n\[email protected](\n \"arr, loc\",\n [\n ([None, 1, 2], 0),\n ([0, None, 2], 1),\n ([0, 1, None], 2),\n ([0, 1, 1, None, None], 3),\n ([1, 1, 1, 2], -1),\n ([], -1),\n ],\n)\ndef test_first_fill_value_loc(arr, loc):\n result = SparseArray(arr)._first_fill_value_loc()\n assert result == loc\n\n\[email protected](\n \"arr\", [[1, 2, np.nan, np.nan], [1, np.nan, 2, np.nan], [1, 2, np.nan]]\n)\[email protected](\"fill_value\", [np.nan, 0, 1])\ndef test_unique_na_fill(arr, fill_value):\n a = SparseArray(arr, fill_value=fill_value).unique()\n b = pd.Series(arr).unique()\n assert isinstance(a, SparseArray)\n a = np.asarray(a)\n tm.assert_numpy_array_equal(a, b)\n\n\ndef test_unique_all_sparse():\n # https://github.com/pandas-dev/pandas/issues/23168\n arr = SparseArray([0, 0])\n result = arr.unique()\n expected = SparseArray([0])\n tm.assert_sp_array_equal(result, expected)\n\n\ndef test_map():\n arr = SparseArray([0, 1, 2])\n expected = SparseArray([10, 11, 12], fill_value=10)\n\n # dict\n result = arr.map({0: 10, 1: 11, 2: 12})\n tm.assert_sp_array_equal(result, expected)\n\n # series\n result = arr.map(pd.Series({0: 10, 1: 11, 2: 12}))\n tm.assert_sp_array_equal(result, expected)\n\n # function\n result = arr.map(pd.Series({0: 10, 1: 11, 2: 12}))\n expected = SparseArray([10, 11, 12], fill_value=10)\n tm.assert_sp_array_equal(result, expected)\n\n\ndef test_map_missing():\n arr = SparseArray([0, 1, 2])\n expected = SparseArray([10, 11, None], fill_value=10)\n\n result = arr.map({0: 10, 1: 11})\n tm.assert_sp_array_equal(result, expected)\n\n\[email protected](\"fill_value\", [np.nan, 1])\ndef test_dropna(fill_value):\n # GH-28287\n arr = SparseArray([np.nan, 1], fill_value=fill_value)\n exp = SparseArray([1.0], fill_value=fill_value)\n tm.assert_sp_array_equal(arr.dropna(), exp)\n\n df = pd.DataFrame({\"a\": [0, 1], \"b\": arr})\n expected_df = pd.DataFrame({\"a\": [1], \"b\": exp}, index=pd.Int64Index([1]))\n tm.assert_equal(df.dropna(), expected_df)\n\n\ndef test_drop_duplicates_fill_value():\n # GH 11726\n df = pd.DataFrame(np.zeros((5, 5))).apply(lambda x: SparseArray(x, fill_value=0))\n result = df.drop_duplicates()\n expected = pd.DataFrame({i: SparseArray([0.0], fill_value=0) for i in range(5)})\n tm.assert_frame_equal(result, expected)\n\n\nclass TestMinMax:\n plain_data = np.arange(5).astype(float)\n data_neg = plain_data * (-1)\n data_NaN = SparseArray(np.array([0, 1, 2, np.nan, 4]))\n data_all_NaN = SparseArray(np.array([np.nan, np.nan, np.nan, np.nan, np.nan]))\n data_NA_filled = SparseArray(\n np.array([np.nan, np.nan, np.nan, np.nan, np.nan]), fill_value=5\n )\n\n @pytest.mark.parametrize(\n \"raw_data,max_expected,min_expected\",\n [\n (plain_data, [4], [0]),\n (data_neg, [0], [-4]),\n (data_NaN, [4], [0]),\n (data_all_NaN, [np.nan], [np.nan]),\n (data_NA_filled, [5], 
[5]),\n ],\n )\n def test_maxmin(self, raw_data, max_expected, min_expected):\n max_result = SparseArray(raw_data).max()\n min_result = SparseArray(raw_data).min()\n assert max_result in max_expected\n assert min_result in min_expected\n",
"import pytest\nimport warnings\nimport json\n\nimport numpy as np\nimport pandas as pd\n\nfrom .. import infer_vegalite_type, sanitize_dataframe\n\n\ndef test_infer_vegalite_type():\n def _check(arr, typ):\n assert infer_vegalite_type(arr) == typ\n\n _check(np.arange(5, dtype=float), \"quantitative\")\n _check(np.arange(5, dtype=int), \"quantitative\")\n _check(np.zeros(5, dtype=bool), \"nominal\")\n _check(pd.date_range(\"2012\", \"2013\"), \"temporal\")\n _check(pd.timedelta_range(365, periods=12), \"temporal\")\n\n nulled = pd.Series(np.random.randint(10, size=10))\n nulled[0] = None\n _check(nulled, \"quantitative\")\n _check([\"a\", \"b\", \"c\"], \"nominal\")\n\n if hasattr(pytest, \"warns\"): # added in pytest 2.8\n with pytest.warns(UserWarning):\n _check([], \"nominal\")\n else:\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\")\n _check([], \"nominal\")\n\n\ndef test_sanitize_dataframe():\n # create a dataframe with various types\n df = pd.DataFrame(\n {\n \"s\": list(\"abcde\"),\n \"f\": np.arange(5, dtype=float),\n \"i\": np.arange(5, dtype=int),\n \"b\": np.array([True, False, True, True, False]),\n \"d\": pd.date_range(\"2012-01-01\", periods=5, freq=\"H\"),\n \"c\": pd.Series(list(\"ababc\"), dtype=\"category\"),\n \"c2\": pd.Series([1, \"A\", 2.5, \"B\", None], dtype=\"category\"),\n \"o\": pd.Series([np.array(i) for i in range(5)]),\n \"p\": pd.date_range(\"2012-01-01\", periods=5, freq=\"H\").tz_localize(\"UTC\"),\n }\n )\n\n # add some nulls\n df.iloc[0, df.columns.get_loc(\"s\")] = None\n df.iloc[0, df.columns.get_loc(\"f\")] = np.nan\n df.iloc[0, df.columns.get_loc(\"d\")] = pd.NaT\n df.iloc[0, df.columns.get_loc(\"o\")] = np.array(np.nan)\n\n # JSON serialize. This will fail on non-sanitized dataframes\n print(df[[\"s\", \"c2\"]])\n df_clean = sanitize_dataframe(df)\n print(df_clean[[\"s\", \"c2\"]])\n print(df_clean[[\"s\", \"c2\"]].to_dict())\n s = json.dumps(df_clean.to_dict(orient=\"records\"))\n print(s)\n\n # Re-construct pandas dataframe\n df2 = pd.read_json(s)\n\n # Re-order the columns to match df\n df2 = df2[df.columns]\n\n # Re-apply original types\n for col in df:\n if str(df[col].dtype).startswith(\"datetime\"):\n # astype(datetime) introduces time-zone issues:\n # to_datetime() does not.\n utc = isinstance(df[col].dtype, pd.core.dtypes.dtypes.DatetimeTZDtype)\n df2[col] = pd.to_datetime(df2[col], utc=utc)\n else:\n df2[col] = df2[col].astype(df[col].dtype)\n\n # pandas doesn't properly recognize np.array(np.nan), so change it here\n df.iloc[0, df.columns.get_loc(\"o\")] = np.nan\n assert df.equals(df2)\n\n\ndef test_sanitize_dataframe_colnames():\n df = pd.DataFrame(np.arange(12).reshape(4, 3))\n\n # Test that RangeIndex is converted to strings\n df = sanitize_dataframe(df)\n assert [isinstance(col, str) for col in df.columns]\n\n # Test that non-string columns result in an error\n df.columns = [4, \"foo\", \"bar\"]\n with pytest.raises(ValueError) as err:\n sanitize_dataframe(df)\n assert str(err.value).startswith(\"Dataframe contains invalid column name: 4.\")\n\n\ndef test_sanitize_dataframe_timedelta():\n df = pd.DataFrame({\"r\": pd.timedelta_range(start=\"1 day\", periods=4)})\n with pytest.raises(ValueError) as err:\n sanitize_dataframe(df)\n assert str(err.value).startswith('Field \"r\" has type \"timedelta')\n\n\ndef test_sanitize_dataframe_infs():\n df = pd.DataFrame({\"x\": [0, 1, 2, np.inf, -np.inf, np.nan]})\n df_clean = sanitize_dataframe(df)\n assert list(df_clean.dtypes) == [object]\n assert list(df_clean[\"x\"]) == 
[0, 1, 2, None, None, None]\n\n\[email protected](\n not hasattr(pd, \"Int64Dtype\"),\n reason=\"Nullable integers not supported in pandas v{}\".format(pd.__version__),\n)\ndef test_sanitize_nullable_integers():\n\n df = pd.DataFrame(\n {\n \"int_np\": [1, 2, 3, 4, 5],\n \"int64\": pd.Series([1, 2, 3, None, 5], dtype=\"UInt8\"),\n \"int64_nan\": pd.Series([1, 2, 3, float(\"nan\"), 5], dtype=\"Int64\"),\n \"float\": [1.0, 2.0, 3.0, 4, 5.0],\n \"float_null\": [1, 2, None, 4, 5],\n \"float_inf\": [1, 2, None, 4, (float(\"inf\"))],\n }\n )\n\n df_clean = sanitize_dataframe(df)\n assert {col.dtype.name for _, col in df_clean.iteritems()} == {\"object\"}\n\n result_python = {col_name: list(col) for col_name, col in df_clean.iteritems()}\n assert result_python == {\n \"int_np\": [1, 2, 3, 4, 5],\n \"int64\": [1, 2, 3, None, 5],\n \"int64_nan\": [1, 2, 3, None, 5],\n \"float\": [1.0, 2.0, 3.0, 4.0, 5.0],\n \"float_null\": [1.0, 2.0, None, 4.0, 5.0],\n \"float_inf\": [1.0, 2.0, None, 4.0, None],\n }\n\n\[email protected](\n not hasattr(pd, \"StringDtype\"),\n reason=\"dedicated String dtype not supported in pandas v{}\".format(pd.__version__),\n)\ndef test_sanitize_string_dtype():\n df = pd.DataFrame(\n {\n \"string_object\": [\"a\", \"b\", \"c\", \"d\"],\n \"string_string\": pd.array([\"a\", \"b\", \"c\", \"d\"], dtype=\"string\"),\n \"string_object_null\": [\"a\", \"b\", None, \"d\"],\n \"string_string_null\": pd.array([\"a\", \"b\", None, \"d\"], dtype=\"string\"),\n }\n )\n\n df_clean = sanitize_dataframe(df)\n assert {col.dtype.name for _, col in df_clean.iteritems()} == {\"object\"}\n\n result_python = {col_name: list(col) for col_name, col in df_clean.iteritems()}\n assert result_python == {\n \"string_object\": [\"a\", \"b\", \"c\", \"d\"],\n \"string_string\": [\"a\", \"b\", \"c\", \"d\"],\n \"string_object_null\": [\"a\", \"b\", None, \"d\"],\n \"string_string_null\": [\"a\", \"b\", None, \"d\"],\n }\n\n\[email protected](\n not hasattr(pd, \"BooleanDtype\"),\n reason=\"Nullable boolean dtype not supported in pandas v{}\".format(pd.__version__),\n)\ndef test_sanitize_boolean_dtype():\n df = pd.DataFrame(\n {\n \"bool_none\": pd.array([True, False, None], dtype=\"boolean\"),\n \"none\": pd.array([None, None, None], dtype=\"boolean\"),\n \"bool\": pd.array([True, False, True], dtype=\"boolean\"),\n }\n )\n\n df_clean = sanitize_dataframe(df)\n assert {col.dtype.name for _, col in df_clean.iteritems()} == {\"object\"}\n\n result_python = {col_name: list(col) for col_name, col in df_clean.iteritems()}\n assert result_python == {\n \"bool_none\": [True, False, None],\n \"none\": [None, None, None],\n \"bool\": [True, False, True],\n }\n"
] | [
[
"pandas.util.hash_pandas_object",
"numpy.random.RandomState"
],
[
"pandas._testing.assert_produces_warning",
"pandas.compat._optional.import_optional_dependency"
],
[
"pandas.Series",
"pandas.PeriodIndex",
"numpy.asarray",
"pandas.Index",
"pandas.DatetimeIndex",
"pandas.Int64Index",
"pandas._testing.assert_series_equal",
"pandas._testing.assert_index_equal",
"pandas._testing.assert_produces_warning",
"pandas.compat.is_platform_windows",
"pandas.Float64Index",
"pandas.Timedelta",
"numpy.timedelta64",
"pandas.date_range",
"numpy.array",
"pandas.timedelta_range",
"pandas.TimedeltaIndex",
"pandas._testing.assert_equal",
"numpy.int32",
"numpy.datetime64",
"numpy.int16",
"pandas.Period",
"pandas.Timestamp"
],
[
"numpy.asarray",
"pandas.core.indexes.datetimes.DatetimeIndex._simple_new",
"pandas._libs.tslibs.Resolution.from_attrname",
"pandas.core.indexes.numeric.Int64Index",
"pandas.core.indexes.extension.inherit_names",
"pandas.core.indexes.base.maybe_extract_name",
"pandas.core.arrays.period.PeriodArray",
"pandas.core.arrays.period.raise_on_incompatible",
"pandas.errors.InvalidIndexError",
"pandas.core.arrays.period.period_array",
"pandas.core.dtypes.common.pandas_dtype",
"pandas.core.dtypes.common.is_datetime64_any_dtype",
"pandas.core.arrays.period.validate_dtype_freq",
"pandas.core.indexes.datetimes.Index.get_loc",
"pandas._libs.tslibs.Period",
"pandas.core.dtypes.missing.is_valid_na_for_dtype",
"pandas.core.common.count_not_none",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.arrays.period.PeriodArray._generate_range",
"pandas._libs.tslibs.parsing.parse_time_string",
"pandas.core.dtypes.common.is_integer",
"pandas.util._decorators.doc"
],
[
"pandas.Series",
"pandas.offsets.Day",
"pandas.DataFrame",
"numpy.random.randn",
"pandas._testing.assert_frame_equal",
"numpy.random.randint",
"numpy.arange",
"pandas._testing.makeTimeSeries",
"pandas.Index",
"pandas.tests.extension.decimal.array.to_decimal",
"pandas._testing.assert_series_equal",
"pandas.tests.extension.decimal.array.DecimalArray",
"pandas.concat",
"pandas._testing.assert_produces_warning",
"numpy.min",
"pandas.Categorical",
"pandas.Timedelta",
"pandas.tests.extension.decimal.array.make_data",
"pandas.MultiIndex.from_product",
"pandas.date_range",
"pandas.DataFrame.from_dict",
"numpy.array",
"pandas.CategoricalIndex",
"pandas.period_range",
"pandas.Timestamp",
"pandas.io.formats.printing.pprint_thing"
],
[
"pandas._testing.assert_produces_warning",
"pandas.Series",
"pandas.MultiIndex",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"pandas.MultiIndex.from_arrays",
"pandas.Index",
"pandas.MultiIndex.from_product",
"pandas._testing.assert_frame_equal",
"pandas._testing.assert_index_equal"
],
[
"pandas.Series",
"pandas.array",
"pandas.DataFrame",
"numpy.delete",
"numpy.array",
"numpy.zeros",
"pandas._testing.rands"
],
[
"pandas._libs.tslibs.timezones.maybe_get_tz",
"pandas.offsets.DateOffset",
"pandas._libs.tslibs.offsets.DateOffset",
"numpy.all",
"pandas.Index",
"pandas.DatetimeIndex",
"pandas._libs.tslibs.offsets.MonthEnd",
"pandas._libs.tslibs.offsets.BDay",
"pandas._libs.tslibs.timezones.dateutil_gettz",
"pandas._testing.assert_index_equal",
"pandas._testing.assert_produces_warning",
"pandas._libs.tslibs.offsets.CDay",
"pandas.bdate_range",
"pandas.offsets.FY5253",
"pandas.Timedelta",
"pandas.date_range",
"pandas.core.arrays.datetimes.generate_range",
"pandas.Timestamp.max.floor",
"pandas.Timestamp",
"pandas.offsets.CustomBusinessHour"
],
[
"numpy.array"
],
[
"pandas.io.formats.format.is_dates_only",
"pandas.Series",
"pandas.core.arrays.datetimes.tz_to_dtype",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas.core.indexes.base.Index",
"pandas.core.arrays.datetimes.DatetimeArray",
"pandas.core.indexes.api.PeriodIndex._simple_new",
"pandas.core.dtypes.common.is_datetime64_dtype",
"pandas.core.arrays.datetimes.DatetimeArray._from_sequence_not_strict",
"pandas._libs.tslibs.Resolution.from_attrname",
"pandas.core.tools.times.to_time",
"pandas.core.arrays.datetimes.DatetimeArray._generate_range",
"pandas.core.indexes.extension.inherit_names",
"pandas.core.indexes.base.get_unanimous_names",
"pandas.core.indexes.api.TimedeltaIndex._simple_new",
"pandas._libs.tslibs.to_offset",
"pandas.core.indexes.base.Index.slice_indexer",
"pandas.core.common.any_none",
"pandas.core.indexes.base.maybe_extract_name",
"pandas._libs.tslibs.timezones.tz_compare",
"pandas._libs.Period",
"pandas.core.indexes.base.Index.union",
"pandas.errors.InvalidIndexError",
"pandas._libs.Timestamp",
"pandas.core.indexes.api.Float64Index._simple_new",
"numpy.array",
"pandas.util._exceptions.find_stack_level",
"pandas.core.dtypes.missing.is_valid_na_for_dtype",
"pandas.core.dtypes.common.is_scalar",
"pandas._libs.tslibs.parsing.parse_time_string",
"pandas.core.indexes.base.Index.get_loc",
"pandas.io.formats.format.get_format_datetime64",
"pandas.util._decorators.doc"
],
[
"pandas.isna",
"pandas._testing.assert_frame_equal",
"pandas.Index",
"pandas.DataFrame"
],
[
"pandas.Series",
"pandas.api.types.CategoricalDtype",
"pandas._libs.algos.NegInfinity",
"numpy.isnan",
"numpy.arange",
"pandas.Timestamp",
"numpy.repeat",
"numpy.random.shuffle",
"numpy.random.randn",
"pandas.date_range",
"pandas._testing.assert_series_equal",
"numpy.array",
"pandas._libs.algos.Infinity"
],
[
"pandas._libs.hashtable.get_hashtable_trace_domain",
"pandas._testing.assert_numpy_array_equal",
"numpy.ones_like",
"pandas._libs.hashtable.Int64HashTable",
"numpy.isnan",
"pandas.core.algorithms.isin",
"numpy.arange",
"numpy.sort",
"pandas._libs.hashtable.PyObjectHashTable",
"numpy.all",
"numpy.full",
"pandas._libs.hashtable.object_hash",
"pandas._libs.hashtable.StringHashTable",
"numpy.zeros_like",
"pandas._libs.hashtable.mode",
"numpy.repeat",
"numpy.array",
"pandas._libs.hashtable.objects_are_equal"
],
[
"pandas.Series",
"pandas.offsets.Day",
"numpy.dtype",
"pandas.NaT.to_numpy",
"pandas.isna",
"pandas.offsets.Hour",
"pandas._testing.assert_numpy_array_equal",
"pandas.offsets.MonthBegin",
"numpy.arange",
"pandas.DatetimeIndex",
"pandas.offsets.MonthEnd",
"pandas.DatetimeTZDtype",
"pandas._testing.assert_index_equal",
"pandas.NaT.to_datetime64",
"pandas._testing.assert_produces_warning",
"numpy.isnan",
"pandas.core.dtypes.common.is_datetime64_any_dtype",
"pandas.Timedelta",
"numpy.timedelta64",
"pandas.offsets.YearBegin",
"pandas._testing.round_trip_pickle",
"numpy.array",
"pandas.offsets.YearEnd",
"pandas._testing.assert_equal",
"pandas.TimedeltaIndex",
"pandas.core.arrays.DatetimeArray._from_sequence",
"numpy.datetime64",
"pandas.offsets.Minute",
"pandas.Timestamp.now",
"pandas.Period",
"pandas.Timestamp",
"numpy.empty"
],
[
"pandas._libs.tslibs.Timestamp",
"numpy.asarray",
"pandas.core.dtypes.common.is_extension_array_dtype",
"pandas.core.dtypes.common.is_dtype_equal",
"pandas._libs.lib.is_scalar",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas._libs.tslibs.fields.round_nsint64",
"pandas.core.arrays.TimedeltaArray",
"pandas.core.dtypes.common.is_datetime64_dtype",
"pandas.core.indexers.check_array_indexer",
"pandas.core.nanops.nanmin",
"pandas.core.indexers.check_setitem_lengths",
"pandas.core.ops.common.unpack_zerodim_and_defer",
"pandas.compat.numpy.function.validate_median",
"pandas.core.dtypes.common.is_unsigned_integer_dtype",
"pandas.util._decorators.Substitution",
"pandas.core.ops.invalid.invalid_comparison",
"pandas._libs.tslibs.to_offset",
"pandas.errors.AbstractMethodError",
"pandas.core.construction.array",
"pandas._libs.lib.is_integer",
"pandas.Index",
"pandas._libs.lib.map_infer",
"pandas.errors.NullFrequencyError",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.dtypes.common.is_string_dtype",
"pandas.core.nanops.nanmedian",
"numpy.zeros",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.core.ops.invalid.make_invalid_op",
"pandas.core.dtypes.common.is_list_like",
"numpy.putmask",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas._libs.tslibs.delta_to_nanoseconds",
"pandas.core.dtypes.common.pandas_dtype",
"pandas.core.algorithms.checked_add_with_arr",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"pandas.core.dtypes.common.is_datetime64_any_dtype",
"pandas.core.dtypes.common.is_period_dtype",
"pandas._libs.tslibs.timestamps.integer_op_not_supported",
"pandas.core.arrays.DatetimeArray",
"numpy.int64",
"pandas._libs.algos.is_monotonic",
"numpy.errstate",
"pandas._libs.tslibs.Period._from_ordinal",
"pandas.compat.numpy.function.validate_minmax_axis",
"pandas.util._exceptions.find_stack_level",
"pandas.core.dtypes.missing.is_valid_na_for_dtype",
"pandas._libs.lib.is_float",
"numpy.array_equal",
"pandas.core.algorithms.isin",
"pandas._libs.tslibs.Resolution.get_reso_from_freq",
"pandas.core.arrays.TimedeltaArray._from_sequence",
"pandas.core.common.is_bool_indexer",
"pandas.compat.numpy.function.validate_max",
"pandas.compat.numpy.function.validate_min",
"pandas.tseries.frequencies.infer_freq",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.core.dtypes.missing.isna",
"pandas.core.dtypes.common.is_datetime_or_timedelta_dtype",
"pandas._libs.lib.infer_dtype",
"pandas.core.nanops.nanmax",
"pandas.core.construction.extract_array",
"numpy.empty"
],
[
"pandas.tseries.offsets.Day",
"pandas._libs.tslibs.Timestamp",
"pandas._libs.tslibs.timezones.maybe_get_tz",
"pandas._libs.tslibs.conversion.localize_pydatetime",
"pandas.tseries.offsets.BDay",
"pandas._testing.assert_numpy_array_equal",
"pandas.compat.np_datetime64_compat",
"pandas._libs.tslibs.Timestamp.now",
"pandas.tseries.offsets.DateOffset",
"pandas.DatetimeIndex",
"pandas.tseries.offsets.Easter",
"pandas._libs.tslibs.offsets._offset_map.clear",
"pandas.tseries.offsets.CustomBusinessHour",
"pandas._testing.assert_index_equal",
"pandas._testing.assert_produces_warning",
"pandas._libs.tslibs.offsets._get_offset",
"pandas._libs.tslibs.offsets._offset_map.items",
"numpy.timedelta64",
"pandas.tseries.offsets.LastWeekOfMonth",
"pandas._testing.round_trip_pickle",
"numpy.array",
"pandas._testing.assert_equal",
"pandas.tseries.offsets.WeekOfMonth",
"pandas.tseries.offsets.Nano",
"pandas.tseries.offsets.BMonthEnd",
"pandas.tseries.offsets.Week"
],
[
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.array",
"pandas._testing.assert_extension_array_equal"
],
[
"pandas._testing.assert_almost_equal",
"pandas.PeriodIndex",
"pandas.Series",
"pandas._libs.tslibs.Timestamp",
"pandas.DataFrame",
"numpy.random.randn",
"pandas._testing.assert_frame_equal",
"pandas.tests.io.pytables.common._maybe_remove",
"pandas.Index",
"pandas._testing.assert_series_equal",
"pandas.read_hdf",
"pandas.compat.is_platform_windows",
"pandas.tests.io.pytables.common.ensure_clean_store",
"pandas._testing.makeFloatSeries",
"numpy.random.rand",
"pandas.HDFStore",
"pandas._testing.makeTimeDataFrame",
"pandas.tests.io.pytables.common.ensure_clean_path",
"pandas.util._test_decorators.skip_if_no"
],
[
"pandas._testing.assert_almost_equal",
"pandas.to_datetime",
"pandas._testing.assert_sp_array_equal",
"pandas.Series",
"numpy.take",
"numpy.asarray",
"pandas._libs.sparse.IntIndex",
"pandas.DataFrame",
"numpy.dtype",
"numpy.random.randn",
"pandas.isna",
"pandas._testing.assert_frame_equal",
"pandas._testing.assert_numpy_array_equal",
"numpy.arange",
"numpy.sin",
"pandas.core.arrays.sparse.SparseArray",
"pandas.Int64Index",
"pandas._testing.assert_series_equal",
"numpy.zeros",
"pandas._testing.assert_produces_warning",
"pandas.core.arrays.sparse.SparseDtype",
"pandas.Series.sparse.from_coo",
"numpy.isnan",
"numpy.modf",
"pandas._testing.round_trip_pickle",
"pandas.MultiIndex.from_product",
"pandas.date_range",
"numpy.errstate",
"numpy.fromiter",
"numpy.array",
"numpy.abs",
"pandas.core.arrays.sparse.SparseArray.from_spmatrix",
"pandas.MultiIndex.from_arrays",
"numpy.add",
"pandas.Timestamp"
],
[
"pandas.timedelta_range",
"pandas.to_datetime",
"pandas.Series",
"numpy.arange",
"pandas.array",
"pandas.DataFrame",
"pandas.read_json",
"pandas.date_range",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.0",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20",
"1.0",
"0.25"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
DeVriesMatt/pointMLP-pytorch | [
"e9c09a2038551e83b072353f3fd7e3294463e892",
"e9c09a2038551e83b072353f3fd7e3294463e892",
"e9c09a2038551e83b072353f3fd7e3294463e892"
] | [
"classification_ModelNet40/test.py",
"autoencoder_elite_tearing_rotation_tokenclassifier.py",
"create_autoencoder_tearing.py"
] | [
"\"\"\"\npython test.py --model pointMLP --msg 20220209053148-404\n\"\"\"\nimport argparse\nimport os\nimport datetime\nimport torch\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torch.utils.data\nimport torch.utils.data.distributed\nfrom torch.utils.data import DataLoader\nimport models as models\nfrom utils import progress_bar, IOStream\nfrom data import ModelNet40\nimport sklearn.metrics as metrics\nfrom helper import cal_loss\nimport numpy as np\nimport torch.nn.functional as F\n\nmodel_names = sorted(\n name for name in models.__dict__ if callable(models.__dict__[name])\n)\n\n\ndef parse_args():\n \"\"\"Parameters\"\"\"\n parser = argparse.ArgumentParser(\"training\")\n parser.add_argument(\n \"-c\",\n \"--checkpoint\",\n type=str,\n metavar=\"PATH\",\n help=\"path to save checkpoint (default: checkpoint)\",\n )\n parser.add_argument(\"--msg\", type=str, help=\"message after checkpoint\")\n parser.add_argument(\n \"--batch_size\", type=int, default=16, help=\"batch size in training\"\n )\n parser.add_argument(\n \"--model\", default=\"pointMLP\", help=\"model name [default: pointnet_cls]\"\n )\n parser.add_argument(\n \"--num_classes\",\n default=40,\n type=int,\n choices=[10, 40],\n help=\"training on ModelNet10/40\",\n )\n parser.add_argument(\"--num_points\", type=int, default=1024, help=\"Point Number\")\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n print(f\"args: {args}\")\n os.environ[\"HDF5_USE_FILE_LOCKING\"] = \"FALSE\"\n\n if torch.cuda.is_available():\n device = \"cuda\"\n else:\n device = \"cpu\"\n print(f\"==> Using device: {device}\")\n if args.msg is None:\n message = str(datetime.datetime.now().strftime(\"-%Y%m%d%H%M%S\"))\n else:\n message = \"-\" + args.msg\n args.checkpoint = \"checkpoints/\" + args.model + message\n\n print(\"==> Preparing data..\")\n test_loader = DataLoader(\n ModelNet40(partition=\"test\", num_points=args.num_points),\n num_workers=4,\n batch_size=args.batch_size,\n shuffle=False,\n drop_last=False,\n )\n # Model\n print(\"==> Building model..\")\n net = models.__dict__[args.model]()\n criterion = cal_loss\n net = net.to(device)\n checkpoint_path = os.path.join(args.checkpoint, \"best_checkpoint.pth\")\n checkpoint = torch.load(checkpoint_path, map_location=torch.device(\"cpu\"))\n # criterion = criterion.to(device)\n if device == \"cuda\":\n net = torch.nn.DataParallel(net)\n cudnn.benchmark = True\n net.load_state_dict(checkpoint[\"net\"])\n\n test_out = validate(net, test_loader, criterion, device)\n print(f\"Vanilla out: {test_out}\")\n\n\ndef validate(net, testloader, criterion, device):\n net.eval()\n test_loss = 0\n correct = 0\n total = 0\n test_true = []\n test_pred = []\n time_cost = datetime.datetime.now()\n with torch.no_grad():\n for batch_idx, (data, label) in enumerate(testloader):\n data, label = data.to(device), label.to(device).squeeze()\n data = data.permute(0, 2, 1)\n logits = net(data)\n loss = criterion(logits, label)\n test_loss += loss.item()\n preds = logits.max(dim=1)[1]\n test_true.append(label.cpu().numpy())\n test_pred.append(preds.detach().cpu().numpy())\n total += label.size(0)\n correct += preds.eq(label).sum().item()\n progress_bar(\n batch_idx,\n len(testloader),\n \"Loss: %.3f | Acc: %.3f%% (%d/%d)\"\n % (\n test_loss / (batch_idx + 1),\n 100.0 * correct / total,\n correct,\n total,\n ),\n )\n\n time_cost = int((datetime.datetime.now() - time_cost).total_seconds())\n test_true = np.concatenate(test_true)\n test_pred = 
np.concatenate(test_pred)\n return {\n \"loss\": float(\"%.3f\" % (test_loss / (batch_idx + 1))),\n \"acc\": float(\"%.3f\" % (100.0 * metrics.accuracy_score(test_true, test_pred))),\n \"acc_avg\": float(\n \"%.3f\" % (100.0 * metrics.balanced_accuracy_score(test_true, test_pred))\n ),\n \"time\": time_cost,\n }\n\n\nif __name__ == \"__main__\":\n main()\n",
"import torch\nfrom torch import nn\nimport classification_ModelNet40.models as models\nimport torch.backends.cudnn as cudnn\nfrom classification_ScanObjectNN.models import pointMLPElite\n\n# from cell_dataset import PointCloudDatasetAllBoth\nfrom torch.utils.data import DataLoader\nimport numpy as np\nimport pandas as pd\nfrom foldingnet import ReconstructionNet, ChamferLoss\nfrom angle_loss import AngleLoss\nfrom dataset import (\n PointCloudDatasetAllBoth,\n PointCloudDatasetAllBothNotSpec,\n PointCloudDatasetAllBothNotSpec1024,\n PointCloudDatasetAllBothNotSpecRotation,\n PointCloudDatasetAllBothNotSpecRotation1024,\n PointCloudDatasetAllBothNotSpec2DRotation1024,\n PointCloudDatasetAllBothKLDivergranceRotation1024\n)\nimport argparse\nimport os\nfrom tearing.folding_decoder import FoldingNetBasicDecoder\n\n\nclass MLPAutoencoder(nn.Module):\n def __init__(self, encoder, decoder):\n super(MLPAutoencoder, self).__init__()\n self.encoder = encoder\n token = torch.Tensor(50)\n rotation_token = nn.Parameter(token)\n self.rotation_token = nn.init.normal_(rotation_token)\n self.token_classifier = nn.Linear(50, 24)\n self.decoder = decoder\n\n def forward(self, x):\n embeddings = self.encoder(x)\n new_embeddings = torch.mul(embeddings, self.rotation_token)\n rotation_classifier = self.token_classifier(self.rotation_token)\n outs, grid = self.decoder(new_embeddings)\n return outs, embeddings, grid, new_embeddings, rotation_classifier\n\n\ndef create_dir_if_not_exist(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description=\"Pointmlp-foldingnet\")\n parser.add_argument(\n \"--dataset_path\",\n default=\"/home/mvries/Documents/Datasets/OPM/SingleCellFromNathan_17122021/\",\n type=str,\n )\n parser.add_argument(\n \"--dataframe_path\",\n default=\"/home/mvries/Documents/Datasets/OPM/SingleCellFromNathan_17122021/all_cell_data.csv\",\n type=str,\n )\n parser.add_argument(\"--output_path\", default=\"./\", type=str)\n parser.add_argument(\"--num_epochs\", default=250, type=int)\n parser.add_argument(\n \"--pmlp_ckpt_path\", default=\"best_checkpoint_elite.pth\", type=str\n )\n parser.add_argument(\n \"--fold_ckpt_path\",\n default=\"/home/mvries/Documents/GitHub/FoldingNetNew/nets/FoldingNetNew_50feats_planeshape_foldingdecoder_trainallTrue_centringonlyTrue_train_bothTrue_003.pt\",\n type=str,\n )\n parser.add_argument(\n \"--full_checkpoint_path\",\n default=\"/home/mvries/Documents/GitHub/pointMLP-pytorch/\"\n \"pointmlpelite_foldingTearingVersion_autoencoder_allparams.pt\",\n type=str,\n )\n\n args = parser.parse_args()\n df = args.dataframe_path\n root_dir = args.dataset_path\n output_path = args.output_path\n num_epochs = args.num_epochs\n pmlp_ckpt_path = args.pmlp_ckpt_path\n fold_ckpt_path = args.fold_ckpt_path\n full_checkpoint_path = args.full_checkpoint_path\n\n name_net = output_path + \"pointmlpelite_foldingTearingVersion_autoencoder_allparams1024RotationTokenClassifier\"\n print(\"==> Building encoder...\")\n net = pointMLPElite(num_classes=15)\n device = \"cuda\"\n net = net.to(device)\n if device == \"cuda\":\n net = torch.nn.DataParallel(net)\n cudnn.benchmark = True\n\n # checkpoint_path = pmlp_ckpt_path\n # checkpoint = torch.load(checkpoint_path)\n # net.load_state_dict(checkpoint[\"net\"])\n # for param in net.module.parameters():\n # param.requires_grad = False\n new_embedding = nn.Linear(in_features=256, out_features=50, bias=True)\n net.module.classifier[8] = new_embedding\n 
net.module.classifier[8].weight.requires_grad = True\n net.module.classifier[8].bias.requires_grad = True\n print(net.module.classifier)\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n print(\"==> Building decoder...\")\n decoder = FoldingNetBasicDecoder(num_features=50, num_clusters=10)\n\n model = MLPAutoencoder(encoder=net.module, decoder=decoder).cuda()\n\n checkpoint = torch.load(full_checkpoint_path)\n model_dict = model.state_dict() # load parameters from pre-trained FoldingNet\n for k in checkpoint[\"model_state_dict\"]:\n\n if k in model_dict:\n model_dict[k] = checkpoint[\"model_state_dict\"][k]\n print(\" Found weight: \" + k)\n elif k.replace(\"folding1\", \"folding\") in model_dict:\n model_dict[k.replace(\"folding1\", \"folding\")] = checkpoint[\n \"model_state_dict\"\n ][k]\n print(\" Found weight: \" + k)\n # model.load_state_dict(torch.load(full_checkpoint_path)['model_state_dict'])\n\n data = torch.rand(2, 3, 1024).cuda()\n print(\"===> testing pointMLP ...\")\n out, embedding, _, pred_a = model(data)\n print(out.shape)\n print(embedding.shape)\n\n batch_size = 16\n learning_rate = 0.0001\n dataset = PointCloudDatasetAllBothKLDivergranceRotation1024(\n df,\n root_dir,\n transform=None,\n img_size=400,\n target_transform=True,\n centring_only=True,\n cell_component=\"cell\",\n )\n\n dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)\n optimizer = torch.optim.Adam(\n filter(lambda p: p.requires_grad, model.parameters()),\n lr=learning_rate * 16 / batch_size,\n betas=(0.9, 0.999),\n weight_decay=1e-8,\n )\n criterion = ChamferLoss()\n criterion_rot_a = AngleLoss()\n criterion_rot_b = AngleLoss()\n criterion_rot_c = AngleLoss()\n total_loss = 0.0\n rec_loss = 0.0\n clus_loss = 0.0\n num_epochs = num_epochs\n model.train()\n threshold = 0.0\n losses = []\n test_acc = []\n best_acc = 0.0\n best_loss = 1000000000\n niter = 1\n for epoch in range(num_epochs):\n batch_num = 1\n running_loss = 0.0\n print(\"Training epoch {}\".format(epoch))\n model.train()\n batches = []\n\n for i, data in enumerate(dataloader, 0):\n image, rotated_image, serial_number = data\n inputs = image.to(device)\n rotated_inputs = rotated_image.to(device)\n\n # ===================forward=====================\n with torch.set_grad_enabled(True):\n outputs, embeddings, grid, new_embedding = model(rotated_inputs.permute(0, 2, 1))\n optimizer.zero_grad() \n loss_rec = criterion(inputs, outputs)\n \n # ===================backward====================\n loss = loss_rec\n loss.backward()\n optimizer.step()\n\n running_loss += loss.detach().item() / batch_size\n batch_num += 1\n niter += 1\n\n lr = np.asarray(optimizer.param_groups[0][\"lr\"])\n\n if i % 10 == 0:\n print(\n \"[%d/%d][%d/%d]\\tLossTot: %.2f\\tLossRec: %.2f\"\n % (\n epoch,\n num_epochs,\n i,\n len(dataloader),\n loss.detach().item() / batch_size,\n loss_rec.detach().item() / batch_size,\n )\n )\n\n # ===================log========================\n total_loss = running_loss / len(dataloader)\n if total_loss < best_loss:\n checkpoint = {\n \"model_state_dict\": model.state_dict(),\n \"optimizer_state_dict\": optimizer.state_dict(),\n \"epoch\": epoch,\n \"loss\": total_loss,\n }\n best_loss = total_loss\n create_dir_if_not_exist(output_path)\n print(\n \"Saving model to:\"\n + name_net\n + \".pt\"\n + \" with loss = {}\".format(total_loss)\n + \" at epoch {}\".format(epoch)\n )\n torch.save(checkpoint, name_net + \".pt\")\n print(\"epoch [{}/{}], loss:{}\".format(epoch + 1, num_epochs, 
total_loss))\n\n print(\n \"epoch [{}/{}], loss:{:.4f}, Rec loss:{:.4f}\".format(\n epoch + 1, num_epochs, total_loss, total_loss\n )\n )\n",
"import torch\nfrom torch import nn\nimport classification_ModelNet40.models as models\nimport torch.backends.cudnn as cudnn\nfrom classification_ModelNet40.models import pointMLP\n\n# from cell_dataset import PointCloudDatasetAllBoth\nfrom torch.utils.data import DataLoader\nimport numpy as np\nimport pandas as pd\nfrom foldingnet import ReconstructionNet, ChamferLoss\nfrom dataset import PointCloudDatasetAllBoth, PointCloudDatasetAllBothNotSpec\nimport argparse\nimport os\nfrom tearing.folding_decoder import FoldingNetBasicDecoder\n\n\nclass MLPAutoencoder(nn.Module):\n def __init__(self, encoder, decoder):\n super(MLPAutoencoder, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n\n def forward(self, x):\n embedding = self.encoder(x)\n output, grid = self.decoder(embedding)\n return output, embedding, grid\n\n\ndef create_dir_if_not_exist(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description=\"Pointmlp-foldingnet\")\n parser.add_argument(\n \"--dataset_path\",\n default=\"/home/mvries/Documents/Datasets/OPM/SingleCellFromNathan_17122021/\",\n type=str,\n )\n parser.add_argument(\n \"--dataframe_path\",\n default=\"/home/mvries/Documents/Datasets/OPM/SingleCellFromNathan_17122021/all_cell_data.csv\",\n type=str,\n )\n parser.add_argument(\"--output_path\", default=\"./\", type=str)\n parser.add_argument(\"--num_epochs\", default=250, type=int)\n parser.add_argument(\"--pmlp_ckpt_path\", default=\"best_checkpoint.pth\", type=str)\n parser.add_argument(\n \"--fold_ckpt_path\",\n default=\"/home/mvries/Documents/GitHub/FoldingNetNew/nets/FoldingNetNew_50feats_planeshape_foldingdecoder_trainallTrue_centringonlyTrue_train_bothTrue_003.pt\",\n type=str,\n )\n parser.add_argument(\n \"--full_checkpoint_path\",\n default=\"/run/user/1128299809/gvfs/smb-share:server=rds.icr.ac.uk,share=data/DBI/DUDBI/DYNCESYS/mvries/ResultsAlma/pointMLP-pytorch/pointmlp_folding_autoencoder.pt\",\n type=str,\n )\n\n args = parser.parse_args()\n df = args.dataframe_path\n root_dir = args.dataset_path\n output_path = args.output_path\n num_epochs = args.num_epochs\n pmlp_ckpt_path = args.pmlp_ckpt_path\n fold_ckpt_path = args.fold_ckpt_path\n full_checkpoint_path = args.full_checkpoint_path\n\n name_net = output_path + \"pointmlp_foldingTearingVersion_autoencoder_allparams\"\n print(\"==> Building encoder...\")\n net = pointMLP()\n device = \"cuda\"\n net = net.to(device)\n if device == \"cuda\":\n net = torch.nn.DataParallel(net)\n cudnn.benchmark = True\n\n checkpoint_path = pmlp_ckpt_path\n checkpoint = torch.load(checkpoint_path)\n net.load_state_dict(checkpoint[\"net\"])\n # for param in net.module.parameters():\n # param.requires_grad = False\n new_embedding = nn.Linear(in_features=256, out_features=50, bias=True)\n net.module.classifier[8] = new_embedding\n net.module.classifier[8].weight.requires_grad = True\n net.module.classifier[8].bias.requires_grad = True\n print(net.module.classifier)\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n print(\"==> Building decoder...\")\n decoder = FoldingNetBasicDecoder(num_features=50, num_clusters=10)\n\n model = MLPAutoencoder(encoder=net.module, decoder=decoder).cuda()\n\n checkpoint = torch.load(full_checkpoint_path)\n model_dict = model.state_dict() # load parameters from pre-trained FoldingNet\n for k in checkpoint[\"model_state_dict\"]:\n if k in model_dict:\n model_dict[k] = checkpoint[\"model_state_dict\"][k]\n print(\" 
Found weight: \" + k)\n elif k.replace(\"folding1\", \"folding\") in model_dict:\n model_dict[k.replace(\"folding1\", \"folding\")] = checkpoint[\n \"model_state_dict\"\n ][k]\n print(\" Found weight: \" + k)\n model.load_state_dict(model_dict)\n\n data = torch.rand(2, 3, 2048).cuda()\n print(\"===> testing pointMLP ...\")\n out, embedding, _ = model(data)\n print(out.shape)\n print(embedding.shape)\n\n batch_size = 16\n learning_rate = 0.00001\n dataset = PointCloudDatasetAllBothNotSpec(\n df,\n root_dir,\n transform=None,\n img_size=400,\n target_transform=True,\n centring_only=True,\n cell_component=\"cell\",\n )\n\n dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)\n optimizer = torch.optim.Adam(\n filter(lambda p: p.requires_grad, model.parameters()),\n lr=learning_rate * 16 / batch_size,\n betas=(0.9, 0.999),\n weight_decay=1e-6,\n )\n criterion = ChamferLoss()\n total_loss = 0.0\n rec_loss = 0.0\n clus_loss = 0.0\n num_epochs = num_epochs\n model.train()\n threshold = 0.0\n losses = []\n test_acc = []\n best_acc = 0.0\n best_loss = 1000000000\n niter = 1\n for epoch in range(num_epochs):\n batch_num = 1\n running_loss = 0.0\n print(\"Training epoch {}\".format(epoch))\n model.train()\n batches = []\n\n for i, data in enumerate(dataloader, 0):\n inputs, labels, _ = data\n inputs = inputs.to(device)\n\n # ===================forward=====================\n with torch.set_grad_enabled(True):\n output, embedding, _ = model(inputs.permute(0, 2, 1))\n optimizer.zero_grad()\n loss = criterion(inputs, output)\n # ===================backward====================\n loss.backward()\n optimizer.step()\n\n running_loss += loss.detach().item() / batch_size\n batch_num += 1\n niter += 1\n\n lr = np.asarray(optimizer.param_groups[0][\"lr\"])\n\n if i % 10 == 0:\n print(\n \"[%d/%d][%d/%d]\\tLossTot: %.4f\\tLossRec: %.4f\"\n % (\n epoch,\n num_epochs,\n i,\n len(dataloader),\n loss.detach().item() / batch_size,\n loss.detach().item() / batch_size,\n )\n )\n\n # ===================log========================\n total_loss = running_loss / len(dataloader)\n if total_loss < best_loss:\n checkpoint = {\n \"model_state_dict\": model.state_dict(),\n \"optimizer_state_dict\": optimizer.state_dict(),\n \"epoch\": epoch,\n \"loss\": total_loss,\n }\n best_loss = total_loss\n create_dir_if_not_exist(output_path)\n print(\n \"Saving model to:\"\n + name_net\n + \".pt\"\n + \" with loss = {}\".format(total_loss)\n + \" at epoch {}\".format(epoch)\n )\n torch.save(checkpoint, name_net + \".pt\")\n print(\"epoch [{}/{}], loss:{}\".format(epoch + 1, num_epochs, total_loss))\n\n print(\n \"epoch [{}/{}], loss:{:.4f}, Rec loss:{:.4f}\".format(\n epoch + 1, num_epochs, total_loss, total_loss\n )\n )\n"
] | [
[
"sklearn.metrics.balanced_accuracy_score",
"numpy.concatenate",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device",
"torch.nn.DataParallel",
"sklearn.metrics.accuracy_score"
],
[
"torch.nn.Parameter",
"torch.Tensor",
"torch.load",
"numpy.asarray",
"torch.utils.data.DataLoader",
"torch.nn.Linear",
"torch.mul",
"torch.nn.init.normal_",
"torch.rand",
"torch.cuda.is_available",
"torch.set_grad_enabled",
"torch.nn.DataParallel",
"torch.save"
],
[
"torch.load",
"numpy.asarray",
"torch.utils.data.DataLoader",
"torch.nn.Linear",
"torch.set_grad_enabled",
"torch.rand",
"torch.cuda.is_available",
"torch.nn.DataParallel",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
roy881020/VSGNet | [
"a9ba741871d1d7ff401cecf23659f0b75576e7c3",
"a9ba741871d1d7ff401cecf23659f0b75576e7c3"
] | [
"scripts_hico/HICO_eval/bbox_utils.py",
"scripts/prior_vcoco.py"
] | [
"import numpy as np\n#import skimage.draw as skdraw\n\n\ndef add_bbox(img,bbox,color=[0,0,0],fill=False,alpha=1):\n x1,y1,x2,y2 = bbox\n \n # Clockwise starting from top left\n r = [y1,y1,y2,y2]\n c = [x1,x2,x2,x1]\n \n if fill:\n coords = skdraw.polygon(r,c,shape=img.shape[0:2])\n skdraw.set_color(img,coords,color,alpha=alpha)\n return\n\n peri_coords = skdraw.polygon_perimeter(r,c,shape=img.shape[0:2])\n skdraw.set_color(img,peri_coords,color,alpha=alpha)\n\n\ndef compute_area(bbox,invalid=None):\n x1,y1,x2,y2 = bbox\n\n if (x2 <= x1) or (y2 <= y1):\n area = invalid\n else:\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n\n return area\n\n\ndef compute_iou(bbox1,bbox2,verbose=False):\n x1,y1,x2,y2 = bbox1\n x1_,y1_,x2_,y2_ = bbox2\n \n x1_in = max(x1,x1_)\n y1_in = max(y1,y1_)\n x2_in = min(x2,x2_)\n y2_in = min(y2,y2_)\n\n intersection = compute_area(bbox=[x1_in,y1_in,x2_in,y2_in],invalid=0.0)\n area1 = compute_area(bbox1)\n area2 = compute_area(bbox2)\n union = area1 + area2 - intersection\n iou = intersection / (union + 1e-6)\n\n if verbose:\n return iou, intersection, union\n\n return iou \n\n\ndef compute_area_batch(bbox):\n x1,y1,x2,y2 = [bbox[:,i] for i in range(4)]\n area = np.zeros(x1.shape[0])\n valid_mask = np.logical_and(x2 > x1, y2 > y1)\n area_ = (x2 - x1 + 1) * (y2 - y1 + 1)\n area[valid_mask] = area_[valid_mask]\n return area\n\n\ndef compute_iou_batch(bbox1,bbox2,verbose=False):\n x1,y1,x2,y2 = [bbox1[:,i] for i in range(4)]\n x1_,y1_,x2_,y2_ = [bbox2[:,i] for i in range(4)]\n \n x1_in = np.maximum(x1,x1_)\n y1_in = np.maximum(y1,y1_)\n x2_in = np.minimum(x2,x2_)\n y2_in = np.minimum(y2,y2_)\n \n intersection_bbox = np.stack((x1_in,y1_in,x2_in,y2_in),1)\n intersection = compute_area_batch(bbox=intersection_bbox)\n \n area1 = compute_area_batch(bbox1)\n area2 = compute_area_batch(bbox2)\n union = area1 + area2 - intersection\n iou = intersection / (union + 1e-6)\n \n if verbose:\n return iou, intersection, union\n\n return iou \n \n\ndef vis_bbox(bbox,img,color=(0,0,0),modify=False):\n im_h,im_w = img.shape[0:2]\n x1,y1,x2,y2 = bbox\n x1 = max(0,min(x1,im_w-1))\n x2 = max(x1,min(x2,im_w-1))\n y1 = max(0,min(y1,im_h-1))\n y2 = max(y1,min(y2,im_h-1))\n r = [y1,y1,y2,y2]\n c = [x1,x2,x2,x1]\n\n if modify:\n img_ = img\n else:\n img_ = np.copy(img)\n\n rr,cc = skdraw.polygon(r,c,img.shape[:2])\n skdraw.set_color(img_,(rr,cc),color,alpha=0.2)\n\n rr,cc = skdraw.polygon_perimeter(r,c,img.shape[:2])\n for k in range(3):\n img_[rr,cc,k] = color[k]\n\n return img_\n\n\ndef vis_bboxes(bboxes,img,color=(0,0,0),modify=False):\n if modify:\n img_ = img\n else:\n img_ = np.copy(img)\n\n for bbox in bboxes:\n img_ = vis_bbox(bbox,img_,color,True)\n\n return img_\n\n\ndef join_bboxes_by_line(bbox1,bbox2,img,color=(255,0,255),modify=False):\n im_h,im_w = img.shape[0:2]\n x1,y1,x2,y2 = bbox1\n x1_,y1_,x2_,y2_ = bbox2\n\n c0 = 0.5*(x1+x2)\n r0 = 0.5*(y1+y2)\n c1 = 0.5*(x1_+x2_)\n r1 = 0.5*(y1_+y2_)\n r0,c0,r1,c1 = [int(x) for x in [r0,c0,r1,c1]]\n c0 = max(0,min(c0,im_w-1))\n c1 = max(0,min(c1,im_w-1))\n r0 = max(0,min(r0,im_h-1))\n r1 = max(0,min(r1,im_h-1))\n rr,cc,val = skdraw.draw.line_aa(r0,c0,r1,c1)\n \n if modify:\n img_ = img\n else:\n img_ = np.copy(img)\n\n for k in range(3):\n img_[rr,cc,k] = val*color[k]\n\n rr,cc = skdraw.circle(r0,c0,4,img_.shape[:2])\n for k in range(3):\n img_[rr,cc,k] = color[k]\n\n rr,cc = skdraw.circle(r1,c1,4,img_.shape[:2])\n for k in range(3):\n img_[rr,cc,k] = color[k]\n\n return img_\n\n\ndef vis_sub_obj_bboxes(\n sub_bboxes,\n obj_bboxes,\n img,\n 
sub_color=(0,0,255),\n obj_color=(255,0,0),\n modify=False):\n\n img_ = vis_bboxes(sub_bboxes,img,sub_color,modify)\n img_ = vis_bboxes(obj_bboxes,img_,obj_color,modify=True)\n \n for sub_bbox,obj_bbox in zip(sub_bboxes,obj_bboxes):\n img_ = join_bboxes_by_line(sub_bbox,obj_bbox,img_,modify=True)\n\n return img_\n\n\ndef vis_human_keypts(\n img,\n keypts,\n radius=2,\n pt_color=(0,255,255),\n line_color=(0,255,255),\n modify=False):\n LINKS = [\n (0,1),\n (1,2),\n (2,3),\n (3,4),\n (1,5),\n (5,6),\n (6,7),\n (0,15),\n (15,17),\n (0,14),\n (14,16),\n (1,8),\n (8,9),\n (9,10),\n (1,11),\n (11,12),\n (12,13),\n (8,11)\n ]\n\n if modify:\n img_ = img\n else:\n img_ = np.copy(img)\n\n h,w = img.shape[:2]\n\n for i,j in LINKS:\n c0,r0,conf0 = keypts[i]\n c1,r1,conf1 = keypts[j]\n r0,r1 = [max(0,min(h-1,int(v))) for v in [r0,r1]]\n c0,c1 = [max(0,min(w-1,int(v))) for v in [c0,c1]]\n if conf0 > 0 and conf1 > 0:\n rr,cc,val = skdraw.draw.line_aa(r0,c0,r1,c1)\n for k in range(3):\n img_[rr,cc,k] = val*line_color[k]\n\n num_keypts = keypts.shape[0]\n for i in range(num_keypts):\n c,r,conf = keypts[i]\n if conf==0.0:\n continue\n \n rr,cc = skdraw.circle(r,c,radius,img_.shape[:2])\n for k in range(3):\n img_[rr,cc,k] = pt_color[k]\n\n return img_\n\n\n",
"##### This script will refine the predictions based on detected object by the object detector. Following by the work of https://github.com/vt-vl-lab/iCAN#######\n\n\nimport numpy as np\nimport pickle\n\nwith open('../infos/prior.pickle', 'rb') as fp: priors = pickle.load(fp, encoding='bytes')\n\n\ndef apply_prior(Object, prediction_HOI_in):\n prediction_HOI = np.ones(prediction_HOI_in.shape)\n for index, prediction in enumerate(prediction_HOI):\n prediction_HOI[index] = priors[int(Object[index])]\n return prediction_HOI\n\n\nif __name__ == '__main__':\n res = {}\n for k in range(80):\n prediction_HOI = np.ones((1, 29))\n res[k] = apply_prior([k], prediction_HOI)\n\n\n\n"
] | [
[
"numpy.maximum",
"numpy.minimum",
"numpy.stack",
"numpy.copy",
"numpy.logical_and",
"numpy.zeros"
],
[
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
PacktPublishing/Python-Machine-Learning-Solutions-V- | [
"130c9881757fa90bbb124d48ddd0c6c1136fa20c"
] | [
"Section_07_code/speech_recognizer.py"
] | [
"import os\nimport argparse\nimport warnings\nimport numpy as np\nfrom scipy.io import wavfile\nfrom hmmlearn import hmm\nfrom python_speech_features import mfcc\n\n# Function to parse input arguments\ndef build_arg_parser():\n parser = argparse.ArgumentParser(description='Trains the HMM classifier')\n parser.add_argument(\"--input-folder\", dest=\"input_folder\", required=True,\n help=\"Input folder containing the audio files in subfolders\")\n return parser\n\n\n# Class to handle all HMM related processing\nclass HMMTrainer(object):\n def __init__(self, model_name='GaussianHMM', n_components=4, cov_type='diag', n_iter=1000):\n self.model_name = model_name\n self.n_components = n_components\n self.cov_type = cov_type\n self.n_iter = n_iter\n self.models = []\n\n if self.model_name == 'GaussianHMM':\n self.model = hmm.GaussianHMM(n_components=self.n_components,\n covariance_type=self.cov_type, n_iter=self.n_iter)\n else:\n raise TypeError('Invalid model type')\n\n # X is a 2D numpy array where each row is 13D\n def train(self, X):\n np.seterr(all='ignore')\n self.models.append(self.model.fit(X))\n\n # Run the model on input data\n def get_score(self, input_data):\n return self.model.score(input_data)\n\nif __name__=='__main__':\n args = build_arg_parser().parse_args()\n input_folder = args.input_folder\n\n hmm_models = []\n\n # Parse the input directory\n for dirname in os.listdir(input_folder):\n # Get the name of the subfolder\n subfolder = os.path.join(input_folder, dirname)\n\n if not os.path.isdir(subfolder):\n continue\n\n # Extract the label\n label = subfolder[subfolder.rfind('/') + 1:]\n\n # Initialize variables\n X = np.array([])\n y_words = []\n warnings.filterwarnings(\"ignore\")\n # Iterate through the audio files (leaving 1 file for testing in each class)\n for filename in [x for x in os.listdir(subfolder) if x.endswith('.wav')][:-1]:\n # Read the input file\n filepath = os.path.join(subfolder, filename)\n sampling_freq, audio = wavfile.read(filepath)\n\n # Extract MFCC features\n mfcc_features = mfcc(audio, sampling_freq)\n\n # Append to the variable X\n if len(X) == 0:\n X = mfcc_features\n else:\n X = np.append(X, mfcc_features, axis=0)\n\n # Append the label\n y_words.append(label)\n\n #print('X.shape =', X.shape)\n # Train and save HMM model\n hmm_trainer = HMMTrainer()\n hmm_trainer.train(X)\n hmm_models.append((hmm_trainer, label))\n hmm_trainer = None\n\n # Test files\n input_files = [\n 'data/pineapple/pineapple15.wav',\n 'data/orange/orange15.wav',\n 'data/apple/apple15.wav',\n 'data/kiwi/kiwi15.wav'\n ]\n\n # Classify input data\n for input_file in input_files:\n # Read input file\n sampling_freq, audio = wavfile.read(input_file)\n\n # Extract MFCC features\n mfcc_features = mfcc(audio, sampling_freq)\n\n # Define variables\n max_score = [float(\"-inf\")]\n output_label = [float(\"-inf\")]\n\n # Iterate through all HMM models and pick\n # the one with the highest score\n for item in hmm_models:\n hmm_model, label = item\n score = hmm_model.get_score(mfcc_features)\n if score > max_score:\n max_score = score\n output_label = label\n\n # Print the output\n print( \"\\nTrue:\", input_file[input_file.find('/')+1:input_file.rfind('/')])\n print(\"Predicted:\", output_label)\n warnings.filterwarnings(\"ignore\")\n"
] | [
[
"numpy.seterr",
"numpy.append",
"numpy.array",
"scipy.io.wavfile.read"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
baklanovp/pystella | [
"f6f44ed12d9648585a52a09e15d494daa4c70c59",
"f6f44ed12d9648585a52a09e15d494daa4c70c59"
] | [
"tests/test_reader_table.py",
"tests/test_velocity.py"
] | [
"# coding=utf-8\nimport numpy as np\nimport unittest\n\nimport pystella as ps\n# from pystella.rf import band\n# from pystella.rf.lc import LightCurve\n# from pystella.util.reader_table import read_table_header_float, table2curves, read_obs_table_header, curves2table\n\n__author__ = 'bakl'\n\n\ndef lc_create(b, m=-19, dt=0.):\n n = 10\n time = np.linspace(0. + dt, 200. + dt, n)\n mags = m * np.ones(n)\n return ps.LightCurve(b, time, mags)\n\n\nclass TestReaderTable(unittest.TestCase):\n def test_read_table_header_float(self):\n fname = 'data/stella/cat_R500_M15_Ni006_E12.gri'\n data = ps.util.read_table_header_float(fname)\n cols = len(data.dtype.names)\n self.assertTrue(cols == 15,\n msg=\"The number of colums in the data should be 15, but it's : %d.\" % cols)\n\n def test_read_table_header_float_skiprows(self):\n fname = 'data/stella/rednova_R3.2_M6_Ni0_E0.25.tt'\n data = ps.util.read_table_header_float(fname, skip=87)\n cols = len(data.dtype.names)\n self.assertTrue(cols == 14,\n msg=\"The number of colums in [%s] should be 14, but it's : %d.\" % (fname, cols))\n\n def test_table2curves_no_bands(self):\n ps.Band.load_settings()\n fname = 'data/stella/rednova_R3.2_M6_Ni0_E0.25.tt'\n data = ps.util.read_table_header_float(fname, skip=87)\n data.dtype.names = [col.replace('M', '') for col in data.dtype.names]\n curves = ps.table2curves('test', data)\n for bname in curves.BandNames:\n self.assertTrue(bname in data.dtype.names,\n msg=\"No band %s in [%s] after table2curves.\" % (bname, ''.join(data.dtype.names)))\n\n def test_curves2table(self):\n ps.Band.load_settings()\n fname = 'data/stella/rednova_R3.2_M6_Ni0_E0.25.tt'\n data = ps.util.read_table_header_float(fname, skip=87)\n data.dtype.names = [col.replace('M', '') for col in data.dtype.names]\n curves = ps.table2curves('test', data, is_filter_zero=False)\n tbl = ps.curves2table(curves)\n self.assertCountEqual(curves.Length, len(tbl.names))\n\n def test_read_obs_table_header(self):\n fname = 'data/obs/1999em-uphHamuy.dat'\n tbl, cols_data = ps.util.read_obs_table_header(fname, is_out=True)\n for c in ('JD', 'V'):\n self.assertTrue(c in tbl.dtype.names,\n msg=\"No band %s in [%s] after read_obs_table_header.\" % (c, ','.join(tbl.dtype.names)))\n",
"import os\nimport unittest\nfrom os.path import dirname, abspath, join\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nfrom matplotlib import interactive\n\nfrom pystella import velocity as vel\n\n# interactive(True)\n\n\n__author__ = 'bakl'\n\n\nclass TestVelocity(unittest.TestCase):\n def setUp(self):\n self.mname = 'cat_R1000_M15_Ni007_E15'\n self.path = join(dirname(abspath(__file__)), 'data', 'stella')\n\n # @unittest.skip(\"just for plot\")\n def test_velocity_ttres(self):\n fig = plt.figure(num=None, figsize=(12, 8), dpi=100, facecolor='w', edgecolor='k')\n gs1 = gridspec.GridSpec(4, 1)\n plt.matplotlib.rcParams.update({'font.size': 14})\n\n nm, path = self.mname, self.path\n # nm = 'rn300_R10_M3Mht3t30_Ni0_E003wAq2e3'\n # path = '~/Sn/Release/svn_kepler/stella/branches/lucy/run/res/sncurve/rednovaM31/tt/'\n print(nm, os.path.expanduser(path))\n vels = vel.compute_vel_res_tt(nm, os.path.expanduser(path), is_new_std=False)\n\n ax = fig.add_subplot(gs1[:, 0])\n vel.plot_vel(ax, vels)\n plt.show() # @unittest.skip(\"just for plot\")\n\n def test_velocity_ttres_is_new_std(self):\n fig = plt.figure(num=None, figsize=(12, 8), dpi=100, facecolor='w', edgecolor='k')\n gs1 = gridspec.GridSpec(4, 1)\n plt.matplotlib.rcParams.update({'font.size': 14})\n\n nm, path = self.mname, self.path\n # nm = 'rn300_R10_M3Mht3t30_Ni0_E003wAq2e3'\n # path = '~/Sn/Release/svn_kepler/stella/branches/lucy/run/res/sncurve/rednovaM31/tt/'\n nm, path = 'nirefE5R50M26Ni3m2b2m4Z01', '/home/bakl/Sn/Release/seb_git/run/87a/2fit/tmp'\n print(nm, os.path.expanduser(path))\n vels = vel.compute_vel_res_tt(nm, os.path.expanduser(path), is_new_std=True)\n\n ax = fig.add_subplot(gs1[:, 0])\n vel.plot_vel(ax, vels)\n plt.show()\n\n def test_velocity_swd(self):\n fig = plt.figure(num=None, figsize=(12, 8), dpi=100, facecolor='w', edgecolor='k')\n gs1 = gridspec.GridSpec(4, 1)\n plt.matplotlib.rcParams.update({'font.size': 14})\n\n nm, path = self.mname, self.path\n # nm = 'rn300_R10_M3Mht3t30_Ni0_E003wAq2e3'\n # path = '~/Sn/Release/svn_kepler/stella/branches/lucy/run/res/sncurve/rednovaM31/tt/'\n vels = vel.compute_vel_swd(nm, os.path.expanduser(path))\n\n ax = fig.add_subplot(gs1[:, 0])\n vel.plot_vel(ax, vels)\n plt.show()\n\n def test_velocity_compare_ttresVSswd(self):\n nm, path = self.mname, self.path\n # nm = 'rn300_R10_M3Mht3t30_Ni0_E003wAq2e3'\n # path = '~/Sn/Release/svn_kepler/stella/branches/lucy/run/res/sncurve/rednovaM31/tt/'\n vels_tt = vel.compute_vel_res_tt(nm, os.path.expanduser(path))\n vels_swd = vel.compute_vel_swd(nm, os.path.expanduser(path))\n\n plt.matplotlib.rcParams.update({'font.size': 14})\n fig = plt.figure(num=None, figsize=(12, 8), dpi=100, facecolor='w', edgecolor='k')\n gs1 = gridspec.GridSpec(4, 1)\n ax = fig.add_subplot(gs1[:, 0])\n # fig = plt.figure(figsize=(20, 10))\n # ax = fig.add_axes((0.1, 0.3, 0.8, 0.65))\n\n vel.plot_vel(ax, vels_tt, label='tt')\n vel.plot_vel(ax, vels_swd, color='green', label='swd')\n\n ax.legend()\n plt.grid(linestyle=':', linewidth=1)\n plt.show()\n\n\ndef main():\n unittest.main()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.linspace",
"numpy.ones"
],
[
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.matplotlib.rcParams.update",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jreback/ibis | [
"fdcca59b085416b1311eb268be3886abad1db230",
"fdcca59b085416b1311eb268be3886abad1db230"
] | [
"ibis/backends/clickhouse/tests/test_functions.py",
"ibis/backends/pandas/tests/test_udf.py"
] | [
"import math\nimport operator\nfrom datetime import date, datetime\nfrom operator import methodcaller\n\nimport pandas as pd\nimport pandas.testing as tm\nimport pytest\nfrom pytest import param\n\nimport ibis\nimport ibis.expr.datatypes as dt\nimport ibis.expr.types as ir\nfrom ibis import literal as L\n\nclickhouse_driver = pytest.importorskip('clickhouse_driver')\npytestmark = pytest.mark.clickhouse\n\n\[email protected](\n ('to_type', 'expected'),\n [\n ('int8', 'CAST(`double_col` AS Int8)'),\n ('int16', 'CAST(`double_col` AS Int16)'),\n ('float', 'CAST(`double_col` AS Float32)'),\n # alltypes.double_col is non-nullable\n (dt.Double(nullable=False), '`double_col`'),\n ],\n)\ndef test_cast_double_col(alltypes, translate, to_type, expected):\n expr = alltypes.double_col.cast(to_type)\n assert translate(expr) == expected\n\n\[email protected](\n ('to_type', 'expected'),\n [\n ('int8', 'CAST(`string_col` AS Int8)'),\n ('int16', 'CAST(`string_col` AS Int16)'),\n (dt.String(nullable=False), '`string_col`'),\n ('timestamp', 'CAST(`string_col` AS DateTime)'),\n ('date', 'CAST(`string_col` AS Date)'),\n ],\n)\ndef test_cast_string_col(alltypes, translate, to_type, expected):\n expr = alltypes.string_col.cast(to_type)\n assert translate(expr) == expected\n\n\[email protected](\n raises=AssertionError, reason='Clickhouse doesn\\'t have decimal type'\n)\ndef test_decimal_cast():\n assert False\n\n\[email protected](\n 'column',\n [\n 'index',\n 'Unnamed: 0',\n 'id',\n 'bool_col',\n 'tinyint_col',\n 'smallint_col',\n 'int_col',\n 'bigint_col',\n 'float_col',\n 'double_col',\n 'date_string_col',\n 'string_col',\n 'timestamp_col',\n 'year',\n 'month',\n ],\n)\ndef test_noop_cast(alltypes, translate, column):\n col = alltypes[column]\n result = col.cast(col.type())\n assert result.equals(col)\n assert translate(result) == '`{}`'.format(column)\n\n\ndef test_timestamp_cast_noop(alltypes, translate):\n target = dt.Timestamp(nullable=False)\n result1 = alltypes.timestamp_col.cast(target)\n result2 = alltypes.int_col.cast(target)\n\n assert isinstance(result1, ir.TimestampColumn)\n assert isinstance(result2, ir.TimestampColumn)\n\n assert translate(result1) == '`timestamp_col`'\n assert translate(result2) == 'CAST(`int_col` AS DateTime)'\n\n\ndef test_timestamp_now(con, translate):\n expr = ibis.now()\n # now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n assert translate(expr) == 'now()'\n # assert con.execute(expr) == now\n\n\[email protected](\n ('unit', 'expected'),\n [\n ('y', '2009-01-01'),\n param('m', '2009-05-01', marks=pytest.mark.xfail),\n ('d', '2009-05-17'),\n ('w', '2009-05-11'),\n ('h', '2009-05-17 12:00:00'),\n ('minute', '2009-05-17 12:34:00'),\n ],\n)\ndef test_timestamp_truncate(con, translate, unit, expected):\n stamp = ibis.timestamp('2009-05-17 12:34:56')\n expr = stamp.truncate(unit)\n assert con.execute(expr) == pd.Timestamp(expected)\n\n\[email protected](\n ('func', 'expected'),\n [\n (methodcaller('year'), 2015),\n (methodcaller('month'), 9),\n (methodcaller('day'), 1),\n (methodcaller('hour'), 14),\n (methodcaller('minute'), 48),\n (methodcaller('second'), 5),\n ],\n)\ndef test_simple_datetime_operations(con, func, expected):\n value = ibis.timestamp('2015-09-01 14:48:05.359')\n with pytest.raises(ValueError):\n con.execute(func(value))\n\n value = ibis.timestamp('2015-09-01 14:48:05')\n con.execute(func(value)) == expected\n\n\[email protected](('value', 'expected'), [(0, None), (5.5, 5.5)])\ndef test_nullifzero(con, value, expected):\n result = 
con.execute(L(value).nullifzero())\n if expected is None:\n assert pd.isnull(result)\n else:\n assert result == expected\n\n\[email protected](\n ('expr', 'expected'),\n [\n (L(None).isnull(), True),\n (L(1).isnull(), False),\n (L(None).notnull(), False),\n (L(1).notnull(), True),\n ],\n)\ndef test_isnull_notnull(con, expr, expected):\n assert con.execute(expr) == expected\n\n\[email protected](\n ('expr', 'expected'),\n [\n (ibis.coalesce(5, None, 4), 5),\n (ibis.coalesce(ibis.NA, 4, ibis.NA), 4),\n (ibis.coalesce(ibis.NA, ibis.NA, 3.14), 3.14),\n ],\n)\ndef test_coalesce(con, expr, expected):\n assert con.execute(expr) == expected\n\n\[email protected](\n ('expr', 'expected'),\n [\n (ibis.NA.fillna(5), 5),\n (L(5).fillna(10), 5),\n (L(5).nullif(5), None),\n (L(10).nullif(5), 10),\n ],\n)\ndef test_fillna_nullif(con, expr, expected):\n result = con.execute(expr)\n if expected is None:\n assert pd.isnull(result)\n else:\n assert result == expected\n\n\[email protected](\n ('value', 'expected'),\n [\n (L('foo_bar'), 'String'),\n (L(5), 'UInt8'),\n (L(1.2345), 'Float64'),\n (L(datetime(2015, 9, 1, hour=14, minute=48, second=5)), 'DateTime'),\n (L(date(2015, 9, 1)), 'Date'),\n param(\n ibis.NA,\n 'Null',\n marks=pytest.mark.xfail(\n raises=AssertionError,\n reason=(\n 'Client/server version mismatch not handled in the '\n 'clickhouse driver'\n ),\n ),\n ),\n ],\n)\ndef test_typeof(con, value, expected):\n assert con.execute(value.typeof()) == expected\n\n\[email protected](('value', 'expected'), [('foo_bar', 7), ('', 0)])\ndef test_string_length(con, value, expected):\n assert con.execute(L(value).length()) == expected\n\n\[email protected](\n ('op', 'expected'),\n [\n (methodcaller('substr', 0, 3), 'foo'),\n (methodcaller('substr', 4, 3), 'bar'),\n (methodcaller('substr', 1), 'oo_bar'),\n ],\n)\ndef test_string_substring(con, op, expected):\n value = L('foo_bar')\n assert con.execute(op(value)) == expected\n\n\ndef test_string_column_substring(con, alltypes, translate):\n expr = alltypes.string_col.substr(2)\n assert translate(expr) == 'substring(`string_col`, 2 + 1)'\n assert len(con.execute(expr))\n\n expr = alltypes.string_col.substr(0, 3)\n assert translate(expr) == 'substring(`string_col`, 0 + 1, 3)'\n assert len(con.execute(expr))\n\n\ndef test_string_reverse(con):\n assert con.execute(L('foo').reverse()) == 'oof'\n\n\ndef test_string_upper(con):\n assert con.execute(L('foo').upper()) == 'FOO'\n\n\ndef test_string_lower(con):\n assert con.execute(L('FOO').lower()) == 'foo'\n\n\ndef test_string_lenght(con):\n assert con.execute(L('FOO').length()) == 3\n\n\[email protected](\n ('value', 'op', 'expected'),\n [\n (L('foobar'), methodcaller('contains', 'bar'), True),\n (L('foobar'), methodcaller('contains', 'foo'), True),\n (L('foobar'), methodcaller('contains', 'baz'), False),\n (L('100%'), methodcaller('contains', '%'), True),\n (L('a_b_c'), methodcaller('contains', '_'), True),\n ],\n)\ndef test_string_contains(con, op, value, expected):\n assert con.execute(op(value)) == expected\n\n\n# TODO: clickhouse-driver escaping bug\ndef test_re_replace(con, translate):\n expr1 = L('Hello, World!').re_replace('.', '\\\\\\\\0\\\\\\\\0')\n expr2 = L('Hello, World!').re_replace('^', 'here: ')\n\n assert con.execute(expr1) == 'HHeelllloo,, WWoorrlldd!!'\n assert con.execute(expr2) == 'here: Hello, World!'\n\n\[email protected](\n ('value', 'expected'),\n [(L('a'), 0), (L('b'), 1), (L('d'), -1)], # TODO: what's the expected?\n)\ndef test_find_in_set(con, value, expected, translate):\n vals = 
list('abc')\n expr = value.find_in_set(vals)\n assert con.execute(expr) == expected\n\n\ndef test_string_column_find_in_set(con, alltypes, translate):\n s = alltypes.string_col\n vals = list('abc')\n\n expr = s.find_in_set(vals)\n assert translate(expr) == \"indexOf(['a','b','c'], `string_col`) - 1\"\n assert len(con.execute(expr))\n\n\[email protected](\n ('url', 'extract', 'expected'),\n [\n (L('https://www.cloudera.com'), 'HOST', 'www.cloudera.com'),\n (L('https://www.cloudera.com'), 'PROTOCOL', 'https'),\n (\n L('https://www.youtube.com/watch?v=kEuEcWfewf8&t=10'),\n 'PATH',\n '/watch',\n ),\n (\n L('https://www.youtube.com/watch?v=kEuEcWfewf8&t=10'),\n 'QUERY',\n 'v=kEuEcWfewf8&t=10',\n ),\n ],\n)\ndef test_parse_url(con, translate, url, extract, expected):\n expr = url.parse_url(extract)\n assert con.execute(expr) == expected\n\n\ndef test_parse_url_query_parameter(con, translate):\n url = L('https://www.youtube.com/watch?v=kEuEcWfewf8&t=10')\n expr = url.parse_url('QUERY', 't')\n assert con.execute(expr) == '10'\n\n expr = url.parse_url('QUERY', 'v')\n assert con.execute(expr) == 'kEuEcWfewf8'\n\n\[email protected](\n ('expr', 'expected'),\n [\n (L('foobar').find('bar'), 3),\n (L('foobar').find('baz'), -1),\n (L('foobar').like('%bar'), True),\n (L('foobar').like('foo%'), True),\n (L('foobar').like('%baz%'), False),\n (L('foobar').like(['%bar']), True),\n (L('foobar').like(['foo%']), True),\n (L('foobar').like(['%baz%']), False),\n (L('foobar').like(['%bar', 'foo%']), True),\n (L('foobarfoo').replace('foo', 'H'), 'HbarH'),\n ],\n)\ndef test_string_find_like(con, expr, expected):\n assert con.execute(expr) == expected\n\n\ndef test_string_column_like(con, alltypes, translate):\n expr = alltypes.string_col.like('foo%')\n assert translate(expr) == \"`string_col` LIKE 'foo%'\"\n assert len(con.execute(expr))\n\n expr = alltypes.string_col.like(['foo%', '%bar'])\n expected = \"`string_col` LIKE 'foo%' OR `string_col` LIKE '%bar'\"\n assert translate(expr) == expected\n assert len(con.execute(expr))\n\n\ndef test_string_column_find(con, alltypes, translate):\n s = alltypes.string_col\n\n expr = s.find('a')\n assert translate(expr) == \"position(`string_col`, 'a') - 1\"\n assert len(con.execute(expr))\n\n expr = s.find(s)\n assert translate(expr) == \"position(`string_col`, `string_col`) - 1\"\n assert len(con.execute(expr))\n\n\[email protected](\n ('call', 'expected'),\n [\n (methodcaller('log'), 'log(`double_col`)'),\n (methodcaller('log2'), 'log2(`double_col`)'),\n (methodcaller('log10'), 'log10(`double_col`)'),\n (methodcaller('round'), 'round(`double_col`)'),\n (methodcaller('round', 0), 'round(`double_col`, 0)'),\n (methodcaller('round', 2), 'round(`double_col`, 2)'),\n (methodcaller('exp'), 'exp(`double_col`)'),\n (methodcaller('abs'), 'abs(`double_col`)'),\n (methodcaller('ceil'), 'ceil(`double_col`)'),\n (methodcaller('floor'), 'floor(`double_col`)'),\n (methodcaller('sqrt'), 'sqrt(`double_col`)'),\n (\n methodcaller('sign'),\n 'intDivOrZero(`double_col`, abs(`double_col`))',\n ),\n ],\n)\ndef test_translate_math_functions(con, alltypes, translate, call, expected):\n expr = call(alltypes.double_col)\n assert translate(expr) == expected\n assert len(con.execute(expr))\n\n\[email protected](\n ('expr', 'expected'),\n [\n (L(-5).abs(), 5),\n (L(5).abs(), 5),\n (L(5.5).round(), 6.0),\n (L(5.556).round(2), 5.56),\n (L(5.556).ceil(), 6.0),\n (L(5.556).floor(), 5.0),\n (L(5.556).exp(), math.exp(5.556)),\n (L(5.556).sign(), 1),\n (L(-5.556).sign(), -1),\n (L(0).sign(), 0),\n 
(L(5.556).sqrt(), math.sqrt(5.556)),\n (L(5.556).log(2), math.log(5.556, 2)),\n (L(5.556).ln(), math.log(5.556)),\n (L(5.556).log2(), math.log(5.556, 2)),\n (L(5.556).log10(), math.log10(5.556)),\n ],\n)\ndef test_math_functions(con, expr, expected, translate):\n assert con.execute(expr) == expected\n\n\ndef test_greatest(con, alltypes, translate):\n expr = ibis.greatest(alltypes.int_col, 10)\n\n assert translate(expr) == \"greatest(`int_col`, 10)\"\n assert len(con.execute(expr))\n\n expr = ibis.greatest(alltypes.int_col, alltypes.bigint_col)\n assert translate(expr) == \"greatest(`int_col`, `bigint_col`)\"\n assert len(con.execute(expr))\n\n\ndef test_least(con, alltypes, translate):\n expr = ibis.least(alltypes.int_col, 10)\n assert translate(expr) == \"least(`int_col`, 10)\"\n assert len(con.execute(expr))\n\n expr = ibis.least(alltypes.int_col, alltypes.bigint_col)\n assert translate(expr) == \"least(`int_col`, `bigint_col`)\"\n assert len(con.execute(expr))\n\n\n# TODO: clickhouse-driver escaping bug\[email protected](\n ('expr', 'expected'),\n [\n (L('abcd').re_search('[a-z]'), True),\n (L('abcd').re_search(r'[\\\\d]+'), False),\n (L('1222').re_search(r'[\\\\d]+'), True),\n ],\n)\ndef test_regexp(con, expr, expected):\n assert con.execute(expr) == expected\n\n\[email protected](\n ('expr', 'expected'),\n [\n (L('abcd').re_extract('([a-z]+)', 0), 'abcd'),\n # (L('abcd').re_extract('(ab)(cd)', 1), 'cd'),\n # valid group number but no match => empty string\n (L('abcd').re_extract(r'(\\\\d)', 0), ''),\n # match but not a valid group number => NULL\n # (L('abcd').re_extract('abcd', 3), None),\n ],\n)\ndef test_regexp_extract(con, expr, expected, translate):\n assert con.execute(expr) == expected\n\n\ndef test_column_regexp_extract(con, alltypes, translate):\n expected = r\"extractAll(`string_col`, '[\\d]+')[3 + 1]\"\n\n expr = alltypes.string_col.re_extract(r'[\\d]+', 3)\n assert translate(expr) == expected\n assert len(con.execute(expr))\n\n\ndef test_column_regexp_replace(con, alltypes, translate):\n expected = r\"replaceRegexpAll(`string_col`, '[\\d]+', 'aaa')\"\n\n expr = alltypes.string_col.re_replace(r'[\\d]+', 'aaa')\n assert translate(expr) == expected\n assert len(con.execute(expr))\n\n\ndef test_numeric_builtins_work(con, alltypes, df, translate):\n expr = alltypes.double_col\n result = expr.execute()\n expected = df.double_col.fillna(0)\n tm.assert_series_equal(result, expected)\n\n\ndef test_null_column(alltypes, translate):\n t = alltypes\n nrows = t.count().execute()\n expr = t.mutate(na_column=ibis.NA).na_column\n result = expr.execute()\n expected = pd.Series([None] * nrows, name='na_column')\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n ('attr', 'expected'),\n [\n (operator.methodcaller('year'), {2009, 2010}),\n (operator.methodcaller('month'), set(range(1, 13))),\n (operator.methodcaller('day'), set(range(1, 32))),\n ],\n)\ndef test_date_extract_field(db, alltypes, attr, expected):\n t = alltypes\n expr = attr(t.timestamp_col.cast('date')).distinct()\n result = expr.execute().astype(int)\n assert set(result) == expected\n\n\ndef test_timestamp_from_integer(con, alltypes, translate):\n # timestamp_col has datetime type\n expr = alltypes.int_col.to_timestamp()\n assert translate(expr) == 'toDateTime(`int_col`)'\n assert len(con.execute(expr))\n\n\ndef test_count_distinct_with_filter(alltypes):\n expr = alltypes.string_col.nunique(\n where=alltypes.string_col.cast('int64') > 1\n )\n result = expr.execute()\n expected = alltypes.string_col.execute()\n 
expected = expected[expected.astype('int64') > 1].nunique()\n assert result == expected\n",
"import collections\n\nimport numpy as np\nimport pandas as pd\nimport pandas.testing as tm\nimport pytest\n\nimport ibis\nimport ibis.expr.datatypes as dt\nimport ibis.expr.types as ir\n\nfrom .. import connect\nfrom ..udf import nullable, udf\n\n\[email protected]\ndef df():\n return pd.DataFrame(\n {\n 'a': list('abc'),\n 'b': [1, 2, 3],\n 'c': [4.0, 5.0, 6.0],\n 'key': list('aab'),\n }\n )\n\n\[email protected]\ndef df2():\n return pd.DataFrame(\n {\n 'a': np.arange(4, dtype=float).tolist()\n + np.random.rand(3).tolist(),\n 'b': np.arange(4, dtype=float).tolist()\n + np.random.rand(3).tolist(),\n 'c': np.arange(7, dtype=int).tolist(),\n 'key': list('ddeefff'),\n }\n )\n\n\[email protected]\ndef con(df, df2):\n return connect({'df': df, 'df2': df2})\n\n\[email protected]\ndef t(con):\n return con.table('df')\n\n\[email protected]\ndef t2(con):\n return con.table('df2')\n\n\[email protected](input_type=['string'], output_type='int64')\ndef my_string_length(series, **kwargs):\n return series.str.len() * 2\n\n\[email protected](input_type=[dt.double, dt.double], output_type=dt.double)\ndef my_add(series1, series2, **kwargs):\n return series1 + series2\n\n\[email protected](['double'], 'double')\ndef my_mean(series):\n return series.mean()\n\n\[email protected](input_type=[dt.string], output_type=dt.int64)\ndef my_string_length_sum(series, **kwargs):\n return (series.str.len() * 2).sum()\n\n\[email protected](input_type=[dt.double, dt.double], output_type=dt.double)\ndef my_corr(lhs, rhs, **kwargs):\n return lhs.corr(rhs)\n\n\[email protected]([dt.double], dt.double)\ndef add_one(x):\n return x + 1.0\n\n\[email protected]([dt.double], dt.double)\ndef times_two(x):\n return x * 2.0\n\n\[email protected](input_type=['double'], output_type='double')\ndef zscore(series):\n return (series - series.mean()) / series.std()\n\n\[email protected](\n input_type=[dt.double], output_type=dt.Array(dt.double),\n)\ndef quantiles(series, *, quantiles):\n return list(series.quantile(quantiles))\n\n\ndef test_udf(t, df):\n expr = my_string_length(t.a)\n\n assert isinstance(expr, ir.ColumnExpr)\n\n result = expr.execute()\n expected = df.a.str.len().mul(2)\n tm.assert_series_equal(result, expected)\n\n\ndef test_elementwise_udf_with_non_vectors(con):\n expr = my_add(1.0, 2.0)\n result = con.execute(expr)\n assert result == 3.0\n\n\ndef test_multiple_argument_udf(con, t, df):\n expr = my_add(t.b, t.c)\n\n assert isinstance(expr, ir.ColumnExpr)\n assert isinstance(expr, ir.NumericColumn)\n assert isinstance(expr, ir.FloatingColumn)\n\n result = expr.execute()\n expected = df.b + df.c\n tm.assert_series_equal(result, expected)\n\n\ndef test_multiple_argument_udf_group_by(con, t, df):\n expr = t.groupby(t.key).aggregate(my_add=my_add(t.b, t.c).sum())\n\n assert isinstance(expr, ir.TableExpr)\n assert isinstance(expr.my_add, ir.ColumnExpr)\n assert isinstance(expr.my_add, ir.NumericColumn)\n assert isinstance(expr.my_add, ir.FloatingColumn)\n\n result = expr.execute()\n expected = pd.DataFrame(\n {'key': list('ab'), 'my_add': [sum([1.0 + 4.0, 2.0 + 5.0]), 3.0 + 6.0]}\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_udaf(con, t, df):\n expr = my_string_length_sum(t.a)\n\n assert isinstance(expr, ir.ScalarExpr)\n\n result = expr.execute()\n expected = t.a.execute().str.len().mul(2).sum()\n assert result == expected\n\n\ndef test_udaf_analytic(con, t, df):\n expr = zscore(t.c)\n\n assert isinstance(expr, ir.ColumnExpr)\n\n result = expr.execute()\n\n def f(s):\n return s.sub(s.mean()).div(s.std())\n\n 
expected = f(df.c)\n tm.assert_series_equal(result, expected)\n\n\ndef test_udaf_analytic_groupby(con, t, df):\n expr = zscore(t.c).over(ibis.window(group_by=t.key))\n\n assert isinstance(expr, ir.ColumnExpr)\n\n result = expr.execute()\n\n def f(s):\n return s.sub(s.mean()).div(s.std())\n\n expected = df.groupby('key').c.transform(f)\n tm.assert_series_equal(result, expected)\n\n\ndef test_udaf_groupby():\n df = pd.DataFrame(\n {\n 'a': np.arange(4, dtype=float).tolist()\n + np.random.rand(3).tolist(),\n 'b': np.arange(4, dtype=float).tolist()\n + np.random.rand(3).tolist(),\n 'key': list('ddeefff'),\n }\n )\n con = connect({'df': df})\n t = con.table('df')\n\n expr = t.groupby(t.key).aggregate(my_corr=my_corr(t.a, t.b))\n\n assert isinstance(expr, ir.TableExpr)\n\n result = expr.execute().sort_values('key')\n\n dfi = df.set_index('key')\n expected = pd.DataFrame(\n {\n 'key': list('def'),\n 'my_corr': [\n dfi.loc[value, 'a'].corr(dfi.loc[value, 'b'])\n for value in 'def'\n ],\n }\n )\n\n columns = ['key', 'my_corr']\n tm.assert_frame_equal(result[columns], expected[columns])\n\n\ndef test_nullable():\n t = ibis.table([('a', 'int64')])\n assert nullable(t.a.type()) == (type(None),)\n\n\ndef test_nullable_non_nullable_field():\n t = ibis.table([('a', dt.String(nullable=False))])\n assert nullable(t.a.type()) == ()\n\n\ndef test_udaf_parameter_mismatch():\n with pytest.raises(TypeError):\n\n @udf.reduction(input_type=[dt.double], output_type=dt.double)\n def my_corr(lhs, rhs, **kwargs):\n pass\n\n\ndef test_udf_parameter_mismatch():\n with pytest.raises(TypeError):\n\n @udf.reduction(input_type=[], output_type=dt.double)\n def my_corr2(lhs, **kwargs):\n pass\n\n\ndef test_udf_error(t):\n @udf.elementwise(input_type=[dt.double], output_type=dt.double)\n def error_udf(s):\n raise ValueError('xxx')\n\n with pytest.raises(ValueError):\n error_udf(t.c).execute()\n\n\ndef test_compose_udfs(t2, df2):\n expr = times_two(add_one(t2.a))\n result = expr.execute()\n expected = df2.a.add(1.0).mul(2.0)\n tm.assert_series_equal(expected, result)\n\n\ndef test_udaf_window(t2, df2):\n window = ibis.trailing_window(2, order_by='a', group_by='key')\n expr = t2.mutate(rolled=my_mean(t2.b).over(window))\n result = expr.execute().sort_values(['key', 'a'])\n expected = df2.sort_values(['key', 'a']).assign(\n rolled=lambda df: df.groupby('key')\n .b.rolling(3, min_periods=1)\n .mean()\n .reset_index(level=0, drop=True)\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_udaf_window_interval():\n df = pd.DataFrame(\n collections.OrderedDict(\n [\n (\n \"time\",\n pd.date_range(\n start='20190105', end='20190101', freq='-1D'\n ),\n ),\n (\"key\", [1, 2, 1, 2, 1]),\n (\"value\", np.arange(5)),\n ]\n )\n )\n\n con = connect({'df': df})\n t = con.table('df')\n window = ibis.trailing_range_window(\n ibis.interval(days=2), order_by='time', group_by='key'\n )\n\n expr = t.mutate(rolled=my_mean(t.value).over(window))\n\n result = expr.execute().sort_values(['time', 'key']).reset_index(drop=True)\n expected = (\n df.sort_values(['time', 'key'])\n .set_index('time')\n .assign(\n rolled=lambda df: df.groupby('key')\n .value.rolling('2D', closed='both')\n .mean()\n .reset_index(level=0, drop=True)\n )\n ).reset_index(drop=False)\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_multiple_argument_udaf_window():\n # PR 2035\n\n @udf.reduction(['double', 'double'], 'double')\n def my_wm(v, w):\n return np.average(v, weights=w)\n\n df = pd.DataFrame(\n {\n 'a': np.arange(4, 0, dtype=float, step=-1).tolist()\n + 
np.random.rand(3).tolist(),\n 'b': np.arange(4, dtype=float).tolist()\n + np.random.rand(3).tolist(),\n 'c': np.arange(4, dtype=float).tolist()\n + np.random.rand(3).tolist(),\n 'd': np.repeat(1, 7),\n 'key': list('deefefd'),\n }\n )\n con = connect({'df': df})\n t = con.table('df')\n window = ibis.trailing_window(2, order_by='a', group_by='key')\n window2 = ibis.trailing_window(1, order_by='b', group_by='key')\n expr = t.mutate(\n wm_b=my_wm(t.b, t.d).over(window),\n wm_c=my_wm(t.c, t.d).over(window),\n wm_c2=my_wm(t.c, t.d).over(window2),\n )\n result = expr.execute().sort_values(['key', 'a'])\n expected = (\n df.sort_values(['key', 'a'])\n .assign(\n wm_b=lambda df: df.groupby('key')\n .b.rolling(3, min_periods=1)\n .mean()\n .reset_index(level=0, drop=True)\n )\n .assign(\n wm_c=lambda df: df.groupby('key')\n .c.rolling(3, min_periods=1)\n .mean()\n .reset_index(level=0, drop=True)\n )\n )\n expected = expected.sort_values(['key', 'b']).assign(\n wm_c2=lambda df: df.groupby('key')\n .c.rolling(2, min_periods=1)\n .mean()\n .reset_index(level=0, drop=True)\n )\n expected = expected.sort_values(['key', 'a'])\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_udaf_window_nan():\n df = pd.DataFrame(\n {\n 'a': np.arange(10, dtype=float),\n 'b': [3.0, np.NaN] * 5,\n 'key': list('ddeefffggh'),\n }\n )\n con = connect({'df': df})\n t = con.table('df')\n window = ibis.trailing_window(2, order_by='a', group_by='key')\n expr = t.mutate(rolled=my_mean(t.b).over(window))\n result = expr.execute().sort_values(['key', 'a'])\n expected = df.sort_values(['key', 'a']).assign(\n rolled=lambda d: d.groupby('key')\n .b.rolling(3, min_periods=1)\n .apply(lambda x: x.mean(), raw=True)\n .reset_index(level=0, drop=True)\n )\n tm.assert_frame_equal(result, expected)\n\n\[email protected](params=[[0.25, 0.75], [0.01, 0.99]])\ndef qs(request):\n return request.param\n\n\ndef test_array_return_type_reduction(con, t, df, qs):\n expr = quantiles(t.b, quantiles=qs)\n result = expr.execute()\n expected = df.b.quantile(qs)\n assert result == expected.tolist()\n\n\ndef test_array_return_type_reduction_window(con, t, df, qs):\n expr = quantiles(t.b, quantiles=qs).over(ibis.window())\n result = expr.execute()\n expected_raw = df.b.quantile(qs).tolist()\n expected = pd.Series([expected_raw] * len(df))\n tm.assert_series_equal(result, expected)\n\n\ndef test_elementwise_udf_with_many_args(t2):\n @udf.elementwise(\n input_type=[dt.double] * 16 + [dt.int32] * 8, output_type=dt.double\n )\n def my_udf(\n c1,\n c2,\n c3,\n c4,\n c5,\n c6,\n c7,\n c8,\n c9,\n c10,\n c11,\n c12,\n c13,\n c14,\n c15,\n c16,\n c17,\n c18,\n c19,\n c20,\n c21,\n c22,\n c23,\n c24,\n ):\n return c1\n\n expr = my_udf(*([t2.a] * 8 + [t2.b] * 8 + [t2.c] * 8))\n result = expr.execute()\n expected = t2.a.execute()\n\n tm.assert_series_equal(result, expected)\n"
] | [
[
"pandas.Timestamp",
"pandas.testing.assert_series_equal",
"pandas.Series",
"pandas.isnull"
],
[
"pandas.testing.assert_series_equal",
"numpy.arange",
"pandas.testing.assert_frame_equal",
"numpy.random.rand",
"pandas.date_range",
"numpy.repeat",
"numpy.average"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
HugoPfister/Pyrats | [
"fc2cab0d1e14b8dd19b3eba361d47f053187ab47",
"fc2cab0d1e14b8dd19b3eba361d47f053187ab47"
] | [
"pyrats/halos.py",
"pyrats/scripts/Struc_To_hdf5.py"
] | [
"#!/usr/bin/env python\n\n\"\"\"Module to deal with halos, to be used with HaloMaker.\n\nThis module is heavily inspired by the set of IDL routines originally\nfound in the Ramses Analysis ToolSuite (RATS).\n\nTODO: Some more documentation\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport yt\nfrom yt.utilities.logger import ytLogger as mylog\nimport yt.utilities.fortran_utils as fpu\nfrom yt.funcs import get_pbar\nimport os\nimport pandas as pd\n\n\nclass HaloList(object):\n def __init__(self, ds, folder='.', contam=False):\n \"\"\"\n PandaList with halos and their properties\n \"\"\"\n\n self.folder = folder\n self.iout = int(str(ds).split('_')[1])\n if os.path.exists(\n '{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}.hdf'.format(\n s=self)):\n self.halos = pd.read_hdf(\n '{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}.hdf'.format(\n s=self))\n else:\n self.halos = self._read_halos(data_set=ds, with_contam_option=contam)\n if self.halos.index.size > 0:\n self.halos.to_hdf(\n '{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}.hdf'.format(\n s=self), 'hdf')\n self.ds = ds\n\n self.halos['bhid'] = -1 ; self.halos['galID'] = -1\n self.halos['mgal'] = 0 ; self.halos['msink'] = 0\n # read purity of halos\n self.halos['pollution'] = 0\n contam_file_path = '{s.folder}/Halos/{s.iout}/contam_halos{s.iout:03d}'.format(\n s=self)\n if os.path.exists(contam_file_path):\n p = np.loadtxt(contam_file_path)\n if len(p) > 0:\n p = p.T\n self.halos.loc[p[0], 'pollution'] = p[1]/p[2]\n\n def get_halo(self, hid, fname=None):\n\n halo = self.halos.loc[hid]\n scale_mpc = float(self.ds.length_unit.in_units('Mpc'))\n\n halostr = (\"Halo {hid:.0f} (level {h.level:.0f}):\\n\"\n \"\\tContains {h.nbpart:.0f} particles and {h.nbsub:.0f} subhalo(s)\\n\"\n \"\\tCenter:\\t\\t ({h.x}, {h.y}, {h.z}) box units\\n\"\n \"\\tVelocity:\\t ({h.vx}, {h.vy}, {h.vz}) km/s\\n\"\n \"\\tL:\\t\\t ({h.Lx}, {h.Ly}, {h.Lz}) ToCheck\\n\"\n \"\\tMass:\\t\\t {h.m:.3e} Msun\\n\"\n \"\\tMvir:\\t\\t {h.mvir:.3e} Msun\\n\"\n \"\\tRadius:\\t\\t {h.r:.3e} Mpc ({rcodeunits:.3e} box units)\\n\"\n \"\\tRvir:\\t\\t {h.rvir:.3e} Mpc ({rvcodeunits:.3e} box units)\\n\"\n \"\\tTvir:\\t\\t {h.tvir:.3e} K\".format(hid=hid,\n h=halo,\n rcodeunits=halo.r / scale_mpc,\n rvcodeunits=halo.rvir / scale_mpc))\n\n if fname is not None:\n with open(fname, 'w') as f:\n f.write(halostr)\n\n return halostr\n\n def get_halo_sphere(self, hid, rvir_factor=5):\n halo_spheres = getattr(self, '_halo_spheres', {})\n if (hid, rvir_factor) in halo_spheres:\n return halo_spheres[hid, rvir_factor]\n\n tmp = self.halos.loc[hid, ['x', 'y', 'z', 'rvir', 'vx', 'vy', 'vz']]\\\n .values\n center = self.ds.arr(tmp[:3], 'code_length')\n radius = self.ds.arr(tmp[3] * rvir_factor, 'Mpc')\n vel = self.ds.arr(tmp[4:7], 'km/s')\n\n # Get a sphere centered on the halo\n sphere = self.ds.sphere(center, radius)\n sphere.set_field_parameter('bulk_velocity', vel)\n\n halo_spheres[(hid, rvir_factor)] = sphere\n self._halo_spheres = halo_spheres\n\n return sphere\n\n def plot_halo(self, hid, rvir_factor=5, field=('deposit', 'all_density'), folder='./',\n weight_field=('index', 'ones'), cmap='viridis', slice=False,\n axis='z', **kwargs):\n '''Plot a given halo.\n\n Parameters\n ----------\n * hid, integer\n The halo id to plot\n * rvir_factor, float, default=5\n Size of the region to plot in unit of Rvir\n\n * field, tuple\n The yt field to plot\n * folder, string\n The folder where to save the data\n * weight_field, tuple\n The field to weight the projection by.\n * cmap, string\n 
The colormap to use\n * slice, boolean\n If true, do a slice plot instead of a projection plot\n * axis, 'x', 'y' or 'z'\n The axis to project onto\n '''\n for k, v in kwargs.items():\n print('%s: %s not supported' % (k, v))\n\n if hid not in self.halos.index:\n mylog.error('%s not found.' % hid)\n return\n\n # Get position\n tmp = np.array(self.halos.loc[hid, ['x', 'y', 'z', 'rvir']])\n center = self.ds.arr(tmp[:3], 'code_length')\n radius = self.ds.arr(tmp[3] * rvir_factor, 'Mpc')\n\n # Get a sphere centered on the halo\n sphere = self.ds.sphere(center, radius)\n\n # Make a projection plot\n p = yt.ProjectionPlot(self.ds, axis, field, data_source=sphere,\n weight_field=weight_field)\n\n p.set_cmap(field=field, cmap=cmap)\n p.annotate_timestamp(corner='upper_left', time=True, redshift=True)\n p.annotate_scale(corner='upper_right')\n\n # TODO: annotate halos\n # TODO: better name\n p.save(folder)\n\n # Accessors\n def __getitem__(self, item):\n if str(item) in self.halos:\n return self.halos[item]\n else:\n return self.halos.ix[item]\n\n # def __getattr__(self, name):\n # return self.halos.__getattr__(name) # self.halos[name]\n\n def __len__(self):\n return len(self.halos)\n\n def __iter__(self):\n return self.halos.iterrows()\n\n # Printing functions\n def __str__(self):\n return self.halos.__str__()\n\n # Convenience functions\n def _read_halos(self, data_set, with_contam_option=False):\n halo_keys = ('ID', 'nbpart', 'level', 'min_part_id',\n 'host', 'hostsub', 'nbsub', 'nextsub',\n 'x', 'y', 'z', 'vx', 'vy', 'vz', 'Lx', 'Ly', 'Lz',\n 'a', 'b', 'c', 'ek', 'ep', 'et', 'rho0', 'r_c',\n 'spin', 'm', 'r', 'mvir', 'rvir', 'tvir', 'cvel')\n filename = '{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}'.format(\n s=self)\n\n data = np.empty(shape=(0, len(halo_keys)), dtype=object)\n yt.funcs.mylog.debug('Reading halo catalog %s (ds=%s)' % (filename, data_set))\n offsets = {}\n if os.path.exists(filename):\n with open(filename, 'rb') as f:\n [npart] = fpu.read_vector(f, 'i')\n [massp] = fpu.read_vector(f, 'f')\n [aexp] = fpu.read_vector(f, 'f')\n [omega_t] = fpu.read_vector(f, 'f')\n [age] = fpu.read_vector(f, 'f')\n [nhalos, nsubs] = fpu.read_vector(f, 'i')\n\n # Save the age/aexp, the mass of the particle,\n # as well as the number of (sub)halos\n self.nhalos = nhalos\n self.nsubs = nsubs\n self.aexp = aexp\n self.age = age\n self.massp = massp\n data = np.empty(shape=(nhalos + nsubs, len(halo_keys)), dtype=object)\n\n mylog.info('Brick: halos : %s' % nhalos)\n mylog.info('Brick: sub halos : %s' % nsubs)\n mylog.info('Brick: aexp : %s' % aexp)\n\n #pbar = get_pbar('', nhalos+nsubs)\n\n for ihalo in range(nhalos + nsubs):\n pos = f.tell()\n [nbpart] = fpu.read_vector(f, 'i') # Number of particles\n listp = fpu.read_vector(f, 'i') # List of the particles IDs\n [ID] = fpu.read_vector(f, 'i') # Halo ID\n fpu.skip(f, 1) # Skip timestep\n [level, host, hostsub, nbsub, nextsub] = fpu.read_vector(f, 'i')\n [m] = fpu.read_vector(f, 'f') # Total mass\n [x, y, z] = fpu.read_vector(f, 'f') # Center\n [vx, vy, vz] = fpu.read_vector(f, 'f') # Velocity\n [Lx, Ly, Lz] = fpu.read_vector(f, 'f') # Angular momentum\n [r, a, b, c] = fpu.read_vector(f, 'f') # Shape (ellipticity)\n [ek, ep, et] = fpu.read_vector(f, 'f') # Energetics\n [spin] = fpu.read_vector(f, 'f') # Total angular momentum\n [rvir, mvir, tvir, cvel] = fpu.read_vector(f, 'f') # Virial parameters\n [rho0, r_c] = fpu.read_vector(f, 'f') # NFW params\n\n if with_contam_option:\n [contam] = fpu.read_vector(f, 'i') # Contamination\n\n # Add the halo to 
the list\n # halos.loc[ihalo] = [ID, nbpart, level, listp.min(),\n # host, hostsub, nbsub, nextsub,\n # x, y, z, vx, vy, vz, Lx, Ly, Lz,\n # a, b, c, ek, ep, et, rho0, r_c,\n # spin, m, r, mvir, rvir, tvir, cvel]\n data[ihalo] = [ID, nbpart, level, listp.min(),\n host, hostsub, nbsub, nextsub,\n x, y, z, vx, vy, vz, Lx, Ly, Lz,\n a, b, c, ek, ep, et, rho0, r_c,\n spin, m, r, mvir, rvir, tvir, cvel]\n #pbar.update()\n offsets[ID] = pos\n\n print('')\n types = {}\n for k in ('ID', 'nbpart', 'level', 'min_part_id',\n 'host', 'hostsub', 'nbsub', 'nextsub'):\n types[k] = np.int64\n for k in ('x', 'y', 'z', 'vx', 'vy', 'vz', 'Lx', 'Ly', 'Lz',\n 'a', 'b', 'c', 'ek', 'ep', 'et', 'rho0', 'r_c',\n 'spin', 'm', 'r', 'mvir', 'rvir', 'tvir', 'cvel'):\n types[k] = np.float64\n dd = {k: data[:, i].astype(types[k])\n for i, k in enumerate(halo_keys)}\n\n halos = pd.DataFrame(dd)\n\n # Get properties in the right units\n # Masses\n halos.m *= 1e11\n halos.mvir *= 1e11\n # Positions and distances\n scale_mpc = float(data_set.length_unit.in_units('cm') / 3.08e24)\n halos.x = halos.x / scale_mpc + .5\n halos.y = halos.y / scale_mpc + .5\n halos.z = halos.z / scale_mpc + .5\n\n self.offsets = offsets\n\n\n return halos.set_index('ID')\n\n def get_halo_parts(self, hid):\n filename = '{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}'.format(\n s=self)\n with open(filename, 'br') as fd:\n fd.seek(self.offsets[hid])\n fpu.skip(fd, 1)\n listp = fpu.read_vector(fd, 'i')\n\n return listp\n",
"# This script converts files from the HaloFinder\n# into hdf5 readable files.\n\n#It also computes the Sersic index of all structures\n\n# Not all quantities are dumped, should be modified depending on what is needed\n# and the version of the HaloFinder\n\nimport numpy as np\nimport pandas as pd\nimport yt.utilities.fortran_utils as fpu\nfrom yt.utilities.logger import ytLogger as mylog\nfrom yt.funcs import get_pbar\nimport yt\nimport os\nfrom scipy.optimize import curve_fit\nimport pyrats\n\ndef main():\n output_folder = '../Outputs'\n tree_brick_folder = './AdaptaHOP'\n\n\n if not os.path.exists('hdf5'):\n os.system('mkdir hdf5')\n mylog.info('Making the folder hdf5')\n \n files = pyrats.utils.find_outputs(output_folder)\n files.sort()\n mylog.info('Found {} outputs from {} to {}'.format(len(files), files[0], files[-1]))\n\n for f in files:\n GalList(int(f[-9:-4]), contam=False, tree_brick_folder=tree_brick_folder)\n #f = files[130]\n #GalList(int(f[-9:-4]), contam=False, tree_brick_folder=tree_brick_folder)\n\n return\n\nclass GalList(object):\n def __init__(self, iout, tree_brick_folder, contam=False):\n self.iout = iout\n filename = os.path.join(tree_brick_folder,'tree_bricks{:03}'.format(self.iout))\n print(filename)\n self.gal = self._read_halos(contam,filename)\n if self.gal.index.size > 0: \n self.gal.to_hdf(\n './hdf5/tree_bricks{:03d}.hdf'.format(self.iout), 'hdf5')\n\n\n # Convenience functions\n def _read_halos(self, contam, filename, prec='d', longint=True):\n halo_keys = ['ID', 'nbpart', 'level', 'min_part_id',\n 'host', 'hostsub', 'nbsub', 'nextsub',\n 'x', 'y', 'z', 'vx', 'vy', 'vz', 'Lx', 'Ly', 'Lz',\n 'a', 'b', 'c', 'ek', 'ep', 'et', 'rho0', 'r_c',\n 'spin', 'm', 'ntot', 'mtot',\n 'r', 'mvir', 'rvir', 'tvir', 'cvel',\n 'rvmax', 'vmax', 'cNFW',\n 'r200','m200',\n 'r50', 'r90', 'sigma',\n 'nDM', 'nstar', 'mDM', 'mstar',\n 'ntotDM', 'ntotstar',\n 'mtotDM', 'mtotstar',\n 'xDM', 'yDM', 'zDM', 'xstar', 'ystar', 'zstar',\n 'vxDM', 'vyDM', 'vzDM', 'vxstar', 'vystar', 'vzstar',\n 'rDM', 'rstar',\n 'aDM', 'bDM', 'cDM', 'astar', 'bstar', 'cstar',\n 'sigmaDM', 'sigmastar',\n 'reff', 'Zstar', 'tstar',\n 'sfr10', 'sfr100', 'sfr1000',\n 'r50DM', 'r90DM', 'r50star', 'r90star',\n 'Vsigma', 'sigma1D',\n 'Vsigma_disc', 'sigma1D_disc',\n 'sigma_bulge', 'mbulge',\n 'n_sersic']\n #WE DO NOT STORE THE PROFILES\n #'rr', 'rho', 'rr3D', 'rho3D',\n #'rr3DDM', 'rho3DDM', 'rr3Dstar', 'rho3Dstar',\n #]\n \n if contam:\n halo_keys.append('contam')\n halo_keys.append('mcontam')\n halo_keys.append('mtotcontam')\n halo_keys.append('ncontam')\n halo_keys.append('ntotcontam')\n iprec = 'q' if longint else 'i'\n \n data = np.empty(shape=(0, len(halo_keys)), dtype=object)\n if os.path.exists(filename):\n with open(filename, 'rb') as f:\n [npart] = fpu.read_vector(f, iprec)\n [massp] = fpu.read_vector(f, prec)\n [aexp] = fpu.read_vector(f, prec)\n [omega_t] = fpu.read_vector(f, prec)\n [age] = fpu.read_vector(f, prec)\n [nhalos, nsubs] = fpu.read_vector(f, 'i')\n \n # Save the age/aexp, the mass of the particle,\n # as well as the number of (sub)halos\n data = np.empty(shape=(nhalos + nsubs, len(halo_keys)), dtype=object)\n \n mylog.info('Brick: groups : %s' % nhalos)\n mylog.info('Brick: sub group : %s' % nsubs)\n mylog.info('Brick: aexp : %s' % aexp)\n \n pbar = get_pbar('', nhalos+nsubs)\n for ihalo in range(nhalos + nsubs):\n [nbpart] = fpu.read_vector(f, iprec) # Number of particles\n listp = fpu.read_vector(f, iprec) # List of particles IDs\n listm = fpu.read_vector(f, prec) # List of particles 
masses\n listf = fpu.read_vector(f, 'b') # List of particles families\n [ID] = fpu.read_vector(f, 'i') # Halo ID\n fpu.skip(f, 1) # Skip timestep\n [level, host, hostsub, nbsub, nextsub] = fpu.read_vector(f, 'i')\n [m] = fpu.read_vector(f, prec) # Total mass\n [ntot] = fpu.read_vector(f, iprec) # Total number of particles\n [mtot] = fpu.read_vector(f, prec) # Total mass + subs\n [x, y, z] = fpu.read_vector(f, prec) # Center\n [vx, vy, vz] = fpu.read_vector(f, prec) # Velocity\n [Lx, Ly, Lz] = fpu.read_vector(f, prec) # Angular momentum\n [r, a, b, c] = fpu.read_vector(f, prec) # Shape (ellipticity)\n [ek, ep, et] = fpu.read_vector(f, prec) # Energetics\n [spin] = fpu.read_vector(f, prec) # Total angular momentum\n [sigma] = fpu.read_vector(f, prec) # 3D velocity dispersion\n [rvir, mvir, tvir, cvel] = fpu.read_vector(f, prec) # Virial parameters\n [rvmax, vmax] = fpu.read_vector(f, prec) # RVmax and Vmax\n [cNFW] = fpu.read_vector(f, prec) # NFW concentration from Prada+2012\n [r200, m200] = fpu.read_vector(f, prec) # R200 and M200\n [r50, r90] = fpu.read_vector(f, prec) # R50 and R90\n rr3D = fpu.read_vector(f, prec) # Radial bins\n rho3D = fpu.read_vector(f, prec) # 3D density profile\n [rho0, r_c] = fpu.read_vector(f, prec) # ?\n # Stellar-only properties\n [reff] = fpu.read_vector(f, prec) # Effective radius\n [Zstar] = fpu.read_vector(f, prec) # Metallicity\n [tstar] = fpu.read_vector(f, prec) # Age\n [sfr10, sfr100, sfr1000] = fpu.read_vector(f, prec) # SFR\n [Vsigma, sigma1D] = fpu.read_vector(f, prec) # V/sigma and sigma1D\n [Vsigma_disc, sigma1D_disc] = fpu.read_vector(f, prec) # V/sigma and sigma1D for the disc\n [sigma_bulge, mbulge] = fpu.read_vector(f, prec) # Bulge properties\n # Stellar surface density profile\n fpu.skip(f, 1) # number of bins\n rr = fpu.read_vector(f, prec) # Radial bins\n rho = fpu.read_vector(f, prec) # Surface density profile\n # fpu.skip(f, 1)\n # fpu.skip(f, 1)\n \n # DM vs stars quantities\n [ndm, nstar] = fpu.read_vector(f, iprec) # Nb of particles\n [mdm, mstar] = fpu.read_vector(f, prec) # Masses\n [ntotdm, ntotstar] = fpu.read_vector(f, iprec) # Nb of particles with substructures\n [mtotdm, mtotstar] = fpu.read_vector(f, prec) # Masses with substructures\n [xdm, ydm, zdm] = fpu.read_vector(f, prec) # Halo centre (DM)\n [xstar, ystar, zstar] = fpu.read_vector(f, prec) # Halo centre (stars)\n [vxdm, vydm, vzdm] = fpu.read_vector(f, prec) # Halo velocity (DM)\n [vxstar, vystar, vzstar] = fpu.read_vector(f, prec) # Halo velocity (stars)\n [Lxdm, Lydm, Lzdm] = fpu.read_vector(f, prec) # Angular momentum (DM)\n [Lxstar, Lystar, Lzstar] = fpu.read_vector(f, prec) # Angular momentum (stars)\n [rdm, adm, bdm, cdm] = fpu.read_vector(f, prec) # Shape (DM)\n [rstar, astar, bstar, cstar] = fpu.read_vector(f, prec) # Shape (stars)\n [r50dm, r90dm] = fpu.read_vector(f, prec) # R50 and R90 (DM)\n [r50star, r90star] = fpu.read_vector(f, prec) # R50 and R90 (stars)\n #rr3Ddm = fpu.read_vector(f, prec) # Radial bins\n #rho3Ddm = fpu.read_vector(f, prec) # 3D density profile\n #rr3Dstar = fpu.read_vector(f, prec) # Radial bins\n #rho3Dstar = fpu.read_vector(f, prec) # 3D density profile\n fpu.read_vector(f, prec) #dummy \n fpu.read_vector(f, prec) #dummy \n fpu.read_vector(f, prec) #dummy \n fpu.read_vector(f, prec) #dummy \n [sigmadm, sigmastar] = fpu.read_vector(f, prec) # Velocity dispersions\n \n if contam:\n [contamlevel] = fpu.read_vector(f, 'i') # Contamination\n [mcontam, mtotcontam] = fpu.read_vector(f, prec) # Mass of contaminated particles\n [ncontam, 
ntotcontam] = fpu.read_vector(f, iprec) # Mass of contaminated particles\n \n n_sersic = 0\n arg = rho > 0 \n if True in arg:\n #fit with a Sersic profile\n (_,n_sersic),pcov = curve_fit(lambda r,I0,n: I0-(2*n-1/3)*(r/reff/1e3)**(1/n),\n rr[arg], np.log(rho[arg]),\n p0 = [np.log(mstar/reff**2*1e6), 1],\n bounds = ([-np.inf,0.5], [np.inf,20]), maxfev = 10000)\n \n halodata = [ID, nbpart, level, listp.min(),\n host, hostsub, nbsub, nextsub,\n x, y, z, vx, vy, vz, Lx, Ly, Lz,\n a, b, c, ek, ep, et, rho0, r_c,\n spin, m, ntot, mtot, r, mvir, rvir, tvir, cvel,\n rvmax, vmax, cNFW,\n r200, m200,\n r50, r90, sigma,\n ndm, nstar, mdm, mstar, ntotdm, ntotstar, mtotdm, mtotstar,\n xdm, ydm, zdm, xstar, ystar, zstar,\n vxdm, vydm, vzdm, vxstar, vystar, vzstar,\n rdm, rstar,\n adm, bdm, cdm, astar, bstar, cstar,\n sigmadm, sigmastar,\n reff, Zstar, tstar,\n sfr10, sfr100, sfr1000,\n r50dm, r90dm, r50star, r90star,\n Vsigma, sigma1D,\n Vsigma_disc, sigma1D_disc,\n sigma_bulge, mbulge,\n n_sersic]\n #rr, rho,\n #rr3D, rho3D,\n #rr3Ddm, rho3Ddm, rr3Dstar, rho3Dstar]\n if contam:\n halodata.append(contamlevel)\n halodata.append(mcontam)\n halodata.append(mtotcontam)\n halodata.append(ncontam)\n halodata.append(ntotcontam)\n \n data[ihalo] = halodata\n \n pbar.update()\n \n types = {}\n for k in ('ID', 'nbpart', 'level', 'min_part_id',\n 'host', 'hostsub', 'nbsub', 'nextsub', 'contam',\n 'nDM', 'nstar', 'ntot', 'ntotDM', 'ntotstar',\n 'ncontam', 'ntotcontam'):\n types[k] = np.int64\n for k in ('x', 'y', 'z', 'vx', 'vy', 'vz', 'Lx', 'Ly', 'Lz',\n 'a', 'b', 'c', 'ek', 'ep', 'et', 'rho0', 'r_c',\n 'spin', 'm', 'mtot', 'r', 'mvir', 'rvir', 'tvir', 'cvel',\n 'rvmax', 'vmax', 'cNFW',\n 'r200', 'm200',\n 'r50', 'r90',\n 'mDM', 'mstar', 'mtotDM', 'mtotstar',\n 'xDM', 'yDM', 'zDM', 'xstar', 'ystar', 'zstar',\n 'vxDM', 'vyDM', 'vzDM', 'vxstar', 'vystar', 'vzstar',\n 'rDM', 'rstar', 'sigmaDM', 'sigmastar',\n 'aDM', 'bDM', 'cDM', 'astar', 'bstar', 'cstar',\n 'reff', 'Zstar', 'tstar',\n 'sfr10', 'sfr100', 'sfr1000',\n 'r50DM', 'r90DM', 'r50star', 'r90star',\n 'Vsigma', 'sigma1D',\n 'Vsigma_disc', 'sigma1D_disc',\n 'sigma', 'sigma_bulge', 'mbulge',\n 'mcontam', 'mtotcontam',\n 'n_sersic'):\n types[k] = np.float64\n #for k in ('rr', 'rho',\n # 'rr3D', 'rr3DDM', 'rr3Dstar',\n # 'rho3D', 'rho3DDM', 'rho3Dstar'\n # ):\n # types[k] = 'object'\n dd = {k: data[:, i].astype(types[k])\n for i, k in enumerate(halo_keys)}\n \n halos = pd.DataFrame(dd)\n halos.set_index('ID', inplace=True)\n \n # Get properties in theright units\n # Masses\n halos.m *= 1e11\n halos.mvir *= 1e11\n halos.m200 *= 1e11\n halos.mbulge *= 1e11\n halos.mstar *= 1e11\n halos.mDM *= 1e11\n halos.mtot *= 1e11\n halos.mtotDM *= 1e11\n halos.mtotstar *= 1e11\n if contam:\n halos.mcontam *= 1e11\n halos.mtotcontam *= 1e11\n # SFR\n halos.sfr10 *= 1e11\n halos.sfr100 *= 1e11\n halos.sfr1000 *= 1e11\n # Positions and distances\n data_set = yt.load('../Outputs/output_{:05}/info_{:05}.txt'.format(self.iout, self.iout))\n scale_mpc = float(data_set.length_unit.in_units('cm') / 3.08e24)\n halos.x = halos.x / scale_mpc + .5\n halos.y = halos.y / scale_mpc + .5\n halos.z = halos.z / scale_mpc + .5\n halos.xDM = halos.xDM / scale_mpc + .5\n halos.yDM = halos.yDM / scale_mpc + .5\n halos.zDM = halos.zDM / scale_mpc + .5\n halos.xstar = halos.xstar / scale_mpc + .5\n halos.ystar = halos.ystar / scale_mpc + .5\n halos.zstar = halos.zstar / scale_mpc + .5\n \n # Some cheap derived quantitites\n halos['fstar'] = halos.mstar/halos.m\n halos['fstartot'] = 
halos.mtotstar/halos.mtot\n # Contamination fraction\n if contam:\n # Mass fractions\n halos['fcontam_mass'] = halos.mcontam / halos.mDM\n halos['fcontam_mass_tot'] = halos.mtotcontam / halos.mtotDM\n halos['fcontam_nb'] = halos.ncontam / halos.nDM\n halos['fcontam_nb_tot'] = halos.ntotcontam / halos.ntotDM\n # Propagate values from the host\n halos['ncontam_host'] = halos.ntotcontam.loc[halos.host].values\n halos['mcontam_host'] = halos.mtotcontam.loc[halos.host].values\n halos['fcontam_nb_host'] = halos.fcontam_nb_tot.loc[halos.host].values\n halos['fcontam_mass_host'] = halos.fcontam_mass_tot.loc[halos.host].values\n # Max contam\n halos['fcontam_mass_max'] = np.maximum.reduce([halos.fcontam_mass.values,\n halos.fcontam_mass_tot.values,\n halos.fcontam_mass_host.values])\n halos['fcontam_nb_max'] = np.maximum.reduce([halos.fcontam_nb.values,\n halos.fcontam_nb_tot.values,\n halos.fcontam_nb_host.values])\n # For subs, check if within rvir\n dist_to_host = np.sqrt((halos.x.values - halos.x.loc[halos.host].values)**2 +\n (halos.y.values - halos.y.loc[halos.host].values)**2 +\n (halos.z.values - halos.z.loc[halos.host].values)**2) * scale_mpc\n dist_to_host_rvir = dist_to_host / halos.rvir.loc[halos.host].values\n halos['within_host'] = np.logical_and(dist_to_host_rvir <= 1, halos.level.values > 1)\n \n return halos \n\nmain()\n"
] | [
[
"numpy.loadtxt",
"numpy.array",
"pandas.DataFrame"
],
[
"numpy.log",
"numpy.sqrt",
"pandas.DataFrame",
"numpy.logical_and",
"numpy.maximum.reduce"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
gr33n-made/catalyst | [
"bd413abc908ef7cbdeab42b0e805277a791e3ddb",
"bd413abc908ef7cbdeab42b0e805277a791e3ddb",
"bd413abc908ef7cbdeab42b0e805277a791e3ddb",
"bd413abc908ef7cbdeab42b0e805277a791e3ddb",
"bd413abc908ef7cbdeab42b0e805277a791e3ddb",
"bd413abc908ef7cbdeab42b0e805277a791e3ddb"
] | [
"tests/pipelines/test_distillation.py",
"tests/pipelines/test_multilabel_classification.py",
"tests/catalyst/metrics/functional/test_cmc_metric.py",
"tests/pipelines/test_optuna.py",
"tests/catalyst/metrics/functional/test_average_precision.py",
"tests/catalyst/engines/test_parallel_amp.py"
] | [
"# flake8: noqa\n\nimport os\nfrom tempfile import TemporaryDirectory\n\nfrom pytest import mark\nimport torch\nfrom torch import nn, optim\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader\n\nfrom catalyst import dl\nfrom catalyst.contrib.datasets import MNIST\nfrom catalyst.data import ToTensor\nfrom catalyst.settings import IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES, SETTINGS\n\n\nclass DistilRunner(dl.Runner):\n def handle_batch(self, batch):\n x, y = batch\n\n self.model[\"teacher\"].eval() # let's manually set teacher model to eval mode\n with torch.no_grad():\n t_logits = self.model[\"teacher\"](x)\n\n s_logits = self.model[\"student\"](x)\n self.batch = {\n \"t_logits\": t_logits,\n \"s_logits\": s_logits,\n \"targets\": y,\n \"s_logprobs\": F.log_softmax(s_logits, dim=-1),\n \"t_probs\": F.softmax(t_logits, dim=-1),\n }\n\n\ndef train_experiment(device, engine=None):\n with TemporaryDirectory() as logdir:\n teacher = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))\n student = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))\n model = {\"teacher\": teacher, \"student\": student}\n criterion = {\"cls\": nn.CrossEntropyLoss(), \"kl\": nn.KLDivLoss(reduction=\"batchmean\")}\n optimizer = optim.Adam(student.parameters(), lr=0.02)\n\n loaders = {\n \"train\": DataLoader(\n MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32\n ),\n \"valid\": DataLoader(\n MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32\n ),\n }\n\n runner = DistilRunner()\n # model training\n runner.train(\n engine=engine or dl.DeviceEngine(device),\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n loaders=loaders,\n num_epochs=1,\n logdir=logdir,\n verbose=False,\n callbacks=[\n dl.AccuracyCallback(\n input_key=\"t_logits\", target_key=\"targets\", num_classes=2, prefix=\"teacher_\"\n ),\n dl.AccuracyCallback(\n input_key=\"s_logits\", target_key=\"targets\", num_classes=2, prefix=\"student_\"\n ),\n dl.CriterionCallback(\n input_key=\"s_logits\",\n target_key=\"targets\",\n metric_key=\"cls_loss\",\n criterion_key=\"cls\",\n ),\n dl.CriterionCallback(\n input_key=\"s_logprobs\",\n target_key=\"t_probs\",\n metric_key=\"kl_div_loss\",\n criterion_key=\"kl\",\n ),\n dl.MetricAggregationCallback(\n metric_key=\"loss\", metrics=[\"kl_div_loss\", \"cls_loss\"], mode=\"mean\"\n ),\n dl.OptimizerCallback(metric_key=\"loss\", model_key=\"student\"),\n dl.CheckpointCallback(\n logdir=logdir,\n loader_key=\"valid\",\n metric_key=\"loss\",\n minimize=True,\n save_n_best=3,\n ),\n ],\n )\n\n\n# Torch\ndef test_distillation_on_cpu():\n train_experiment(\"cpu\")\n\n\[email protected](not IS_CUDA_AVAILABLE, reason=\"CUDA device is not available\")\ndef test_distillation_on_torch_cuda0():\n train_experiment(\"cuda:0\")\n\n\[email protected](\n not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2), reason=\"No CUDA>=2 found\",\n)\ndef test_distillation_on_torch_cuda1():\n train_experiment(\"cuda:1\")\n\n\[email protected](\n not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2), reason=\"No CUDA>=2 found\",\n)\ndef test_distillation_on_torch_dp():\n train_experiment(None, dl.DataParallelEngine())\n\n\[email protected](\n not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2), reason=\"No CUDA>=2 found\",\n)\ndef test_distillation_on_torch_ddp():\n train_experiment(None, dl.DistributedDataParallelEngine())\n\n\n# AMP\[email protected](\n not (IS_CUDA_AVAILABLE and SETTINGS.amp_required), reason=\"No CUDA or AMP found\",\n)\ndef 
test_distillation_on_amp():\n train_experiment(None, dl.AMPEngine())\n\n\[email protected](\n not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.amp_required),\n reason=\"No CUDA>=2 or AMP found\",\n)\ndef test_distillation_on_amp_dp():\n train_experiment(None, dl.DataParallelAMPEngine())\n\n\[email protected](\n not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.amp_required),\n reason=\"No CUDA>=2 or AMP found\",\n)\ndef test_distillation_on_amp_ddp():\n train_experiment(None, dl.DistributedDataParallelAMPEngine())\n\n\n# APEX\[email protected](\n not (IS_CUDA_AVAILABLE and SETTINGS.apex_required), reason=\"No CUDA or Apex found\",\n)\ndef test_distillation_on_apex():\n train_experiment(None, dl.APEXEngine())\n\n\[email protected](\n not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.apex_required),\n reason=\"No CUDA>=2 or Apex found\",\n)\ndef test_distillation_on_apex_dp():\n train_experiment(None, dl.DataParallelAPEXEngine())\n\n\n# @mark.skipif(\n# not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.apex_required),\n# reason=\"No CUDA>=2 or Apex found\",\n# )\n# def test_distillation_on_apex_ddp():\n# train_experiment(None, dl.DistributedDataParallelApexEngine())\n",
"# flake8: noqa\n\nfrom tempfile import TemporaryDirectory\n\nfrom pytest import mark\nimport torch\nfrom torch.utils.data import DataLoader, TensorDataset\n\nfrom catalyst import dl\nfrom catalyst.settings import IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES, SETTINGS\n\n\ndef train_experiment(device, engine=None):\n with TemporaryDirectory() as logdir:\n # sample data\n num_samples, num_features, num_classes = int(1e4), int(1e1), 4\n X = torch.rand(num_samples, num_features)\n y = (torch.rand(num_samples, num_classes) > 0.5).to(torch.float32)\n\n # pytorch loaders\n dataset = TensorDataset(X, y)\n loader = DataLoader(dataset, batch_size=32, num_workers=1)\n loaders = {\"train\": loader, \"valid\": loader}\n\n # model, criterion, optimizer, scheduler\n model = torch.nn.Linear(num_features, num_classes)\n criterion = torch.nn.BCEWithLogitsLoss()\n optimizer = torch.optim.Adam(model.parameters())\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [2])\n\n # model training\n runner = dl.SupervisedRunner(\n input_key=\"features\", output_key=\"logits\", target_key=\"targets\", loss_key=\"loss\"\n )\n callbacks = [\n dl.BatchTransformCallback(\n transform=\"F.sigmoid\",\n scope=\"on_batch_end\",\n input_key=\"logits\",\n output_key=\"scores\",\n ),\n dl.MultilabelAccuracyCallback(input_key=\"scores\", target_key=\"targets\", threshold=0.5),\n dl.MultilabelPrecisionRecallF1SupportCallback(\n input_key=\"scores\", target_key=\"targets\", num_classes=num_classes\n ),\n ]\n if SETTINGS.amp_required and (\n engine is None\n or not isinstance(\n engine,\n (dl.AMPEngine, dl.DataParallelAMPEngine, dl.DistributedDataParallelAMPEngine),\n )\n ):\n callbacks.append(dl.AUCCallback(input_key=\"scores\", target_key=\"targets\"))\n runner.train(\n engine=engine or dl.DeviceEngine(device),\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n scheduler=scheduler,\n loaders=loaders,\n logdir=logdir,\n num_epochs=1,\n valid_loader=\"valid\",\n valid_metric=\"accuracy\",\n minimize_valid_metric=False,\n verbose=False,\n callbacks=callbacks,\n )\n\n\n# Torch\ndef test_on_cpu():\n train_experiment(\"cpu\")\n\n\[email protected](not IS_CUDA_AVAILABLE, reason=\"CUDA device is not available\")\ndef test_on_torch_cuda0():\n train_experiment(\"cuda:0\")\n\n\[email protected](\n not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2), reason=\"No CUDA>=2 found\",\n)\ndef test_on_torch_cuda1():\n train_experiment(\"cuda:1\")\n\n\[email protected](\n not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2), reason=\"No CUDA>=2 found\",\n)\ndef test_on_torch_dp():\n train_experiment(None, dl.DataParallelEngine())\n\n\[email protected](\n not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2), reason=\"No CUDA>=2 found\",\n)\ndef test_on_torch_ddp():\n train_experiment(None, dl.DistributedDataParallelEngine())\n\n\n# AMP\[email protected](\n not (IS_CUDA_AVAILABLE and SETTINGS.amp_required), reason=\"No CUDA or AMP found\",\n)\ndef test_on_amp():\n train_experiment(None, dl.AMPEngine())\n\n\[email protected](\n not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.amp_required),\n reason=\"No CUDA>=2 or AMP found\",\n)\ndef test_on_amp_dp():\n train_experiment(None, dl.DataParallelAMPEngine())\n\n\[email protected](\n not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.amp_required),\n reason=\"No CUDA>=2 or AMP found\",\n)\ndef test_on_amp_ddp():\n train_experiment(None, dl.DistributedDataParallelAMPEngine())\n\n\n# APEX\[email protected](\n not (IS_CUDA_AVAILABLE and SETTINGS.apex_required), reason=\"No CUDA or 
Apex found\",\n)\ndef test_on_apex():\n train_experiment(None, dl.APEXEngine())\n\n\[email protected](\n not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.apex_required),\n reason=\"No CUDA>=2 or Apex found\",\n)\ndef test_on_apex_dp():\n train_experiment(None, dl.DataParallelAPEXEngine())\n\n\[email protected](\n not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.apex_required),\n reason=\"No CUDA>=2 or Apex found\",\n)\ndef test_on_apex_ddp():\n train_experiment(None, dl.DistributedDataParallelAPEXEngine())\n",
"# flake8: noqa\nfrom typing import List, Tuple\nfrom itertools import chain\n\nimport numpy as np\nimport pytest\nimport torch\n\nfrom catalyst.metrics.functional._cmc_score import cmc_score, cmc_score_count, masked_cmc_score\n\nEPS = 1e-4\n\nTEST_DATA_SIMPLE = (\n # (distance_matrix, conformity_matrix, topk, expected_value)\n (torch.tensor([[1, 2], [2, 1]]), torch.tensor([[0, 1], [1, 0]]), 1, 0.0),\n (torch.tensor([[0, 0.5], [0.0, 0.5]]), torch.tensor([[0, 1], [1, 0]]), 1, 0.5),\n (torch.tensor([[0, 0.5], [0.0, 0.5]]), torch.tensor([[0, 1], [1, 0]]), 2, 1),\n (\n torch.tensor([[1, 0.5, 0.2], [2, 3, 4], [0.4, 3, 4]]),\n torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),\n 2,\n 1 / 3,\n ),\n (torch.randn((10, 10)), torch.ones((10, 10)), 1, 1),\n)\n\nTEST_DATA_LESS_SMALL = (\n (torch.rand((10, 10)) + torch.tril(torch.ones((10, 10))), torch.eye(10), i, i / 10)\n for i in range(1, 10)\n)\n\nTEST_DATA_GREATER_SMALL = (\n (\n torch.rand((10, 10)) + torch.triu(torch.ones((10, 10)), diagonal=1),\n torch.eye(10),\n i,\n i / 10,\n )\n for i in range(1, 10)\n)\n\nTEST_DATA_LESS_BIG = (\n (torch.rand((100, 100)) + torch.tril(torch.ones((100, 100))), torch.eye(100), i, i / 100)\n for i in range(1, 101, 10)\n)\n\n\[email protected](\"distance_matrix,conformity_matrix,topk,expected\", TEST_DATA_SIMPLE)\ndef test_metric_count(distance_matrix, conformity_matrix, topk, expected):\n \"\"\"Simple test\"\"\"\n out = cmc_score_count(\n distances=distance_matrix, conformity_matrix=conformity_matrix, topk=topk,\n )\n assert np.isclose(out, expected)\n\n\[email protected](\n \"distance_matrix,conformity_matrix,topk,expected\",\n chain(TEST_DATA_LESS_SMALL, TEST_DATA_LESS_BIG),\n)\ndef test_metric_less(distance_matrix, conformity_matrix, topk, expected):\n \"\"\"Simple test\"\"\"\n out = cmc_score_count(\n distances=distance_matrix, conformity_matrix=conformity_matrix, topk=topk,\n )\n assert out - EPS <= expected\n\n\[email protected](\n \"distance_matrix,conformity_matrix,topk,expected\", chain(TEST_DATA_GREATER_SMALL),\n)\ndef test_metric_greater(distance_matrix, conformity_matrix, topk, expected):\n \"\"\"Simple test\"\"\"\n out = cmc_score_count(\n distances=distance_matrix, conformity_matrix=conformity_matrix, topk=topk,\n )\n assert out + EPS >= expected\n\n\[email protected]\ndef generate_samples_for_cmc_score() -> List[\n Tuple[float, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]\n]:\n \"\"\"\n Generate list of query and gallery data for cmc score testing.\n \"\"\"\n data = []\n for error_rate in [\n 0.05,\n 0.1,\n 0.15,\n 0.2,\n 0.25,\n ]:\n # generate params of the datasets\n class_number = np.random.randint(low=2, high=10)\n kq = np.random.randint(low=1000, high=1500)\n kg = np.random.randint(low=500, high=1000)\n\n def generate_samples(n_labels, samples_per_label):\n samples = []\n labels = []\n # for each label generate dots that will be close to each other and\n # distanced from samples of other classes\n for i in range(n_labels):\n tmp_samples = np.random.uniform(\n low=2 * i, high=2 * i + 0.2, size=(samples_per_label,)\n )\n samples = np.concatenate((samples, tmp_samples))\n labels = np.concatenate((labels, [i] * samples_per_label))\n return samples.reshape((-1, 1)), labels\n\n query_embs, query_labels = generate_samples(n_labels=class_number, samples_per_label=kq)\n\n gallery_embs, gallery_labels = generate_samples(\n n_labels=class_number, samples_per_label=kg\n )\n\n # spoil generated gallery dataset: for each sample from data change\n # label to any other one with probability 
error_rate\n def confuse_labels(labels, error_rate):\n unique_labels = set(labels)\n size = len(labels)\n for i in range(size):\n if np.random.binomial(n=1, p=error_rate, size=1)[0]:\n labels[i] = np.random.choice(list(unique_labels - {labels[i]}))\n return labels\n\n gallery_labels = confuse_labels(gallery_labels, error_rate=error_rate)\n\n query_embs = torch.tensor(query_embs)\n gallery_embs = torch.tensor(gallery_embs)\n query_labels = torch.tensor(query_labels, dtype=torch.long)\n gallery_labels = torch.tensor(gallery_labels, dtype=torch.long)\n\n data.append((error_rate, query_embs, query_labels, gallery_embs, gallery_labels,))\n return data\n\n\ndef test_cmc_score_with_samples(generate_samples_for_cmc_score):\n \"\"\"\n Count cmc score callback for sets of well-separated data clusters labeled\n with error_rate probability mistake.\n \"\"\"\n for (\n error_rate,\n query_embs,\n query_labels,\n gallery_embs,\n gallery_labels,\n ) in generate_samples_for_cmc_score:\n true_cmc_01 = 1 - error_rate\n conformity_matrix = (query_labels.reshape((-1, 1)) == gallery_labels).to(torch.bool)\n cmc = cmc_score(\n query_embeddings=query_embs,\n gallery_embeddings=gallery_embs,\n conformity_matrix=conformity_matrix,\n topk=1,\n )\n assert abs(cmc - true_cmc_01) <= 0.05\n\n\[email protected](\n (\n \"query_embeddings\",\n \"gallery_embeddings\",\n \"conformity_matrix\",\n \"available_samples\",\n \"topk\",\n \"expected\",\n ),\n (\n (\n torch.tensor([[1, 1, 0, 0], [1, 0, 0, 0], [0, 1, 1, 1], [0, 0, 1, 1],]).float(),\n torch.tensor([[1, 1, 1, 0], [1, 1, 1, 1], [0, 1, 1, 0],]).float(),\n torch.tensor(\n [\n [True, False, False],\n [True, False, False],\n [False, True, True],\n [False, True, True],\n ]\n ),\n torch.tensor(\n [[False, True, True], [True, True, True], [True, False, True], [True, True, True],]\n ),\n 1,\n 0.75,\n ),\n (\n torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 1],]).float(),\n torch.tensor([[0, 1, 0], [0, 0, 1], [1, 0, 1],]).float(),\n torch.tensor(\n [\n [False, False, True],\n [True, False, False],\n [False, True, False],\n [False, False, True],\n ]\n ),\n torch.tensor(\n [\n [True, True, True],\n [False, True, True],\n [True, False, True],\n [True, True, False],\n ]\n ),\n 1,\n 0.25,\n ),\n ),\n)\ndef test_masked_cmc_score(\n query_embeddings, gallery_embeddings, conformity_matrix, available_samples, topk, expected,\n):\n score = masked_cmc_score(\n query_embeddings=query_embeddings,\n gallery_embeddings=gallery_embeddings,\n conformity_matrix=conformity_matrix,\n available_samples=available_samples,\n topk=topk,\n )\n assert score == expected\n\n\[email protected](\n (\"query_embeddings\", \"gallery_embeddings\", \"conformity_matrix\", \"available_samples\", \"topk\",),\n (\n (\n torch.rand(size=(query_size, 32)).float(),\n torch.rand(size=(gallery_size, 32)).float(),\n torch.randint(low=0, high=2, size=(query_size, gallery_size)).bool(),\n torch.ones(size=(query_size, gallery_size)).bool(),\n k,\n )\n for query_size, gallery_size, k in zip(\n list(range(10, 20)), list(range(25, 35)), list(range(1, 11))\n )\n ),\n)\ndef test_no_mask_cmc_score(\n query_embeddings, gallery_embeddings, conformity_matrix, available_samples, topk,\n) -> None:\n \"\"\"\n In this test we just check that masked_cmc_score is equal to cmc_score\n when all the samples are available for for scoring.\n \"\"\"\n masked_score = masked_cmc_score(\n query_embeddings=query_embeddings,\n gallery_embeddings=gallery_embeddings,\n conformity_matrix=conformity_matrix,\n available_samples=available_samples,\n 
topk=topk,\n )\n score = cmc_score(\n query_embeddings=query_embeddings,\n gallery_embeddings=gallery_embeddings,\n conformity_matrix=conformity_matrix,\n topk=topk,\n )\n assert masked_score == score\n",
"# flake8: noqa\n\nimport os\nfrom tempfile import TemporaryDirectory\n\nfrom pytest import mark\nimport torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader\n\nfrom catalyst import dl\nfrom catalyst.contrib.datasets import MNIST\nfrom catalyst.data import ToTensor\nfrom catalyst.settings import IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES, SETTINGS\n\nif SETTINGS.optuna_required:\n import optuna\n\n\ndef train_experiment(device, engine=None):\n with TemporaryDirectory() as logdir:\n\n def objective(trial):\n lr = trial.suggest_loguniform(\"lr\", 1e-3, 1e-1)\n num_hidden = int(trial.suggest_loguniform(\"num_hidden\", 32, 128))\n\n loaders = {\n \"train\": DataLoader(\n MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()),\n batch_size=32,\n ),\n \"valid\": DataLoader(\n MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()),\n batch_size=32,\n ),\n }\n model = nn.Sequential(\n nn.Flatten(), nn.Linear(784, num_hidden), nn.ReLU(), nn.Linear(num_hidden, 10)\n )\n optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n criterion = nn.CrossEntropyLoss()\n\n runner = dl.SupervisedRunner(\n input_key=\"features\", output_key=\"logits\", target_key=\"targets\"\n )\n runner.train(\n engine=engine or dl.DeviceEngine(device),\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n loaders=loaders,\n callbacks={\n \"optuna\": dl.OptunaPruningCallback(\n loader_key=\"valid\", metric_key=\"accuracy01\", minimize=False, trial=trial\n ),\n \"accuracy\": dl.AccuracyCallback(\n input_key=\"logits\", target_key=\"targets\", num_classes=10\n ),\n },\n num_epochs=2,\n )\n score = trial.best_score\n return score\n\n study = optuna.create_study(\n direction=\"maximize\",\n pruner=optuna.pruners.MedianPruner(\n n_startup_trials=1, n_warmup_steps=0, interval_steps=1\n ),\n )\n study.optimize(objective, n_trials=3, timeout=300)\n print(study.best_value, study.best_params)\n\n\n# Torch\[email protected](not SETTINGS.optuna_required, reason=\"catalyst[optuna] in not required\")\ndef test_on_cpu():\n train_experiment(\"cpu\")\n\n\[email protected](\n not (IS_CUDA_AVAILABLE and SETTINGS.optuna_required), reason=\"CUDA device is not available\"\n)\ndef test_on_torch_cuda0():\n train_experiment(\"cuda:0\")\n\n\[email protected](\n not (SETTINGS.optuna_required and IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2),\n reason=\"No CUDA>=2 found\",\n)\ndef test_on_torch_cuda1():\n train_experiment(\"cuda:1\")\n\n\[email protected](\n not (SETTINGS.optuna_required and IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2),\n reason=\"No CUDA>=2 found\",\n)\ndef test_on_torch_dp():\n train_experiment(None, dl.DataParallelEngine())\n\n\n# @mark.skipif(\n# not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >=2),\n# reason=\"No CUDA>=2 found\",\n# )\n# def test_on_ddp():\n# train_experiment(None, dl.DistributedDataParallelEngine())\n\n# AMP\[email protected](\n not (SETTINGS.optuna_required and IS_CUDA_AVAILABLE and SETTINGS.amp_required),\n reason=\"No CUDA or AMP found\",\n)\ndef test_on_amp():\n train_experiment(None, dl.AMPEngine())\n\n\[email protected](\n not (\n SETTINGS.optuna_required\n and IS_CUDA_AVAILABLE\n and NUM_CUDA_DEVICES >= 2\n and SETTINGS.amp_required\n ),\n reason=\"No CUDA>=2 or AMP found\",\n)\ndef test_on_amp_dp():\n train_experiment(None, dl.DataParallelAMPEngine())\n\n\n# @mark.skipif(\n# not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.amp_required),\n# reason=\"No CUDA>=2 or AMP found\",\n# )\n# def test_on_amp_ddp():\n# train_experiment(None, 
dl.DistributedDataParallelAMPEngine())\n\n# APEX\[email protected](\n not (SETTINGS.optuna_required and IS_CUDA_AVAILABLE and SETTINGS.apex_required),\n reason=\"No CUDA or Apex found\",\n)\ndef test_on_apex():\n train_experiment(None, dl.APEXEngine())\n\n\[email protected](\n not (\n SETTINGS.optuna_required\n and IS_CUDA_AVAILABLE\n and NUM_CUDA_DEVICES >= 2\n and SETTINGS.apex_required\n ),\n reason=\"No CUDA>=2 or Apex found\",\n)\ndef test_on_apex_dp():\n train_experiment(None, dl.DataParallelAPEXEngine())\n\n\n# @mark.skipif(\n# not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.apex_required),\n# reason=\"No CUDA>=2 or Apex found\",\n# )\n# def test_on_apex_ddp():\n# train_experiment(None, dl.DistributedDataParallelApexEngine())\n",
"# flake8: noqa\nimport math\n\nimport numpy as np\nimport torch\n\nfrom catalyst.metrics.functional._average_precision import (\n average_precision,\n binary_average_precision,\n mean_average_precision,\n)\n\n\ndef test_binary_average_precision_base():\n \"\"\"\n Tests for catalyst.binary_average_precision metric.\n \"\"\"\n outputs = torch.Tensor([0.1, 0.4, 0.35, 0.8])\n targets = torch.Tensor([0, 0, 1, 1])\n\n assert torch.isclose(\n binary_average_precision(outputs, targets), torch.tensor(0.8333), atol=1e-3,\n )\n\n\ndef test_binary_average_precision_weighted():\n \"\"\"\n Tests for catalyst.binary_average_precision metric.\n \"\"\"\n target = torch.Tensor([0, 1, 0, 1])\n output = torch.Tensor([0.1, 0.2, 0.3, 4])\n weight = torch.Tensor([0.5, 1.0, 2.0, 0.1])\n ap = binary_average_precision(outputs=output, targets=target, weights=weight)\n val = (1 * 0.1 / 0.1 + 0 * 2.0 / 2.1 + 1.1 * 1 / 3.1 + 0 * 1 / 4) / 2.0\n assert math.fabs(ap - val) < 0.01, \"ap test1 failed\"\n\n ap = binary_average_precision(outputs=output, targets=target, weights=None)\n val = (1 * 1.0 / 1.0 + 0 * 1.0 / 2.0 + 2 * 1.0 / 3.0 + 0 * 1.0 / 4.0) / 2.0\n assert math.fabs(ap - val) < 0.01, \"ap test2 failed\"\n\n target = torch.Tensor([0, 1, 0, 1])\n output = torch.Tensor([4, 3, 2, 1])\n weight = torch.Tensor([1, 2, 3, 4])\n ap = binary_average_precision(outputs=output, targets=target, weights=weight)\n val = (0 * 1.0 / 1.0 + 1.0 * 2.0 / 3.0 + 2.0 * 0 / 6.0 + 6.0 * 1.0 / 10.0) / 2.0\n assert math.fabs(ap - val) < 0.01, \"ap test3 failed\"\n\n ap = binary_average_precision(outputs=output, targets=target, weights=None)\n val = (0 * 1.0 + 1 * 1.0 / 2.0 + 0 * 1.0 / 3.0 + 2 * 1.0 / 4.0) / 2.0\n assert math.fabs(ap - val) < 0.01, \"ap test4 failed\"\n\n target = torch.Tensor([0, 1, 0, 1])\n output = torch.Tensor([1, 4, 2, 3])\n weight = torch.Tensor([1, 2, 3, 4])\n ap = binary_average_precision(outputs=output, targets=target, weights=weight)\n val = (4 * 1.0 / 4.0 + 6 * 1.0 / 6.0 + 0 * 6.0 / 9.0 + 0 * 6.0 / 10.0) / 2.0\n assert math.fabs(ap - val) < 0.01, \"ap test5 failed\"\n\n ap = binary_average_precision(outputs=output, targets=target, weights=None)\n val = (1 * 1.0 + 2 * 1.0 / 2.0 + 0 * 1.0 / 3.0 + 0 * 1.0 / 4.0) / 2.0\n assert math.fabs(ap - val) < 0.01, \"ap test6 failed\"\n\n target = torch.Tensor([0, 0, 0, 0])\n output = torch.Tensor([1, 4, 2, 3])\n weight = torch.Tensor([1.0, 0.1, 0.0, 0.5])\n ap = binary_average_precision(outputs=output, targets=target, weights=weight)\n val = 0.0\n assert math.fabs(ap - val) < 0.01, \"ap test7 failed\"\n\n ap = binary_average_precision(outputs=output, targets=target, weights=None)\n val = 0.0\n assert math.fabs(ap - val) < 0.01, \"ap test8 failed\"\n\n target = torch.Tensor([1, 1, 0])\n output = torch.Tensor([3, 1, 2])\n weight = torch.Tensor([1, 0.1, 3])\n ap = binary_average_precision(outputs=output, targets=target, weights=weight)\n val = (1 * 1.0 / 1.0 + 1 * 0.0 / 4.0 + 1.1 / 4.1) / 2.0\n assert math.fabs(ap - val) < 0.01, \"ap test9 failed\"\n\n ap = binary_average_precision(outputs=output, targets=target, weights=None)\n val = (1 * 1.0 + 0 * 1.0 / 2.0 + 2 * 1.0 / 3.0) / 2.0\n assert math.fabs(ap - val) < 0.01, \"ap test10 failed\"\n\n # Test multiple K's\n target = torch.Tensor([[0, 1, 0, 1], [0, 1, 0, 1]]).transpose(0, 1)\n output = torch.Tensor([[0.1, 0.2, 0.3, 4], [4, 3, 2, 1]]).transpose(0, 1)\n weight = torch.Tensor([[1.0, 0.5, 2.0, 3.0]]).transpose(0, 1)\n ap = binary_average_precision(outputs=output, targets=target, weights=weight)\n assert (\n math.fabs(\n 
ap.sum()\n - torch.Tensor(\n [\n (1 * 3.0 / 3.0 + 0 * 3.0 / 5.0 + 3.5 * 1 / 5.5 + 0 * 3.5 / 6.5) / 2.0,\n (0 * 1.0 / 1.0 + 1 * 0.5 / 1.5 + 0 * 0.5 / 3.5 + 1 * 3.5 / 6.5) / 2.0,\n ]\n ).sum()\n )\n < 0.01\n ), \"ap test11 failed\"\n\n ap = binary_average_precision(outputs=output, targets=target, weights=None)\n assert (\n math.fabs(\n ap.sum()\n - torch.Tensor(\n [\n (1 * 1.0 + 0 * 1.0 / 2.0 + 2 * 1.0 / 3 + 0 * 1.0 / 4.0) / 2.0,\n (0 * 1.0 + 1 * 1.0 / 2.0 + 0 * 1.0 / 3.0 + 2.0 * 1.0 / 4.0) / 2.0,\n ]\n ).sum()\n )\n < 0.01\n ), \"ap test12 failed\"\n\n\ndef test_average_precision():\n \"\"\"\n Tests for catalyst.metrics.average_precision metric.\n \"\"\"\n # # check everything is relevant\n y_pred = [0.5, 0.2, 0.3, 0.8]\n y_true = [1.0, 1.0, 1.0, 1.0]\n k = 4\n\n avg_precision = average_precision(torch.Tensor([y_pred]), torch.Tensor([y_true]), k)\n assert avg_precision[0] == 1\n\n # # check is everything is relevant for 3 users\n y_pred = [0.5, 0.2, 0.3, 0.8]\n y_true = [1.0, 1.0, 1.0, 1.0]\n k = 4\n\n avg_precision = average_precision(\n torch.Tensor([y_pred, y_pred, y_pred]), torch.Tensor([y_true, y_true, y_true]), k,\n )\n assert torch.equal(avg_precision, torch.ones(3))\n\n # # check everything is irrelevant\n y_pred = [0.5, 0.2, 0.3, 0.8]\n y_true = [0.0, 0.0, 0.0, 0.0]\n k = 4\n\n avg_precision = average_precision(torch.Tensor([y_pred]), torch.Tensor([y_true]), k)\n assert avg_precision[0] == 0\n\n # # check is everything is irrelevant for 3 users\n y_pred = [0.5, 0.2, 0.3, 0.8]\n y_true = [0.0, 0.0, 0.0, 0.0]\n k = 4\n\n avg_precision = average_precision(\n torch.Tensor([y_pred, y_pred, y_pred]), torch.Tensor([y_true, y_true, y_true]), k,\n )\n assert torch.equal(avg_precision, torch.zeros(3))\n\n # # check 4\n y_pred1 = [4.0, 2.0, 3.0, 1.0]\n y_pred2 = [1.0, 2.0, 3.0, 4.0]\n y_true1 = [0.0, 1.0, 1.0, 1.0]\n y_true2 = [0.0, 1.0, 0.0, 0.0]\n k = 4\n\n y_pred_torch = torch.Tensor([y_pred1, y_pred2])\n y_true_torch = torch.Tensor([y_true1, y_true2])\n\n avg_precision = average_precision(y_pred_torch, y_true_torch, k)\n\n assert np.isclose(avg_precision[0], 0.6389, atol=1e-3)\n assert np.isclose(avg_precision[1], 0.333, atol=1e-3)\n\n # check 5\n # Stanford Introdcution to information retrieval primer\n y_pred1 = np.arange(9, -1, -1)\n y_true1 = [1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0]\n y_pred2 = np.arange(9, -1, -1)\n y_true2 = [0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]\n k = 10\n\n y_pred_torch = torch.Tensor([y_pred1, y_pred2])\n y_true_torch = torch.Tensor([y_true1, y_true2])\n\n avg_precision = average_precision(y_pred_torch, y_true_torch, k)\n\n assert np.isclose(avg_precision[0], 0.6222, atol=1e-3)\n assert np.isclose(avg_precision[1], 0.4429, atol=1e-3)\n\n\ndef test_mean_avg_precision():\n \"\"\"\n Tests for catalyst.mean_avg_precision metric.\n \"\"\"\n # check 1\n # Stanford Introdcution to information retrieval primer\n y_pred1 = np.arange(9, -1, -1)\n y_true1 = [1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0]\n y_pred2 = np.arange(9, -1, -1)\n y_true2 = [0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]\n\n y_pred_torch = torch.Tensor([y_pred1, y_pred2])\n y_true_torch = torch.Tensor([y_true1, y_true2])\n\n top_k = [10]\n map_at10 = mean_average_precision(y_pred_torch, y_true_torch, top_k)[0]\n\n assert np.allclose(map_at10, 0.5325, atol=1e-3)\n\n # check 2\n # map_at1: (1.0 + 0.0) / 2 = 0.5\n # map_at3: ((1 + 0.67)/2 + 0.5) / 2 = 0.6675\n # map_at5: ((1 + 0.67)/2 + (0.5 + 0.4)/2) / 2 = 0.6425\n # map_at10: ((1 + 0.67 + 0.5 + 0.44 + 0.5)/5 + (0.5 + 0.4 + 
0.43)/3 ) / 2 = 0.53\n\n y_pred1 = [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]\n y_pred2 = [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]\n\n y_true1 = [1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0]\n y_true2 = [0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]\n\n y_pred_torch = torch.Tensor([y_pred1, y_pred2])\n y_true_torch = torch.Tensor([y_true1, y_true2])\n\n top_k = [1, 3, 5, 10]\n\n map_k = mean_average_precision(y_pred_torch, y_true_torch, top_k)\n\n map_at1 = map_k[0]\n map_at3 = map_k[1]\n map_at5 = map_k[2]\n map_at10 = map_k[3]\n\n assert np.allclose(map_at1, 0.5, atol=1e-3)\n assert np.allclose(map_at3, 0.6675, atol=1e-3)\n assert np.allclose(map_at5, 0.6425, atol=1e-3)\n assert np.allclose(map_at10, 0.5325, atol=1e-3)\n",
"# flake8: noqa\n\nfrom typing import Any, Dict, List\nimport logging\nfrom tempfile import TemporaryDirectory\n\nfrom pytest import mark\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom catalyst.callbacks import CheckpointCallback, CriterionCallback, OptimizerCallback\nfrom catalyst.core.runner import IRunner\nfrom catalyst.loggers import ConsoleLogger, CSVLogger\nfrom catalyst.runners.config import SupervisedConfigRunner\nfrom catalyst.settings import IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES, SETTINGS\n\nif SETTINGS.amp_required:\n from catalyst.engines.amp import DataParallelAMPEngine\n\nfrom .misc import (\n DataParallelTypeChecker,\n DummyDataset,\n DummyModel,\n LossMinimizationCallback,\n TensorTypeChecker,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass CustomRunner(IRunner):\n def __init__(self, logdir):\n super().__init__()\n self._logdir = logdir\n\n def get_engine(self):\n return DataParallelAMPEngine()\n\n def get_callbacks(self, stage: str):\n return {\n \"criterion\": CriterionCallback(\n metric_key=\"loss\", input_key=\"logits\", target_key=\"targets\"\n ),\n \"optimizer\": OptimizerCallback(metric_key=\"loss\"),\n # \"scheduler\": dl.SchedulerCallback(loader_key=\"valid\", metric_key=\"loss\"),\n \"checkpoint\": CheckpointCallback(\n self._logdir, loader_key=\"valid\", metric_key=\"loss\", minimize=True, save_n_best=3\n ),\n \"test_nn_parallel_data_parallel\": DataParallelTypeChecker(),\n \"test_loss_minimization\": LossMinimizationCallback(\"loss\", logger=logger),\n \"test_logits_type\": TensorTypeChecker(\"logits\"),\n }\n\n @property\n def stages(self) -> \"Iterable[str]\":\n return [\"train\"]\n\n def get_stage_len(self, stage: str) -> int:\n return 10\n\n def get_loaders(self, stage: str) -> \"OrderedDict[str, DataLoader]\":\n dataset = DummyDataset(6)\n loader = DataLoader(dataset, batch_size=4)\n return {\"train\": loader, \"valid\": loader}\n\n def get_model(self, stage: str):\n return DummyModel(4, 2)\n\n def get_criterion(self, stage: str):\n return torch.nn.MSELoss()\n\n def get_optimizer(self, model, stage: str):\n return torch.optim.Adam(model.parameters())\n\n def get_scheduler(self, optimizer, stage: str):\n return None\n\n def get_trial(self):\n return None\n\n def get_loggers(self):\n return {\"console\": ConsoleLogger(), \"csv\": CSVLogger(logdir=self._logdir)}\n\n def handle_batch(self, batch):\n x, y = batch\n logits = self.model(x)\n\n self.batch = {\"features\": x, \"targets\": y, \"logits\": logits}\n\n\ndef train_from_runner():\n with TemporaryDirectory() as logdir:\n runner = CustomRunner(logdir)\n runner.run()\n\n\ndef train_from_config():\n with TemporaryDirectory() as logdir:\n dataset = DummyDataset(6)\n runner = SupervisedConfigRunner(\n config={\n \"args\": {\"logdir\": logdir},\n \"model\": {\"_target_\": \"DummyModel\", \"in_features\": 4, \"out_features\": 2},\n \"engine\": {\"_target_\": \"DataParallelAMPEngine\"},\n \"args\": {\"logdir\": logdir},\n \"stages\": {\n \"stage1\": {\n \"num_epochs\": 10,\n \"loaders\": {\"batch_size\": 4, \"num_workers\": 0},\n \"criterion\": {\"_target_\": \"MSELoss\"},\n \"optimizer\": {\"_target_\": \"Adam\", \"lr\": 1e-3},\n \"callbacks\": {\n \"criterion\": {\n \"_target_\": \"CriterionCallback\",\n \"metric_key\": \"loss\",\n \"input_key\": \"logits\",\n \"target_key\": \"targets\",\n },\n \"optimizer\": {\"_target_\": \"OptimizerCallback\", \"metric_key\": \"loss\"},\n \"test_nn_parallel_data_parallel\": {\n \"_target_\": \"DataParallelTypeChecker\"\n },\n \"test_loss_minimization\": 
{\n \"_target_\": \"LossMinimizationCallback\",\n \"key\": \"loss\",\n },\n \"test_logits_type\": {\"_target_\": \"TensorTypeChecker\", \"key\": \"logits\"},\n },\n },\n },\n }\n )\n runner.get_datasets = lambda *args, **kwargs: {\n \"train\": dataset,\n \"valid\": dataset,\n }\n runner.run()\n\n\[email protected](\n not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.amp_required),\n reason=\"Number of CUDA devices is less than 2 or no AMP found\",\n)\ndef test_parallel_amp():\n train_from_runner()\n\n\[email protected](\n not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.amp_required),\n reason=\"Number of CUDA devices is less than 2 or no AMP found\",\n)\ndef test_config_parallel_amp():\n train_from_config()\n"
] | [
[
"torch.nn.functional.softmax",
"torch.nn.CrossEntropyLoss",
"torch.nn.KLDivLoss",
"torch.nn.functional.log_softmax",
"torch.nn.Flatten",
"torch.nn.Linear",
"torch.no_grad"
],
[
"torch.optim.lr_scheduler.MultiStepLR",
"torch.utils.data.TensorDataset",
"torch.utils.data.DataLoader",
"torch.nn.Linear",
"torch.nn.BCEWithLogitsLoss",
"torch.rand"
],
[
"torch.ones",
"torch.randint",
"torch.randn",
"torch.eye",
"torch.tensor",
"numpy.concatenate",
"numpy.random.randint",
"torch.rand",
"numpy.random.binomial",
"numpy.random.uniform",
"numpy.isclose"
],
[
"torch.nn.Linear",
"torch.nn.CrossEntropyLoss",
"torch.nn.Flatten",
"torch.nn.ReLU"
],
[
"torch.ones",
"numpy.allclose",
"torch.Tensor",
"torch.zeros",
"numpy.arange",
"torch.tensor",
"numpy.isclose"
],
[
"torch.utils.data.DataLoader",
"torch.nn.MSELoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mcasanova1445/models | [
"7214e17eb425963ec3d0295be215d5d26deaeb32",
"7214e17eb425963ec3d0295be215d5d26deaeb32",
"7214e17eb425963ec3d0295be215d5d26deaeb32",
"7214e17eb425963ec3d0295be215d5d26deaeb32",
"7214e17eb425963ec3d0295be215d5d26deaeb32",
"7214e17eb425963ec3d0295be215d5d26deaeb32",
"37be0fdb4abccca633bb3199a4e6f3f71cd174d9",
"37be0fdb4abccca633bb3199a4e6f3f71cd174d9",
"7214e17eb425963ec3d0295be215d5d26deaeb32",
"7214e17eb425963ec3d0295be215d5d26deaeb32",
"37be0fdb4abccca633bb3199a4e6f3f71cd174d9",
"7214e17eb425963ec3d0295be215d5d26deaeb32",
"7214e17eb425963ec3d0295be215d5d26deaeb32",
"7214e17eb425963ec3d0295be215d5d26deaeb32",
"7214e17eb425963ec3d0295be215d5d26deaeb32",
"37be0fdb4abccca633bb3199a4e6f3f71cd174d9",
"7214e17eb425963ec3d0295be215d5d26deaeb32",
"7214e17eb425963ec3d0295be215d5d26deaeb32",
"7214e17eb425963ec3d0295be215d5d26deaeb32",
"7214e17eb425963ec3d0295be215d5d26deaeb32",
"37be0fdb4abccca633bb3199a4e6f3f71cd174d9",
"7214e17eb425963ec3d0295be215d5d26deaeb32",
"7214e17eb425963ec3d0295be215d5d26deaeb32"
] | [
"official/nlp/modeling/networks/albert_encoder_test.py",
"official/projects/edgetpu/vision/serving/tflite_imagenet_evaluator_run.py",
"official/nlp/tasks/masked_lm.py",
"official/nlp/data/classifier_data_lib_test.py",
"official/recommendation/ranking/common.py",
"official/legacy/bert/model_training_utils.py",
"official/vision/tasks/image_classification.py",
"official/vision/configs/video_classification_test.py",
"official/projects/movinet/tools/convert_3d_2plus1d.py",
"official/nlp/modeling/layers/reuse_transformer_test.py",
"official/projects/assemblenet/modeling/assemblenet.py",
"official/vision/beta/serving/export_saved_model_lib_test.py",
"official/projects/triviaqa/train.py",
"official/legacy/transformer/misc.py",
"official/vision/beta/projects/simclr/dataloaders/preprocess_ops.py",
"official/vision/beta/projects/centernet/ops/target_assigner_test.py",
"official/projects/nhnet/decoder_test.py",
"official/vision/beta/projects/yolo/modeling/layers/nn_blocks_test.py",
"official/vision/losses/loss_utils.py",
"official/vision/beta/configs/semantic_segmentation_test.py",
"official/projects/edgetpu/vision/dataloaders/classification_input.py",
"official/modeling/activations/gelu.py",
"orbit/actions/export_saved_model.py"
] | [
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for ALBERT transformer-based text encoder network.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import\nfrom official.nlp.modeling.networks import albert_encoder\n\n\n# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It\n# guarantees forward compatibility of this code for the V2 switchover.\n@keras_parameterized.run_all_keras_modes\nclass AlbertEncoderTest(keras_parameterized.TestCase):\n\n def tearDown(self):\n super(AlbertEncoderTest, self).tearDown()\n tf.keras.mixed_precision.set_global_policy(\"float32\")\n\n @parameterized.named_parameters(\n dict(testcase_name=\"default\", expected_dtype=tf.float32),\n dict(testcase_name=\"with_float16_dtype\", expected_dtype=tf.float16),\n )\n def test_network_creation(self, expected_dtype):\n hidden_size = 32\n sequence_length = 21\n\n kwargs = dict(\n vocab_size=100,\n hidden_size=hidden_size,\n num_attention_heads=2,\n num_layers=3)\n if expected_dtype == tf.float16:\n tf.keras.mixed_precision.set_global_policy(\"mixed_float16\")\n\n # Create a small TransformerEncoder for testing.\n test_network = albert_encoder.AlbertEncoder(**kwargs)\n\n # Create the inputs (note that the first dimension is implicit).\n word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n data, pooled = test_network([word_ids, mask, type_ids])\n\n expected_data_shape = [None, sequence_length, hidden_size]\n expected_pooled_shape = [None, hidden_size]\n self.assertAllEqual(expected_data_shape, data.shape.as_list())\n self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())\n\n # If float_dtype is set to float16, the data output is float32 (from a layer\n # norm) and pool output should be float16.\n self.assertEqual(tf.float32, data.dtype)\n self.assertEqual(expected_dtype, pooled.dtype)\n\n # ALBERT has additonal 'embedding_hidden_mapping_in' weights and\n # it shares transformer weights.\n self.assertNotEmpty(\n [x for x in test_network.weights if \"embedding_projection/\" in x.name])\n self.assertNotEmpty(\n [x for x in test_network.weights if \"transformer/\" in x.name])\n self.assertEmpty(\n [x for x in test_network.weights if \"transformer/layer\" in x.name])\n\n def test_network_invocation(self):\n hidden_size = 32\n sequence_length = 21\n vocab_size = 57\n num_types = 7\n num_layers = 3\n # Create a small TransformerEncoder for testing.\n test_network = albert_encoder.AlbertEncoder(\n vocab_size=vocab_size,\n embedding_width=8,\n hidden_size=hidden_size,\n num_attention_heads=2,\n num_layers=num_layers,\n 
type_vocab_size=num_types)\n # Create the inputs (note that the first dimension is implicit).\n word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n data, pooled = test_network([word_ids, mask, type_ids])\n\n # Create a model based off of this network:\n model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])\n\n # Invoke the model. We can't validate the output data here (the model is too\n # complex) but this will catch structural runtime errors.\n batch_size = 3\n word_id_data = np.random.randint(\n vocab_size, size=(batch_size, sequence_length))\n mask_data = np.random.randint(2, size=(batch_size, sequence_length))\n type_id_data = np.random.randint(\n num_types, size=(batch_size, sequence_length))\n list_outputs = model.predict([word_id_data, mask_data, type_id_data])\n\n # Creates a TransformerEncoder with max_sequence_length != sequence_length\n max_sequence_length = 128\n test_network = albert_encoder.AlbertEncoder(\n vocab_size=vocab_size,\n embedding_width=8,\n hidden_size=hidden_size,\n max_sequence_length=max_sequence_length,\n num_attention_heads=2,\n num_layers=num_layers,\n type_vocab_size=num_types)\n model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])\n _ = model.predict([word_id_data, mask_data, type_id_data])\n\n # Tests dictionary outputs.\n test_network_dict = albert_encoder.AlbertEncoder(\n vocab_size=vocab_size,\n embedding_width=8,\n hidden_size=hidden_size,\n max_sequence_length=max_sequence_length,\n num_attention_heads=2,\n num_layers=num_layers,\n type_vocab_size=num_types,\n dict_outputs=True)\n _ = test_network_dict([word_ids, mask, type_ids])\n test_network_dict.set_weights(test_network.get_weights())\n list_outputs = test_network([word_id_data, mask_data, type_id_data])\n dict_outputs = test_network_dict(\n dict(\n input_word_ids=word_id_data,\n input_mask=mask_data,\n input_type_ids=type_id_data))\n self.assertAllEqual(list_outputs[0], dict_outputs[\"sequence_output\"])\n self.assertAllEqual(list_outputs[1], dict_outputs[\"pooled_output\"])\n self.assertLen(dict_outputs[\"pooled_output\"], num_layers)\n\n def test_serialize_deserialize(self):\n tf.keras.mixed_precision.set_global_policy(\"mixed_float16\")\n # Create a network object that sets all of its config options.\n kwargs = dict(\n vocab_size=100,\n embedding_width=8,\n hidden_size=32,\n num_layers=3,\n num_attention_heads=2,\n max_sequence_length=21,\n type_vocab_size=12,\n intermediate_size=1223,\n activation=\"relu\",\n dropout_rate=0.05,\n attention_dropout_rate=0.22,\n initializer=\"glorot_uniform\")\n network = albert_encoder.AlbertEncoder(**kwargs)\n\n expected_config = dict(kwargs)\n expected_config[\"activation\"] = tf.keras.activations.serialize(\n tf.keras.activations.get(expected_config[\"activation\"]))\n expected_config[\"initializer\"] = tf.keras.initializers.serialize(\n tf.keras.initializers.get(expected_config[\"initializer\"]))\n self.assertEqual(network.get_config(), expected_config)\n\n # Create another network object from the first object's config.\n new_network = (\n albert_encoder.AlbertEncoder.from_config(\n network.get_config()))\n\n # Validate that the config can be forced to JSON.\n _ = new_network.to_json()\n\n # If the serialization was successful, the new config should match the old.\n self.assertAllEqual(network.get_config(), new_network.get_config())\n\n\nif __name__ == \"__main__\":\n 
tf.test.main()\n",
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Evaluates image classification accuracy using tflite_imagenet_evaluator.\n\nUsage:\ntflite_imagenet_evaluator_run --tflite_model_path=/PATH/TO/MODEL.tflite\n\"\"\"\n\nfrom typing import Sequence\nfrom absl import app\nfrom absl import flags\nimport tensorflow as tf\n\nfrom official.core import exp_factory\nfrom official.projects.edgetpu.vision.serving import tflite_imagenet_evaluator\nfrom official.projects.edgetpu.vision.tasks import image_classification\n\n\nflags.DEFINE_string('tflite_model_path', None,\n 'Path to the tflite file to be evaluated.')\nflags.DEFINE_integer('num_threads', 16, 'Number of local threads.')\nflags.DEFINE_integer('batch_size', 256, 'Batch size per thread.')\nflags.DEFINE_string(\n 'model_name', 'mobilenet_edgetpu_v2_xs',\n 'Model name to identify a registered data pipeline setup and use as the '\n 'validation dataset.')\n\nFLAGS = flags.FLAGS\n\n\ndef main(argv: Sequence[str]):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n\n with tf.io.gfile.GFile(FLAGS.tflite_model_path, 'rb') as f:\n model_content = f.read()\n\n config = exp_factory.get_exp_config(FLAGS.model_name)\n global_batch_size = FLAGS.num_threads * FLAGS.batch_size\n config.task.validation_data.global_batch_size = global_batch_size\n config.task.validation_data.dtype = 'float32'\n\n task = image_classification.EdgeTPUTask(config.task)\n dataset = task.build_inputs(config.task.validation_data)\n\n evaluator = tflite_imagenet_evaluator.AccuracyEvaluator(\n model_content=model_content,\n dataset=dataset,\n num_threads=FLAGS.num_threads)\n\n evals, corrects = evaluator.evaluate_all()\n accuracy = 100.0 * corrects / evals if evals > 0 else 0\n print('Final accuracy: {}, Evaluated: {}, Correct: {} '.format(\n accuracy, evals, corrects))\n\n\nif __name__ == '__main__':\n flags.mark_flag_as_required('tflite_model_path')\n app.run(main)\n",
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Masked language task.\"\"\"\n\nimport dataclasses\nimport tensorflow as tf\n\nfrom official.core import base_task\nfrom official.core import config_definitions as cfg\nfrom official.core import task_factory\nfrom official.modeling import tf_utils\nfrom official.nlp.configs import bert\nfrom official.nlp.configs import encoders\nfrom official.nlp.data import data_loader_factory\nfrom official.nlp.modeling import layers\nfrom official.nlp.modeling import models\n\n\[email protected]\nclass MaskedLMConfig(cfg.TaskConfig):\n \"\"\"The model config.\"\"\"\n model: bert.PretrainerConfig = bert.PretrainerConfig(cls_heads=[\n bert.ClsHeadConfig(\n inner_dim=768, num_classes=2, dropout_rate=0.1, name='next_sentence')\n ])\n # TODO(b/154564893): Mathematically, scale_loss should be True.\n # However, it works better with scale_loss being False.\n scale_loss: bool = False\n train_data: cfg.DataConfig = cfg.DataConfig()\n validation_data: cfg.DataConfig = cfg.DataConfig()\n\n\n@task_factory.register_task_cls(MaskedLMConfig)\nclass MaskedLMTask(base_task.Task):\n \"\"\"Task object for Mask language modeling.\"\"\"\n\n def _build_encoder(self, encoder_cfg):\n return encoders.build_encoder(encoder_cfg)\n\n def build_model(self, params=None):\n config = params or self.task_config.model\n encoder_cfg = config.encoder\n encoder_network = self._build_encoder(encoder_cfg)\n cls_heads = [\n layers.ClassificationHead(**cfg.as_dict()) for cfg in config.cls_heads\n ] if config.cls_heads else []\n return models.BertPretrainerV2(\n mlm_activation=tf_utils.get_activation(config.mlm_activation),\n mlm_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=config.mlm_initializer_range),\n encoder_network=encoder_network,\n classification_heads=cls_heads)\n\n def build_losses(self,\n labels,\n model_outputs,\n metrics,\n aux_losses=None) -> tf.Tensor:\n with tf.name_scope('MaskedLMTask/losses'):\n metrics = dict([(metric.name, metric) for metric in metrics])\n lm_prediction_losses = tf.keras.losses.sparse_categorical_crossentropy(\n labels['masked_lm_ids'],\n tf.cast(model_outputs['mlm_logits'], tf.float32),\n from_logits=True)\n lm_label_weights = labels['masked_lm_weights']\n lm_numerator_loss = tf.reduce_sum(lm_prediction_losses *\n lm_label_weights)\n lm_denominator_loss = tf.reduce_sum(lm_label_weights)\n mlm_loss = tf.math.divide_no_nan(lm_numerator_loss, lm_denominator_loss)\n metrics['lm_example_loss'].update_state(mlm_loss)\n if 'next_sentence_labels' in labels:\n sentence_labels = labels['next_sentence_labels']\n sentence_outputs = tf.cast(\n model_outputs['next_sentence'], dtype=tf.float32)\n sentence_loss = tf.reduce_mean(\n tf.keras.losses.sparse_categorical_crossentropy(\n sentence_labels, sentence_outputs, from_logits=True))\n metrics['next_sentence_loss'].update_state(sentence_loss)\n total_loss = mlm_loss + sentence_loss\n else:\n total_loss = mlm_loss\n\n if aux_losses:\n 
total_loss += tf.add_n(aux_losses)\n return total_loss\n\n def build_inputs(self, params, input_context=None):\n \"\"\"Returns tf.data.Dataset for pretraining.\"\"\"\n if params.input_path == 'dummy':\n\n def dummy_data(_):\n dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)\n dummy_lm = tf.zeros((1, params.max_predictions_per_seq), dtype=tf.int32)\n return dict(\n input_word_ids=dummy_ids,\n input_mask=dummy_ids,\n input_type_ids=dummy_ids,\n masked_lm_positions=dummy_lm,\n masked_lm_ids=dummy_lm,\n masked_lm_weights=tf.cast(dummy_lm, dtype=tf.float32),\n next_sentence_labels=tf.zeros((1, 1), dtype=tf.int32))\n\n dataset = tf.data.Dataset.range(1)\n dataset = dataset.repeat()\n dataset = dataset.map(\n dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n return dataset\n\n return data_loader_factory.get_data_loader(params).load(input_context)\n\n def build_metrics(self, training=None):\n del training\n metrics = [\n tf.keras.metrics.SparseCategoricalAccuracy(name='masked_lm_accuracy'),\n tf.keras.metrics.Mean(name='lm_example_loss')\n ]\n # TODO(hongkuny): rethink how to manage metrics creation with heads.\n if self.task_config.train_data.use_next_sentence_label:\n metrics.append(\n tf.keras.metrics.SparseCategoricalAccuracy(\n name='next_sentence_accuracy'))\n metrics.append(tf.keras.metrics.Mean(name='next_sentence_loss'))\n return metrics\n\n def process_metrics(self, metrics, labels, model_outputs):\n with tf.name_scope('MaskedLMTask/process_metrics'):\n metrics = dict([(metric.name, metric) for metric in metrics])\n if 'masked_lm_accuracy' in metrics:\n metrics['masked_lm_accuracy'].update_state(\n labels['masked_lm_ids'], model_outputs['mlm_logits'],\n labels['masked_lm_weights'])\n if 'next_sentence_accuracy' in metrics:\n metrics['next_sentence_accuracy'].update_state(\n labels['next_sentence_labels'], model_outputs['next_sentence'])\n\n def train_step(self, inputs, model: tf.keras.Model,\n optimizer: tf.keras.optimizers.Optimizer, metrics):\n \"\"\"Does forward and backward.\n\n Args:\n inputs: a dictionary of input tensors.\n model: the model, forward pass definition.\n optimizer: the optimizer for this training step.\n metrics: a nested structure of metrics objects.\n\n Returns:\n A dictionary of logs.\n \"\"\"\n with tf.GradientTape() as tape:\n outputs = model(inputs, training=True)\n # Computes per-replica loss.\n loss = self.build_losses(\n labels=inputs,\n model_outputs=outputs,\n metrics=metrics,\n aux_losses=model.losses)\n if self.task_config.scale_loss:\n # Scales loss as the default gradients allreduce performs sum inside the\n # optimizer.\n scaled_loss = loss / tf.distribute.get_strategy().num_replicas_in_sync\n tvars = model.trainable_variables\n if self.task_config.scale_loss:\n grads = tape.gradient(scaled_loss, tvars)\n else:\n grads = tape.gradient(loss, tvars)\n optimizer.apply_gradients(list(zip(grads, tvars)))\n self.process_metrics(metrics, inputs, outputs)\n return {self.loss: loss}\n\n def validation_step(self, inputs, model: tf.keras.Model, metrics):\n \"\"\"Validatation step.\n\n Args:\n inputs: a dictionary of input tensors.\n model: the keras.Model.\n metrics: a nested structure of metrics objects.\n\n Returns:\n A dictionary of logs.\n \"\"\"\n outputs = self.inference_step(inputs, model)\n loss = self.build_losses(\n labels=inputs,\n model_outputs=outputs,\n metrics=metrics,\n aux_losses=model.losses)\n self.process_metrics(metrics, inputs, outputs)\n return {self.loss: loss}\n",
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for third_party.tensorflow_models.official.nlp.data.classifier_data_lib.\"\"\"\n\nimport os\nimport tempfile\n\nfrom absl.testing import parameterized\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\n\nfrom official.nlp.data import classifier_data_lib\nfrom official.nlp.tools import tokenization\n\n\ndef decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n return tf.io.parse_single_example(record, name_to_features)\n\n\nclass BertClassifierLibTest(tf.test.TestCase, parameterized.TestCase):\n\n def setUp(self):\n super(BertClassifierLibTest, self).setUp()\n self.model_dir = self.get_temp_dir()\n self.processors = {\n \"CB\": classifier_data_lib.CBProcessor,\n \"SUPERGLUE-RTE\": classifier_data_lib.SuperGLUERTEProcessor,\n \"BOOLQ\": classifier_data_lib.BoolQProcessor,\n \"WIC\": classifier_data_lib.WiCProcessor,\n }\n\n vocab_tokens = [\n \"[UNK]\", \"[CLS]\", \"[SEP]\", \"want\", \"##want\", \"##ed\", \"wa\", \"un\", \"runn\",\n \"##ing\", \",\"\n ]\n with tempfile.NamedTemporaryFile(delete=False) as vocab_writer:\n vocab_writer.write(\"\".join([x + \"\\n\" for x in vocab_tokens\n ]).encode(\"utf-8\"))\n vocab_file = vocab_writer.name\n self.tokenizer = tokenization.FullTokenizer(vocab_file)\n\n @parameterized.parameters(\n {\"task_type\": \"CB\"},\n {\"task_type\": \"BOOLQ\"},\n {\"task_type\": \"SUPERGLUE-RTE\"},\n {\"task_type\": \"WIC\"},\n )\n def test_generate_dataset_from_tfds_processor(self, task_type):\n with tfds.testing.mock_data(num_examples=5):\n output_path = os.path.join(self.model_dir, task_type)\n\n processor = self.processors[task_type]()\n\n classifier_data_lib.generate_tf_record_from_data_file(\n processor,\n None,\n self.tokenizer,\n train_data_output_path=output_path,\n eval_data_output_path=output_path,\n test_data_output_path=output_path)\n files = tf.io.gfile.glob(output_path)\n self.assertNotEmpty(files)\n\n train_dataset = tf.data.TFRecordDataset(output_path)\n seq_length = 128\n label_type = tf.int64\n name_to_features = {\n \"input_ids\": tf.io.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.io.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.io.FixedLenFeature([seq_length], tf.int64),\n \"label_ids\": tf.io.FixedLenFeature([], label_type),\n }\n train_dataset = train_dataset.map(\n lambda record: decode_record(record, name_to_features))\n\n # If data is retrieved without error, then all requirements\n # including data type/shapes are met.\n _ = next(iter(train_dataset))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Flags and common definitions for Ranking Models.\"\"\"\n\nfrom absl import flags\nimport tensorflow as tf\n\nfrom official.common import flags as tfm_flags\n\nFLAGS = flags.FLAGS\n\n\ndef define_flags() -> None:\n \"\"\"Defines flags for training the Ranking model.\"\"\"\n tfm_flags.define_flags()\n\n FLAGS.set_default(name='experiment', value='dlrm_criteo')\n FLAGS.set_default(name='mode', value='train_and_eval')\n\n flags.DEFINE_integer(\n name='seed',\n default=None,\n help='This value will be used to seed both NumPy and TensorFlow.')\n flags.DEFINE_string(\n name='profile_steps',\n default='20,40',\n help='Save profiling data to model dir at given range of global steps. '\n 'The value must be a comma separated pair of positive integers, '\n 'specifying the first and last step to profile. For example, '\n '\"--profile_steps=2,4\" triggers the profiler to process 3 steps, starting'\n ' from the 2nd step. Note that profiler has a non-trivial performance '\n 'overhead, and the output file can be gigantic if profiling many steps.')\n\n\[email protected]_keras_serializable(package='RANKING')\nclass WarmUpAndPolyDecay(tf.keras.optimizers.schedules.LearningRateSchedule):\n \"\"\"Learning rate callable for the embeddings.\n\n Linear warmup on [0, warmup_steps] then\n Constant on [warmup_steps, decay_start_steps]\n And polynomial decay on [decay_start_steps, decay_start_steps + decay_steps].\n \"\"\"\n\n def __init__(self,\n batch_size: int,\n decay_exp: float = 2.0,\n learning_rate: float = 40.0,\n warmup_steps: int = 8000,\n decay_steps: int = 12000,\n decay_start_steps: int = 10000):\n super(WarmUpAndPolyDecay, self).__init__()\n self.batch_size = batch_size\n self.decay_exp = decay_exp\n self.learning_rate = learning_rate\n self.warmup_steps = warmup_steps\n self.decay_steps = decay_steps\n self.decay_start_steps = decay_start_steps\n\n def __call__(self, step):\n decay_exp = self.decay_exp\n learning_rate = self.learning_rate\n warmup_steps = self.warmup_steps\n decay_steps = self.decay_steps\n decay_start_steps = self.decay_start_steps\n\n scal = self.batch_size / 2048\n\n adj_lr = learning_rate * scal\n if warmup_steps == 0:\n return adj_lr\n\n warmup_lr = step / warmup_steps * adj_lr\n global_step = tf.cast(step, tf.float32)\n decay_steps = tf.cast(decay_steps, tf.float32)\n decay_start_step = tf.cast(decay_start_steps, tf.float32)\n warmup_lr = tf.cast(warmup_lr, tf.float32)\n\n steps_since_decay_start = global_step - decay_start_step\n already_decayed_steps = tf.minimum(steps_since_decay_start, decay_steps)\n decay_lr = adj_lr * (\n (decay_steps - already_decayed_steps) / decay_steps)**decay_exp\n decay_lr = tf.maximum(0.0001, decay_lr)\n\n lr = tf.where(\n global_step < warmup_steps, warmup_lr,\n tf.where(\n tf.logical_and(decay_steps > 0, global_step > decay_start_step),\n decay_lr, adj_lr))\n\n lr = tf.maximum(0.01, lr)\n return lr\n\n def get_config(self):\n 
return {\n 'batch_size': self.batch_size,\n 'decay_exp': self.decay_exp,\n 'learning_rate': self.learning_rate,\n 'warmup_steps': self.warmup_steps,\n 'decay_steps': self.decay_steps,\n 'decay_start_steps': self.decay_start_steps\n }\n",
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A light weight utilities to train NLP models.\"\"\"\n\nimport json\nimport os\nimport tempfile\n\nfrom absl import logging\nimport tensorflow as tf\nfrom tensorflow.python.util import deprecation\nfrom official.common import distribute_utils\nfrom official.modeling import grad_utils\n\n_SUMMARY_TXT = 'training_summary.txt'\n_MIN_SUMMARY_STEPS = 10\n\n\ndef _should_export_checkpoint(strategy):\n return (not strategy) or strategy.extended.should_checkpoint\n\n\ndef _should_export_summary(strategy):\n return (not strategy) or strategy.extended.should_save_summary\n\n\ndef _save_checkpoint(strategy, checkpoint, model_dir, checkpoint_prefix):\n \"\"\"Saves model to with provided checkpoint prefix.\"\"\"\n\n if _should_export_checkpoint(strategy):\n checkpoint_path = os.path.join(model_dir, checkpoint_prefix)\n saved_path = checkpoint.save(checkpoint_path)\n logging.info('Saving model as TF checkpoint: %s', saved_path)\n else:\n # In multi worker training we need every worker to save checkpoint, because\n # variables can trigger synchronization on read and synchronization needs\n # all workers to participate. To avoid workers overriding each other we save\n # to a temporary directory on non-chief workers.\n tmp_dir = tempfile.mkdtemp()\n checkpoint.save(os.path.join(tmp_dir, 'ckpt'))\n tf.io.gfile.rmtree(tmp_dir)\n return\n\n\ndef _get_input_iterator(input_fn, strategy):\n \"\"\"Returns distributed dataset iterator.\"\"\"\n # When training with TPU pods, datasets needs to be cloned across\n # workers. 
Since Dataset instance cannot be cloned in eager mode, we instead\n # pass callable that returns a dataset.\n if not callable(input_fn):\n raise ValueError('`input_fn` should be a closure that returns a dataset.')\n iterator = iter(strategy.distribute_datasets_from_function(input_fn))\n return iterator\n\n\ndef _float_metric_value(metric):\n \"\"\"Gets the value of a float-value keras metric.\"\"\"\n return metric.result().numpy().astype(float)\n\n\ndef clip_by_global_norm_callback(grads_and_vars):\n \"\"\"Performs gradient clipping.\"\"\"\n grads, variables = zip(*grads_and_vars)\n (clipped_grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)\n return zip(clipped_grads, variables)\n\n\ndef steps_to_run(current_step, steps_per_epoch, steps_per_loop):\n \"\"\"Calculates steps to run on device.\"\"\"\n if steps_per_loop <= 0:\n raise ValueError('steps_per_loop should be positive integer.')\n if steps_per_loop == 1:\n return steps_per_loop\n remainder_in_epoch = current_step % steps_per_epoch\n if remainder_in_epoch != 0:\n return min(steps_per_epoch - remainder_in_epoch, steps_per_loop)\n else:\n return steps_per_loop\n\n\ndef write_txt_summary(training_summary, summary_dir):\n \"\"\"Writes a summary text file to record stats.\"\"\"\n if not tf.io.gfile.exists(summary_dir):\n tf.io.gfile.mkdir(summary_dir)\n summary_path = os.path.join(summary_dir, _SUMMARY_TXT)\n with tf.io.gfile.GFile(summary_path, 'wb') as f:\n logging.info('Training Summary: \\n%s', str(training_summary))\n f.write(json.dumps(training_summary, indent=4))\n\n\[email protected](\n None, 'This function is deprecated and we do not expect adding new '\n 'functionalities. Please do not have your code depending '\n 'on this library.')\ndef run_customized_training_loop(\n # pylint: disable=invalid-name\n _sentinel=None,\n # pylint: enable=invalid-name\n strategy=None,\n model_fn=None,\n loss_fn=None,\n scale_loss=True,\n model_dir=None,\n train_input_fn=None,\n steps_per_epoch=None,\n num_eval_per_epoch=1,\n steps_per_loop=None,\n epochs=1,\n eval_input_fn=None,\n eval_steps=None,\n metric_fn=None,\n init_checkpoint=None,\n custom_callbacks=None,\n run_eagerly=False,\n sub_model_export_name=None,\n explicit_allreduce=False,\n pre_allreduce_callbacks=None,\n post_allreduce_callbacks=None,\n train_summary_interval=0,\n allreduce_bytes_per_pack=0):\n \"\"\"Run BERT pretrain model training using low-level API.\n\n Args:\n _sentinel: Used to prevent positional parameters. Internal, do not use.\n strategy: Distribution strategy on which to run low level training loop.\n model_fn: Function that returns a tuple (model, sub_model). Caller of this\n function should add optimizer to the `model` via calling\n `model.compile()` API or manually setting `model.optimizer` attribute.\n Second element of the returned tuple(sub_model) is an optional sub model\n to be used for initial checkpoint -- if provided.\n loss_fn: Function with signature func(labels, logits) and returns a loss\n tensor.\n scale_loss: Whether to divide the raw loss by number of replicas before\n gradients calculation.\n model_dir: Model directory used during training for restoring/saving model\n weights.\n train_input_fn: Function that returns a tf.data.Dataset used for training.\n steps_per_epoch: Number of steps to run per epoch. At the end of each\n epoch, model checkpoint will be saved and evaluation will be conducted\n if evaluation dataset is provided.\n num_eval_per_epoch: Number of evaluations per epoch.\n steps_per_loop: Number of steps per graph-mode loop. 
In order to reduce\n communication in eager context, training logs are printed every\n steps_per_loop.\n epochs: Number of epochs to train.\n eval_input_fn: Function that returns evaluation dataset. If none,\n evaluation is skipped.\n eval_steps: Number of steps to run evaluation. Required if `eval_input_fn`\n is not none.\n metric_fn: A metrics function that returns either a Keras Metric object or\n a list of Keras Metric objects to record evaluation result using\n evaluation dataset or with training dataset after every epoch.\n init_checkpoint: Optional checkpoint to load to `sub_model` returned by\n `model_fn`.\n custom_callbacks: A list of Keras Callbacks objects to run during\n training. More specifically, `on_train_begin(), on_train_end(),\n on_batch_begin()`, `on_batch_end()`, `on_epoch_begin()`,\n `on_epoch_end()` methods are invoked during training. Note that some\n metrics may be missing from `logs`.\n run_eagerly: Whether to run model training in pure eager execution. This\n should be disable for TPUStrategy.\n sub_model_export_name: If not None, will export `sub_model` returned by\n `model_fn` into checkpoint files. The name of intermediate checkpoint\n file is {sub_model_export_name}_step_{step}.ckpt and the last\n checkpint's name is {sub_model_export_name}.ckpt; if None, `sub_model`\n will not be exported as checkpoint.\n explicit_allreduce: Whether to explicitly perform gradient allreduce,\n instead of relying on implicit allreduce in optimizer.apply_gradients().\n default is False. For now, if training using FP16 mixed precision,\n explicit allreduce will aggregate gradients in FP16 format. For TPU and\n GPU training using FP32, explicit allreduce will aggregate gradients in\n FP32 format.\n pre_allreduce_callbacks: A list of callback functions that takes gradients\n and model variables pairs as input, manipulate them, and returns a new\n gradients and model variables paris. The callback functions will be\n invoked in the list order and before gradients are allreduced. With\n mixed precision training, the pre_allreduce_allbacks will be applied on\n scaled_gradients. Default is no callbacks. Only used when\n explicit_allreduce=True.\n post_allreduce_callbacks: A list of callback functions that takes\n gradients and model variables pairs as input, manipulate them, and\n returns a new gradients and model variables paris. The callback\n functions will be invoked in the list order and right before gradients\n are applied to variables for updates. Default is no callbacks. Only used\n when explicit_allreduce=True.\n train_summary_interval: Step interval for training summaries. If the value\n is a negative number, then training summaries are not enabled.\n allreduce_bytes_per_pack: A non-negative integer. Breaks collective\n operations into packs of certain size. If it's zero, all gradients are\n in one pack. Breaking gradient into packs could enable overlap between\n allreduce and backprop computation. This flag only takes effect when\n explicit_allreduce is set to True.'\n\n Returns:\n Trained model.\n\n Raises:\n ValueError: (1) When model returned by `model_fn` does not have optimizer\n attribute or when required parameters are set to none. (2) eval args are\n not specified correctly. 
(3) metric_fn must be a callable if specified.\n (4) sub_model_checkpoint_name is specified, but `sub_model` returned\n by `model_fn` is None.\n \"\"\"\n\n if _sentinel is not None:\n raise ValueError('only call `run_customized_training_loop()` '\n 'with named arguments.')\n\n required_arguments = [\n strategy, model_fn, loss_fn, model_dir, steps_per_epoch, train_input_fn\n ]\n\n steps_between_evals = int(steps_per_epoch / num_eval_per_epoch)\n if [arg for arg in required_arguments if arg is None]:\n raise ValueError('`strategy`, `model_fn`, `loss_fn`, `model_dir`, '\n '`steps_per_epoch` and `train_input_fn` are required '\n 'parameters.')\n if not steps_per_loop:\n if tf.config.list_logical_devices('TPU'):\n # One can't fully utilize a TPU with steps_per_loop=1, so in this case\n # default users to a more useful value.\n steps_per_loop = min(1000, steps_between_evals)\n else:\n steps_per_loop = 1\n logging.info('steps_per_loop not specified. Using steps_per_loop=%d',\n steps_per_loop)\n if steps_per_loop > steps_between_evals:\n logging.warning(\n 'steps_per_loop: %d is specified to be greater than '\n ' steps_between_evals: %d, we will use steps_between_evals as'\n ' steps_per_loop.', steps_per_loop, steps_between_evals)\n steps_per_loop = steps_between_evals\n assert tf.executing_eagerly()\n\n if run_eagerly:\n if isinstance(\n strategy,\n (tf.distribute.TPUStrategy, tf.distribute.experimental.TPUStrategy)):\n raise ValueError(\n 'TPUStrategy should not run eagerly as it heavily relies on graph'\n ' optimization for the distributed system.')\n\n if eval_input_fn and eval_steps is None:\n raise ValueError(\n '`eval_step` is required when `eval_input_fn ` is not none.')\n if metric_fn and not callable(metric_fn):\n raise ValueError(\n 'if `metric_fn` is specified, metric_fn must be a callable.')\n\n total_training_steps = steps_per_epoch * epochs\n train_iterator = _get_input_iterator(train_input_fn, strategy)\n eval_loss_metric = tf.keras.metrics.Mean('training_loss', dtype=tf.float32)\n\n with distribute_utils.get_strategy_scope(strategy):\n # To correctly place the model weights on accelerators,\n # model and optimizer should be created in scope.\n model, sub_model = model_fn()\n if not hasattr(model, 'optimizer'):\n raise ValueError('User should set optimizer attribute to model '\n 'inside `model_fn`.')\n if sub_model_export_name and sub_model is None:\n raise ValueError('sub_model_export_name is specified as %s, but '\n 'sub_model is None.' 
% sub_model_export_name)\n\n callback_list = tf.keras.callbacks.CallbackList(\n callbacks=custom_callbacks, model=model)\n\n optimizer = model.optimizer\n\n if init_checkpoint:\n logging.info(\n 'Checkpoint file %s found and restoring from '\n 'initial checkpoint for core model.', init_checkpoint)\n checkpoint = tf.train.Checkpoint(model=sub_model, encoder=sub_model)\n checkpoint.read(init_checkpoint).assert_existing_objects_matched()\n logging.info('Loading from checkpoint file completed')\n\n train_loss_metric = tf.keras.metrics.Mean('training_loss', dtype=tf.float32)\n eval_metrics = metric_fn() if metric_fn else []\n if not isinstance(eval_metrics, list):\n eval_metrics = [eval_metrics]\n # If evaluation is required, make a copy of metric as it will be used by\n # both train and evaluation.\n train_metrics = [\n metric.__class__.from_config(metric.get_config())\n for metric in eval_metrics\n ]\n\n # Create summary writers\n if _should_export_summary(strategy):\n summary_dir = os.path.join(model_dir, 'summaries')\n else:\n # In multi worker training we need every worker to write summary, because\n # variables can trigger synchronization on read and synchronization needs\n # all workers to participate.\n summary_dir = tempfile.mkdtemp()\n eval_summary_writer = tf.summary.create_file_writer(\n os.path.join(summary_dir, 'eval'))\n last_summary_step = 0\n if steps_per_loop >= _MIN_SUMMARY_STEPS and train_summary_interval >= 0:\n # Only writes summary when the stats are collected sufficiently over\n # enough steps.\n train_summary_writer = tf.summary.create_file_writer(\n os.path.join(summary_dir, 'train'))\n else:\n train_summary_writer = tf.summary.create_noop_writer()\n\n # Collects training variables.\n training_vars = model.trainable_variables\n\n def _replicated_step(inputs):\n \"\"\"Replicated training step.\"\"\"\n\n inputs, labels = inputs\n with tf.GradientTape() as tape:\n model_outputs = model(inputs, training=True)\n loss = loss_fn(labels, model_outputs)\n # Raw loss is used for reporting in metrics/logs.\n raw_loss = loss\n if scale_loss:\n # Scales down the loss for gradients to be invariant from replicas.\n loss = loss / strategy.num_replicas_in_sync\n\n if explicit_allreduce:\n grad_utils.minimize_using_explicit_allreduce(tape, optimizer, loss,\n training_vars,\n pre_allreduce_callbacks,\n post_allreduce_callbacks,\n allreduce_bytes_per_pack)\n else:\n if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):\n with tape:\n scaled_loss = optimizer.get_scaled_loss(loss)\n scaled_grads = tape.gradient(scaled_loss, training_vars)\n grads = optimizer.get_unscaled_gradients(scaled_grads)\n else:\n grads = tape.gradient(loss, training_vars)\n optimizer.apply_gradients(zip(grads, training_vars))\n # For reporting, the metric takes the mean of losses.\n train_loss_metric.update_state(raw_loss)\n for metric in train_metrics:\n metric.update_state(labels, model_outputs)\n\n @tf.function\n def train_steps(iterator, steps):\n \"\"\"Performs distributed training steps in a loop.\n\n Args:\n iterator: the distributed iterator of training datasets.\n steps: an tf.int32 integer tensor to specify number of steps to run\n inside host training loop.\n\n Raises:\n ValueError: Any of the arguments or tensor shapes are invalid.\n \"\"\"\n if not isinstance(steps, tf.Tensor):\n raise ValueError('steps should be an Tensor. 
Python object may cause '\n 'retracing.')\n\n for _ in tf.range(steps):\n strategy.run(_replicated_step, args=(next(iterator),))\n\n def train_single_step(iterator):\n \"\"\"Performs a distributed training step.\n\n Args:\n iterator: the distributed iterator of training datasets.\n\n Raises:\n ValueError: Any of the arguments or tensor shapes are invalid.\n \"\"\"\n strategy.run(_replicated_step, args=(next(iterator),))\n\n def test_step(iterator):\n \"\"\"Calculates evaluation metrics on distributed devices.\"\"\"\n\n def _test_step_fn(inputs):\n \"\"\"Replicated accuracy calculation.\"\"\"\n\n inputs, labels = inputs\n model_outputs = model(inputs, training=False)\n for metric in eval_metrics:\n metric.update_state(labels, model_outputs)\n return model_outputs, labels\n\n outputs, labels = strategy.run(_test_step_fn, args=(next(iterator),))\n outputs = tf.nest.map_structure(strategy.experimental_local_results,\n outputs)\n labels = tf.nest.map_structure(strategy.experimental_local_results,\n labels)\n return outputs, labels\n\n if not run_eagerly:\n train_single_step = tf.function(train_single_step)\n test_step = tf.function(test_step)\n\n def _run_evaluation(current_training_step, test_iterator):\n \"\"\"Runs validation steps and aggregate metrics.\n\n Args:\n current_training_step: tf.int32 tensor containing the current step.\n test_iterator: distributed iterator of test datasets.\n\n Returns:\n A dict of metic names and values.\n \"\"\"\n # The last batch of the evaluation is often smaller than previous ones.\n # Moreover, in some distributed pieces it might even be empty. Therefore,\n # different from the way training_loss is calculated, it is needed to\n # gather all the logits and labels here to calculate the evaluation loss\n # outside.\n loss_list, loss_weights = list(), list()\n for _ in range(eval_steps):\n outputs, labels = test_step(test_iterator)\n for cur_logits, cur_labels in zip(outputs, labels):\n # This is to handle cases when cur_labels is not a single tensor,\n # but a dict of tensors.\n cur_weight = tf.shape(tf.nest.flatten(cur_labels)[0])[0]\n if cur_weight != 0:\n loss_list.append(loss_fn(cur_labels, cur_logits).numpy())\n loss_weights.append(cur_weight)\n # The sample_weights are the actual number of examples in each batch,\n # a summation of numbers of examples in each replica if using\n # distributed training.\n eval_loss_metric.update_state(loss_list, sample_weight=loss_weights)\n\n logs = {}\n with eval_summary_writer.as_default():\n for metric in [eval_loss_metric] + eval_metrics + model.metrics:\n metric_value = _float_metric_value(metric)\n logs[metric.name] = metric_value\n logging.info('Step: [%d] Validation %s = %f', current_training_step,\n metric.name, metric_value)\n tf.summary.scalar(\n metric.name, metric_value, step=current_training_step)\n eval_summary_writer.flush()\n\n return logs\n\n # Training loop starts here.\n checkpoint = tf.train.Checkpoint(\n model=model, optimizer=optimizer, global_step=optimizer.iterations)\n sub_model_checkpoint = tf.train.Checkpoint(\n model=sub_model,\n global_step=optimizer.iterations) if sub_model_export_name else None\n\n latest_checkpoint_file = tf.train.latest_checkpoint(model_dir)\n if latest_checkpoint_file:\n logging.info('Checkpoint file %s found and restoring from '\n 'checkpoint', latest_checkpoint_file)\n checkpoint.restore(latest_checkpoint_file)\n logging.info('Loading from checkpoint file completed')\n\n current_step = optimizer.iterations.numpy()\n checkpoint_name = 'ctl_step_{step}.ckpt'\n\n logs = 
{}\n callback_list.on_train_begin()\n while current_step < total_training_steps and not model.stop_training:\n if current_step % steps_per_epoch == 0:\n callback_list.on_epoch_begin(int(current_step / steps_per_epoch) + 1)\n\n # Training loss/metric are taking average over steps inside micro\n # training loop. We reset the their values before each round.\n train_loss_metric.reset_states()\n for metric in train_metrics + model.metrics:\n metric.reset_states()\n\n callback_list.on_batch_begin(current_step)\n # Runs several steps in the host while loop.\n steps = steps_to_run(current_step, steps_between_evals, steps_per_loop)\n\n if tf.config.list_physical_devices('GPU'):\n # TODO(zongweiz): merge with train_steps once tf.while_loop\n # GPU performance bugs are fixed.\n for _ in range(steps):\n train_single_step(train_iterator)\n else:\n # Converts steps to a Tensor to avoid tf.function retracing.\n train_steps(train_iterator, tf.convert_to_tensor(steps, dtype=tf.int32))\n train_loss = _float_metric_value(train_loss_metric)\n current_step += steps\n\n # Updates training logging.\n training_status = 'Train Step: %d/%d / loss = %s' % (\n current_step, total_training_steps, train_loss)\n\n if current_step >= last_summary_step + train_summary_interval:\n summary_writer = train_summary_writer\n last_summary_step = current_step\n else:\n summary_writer = tf.summary.create_noop_writer()\n\n with summary_writer.as_default():\n if callable(optimizer.learning_rate):\n tf.summary.scalar(\n 'learning_rate',\n optimizer.learning_rate(current_step),\n step=current_step)\n tf.summary.scalar(train_loss_metric.name, train_loss, step=current_step)\n for metric in train_metrics + model.metrics:\n metric_value = _float_metric_value(metric)\n training_status += ' %s = %f' % (metric.name, metric_value)\n tf.summary.scalar(metric.name, metric_value, step=current_step)\n summary_writer.flush()\n logging.info(training_status)\n\n # If no need for evaluation, we only call on_batch_end with train_loss,\n # this is to ensure we get granular global_step/sec on Tensorboard.\n if current_step % steps_between_evals:\n callback_list.on_batch_end(current_step - 1, {'loss': train_loss})\n else:\n # Save a submodel with the step in the file name after each epoch.\n if sub_model_export_name:\n _save_checkpoint(\n strategy, sub_model_checkpoint, model_dir,\n '%s_step_%d.ckpt' % (sub_model_export_name, current_step))\n\n # Save model checkpoints and run validation steps after each epoch\n # (with the exception of the final epoch which is handled after the\n # training loop).\n if current_step < total_training_steps:\n _save_checkpoint(strategy, checkpoint, model_dir,\n checkpoint_name.format(step=current_step))\n if eval_input_fn:\n # Re-initialize evaluation metric.\n eval_loss_metric.reset_states()\n for metric in eval_metrics + model.metrics:\n metric.reset_states()\n\n logging.info('Running evaluation after step: %s.', current_step)\n logs = _run_evaluation(current_step,\n _get_input_iterator(eval_input_fn, strategy))\n # We add train_loss here rather than call on_batch_end twice to make\n # sure that no duplicated values are generated.\n logs['loss'] = train_loss\n callback_list.on_batch_end(current_step - 1, logs)\n\n # Calls on_epoch_end after each real epoch ends to prevent mis-calculation\n # of training steps.\n if current_step % steps_per_epoch == 0:\n callback_list.on_epoch_end(int(current_step / steps_per_epoch), logs)\n\n if sub_model_export_name:\n _save_checkpoint(strategy, sub_model_checkpoint, model_dir,\n 
'%s.ckpt' % sub_model_export_name)\n\n _save_checkpoint(strategy, checkpoint, model_dir,\n checkpoint_name.format(step=current_step))\n if eval_input_fn:\n # Re-initialize evaluation metric.\n eval_loss_metric.reset_states()\n for metric in eval_metrics + model.metrics:\n metric.reset_states()\n\n logging.info('Running final evaluation after training is complete.')\n logs = _run_evaluation(current_step,\n _get_input_iterator(eval_input_fn, strategy))\n callback_list.on_epoch_end(int(current_step / steps_per_epoch), logs)\n training_summary = {\n 'total_training_steps': total_training_steps,\n 'train_loss': _float_metric_value(train_loss_metric),\n }\n for metric in model.metrics:\n training_summary[metric.name] = _float_metric_value(metric)\n if eval_metrics:\n training_summary['last_train_metrics'] = _float_metric_value(\n train_metrics[0])\n training_summary['eval_metrics'] = _float_metric_value(eval_metrics[0])\n\n write_txt_summary(training_summary, summary_dir)\n\n if not _should_export_summary(strategy):\n tf.io.gfile.rmtree(summary_dir)\n\n callback_list.on_train_end()\n\n return model\n",
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Image classification task definition.\"\"\"\nfrom typing import Any, Optional, List, Tuple\nfrom absl import logging\nimport tensorflow as tf\n\nfrom official.common import dataset_fn\nfrom official.core import base_task\nfrom official.core import task_factory\nfrom official.modeling import tf_utils\nfrom official.vision.configs import image_classification as exp_cfg\nfrom official.vision.dataloaders import classification_input\nfrom official.vision.dataloaders import input_reader_factory\nfrom official.vision.dataloaders import tfds_factory\nfrom official.vision.modeling import factory\nfrom official.vision.ops import augment\n\n\n@task_factory.register_task_cls(exp_cfg.ImageClassificationTask)\nclass ImageClassificationTask(base_task.Task):\n \"\"\"A task for image classification.\"\"\"\n\n def build_model(self):\n \"\"\"Builds classification model.\"\"\"\n input_specs = tf.keras.layers.InputSpec(\n shape=[None] + self.task_config.model.input_size)\n\n l2_weight_decay = self.task_config.losses.l2_weight_decay\n # Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.\n # (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)\n # (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)\n l2_regularizer = (tf.keras.regularizers.l2(\n l2_weight_decay / 2.0) if l2_weight_decay else None)\n\n model = factory.build_classification_model(\n input_specs=input_specs,\n model_config=self.task_config.model,\n l2_regularizer=l2_regularizer)\n return model\n\n def initialize(self, model: tf.keras.Model):\n \"\"\"Loads pretrained checkpoint.\"\"\"\n if not self.task_config.init_checkpoint:\n return\n\n ckpt_dir_or_file = self.task_config.init_checkpoint\n if tf.io.gfile.isdir(ckpt_dir_or_file):\n ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)\n\n # Restoring checkpoint.\n if self.task_config.init_checkpoint_modules == 'all':\n ckpt = tf.train.Checkpoint(model=model)\n status = ckpt.read(ckpt_dir_or_file)\n status.expect_partial().assert_existing_objects_matched()\n elif self.task_config.init_checkpoint_modules == 'backbone':\n ckpt = tf.train.Checkpoint(backbone=model.backbone)\n status = ckpt.read(ckpt_dir_or_file)\n status.expect_partial().assert_existing_objects_matched()\n else:\n raise ValueError(\n \"Only 'all' or 'backbone' can be used to initialize the model.\")\n\n logging.info('Finished loading pretrained checkpoint from %s',\n ckpt_dir_or_file)\n\n def build_inputs(\n self,\n params: exp_cfg.DataConfig,\n input_context: Optional[tf.distribute.InputContext] = None\n ) -> tf.data.Dataset:\n \"\"\"Builds classification input.\"\"\"\n\n num_classes = self.task_config.model.num_classes\n input_size = self.task_config.model.input_size\n image_field_key = self.task_config.train_data.image_field_key\n label_field_key = self.task_config.train_data.label_field_key\n is_multilabel = self.task_config.train_data.is_multilabel\n\n if 
params.tfds_name:\n decoder = tfds_factory.get_classification_decoder(params.tfds_name)\n else:\n decoder = classification_input.Decoder(\n image_field_key=image_field_key, label_field_key=label_field_key,\n is_multilabel=is_multilabel)\n\n parser = classification_input.Parser(\n output_size=input_size[:2],\n num_classes=num_classes,\n image_field_key=image_field_key,\n label_field_key=label_field_key,\n decode_jpeg_only=params.decode_jpeg_only,\n aug_rand_hflip=params.aug_rand_hflip,\n aug_type=params.aug_type,\n color_jitter=params.color_jitter,\n random_erasing=params.random_erasing,\n is_multilabel=is_multilabel,\n dtype=params.dtype)\n\n postprocess_fn = None\n if params.mixup_and_cutmix:\n postprocess_fn = augment.MixupAndCutmix(\n mixup_alpha=params.mixup_and_cutmix.mixup_alpha,\n cutmix_alpha=params.mixup_and_cutmix.cutmix_alpha,\n prob=params.mixup_and_cutmix.prob,\n label_smoothing=params.mixup_and_cutmix.label_smoothing,\n num_classes=num_classes)\n\n reader = input_reader_factory.input_reader_generator(\n params,\n dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),\n decoder_fn=decoder.decode,\n parser_fn=parser.parse_fn(params.is_training),\n postprocess_fn=postprocess_fn)\n\n dataset = reader.read(input_context=input_context)\n\n return dataset\n\n def build_losses(self,\n labels: tf.Tensor,\n model_outputs: tf.Tensor,\n aux_losses: Optional[Any] = None) -> tf.Tensor:\n \"\"\"Builds sparse categorical cross entropy loss.\n\n Args:\n labels: Input groundtruth labels.\n model_outputs: Output logits of the classifier.\n aux_losses: The auxiliarly loss tensors, i.e. `losses` in tf.keras.Model.\n\n Returns:\n The total loss tensor.\n \"\"\"\n losses_config = self.task_config.losses\n is_multilabel = self.task_config.train_data.is_multilabel\n\n if not is_multilabel:\n if losses_config.one_hot:\n total_loss = tf.keras.losses.categorical_crossentropy(\n labels,\n model_outputs,\n from_logits=True,\n label_smoothing=losses_config.label_smoothing)\n elif losses_config.soft_labels:\n total_loss = tf.nn.softmax_cross_entropy_with_logits(\n labels, model_outputs)\n else:\n total_loss = tf.keras.losses.sparse_categorical_crossentropy(\n labels, model_outputs, from_logits=True)\n else:\n # Multi-label weighted binary cross entropy loss.\n total_loss = tf.nn.sigmoid_cross_entropy_with_logits(\n labels=labels, logits=model_outputs)\n total_loss = tf.reduce_sum(total_loss, axis=-1)\n\n total_loss = tf_utils.safe_mean(total_loss)\n if aux_losses:\n total_loss += tf.add_n(aux_losses)\n\n total_loss = losses_config.loss_weight * total_loss\n return total_loss\n\n def build_metrics(self,\n training: bool = True) -> List[tf.keras.metrics.Metric]:\n \"\"\"Gets streaming metrics for training/validation.\"\"\"\n is_multilabel = self.task_config.train_data.is_multilabel\n if not is_multilabel:\n k = self.task_config.evaluation.top_k\n if (self.task_config.losses.one_hot or\n self.task_config.losses.soft_labels):\n metrics = [\n tf.keras.metrics.CategoricalAccuracy(name='accuracy'),\n tf.keras.metrics.TopKCategoricalAccuracy(\n k=k, name='top_{}_accuracy'.format(k))]\n else:\n metrics = [\n tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'),\n tf.keras.metrics.SparseTopKCategoricalAccuracy(\n k=k, name='top_{}_accuracy'.format(k))]\n else:\n metrics = []\n # These metrics destablize the training if included in training. 
The jobs\n # fail due to OOM.\n # TODO(arashwan): Investigate adding following metric to train.\n if not training:\n metrics = [\n tf.keras.metrics.AUC(\n name='globalPR-AUC',\n curve='PR',\n multi_label=False,\n from_logits=True),\n tf.keras.metrics.AUC(\n name='meanPR-AUC',\n curve='PR',\n multi_label=True,\n num_labels=self.task_config.model.num_classes,\n from_logits=True),\n ]\n return metrics\n\n def train_step(self,\n inputs: Tuple[Any, Any],\n model: tf.keras.Model,\n optimizer: tf.keras.optimizers.Optimizer,\n metrics: Optional[List[Any]] = None):\n \"\"\"Does forward and backward.\n\n Args:\n inputs: A tuple of of input tensors of (features, labels).\n model: A tf.keras.Model instance.\n optimizer: The optimizer for this training step.\n metrics: A nested structure of metrics objects.\n\n Returns:\n A dictionary of logs.\n \"\"\"\n features, labels = inputs\n is_multilabel = self.task_config.train_data.is_multilabel\n if self.task_config.losses.one_hot and not is_multilabel:\n labels = tf.one_hot(labels, self.task_config.model.num_classes)\n\n num_replicas = tf.distribute.get_strategy().num_replicas_in_sync\n with tf.GradientTape() as tape:\n outputs = model(features, training=True)\n # Casting output layer as float32 is necessary when mixed_precision is\n # mixed_float16 or mixed_bfloat16 to ensure output is casted as float32.\n outputs = tf.nest.map_structure(\n lambda x: tf.cast(x, tf.float32), outputs)\n\n # Computes per-replica loss.\n loss = self.build_losses(\n model_outputs=outputs,\n labels=labels,\n aux_losses=model.losses)\n # Scales loss as the default gradients allreduce performs sum inside the\n # optimizer.\n scaled_loss = loss / num_replicas\n\n # For mixed_precision policy, when LossScaleOptimizer is used, loss is\n # scaled for numerical stability.\n if isinstance(\n optimizer, tf.keras.mixed_precision.LossScaleOptimizer):\n scaled_loss = optimizer.get_scaled_loss(scaled_loss)\n\n tvars = model.trainable_variables\n grads = tape.gradient(scaled_loss, tvars)\n # Scales back gradient before apply_gradients when LossScaleOptimizer is\n # used.\n if isinstance(\n optimizer, tf.keras.mixed_precision.LossScaleOptimizer):\n grads = optimizer.get_unscaled_gradients(grads)\n optimizer.apply_gradients(list(zip(grads, tvars)))\n\n logs = {self.loss: loss}\n if metrics:\n self.process_metrics(metrics, labels, outputs)\n elif model.compiled_metrics:\n self.process_compiled_metrics(model.compiled_metrics, labels, outputs)\n logs.update({m.name: m.result() for m in model.metrics})\n return logs\n\n def validation_step(self,\n inputs: Tuple[Any, Any],\n model: tf.keras.Model,\n metrics: Optional[List[Any]] = None):\n \"\"\"Runs validatation step.\n\n Args:\n inputs: A tuple of of input tensors of (features, labels).\n model: A tf.keras.Model instance.\n metrics: A nested structure of metrics objects.\n\n Returns:\n A dictionary of logs.\n \"\"\"\n features, labels = inputs\n one_hot = self.task_config.losses.one_hot\n soft_labels = self.task_config.losses.soft_labels\n is_multilabel = self.task_config.train_data.is_multilabel\n if (one_hot or soft_labels) and not is_multilabel:\n labels = tf.one_hot(labels, self.task_config.model.num_classes)\n\n outputs = self.inference_step(features, model)\n outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)\n loss = self.build_losses(\n model_outputs=outputs,\n labels=labels,\n aux_losses=model.losses)\n\n logs = {self.loss: loss}\n if metrics:\n self.process_metrics(metrics, labels, outputs)\n elif 
model.compiled_metrics:\n self.process_compiled_metrics(model.compiled_metrics, labels, outputs)\n logs.update({m.name: m.result() for m in model.metrics})\n return logs\n\n def inference_step(self, inputs: tf.Tensor, model: tf.keras.Model):\n \"\"\"Performs the forward step.\"\"\"\n return model(inputs, training=False)\n",
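The `build_losses` method in the entry above switches between three formulations depending on the config: dense one-hot labels with label smoothing, sparse integer labels, and multi-label sigmoid cross entropy summed over classes. The following toy sketch exercises the three branches with made-up shapes and labels; it is not the task's real input pipeline.

```python
import tensorflow as tf

num_classes = 4
logits = tf.random.normal([2, num_classes])          # model outputs (logits)
sparse_labels = tf.constant([1, 3])                  # integer class ids
one_hot_labels = tf.one_hot(sparse_labels, num_classes)
multi_labels = tf.constant([[1., 0., 1., 0.], [0., 1., 0., 0.]])

# one_hot=True branch: label smoothing only makes sense with dense labels.
loss_one_hot = tf.keras.losses.categorical_crossentropy(
    one_hot_labels, logits, from_logits=True, label_smoothing=0.1)

# Default branch: sparse integer labels.
loss_sparse = tf.keras.losses.sparse_categorical_crossentropy(
    sparse_labels, logits, from_logits=True)

# is_multilabel branch: per-class binary cross entropy, summed over classes.
loss_multi = tf.reduce_sum(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=multi_labels, logits=logits),
    axis=-1)

print(tf.reduce_mean(loss_one_hot), tf.reduce_mean(loss_sparse),
      tf.reduce_mean(loss_multi))
```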
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Tests for video_classification.\"\"\"\n\n# pylint: disable=unused-import\nfrom absl.testing import parameterized\nimport tensorflow as tf\n\nfrom official import vision\nfrom official.core import config_definitions as cfg\nfrom official.core import exp_factory\nfrom official.vision.configs import video_classification as exp_cfg\n\n\nclass VideoClassificationConfigTest(tf.test.TestCase, parameterized.TestCase):\n\n @parameterized.parameters(('video_classification',),\n ('video_classification_kinetics600',))\n def test_video_classification_configs(self, config_name):\n config = exp_factory.get_exp_config(config_name)\n self.assertIsInstance(config, cfg.ExperimentConfig)\n self.assertIsInstance(config.task, exp_cfg.VideoClassificationTask)\n self.assertIsInstance(config.task.model, exp_cfg.VideoClassificationModel)\n self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)\n config.validate()\n config.task.train_data.is_training = None\n with self.assertRaises(KeyError):\n config.validate()\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Converts '3d_2plus1d' checkpoints into '2plus1d'.\"\"\"\n\nfrom absl import app\nfrom absl import flags\nimport tensorflow as tf\n\nfrom official.projects.movinet.modeling import movinet\nfrom official.projects.movinet.modeling import movinet_model\n\nflags.DEFINE_string(\n 'input_checkpoint_path', None,\n 'Checkpoint path to load.')\nflags.DEFINE_string(\n 'output_checkpoint_path', None,\n 'Export path to save the saved_model file.')\nflags.DEFINE_string(\n 'model_id', 'a0', 'MoViNet model name.')\nflags.DEFINE_string(\n 'se_type', '2plus3d', 'MoViNet model SE type.')\nflags.DEFINE_bool(\n 'causal', True, 'Run the model in causal mode.')\nflags.DEFINE_bool(\n 'use_positional_encoding', False,\n 'Whether to use positional encoding (only applied when causal=True).')\nflags.DEFINE_integer(\n 'num_classes', 600, 'The number of classes for prediction.')\nflags.DEFINE_bool(\n 'verify_output', False, 'Verify the output matches between the models.')\n\nFLAGS = flags.FLAGS\n\n\ndef main(_) -> None:\n backbone_2plus1d = movinet.Movinet(\n model_id=FLAGS.model_id,\n causal=FLAGS.causal,\n conv_type='2plus1d',\n se_type=FLAGS.se_type,\n use_positional_encoding=FLAGS.use_positional_encoding)\n model_2plus1d = movinet_model.MovinetClassifier(\n backbone=backbone_2plus1d,\n num_classes=FLAGS.num_classes)\n model_2plus1d.build([1, 1, 1, 1, 3])\n\n backbone_3d_2plus1d = movinet.Movinet(\n model_id=FLAGS.model_id,\n causal=FLAGS.causal,\n conv_type='3d_2plus1d',\n se_type=FLAGS.se_type,\n use_positional_encoding=FLAGS.use_positional_encoding)\n model_3d_2plus1d = movinet_model.MovinetClassifier(\n backbone=backbone_3d_2plus1d,\n num_classes=FLAGS.num_classes)\n model_3d_2plus1d.build([1, 1, 1, 1, 3])\n\n checkpoint = tf.train.Checkpoint(model=model_3d_2plus1d)\n status = checkpoint.restore(FLAGS.input_checkpoint_path)\n status.assert_existing_objects_matched()\n\n # Ensure both models have the same weights\n weights = []\n for var_2plus1d, var_3d_2plus1d in zip(\n model_2plus1d.get_weights(), model_3d_2plus1d.get_weights()):\n if var_2plus1d.shape == var_3d_2plus1d.shape:\n weights.append(var_3d_2plus1d)\n else:\n if var_3d_2plus1d.shape[0] == 1:\n weight = var_3d_2plus1d[0]\n else:\n weight = var_3d_2plus1d[:, 0]\n if weight.shape[-1] != var_2plus1d.shape[-1]:\n # Transpose any depthwise kernels (conv3d --> depthwise_conv2d)\n weight = tf.transpose(weight, perm=(0, 1, 3, 2))\n weights.append(weight)\n model_2plus1d.set_weights(weights)\n\n if FLAGS.verify_output:\n inputs = tf.random.uniform([1, 6, 64, 64, 3], dtype=tf.float32)\n\n logits_2plus1d = model_2plus1d(inputs)\n logits_3d_2plus1d = model_3d_2plus1d(inputs)\n\n if tf.reduce_mean(logits_2plus1d - logits_3d_2plus1d) > 1e-5:\n raise ValueError('Bad conversion, model outputs do not match.')\n\n save_checkpoint = tf.train.Checkpoint(\n model=model_2plus1d, backbone=backbone_2plus1d)\n 
save_checkpoint.save(FLAGS.output_checkpoint_path)\n\n\nif __name__ == '__main__':\n flags.mark_flag_as_required('input_checkpoint_path')\n flags.mark_flag_as_required('output_checkpoint_path')\n app.run(main)\n",
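The weight-copy loop in the conversion script above hinges on one shape rule: a `'3d_2plus1d'` kernel carries an extra unit dimension that the `'2plus1d'` model does not, and depthwise kernels additionally need their last two axes swapped when moving from a `Conv3D` layout to a depthwise 2-D layout. A small NumPy sketch of that rule with illustrative shapes (not actual MoViNet variables):

```python
import numpy as np


def convert_kernel(var_3d: np.ndarray, target_shape: tuple) -> np.ndarray:
  # Drop the unit leading (temporal) dimension, mirroring the loop above.
  weight = var_3d[0] if var_3d.shape[0] == 1 else var_3d[:, 0]
  if weight.shape[-1] != target_shape[-1]:
    # Conv3D kernels end in (..., in, out); a depthwise 2-D kernel expects
    # (..., channels, multiplier), so swap the last two axes.
    weight = np.transpose(weight, (0, 1, 3, 2))
  return weight


kernel_3d = np.random.rand(1, 3, 3, 1, 8)   # (t, h, w, in, out) with t == 1
converted = convert_kernel(kernel_3d, (3, 3, 8, 1))
print(converted.shape)  # (3, 3, 8, 1)
```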
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for Keras-based transformer block layer.\"\"\"\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\n\nfrom official.nlp.modeling.layers import reuse_transformer\n\n\[email protected]_parameters(\n ('base', reuse_transformer.ReuseTransformer))\nclass ReuseTransformerLayerTest(tf.test.TestCase, parameterized.TestCase):\n\n def tearDown(self):\n super(ReuseTransformerLayerTest, self).tearDown()\n tf.keras.mixed_precision.set_global_policy('float32')\n\n def test_layer_creation(self, transformer_cls):\n test_layer = transformer_cls(\n num_attention_heads=10, inner_dim=2048, inner_activation='relu')\n sequence_length = 21\n width = 80\n # Create a 3-dimensional input (the first dimension is implicit).\n data_tensor = tf.keras.Input(shape=(sequence_length, width))\n output_tensor, _ = test_layer(data_tensor)\n # The default output of a transformer layer should be the same as the input.\n self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())\n\n def test_layer_creation_with_mask(self, transformer_cls):\n test_layer = transformer_cls(\n num_attention_heads=10, inner_dim=2048, inner_activation='relu')\n sequence_length = 21\n width = 80\n # Create a 3-dimensional input (the first dimension is implicit).\n data_tensor = tf.keras.Input(shape=(sequence_length, width))\n # Create a 2-dimensional input (the first dimension is implicit).\n mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))\n output_tensor, _ = test_layer([data_tensor, mask_tensor])\n # The default output of a transformer layer should be the same as the input.\n self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())\n\n def test_layer_invocation(self, transformer_cls):\n test_layer = transformer_cls(\n num_attention_heads=10, inner_dim=2048, inner_activation='relu')\n sequence_length = 21\n width = 80\n # Create a 3-dimensional input (the first dimension is implicit).\n data_tensor = tf.keras.Input(shape=(sequence_length, width))\n output_tensor = test_layer(data_tensor)\n\n # Create a model from the test layer.\n model = tf.keras.Model(data_tensor, output_tensor)\n\n # Invoke the model on test data. 
We can't validate the output data itself\n # (the NN is too complex) but this will rule out structural runtime errors.\n batch_size = 6\n input_data = np.random.random_sample(\n (batch_size, sequence_length, width))\n _ = model.predict(input_data)\n\n def test_layer_invocation_with_mask(self, transformer_cls):\n test_layer = transformer_cls(\n num_attention_heads=10, inner_dim=2048, inner_activation='relu')\n sequence_length = 21\n width = 80\n # Create a 3-dimensional input (the first dimension is implicit).\n data_tensor = tf.keras.Input(shape=(sequence_length, width))\n # Create a 2-dimensional input (the first dimension is implicit).\n mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))\n output_tensor = test_layer([data_tensor, mask_tensor])\n\n # Create a model from the test layer.\n model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)\n\n # Invoke the model on test data. We can't validate the output data itself\n # (the NN is too complex) but this will rule out structural runtime errors.\n batch_size = 6\n input_data = np.random.random_sample(\n (batch_size, sequence_length, width))\n # The attention mask should be of shape (batch, from_seq_len, to_seq_len),\n # which here is (batch, sequence_length, sequence_length)\n mask_data = np.random.randint(\n 2, size=(batch_size, sequence_length, sequence_length))\n _ = model.predict([input_data, mask_data])\n\n def test_layer_output_range(self, transformer_cls):\n test_layer = transformer_cls(\n num_attention_heads=10, inner_dim=2048, inner_activation='relu')\n sequence_length = 21\n width = 80\n\n batch_size = 6\n input_data = np.random.random_sample(\n (batch_size, sequence_length, width))\n mask_data = np.random.randint(\n 2, size=(batch_size, sequence_length, sequence_length))\n output_tensor, _ = test_layer([input_data, mask_data])\n\n # The layer only attends to the first token and outputs the first token\n # embedding.\n new_layer = transformer_cls(\n num_attention_heads=10,\n inner_dim=2048,\n inner_activation='relu',\n output_range=1)\n _ = new_layer([input_data, mask_data])\n new_layer.set_weights(test_layer.get_weights())\n new_output_tensor, _ = new_layer([input_data, mask_data])\n self.assertAllClose(\n new_output_tensor, output_tensor[:, 0:1, :], atol=0.002, rtol=0.01)\n\n def test_layer_output_range_with_relative_pe(self, transformer_cls):\n test_layer = transformer_cls(\n num_attention_heads=10, inner_dim=2048, inner_activation='relu',\n use_relative_pe=True)\n sequence_length = 21\n width = 80\n\n batch_size = 6\n input_data = np.random.random_sample(\n (batch_size, sequence_length, width))\n mask_data = np.random.randint(\n 2, size=(batch_size, sequence_length, sequence_length))\n output_tensor, _ = test_layer([input_data, mask_data])\n\n # The layer only attends to the first token and outputs the first token\n # embedding.\n new_layer = transformer_cls(\n num_attention_heads=10,\n inner_dim=2048,\n inner_activation='relu',\n output_range=1,\n use_relative_pe=True)\n _ = new_layer([input_data, mask_data])\n new_layer.set_weights(test_layer.get_weights())\n new_output_tensor, _ = new_layer([input_data, mask_data])\n self.assertAllClose(\n new_output_tensor, output_tensor[:, 0:1, :], atol=0.002, rtol=0.01)\n\n def test_layer_output_range_without_mask(self, transformer_cls):\n test_layer = transformer_cls(\n num_attention_heads=10, inner_dim=2048,\n inner_activation='relu', norm_first=True)\n sequence_length = 21\n width = 80\n\n batch_size = 6\n input_data = np.random.random_sample(\n 
(batch_size, sequence_length, width))\n output_tensor, _ = test_layer(input_data)\n\n # The layer only attends to the first token and outputs the first token\n # embedding.\n new_layer = transformer_cls(\n num_attention_heads=10,\n inner_dim=2048,\n inner_activation='relu',\n output_range=1,\n norm_first=True)\n _ = new_layer(input_data)\n new_layer.set_weights(test_layer.get_weights())\n new_output_tensor, _ = new_layer(input_data)\n self.assertAllClose(\n new_output_tensor, output_tensor[:, 0:1, :], atol=0.002, rtol=0.01)\n\n def test_layer_output_range_with_pre_norm(self, transformer_cls):\n test_layer = transformer_cls(\n num_attention_heads=10, inner_dim=2048,\n inner_activation='relu', norm_first=True)\n sequence_length = 21\n width = 80\n\n batch_size = 6\n input_data = np.random.random_sample(\n (batch_size, sequence_length, width))\n mask_data = np.random.randint(\n 2, size=(batch_size, sequence_length, sequence_length))\n output_tensor, _ = test_layer([input_data, mask_data])\n\n # The layer only attends to the first token and outputs the first token\n # embedding.\n new_layer = transformer_cls(\n num_attention_heads=10,\n inner_dim=2048,\n inner_activation='relu',\n output_range=1,\n norm_first=True)\n _ = new_layer([input_data, mask_data])\n new_layer.set_weights(test_layer.get_weights())\n new_output_tensor, _ = new_layer([input_data, mask_data])\n self.assertAllClose(\n new_output_tensor, output_tensor[:, 0:1, :], atol=0.002, rtol=0.01)\n\n def test_layer_invocation_with_float16_dtype(self, transformer_cls):\n tf.keras.mixed_precision.set_global_policy('mixed_float16')\n test_layer = transformer_cls(\n num_attention_heads=10, inner_dim=2048, inner_activation='relu')\n sequence_length = 21\n width = 80\n # Create a 3-dimensional input (the first dimension is implicit).\n data_tensor = tf.keras.Input(shape=(sequence_length, width))\n # Create a 2-dimensional input (the first dimension is implicit).\n mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))\n output_tensor = test_layer([data_tensor, mask_tensor])\n\n # Create a model from the test layer.\n model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)\n\n # Invoke the model on test data. 
We can't validate the output data itself\n # (the NN is too complex) but this will rule out structural runtime errors.\n batch_size = 6\n input_data = (np.random.random_sample(\n (batch_size, sequence_length, width)))\n # The attention mask should be of shape (batch, from_seq_len, to_seq_len),\n # which here is (batch, sequence_length, sequence_length)\n mask_data = np.random.randint(\n 2, size=(batch_size, sequence_length, sequence_length))\n _ = model.predict([input_data, mask_data])\n\n def test_transform_with_initializer(self, transformer_cls):\n test_layer = transformer_cls(\n num_attention_heads=10,\n inner_dim=2048,\n inner_activation='relu',\n kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))\n sequence_length = 21\n width = 80\n # Create a 3-dimensional input (the first dimension is implicit).\n data_tensor = tf.keras.Input(shape=(sequence_length, width))\n output, _ = test_layer(data_tensor)\n # The default output of a transformer layer should be the same as the input.\n self.assertEqual(data_tensor.shape.as_list(), output.shape.as_list())\n\n def test_dynamic_layer_sequence(self, transformer_cls):\n test_layer = transformer_cls(\n num_attention_heads=10,\n inner_dim=2048,\n inner_activation='relu',\n kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))\n # Create a 3-dimensional input (the first dimension is implicit).\n width = 30\n input_tensor = tf.keras.Input(shape=(None, width))\n output_tensor, _ = test_layer(input_tensor)\n model = tf.keras.Model(input_tensor, output_tensor)\n\n input_length = 17\n input_data = np.ones((1, input_length, width))\n output_data = model.predict(input_data)\n\n self.assertAllEqual([1, input_length, width], output_data.shape)\n\n\nclass ReuseTransformerArgumentTest(tf.test.TestCase, parameterized.TestCase):\n\n def test_use_bias_norm_first(self):\n num_attention_heads = 2\n hidden_size = 16\n encoder_block = reuse_transformer.ReuseTransformer(\n num_attention_heads=num_attention_heads,\n inner_dim=32,\n inner_activation='relu',\n output_dropout=0.1,\n attention_dropout=0.1,\n use_bias=False,\n norm_first=True,\n norm_epsilon=1e-6,\n inner_dropout=0.1,\n attention_initializer=tf.keras.initializers.RandomUniform(\n minval=0., maxval=1.))\n # Forward path.\n dummy_tensor = tf.zeros([2, 4, 16], dtype=tf.float32)\n dummy_mask = tf.zeros([2, 4, 4], dtype=tf.float32)\n inputs = [dummy_tensor, dummy_mask]\n output, _ = encoder_block(inputs)\n self.assertEqual(output.shape, (2, 4, hidden_size))\n\n def test_get_config(self):\n num_attention_heads = 2\n encoder_block = reuse_transformer.ReuseTransformer(\n num_attention_heads=num_attention_heads,\n inner_dim=32,\n inner_activation='relu',\n output_dropout=0.1,\n attention_dropout=0.1,\n use_bias=False,\n norm_first=True,\n norm_epsilon=1e-6,\n inner_dropout=0.1,\n attention_initializer=tf.keras.initializers.RandomUniform(\n minval=0., maxval=1.))\n encoder_block_config = encoder_block.get_config()\n new_encoder_block = reuse_transformer.ReuseTransformer.from_config(\n encoder_block_config)\n self.assertEqual(encoder_block_config, new_encoder_block.get_config())\n\n @parameterized.parameters({'attention_axes': None}, {'attention_axes': [1]},\n {'attention_axes': [2]}, {'attention_axes': [1, 2]})\n def test_several_attention_axes(self, attention_axes):\n test_layer = reuse_transformer.ReuseTransformer(\n inner_dim=32,\n inner_activation='relu',\n output_dropout=0.1,\n attention_dropout=0.1,\n use_bias=False,\n norm_first=True,\n norm_epsilon=1e-6,\n 
inner_dropout=0.1,\n num_attention_heads=10,\n attention_axes=attention_axes)\n num_rows = 21\n num_cols = 13\n width = 80\n # Create a 3-dimensional input (the first dimension is implicit).\n data_tensor = tf.keras.Input(shape=(num_rows, num_cols, width))\n output_tensor, _ = test_layer(data_tensor)\n # The default output of a transformer layer should be the same as the input.\n self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())\n\n @parameterized.named_parameters(\n ('plain', False, False, False),\n ('plain_returnscore', False, True, False),\n ('plain_with_relative_pe', False, False, True),\n ('reuse_all', True, False, False),\n ('reuse_all_returnscore', True, True, False),\n ('reuse_all_with_relative_pe', True, False, True),\n ('reuse_5', 5, False, False),\n ('reuse_5_returnscore', 5, True, False),\n ('reuse_5_with_relative_pe', 5, False, True),)\n def test_layer_invocation_with_mask(self, reuse_attention,\n return_attention_scores, use_relative_pe):\n test_layer = reuse_transformer.ReuseTransformer(\n num_attention_heads=10,\n inner_dim=2048,\n inner_activation='relu',\n reuse_attention=reuse_attention,\n use_relative_pe=use_relative_pe)\n sequence_length = 21\n width = 80\n # Create a 3-dimensional input (the first dimension is implicit).\n data_tensor = tf.keras.Input(shape=(sequence_length, width))\n # Create a 2-dimensional input (the first dimension is implicit).\n mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))\n return_scores_tensor = tf.keras.Input(shape=(1,))\n reuse_attention_scores = tf.keras.Input(\n shape=(10, sequence_length, sequence_length))\n output_tensor, _ = test_layer(\n [data_tensor, mask_tensor, reuse_attention_scores])\n\n # Create a model from the test layer.\n model = tf.keras.Model(\n ([data_tensor, mask_tensor, reuse_attention_scores],\n return_scores_tensor), output_tensor)\n\n # Invoke the model on test data. 
We can't validate the output data itself\n # (the NN is too complex) but this will rule out structural runtime errors.\n batch_size = 6\n input_data = np.random.random_sample(\n (batch_size, sequence_length, width))\n # The attention mask should be of shape (batch, from_seq_len, to_seq_len),\n # which here is (batch, sequence_length, sequence_length)\n mask_data = np.random.randint(\n 2, size=(batch_size, sequence_length, sequence_length))\n reuse_scores = np.random.rand(\n batch_size, 10, sequence_length, sequence_length)\n _ = model.predict([input_data, mask_data, reuse_scores],\n return_attention_scores)\n\n @parameterized.named_parameters(\n ('without_relative_pe_with_pe_max_seq_length_10', False, 10),\n ('with_relative_pe_with_pe_max_seq_length_10', True, 10),\n ('without_relative_pe_with_pe_max_seq_length_100', False, 100),\n ('with_relative_pe_with_pe_max_seq_length_100', True, 100))\n def test_layer_invocation_with_float16_with_relative_pe(\n self, use_relative_pe, pe_max_seq_length):\n tf.keras.mixed_precision.set_global_policy('mixed_float16')\n test_layer = reuse_transformer.ReuseTransformer(\n num_attention_heads=10, inner_dim=2048, inner_activation='relu',\n use_relative_pe=use_relative_pe, pe_max_seq_length=pe_max_seq_length)\n sequence_length = 21\n width = 80\n # Create a 3-dimensional input (the first dimension is implicit).\n data_tensor = tf.keras.Input(shape=(sequence_length, width))\n # Create a 2-dimensional input (the first dimension is implicit).\n mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))\n output_tensor = test_layer([data_tensor, mask_tensor])\n\n # Create a model from the test layer.\n model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)\n\n # Invoke the model on test data. We can't validate the output data itself\n # (the NN is too complex) but this will rule out structural runtime errors.\n batch_size = 6\n input_data = (np.random.random_sample(\n (batch_size, sequence_length, width)))\n # The attention mask should be of shape (batch, from_seq_len, to_seq_len),\n # which here is (batch, sequence_length, sequence_length)\n mask_data = np.random.randint(\n 2, size=(batch_size, sequence_length, sequence_length))\n _ = model.predict([input_data, mask_data])\n\nif __name__ == '__main__':\n tf.test.main()\n",
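The `output_range` checks in the test file above can be reproduced outside the `TestCase`: a `ReuseTransformer` built with `output_range=1` and loaded with the weights of a full layer should emit the first-token slice of the full layer's output. A hedged sketch, assuming the TensorFlow Model Garden `official` package is available; the sizes are smaller than the test's for speed.

```python
import numpy as np
from official.nlp.modeling.layers import reuse_transformer

full = reuse_transformer.ReuseTransformer(
    num_attention_heads=4, inner_dim=128, inner_activation='relu')
data = np.random.random_sample((2, 8, 16)).astype('float32')
mask = np.ones((2, 8, 8), dtype='float32')
full_output, _ = full([data, mask])

first_only = reuse_transformer.ReuseTransformer(
    num_attention_heads=4, inner_dim=128, inner_activation='relu',
    output_range=1)
_ = first_only([data, mask])                 # build the variables
first_only.set_weights(full.get_weights())   # share weights with the full layer
first_output, _ = first_only([data, mask])

# Same tolerance as the test: the single-row output matches the first row.
np.testing.assert_allclose(
    first_output, full_output[:, 0:1, :], atol=2e-3, rtol=1e-2)
```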
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Contains definitions for the AssembleNet [1] models.\n\nRequires the AssembleNet architecture to be specified in\nFLAGS.model_structure (and optionally FLAGS.model_edge_weights).\nThis structure is a list corresponding to a graph representation of the\nnetwork, where a node is a convolutional block and an edge specifies a\nconnection from one block to another as described in [1].\n\nEach node itself (in the structure list) is a list with the following format:\n[block_level, [list_of_input_blocks], number_filter, temporal_dilation,\nspatial_stride]. [list_of_input_blocks] should be the list of node indexes whose\nvalues are less than the index of the node itself. The 'stems' of the network\ndirectly taking raw inputs follow a different node format:\n[stem_type, temporal_dilation]. The stem_type is -1 for RGB stem and is -2 for\noptical flow stem.\n\nAlso note that the codes in this file could be used for one-shot differentiable\nconnection search by (1) giving an overly connected structure as\nFLAGS.model_structure and by (2) setting FLAGS.model_edge_weights to be '[]'.\nThe 'agg_weights' variables will specify which connections are needed and which\nare not, once trained.\n\n[1] Michael S. Ryoo, AJ Piergiovanni, Mingxing Tan, Anelia Angelova,\n AssembleNet: Searching for Multi-Stream Neural Connectivity in Video\n Architectures. ICLR 2020\n https://arxiv.org/abs/1905.13209\n\nIt uses (2+1)D convolutions for video representations. The main AssembleNet\ntakes a 4-D (N*T)HWC tensor as an input (i.e., the batch dim and time dim are\nmixed), and it reshapes a tensor to NT(H*W)C whenever a 1-D temporal conv. is\nnecessary. This is to run this on TPU efficiently.\n\"\"\"\n\nimport functools\nimport math\nfrom typing import Any, Callable, List, Mapping, Optional\n\nfrom absl import logging\nimport numpy as np\nimport tensorflow as tf\n\nfrom official.modeling import hyperparams\nfrom official.projects.assemblenet.configs import assemblenet as cfg\nfrom official.projects.assemblenet.modeling import rep_flow_2d_layer as rf\nfrom official.vision.modeling import factory_3d as model_factory\nfrom official.vision.modeling.backbones import factory as backbone_factory\n\nlayers = tf.keras.layers\nintermediate_channel_size = [64, 128, 256, 512]\n\n\ndef fixed_padding(inputs, kernel_size):\n \"\"\"Pads the input along the spatial dimensions independently of input size.\n\n Args:\n inputs: `Tensor` of size `[batch, channels, height, width]` or `[batch,\n height, width, channels]` depending on `data_format`.\n kernel_size: `int` kernel size to be used for `conv2d` or max_pool2d`\n operations. 
Should be a positive integer.\n\n Returns:\n A padded `Tensor` of the same `data_format` with size either intact\n (if `kernel_size == 1`) or padded (if `kernel_size > 1`).\n \"\"\"\n data_format = tf.keras.backend.image_data_format()\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n if data_format == 'channels_first':\n padded_inputs = tf.pad(\n inputs, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])\n else:\n padded_inputs = tf.pad(\n inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])\n\n return padded_inputs\n\n\ndef reshape_temporal_conv1d_bn(inputs: tf.Tensor,\n filters: int,\n kernel_size: int,\n num_frames: int = 32,\n temporal_dilation: int = 1,\n bn_decay: float = rf.BATCH_NORM_DECAY,\n bn_epsilon: float = rf.BATCH_NORM_EPSILON,\n use_sync_bn: bool = False):\n \"\"\"Performs 1D temporal conv.\n\n followed by batch normalization with reshaping.\n\n Args:\n inputs: `Tensor` of size `[batch*time, height, width, channels]`. Only\n supports 'channels_last' as the data format.\n filters: `int` number of filters in the convolution.\n kernel_size: `int` kernel size to be used for `conv2d` or max_pool2d`\n operations. Should be a positive integer.\n num_frames: `int` number of frames in the input tensor.\n temporal_dilation: `int` temporal dilatioin size for the 1D conv.\n bn_decay: `float` batch norm decay parameter to use.\n bn_epsilon: `float` batch norm epsilon parameter to use.\n use_sync_bn: use synchronized batch norm for TPU.\n\n Returns:\n A padded `Tensor` of the same `data_format` with size either intact\n (if `kernel_size == 1`) or padded (if `kernel_size > 1`).\n \"\"\"\n data_format = tf.keras.backend.image_data_format()\n assert data_format == 'channels_last'\n\n feature_shape = inputs.shape\n\n inputs = tf.reshape(\n inputs,\n [-1, num_frames, feature_shape[1] * feature_shape[2], feature_shape[3]])\n\n if temporal_dilation == 1:\n inputs = tf.keras.layers.Conv2D(\n filters=filters,\n kernel_size=(kernel_size, 1),\n strides=1,\n padding='SAME',\n use_bias=False,\n kernel_initializer=tf.keras.initializers.VarianceScaling())(\n inputs=inputs)\n else:\n inputs = tf.keras.layers.Conv2D(\n filters=filters,\n kernel_size=(kernel_size, 1),\n strides=1,\n padding='SAME',\n dilation_rate=(temporal_dilation, 1),\n use_bias=False,\n kernel_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=math.sqrt(2.0 / (kernel_size * feature_shape[3]))))(\n inputs=inputs)\n\n num_channel = inputs.shape[3]\n inputs = tf.reshape(inputs,\n [-1, feature_shape[1], feature_shape[2], num_channel])\n inputs = rf.build_batch_norm(\n bn_decay=bn_decay, bn_epsilon=bn_epsilon, use_sync_bn=use_sync_bn)(\n inputs)\n inputs = tf.nn.relu(inputs)\n\n return inputs\n\n\ndef conv2d_fixed_padding(inputs: tf.Tensor, filters: int, kernel_size: int,\n strides: int):\n \"\"\"Strided 2-D convolution with explicit padding.\n\n The padding is consistent and is based only on `kernel_size`, not on the\n dimensions of `inputs` (as opposed to using `tf.keras.layers.Conv2D` alone).\n\n Args:\n inputs: `Tensor` of size `[batch, channels, height_in, width_in]`.\n filters: `int` number of filters in the convolution.\n kernel_size: `int` size of the kernel to be used in the convolution.\n strides: `int` strides of the convolution.\n\n Returns:\n A `Tensor` of shape `[batch, filters, height_out, width_out]`.\n \"\"\"\n if strides > 1:\n inputs = fixed_padding(inputs, kernel_size)\n\n return tf.keras.layers.Conv2D(\n filters=filters,\n kernel_size=kernel_size,\n 
strides=strides,\n padding=('SAME' if strides == 1 else 'VALID'),\n use_bias=False,\n kernel_initializer=tf.keras.initializers.VarianceScaling())(\n inputs=inputs)\n\n\ndef conv3d_same_padding(inputs: tf.Tensor,\n filters: int,\n kernel_size: int,\n strides: int,\n temporal_dilation: int = 1,\n do_2d_conv: bool = False):\n \"\"\"3D convolution layer wrapper.\n\n Uses conv3d function.\n\n Args:\n inputs: 5D `Tensor` following the data_format.\n filters: `int` number of filters in the convolution.\n kernel_size: `int` size of the kernel to be used in the convolution.\n strides: `int` strides of the convolution.\n temporal_dilation: `int` temporal dilatioin size for the 1D conv.\n do_2d_conv: `bool` indicating whether to do 2d conv. If false, do 3D conv.\n\n Returns:\n A `Tensor` of shape `[batch, time_in, height_in, width_in, channels]`.\n \"\"\"\n if isinstance(kernel_size, int):\n if do_2d_conv:\n kernel_size = [1, kernel_size, kernel_size]\n else:\n kernel_size = [kernel_size, kernel_size, kernel_size]\n\n return tf.keras.layers.Conv3D(\n filters=filters,\n kernel_size=kernel_size,\n strides=[1, strides, strides],\n padding='SAME',\n dilation_rate=[temporal_dilation, 1, 1],\n use_bias=False,\n kernel_initializer=tf.keras.initializers.VarianceScaling())(\n inputs=inputs)\n\n\ndef bottleneck_block_interleave(inputs: tf.Tensor,\n filters: int,\n inter_filters: int,\n strides: int,\n use_projection: bool = False,\n num_frames: int = 32,\n temporal_dilation: int = 1,\n bn_decay: float = rf.BATCH_NORM_DECAY,\n bn_epsilon: float = rf.BATCH_NORM_EPSILON,\n use_sync_bn: bool = False,\n step=1):\n \"\"\"Interleaves a standard 2D residual module and (2+1)D residual module.\n\n Bottleneck block variant for residual networks with BN after convolutions.\n\n Args:\n inputs: `Tensor` of size `[batch*time, channels, height, width]`.\n filters: `int` number of filters for the first conv. layer. The last conv.\n layer will use 4 times as many filters.\n inter_filters: `int` number of filters for the second conv. layer.\n strides: `int` block stride. If greater than 1, this block will ultimately\n downsample the input spatially.\n use_projection: `bool` for whether this block should use a projection\n shortcut (versus the default identity shortcut). This is usually `True`\n for the first block of a block group, which may change the number of\n filters and the resolution.\n num_frames: `int` number of frames in the input tensor.\n temporal_dilation: `int` temporal dilatioin size for the 1D conv.\n bn_decay: `float` batch norm decay parameter to use.\n bn_epsilon: `float` batch norm epsilon parameter to use.\n use_sync_bn: use synchronized batch norm for TPU.\n step: `int` to decide whether to put 2D module or (2+1)D module.\n\n Returns:\n The output `Tensor` of the block.\n \"\"\"\n if strides > 1 and not use_projection:\n raise ValueError('strides > 1 requires use_projections=True, otherwise the '\n 'inputs and shortcut will have shape mismatch')\n shortcut = inputs\n if use_projection:\n # Projection shortcut only in first block within a group. 
Bottleneck blocks\n # end with 4 times the number of filters.\n filters_out = 4 * filters\n shortcut = conv2d_fixed_padding(\n inputs=inputs, filters=filters_out, kernel_size=1, strides=strides)\n shortcut = rf.build_batch_norm(\n bn_decay=bn_decay, bn_epsilon=bn_epsilon, use_sync_bn=use_sync_bn)(\n shortcut)\n\n if step % 2 == 1:\n k = 3\n\n inputs = reshape_temporal_conv1d_bn(\n inputs=inputs,\n filters=filters,\n kernel_size=k,\n num_frames=num_frames,\n temporal_dilation=temporal_dilation,\n bn_decay=bn_decay,\n bn_epsilon=bn_epsilon,\n use_sync_bn=use_sync_bn)\n else:\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=1, strides=1)\n inputs = rf.build_batch_norm(\n bn_decay=bn_decay, bn_epsilon=bn_epsilon, use_sync_bn=use_sync_bn)(\n inputs)\n inputs = tf.nn.relu(inputs)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=inter_filters, kernel_size=3, strides=strides)\n inputs = rf.build_batch_norm(\n bn_decay=bn_decay, bn_epsilon=bn_epsilon, use_sync_bn=use_sync_bn)(\n inputs)\n inputs = tf.nn.relu(inputs)\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=4 * filters, kernel_size=1, strides=1)\n inputs = rf.build_batch_norm(\n init_zero=True,\n bn_decay=bn_decay,\n bn_epsilon=bn_epsilon,\n use_sync_bn=use_sync_bn)(\n inputs)\n\n return tf.nn.relu(inputs + shortcut)\n\n\ndef block_group(inputs: tf.Tensor,\n filters: int,\n block_fn: Callable[..., tf.Tensor],\n blocks: int,\n strides: int,\n name,\n block_level,\n num_frames=32,\n temporal_dilation=1):\n \"\"\"Creates one group of blocks for the AssembleNett model.\n\n Args:\n inputs: `Tensor` of size `[batch*time, channels, height, width]`.\n filters: `int` number of filters for the first convolution of the layer.\n block_fn: `function` for the block to use within the model\n blocks: `int` number of blocks contained in the layer.\n strides: `int` stride to use for the first convolution of the layer. 
If\n greater than 1, this layer will downsample the input.\n name: `str` name for the Tensor output of the block layer.\n block_level: `int` block level in AssembleNet.\n num_frames: `int` number of frames in the input tensor.\n temporal_dilation: `int` temporal dilatioin size for the 1D conv.\n\n Returns:\n The output `Tensor` of the block layer.\n \"\"\"\n # Only the first block per block_group uses projection shortcut and strides.\n inputs = block_fn(\n inputs,\n filters,\n intermediate_channel_size[block_level],\n strides,\n use_projection=True,\n num_frames=num_frames,\n temporal_dilation=temporal_dilation,\n step=0)\n\n for i in range(1, blocks):\n inputs = block_fn(\n inputs,\n filters,\n intermediate_channel_size[block_level],\n 1,\n num_frames=num_frames,\n temporal_dilation=temporal_dilation,\n step=i)\n\n return tf.identity(inputs, name)\n\n\ndef spatial_resize_and_concat(inputs):\n \"\"\"Concatenates multiple different sized tensors channel-wise.\n\n Args:\n inputs: A list of `Tensors` of size `[batch*time, channels, height, width]`.\n\n Returns:\n The output `Tensor` after concatenation.\n \"\"\"\n data_format = tf.keras.backend.image_data_format()\n assert data_format == 'channels_last'\n\n # Do nothing if only 1 input\n if len(inputs) == 1:\n return inputs[0]\n if data_format != 'channels_last':\n return inputs\n\n # get smallest spatial size and largest channels\n sm_size = [1000, 1000]\n for inp in inputs:\n # assume batch X height x width x channels\n sm_size[0] = min(sm_size[0], inp.shape[1])\n sm_size[1] = min(sm_size[1], inp.shape[2])\n\n for i in range(len(inputs)):\n if inputs[i].shape[1] != sm_size[0] or inputs[i].shape[2] != sm_size[1]:\n ratio = (inputs[i].shape[1] + 1) // sm_size[0]\n inputs[i] = tf.keras.layers.MaxPool2D([ratio, ratio],\n ratio,\n padding='same')(\n inputs[i])\n\n return tf.concat(inputs, 3)\n\n\nclass _ApplyEdgeWeight(layers.Layer):\n \"\"\"Multiply weight on each input tensor.\n\n A weight is assigned for each connection (i.e., each input tensor). This layer\n is used by the multi_connection_fusion to compute the weighted inputs.\n \"\"\"\n\n def __init__(self,\n weights_shape,\n index: Optional[int] = None,\n use_5d_mode: bool = False,\n model_edge_weights: Optional[List[Any]] = None,\n **kwargs):\n \"\"\"Constructor.\n\n Args:\n weights_shape: shape of the weights. Should equals to [len(inputs)].\n index: `int` index of the block within the AssembleNet architecture. 
Used\n for summation weight initial loading.\n use_5d_mode: `bool` indicating whether the inputs are in 5D tensor or 4D.\n model_edge_weights: AssembleNet model structure connection weights in the\n string format.\n **kwargs: pass through arguments.\n \"\"\"\n super(_ApplyEdgeWeight, self).__init__(**kwargs)\n\n self._weights_shape = weights_shape\n self._index = index\n self._use_5d_mode = use_5d_mode\n self._model_edge_weights = model_edge_weights\n data_format = tf.keras.backend.image_data_format()\n assert data_format == 'channels_last'\n\n def get_config(self):\n config = {\n 'weights_shape': self._weights_shape,\n 'index': self._index,\n 'use_5d_mode': self._use_5d_mode,\n 'model_edge_weights': self._model_edge_weights,\n }\n base_config = super(_ApplyEdgeWeight, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def build(self, input_shape: tf.TensorShape):\n if self._weights_shape[0] == 1:\n self._edge_weights = 1.0\n return\n\n if self._index is None or not self._model_edge_weights:\n self._edge_weights = self.add_weight(\n shape=self._weights_shape,\n initializer=tf.keras.initializers.TruncatedNormal(\n mean=0.0, stddev=0.01),\n trainable=True,\n name='agg_weights')\n else:\n initial_weights_after_sigmoid = np.asarray(\n self._model_edge_weights[self._index][0]).astype('float32')\n # Initial_weights_after_sigmoid is never 0, as the initial weights are\n # based the results of a successful connectivity search.\n initial_weights = -np.log(1. / initial_weights_after_sigmoid - 1.)\n self._edge_weights = self.add_weight(\n shape=self._weights_shape,\n initializer=tf.constant_initializer(initial_weights),\n trainable=False,\n name='agg_weights')\n\n def call(self,\n inputs: List[tf.Tensor],\n training: Optional[bool] = None) -> Mapping[Any, List[tf.Tensor]]:\n use_5d_mode = self._use_5d_mode\n dtype = inputs[0].dtype\n assert len(inputs) > 1\n\n if use_5d_mode:\n h_channel_loc = 2\n else:\n h_channel_loc = 1\n\n # get smallest spatial size and largest channels\n sm_size = [10000, 10000]\n lg_channel = 0\n for inp in inputs:\n # assume batch X height x width x channels\n sm_size[0] = min(sm_size[0], inp.shape[h_channel_loc])\n sm_size[1] = min(sm_size[1], inp.shape[h_channel_loc + 1])\n lg_channel = max(lg_channel, inp.shape[-1])\n\n # loads or creates weight variables to fuse multiple inputs\n weights = tf.math.sigmoid(tf.cast(self._edge_weights, dtype))\n\n # Compute weighted inputs. 
We group inputs with the same channels.\n per_channel_inps = dict({0: []})\n for i, inp in enumerate(inputs):\n if inp.shape[h_channel_loc] != sm_size[0] or inp.shape[h_channel_loc + 1] != sm_size[1]: # pylint: disable=line-too-long\n assert sm_size[0] != 0\n ratio = (inp.shape[h_channel_loc] + 1) // sm_size[0]\n if use_5d_mode:\n inp = tf.keras.layers.MaxPool3D([1, ratio, ratio], [1, ratio, ratio],\n padding='same')(\n inp)\n else:\n inp = tf.keras.layers.MaxPool2D([ratio, ratio], ratio,\n padding='same')(\n inp)\n\n weights = tf.cast(weights, inp.dtype)\n if inp.shape[-1] in per_channel_inps:\n per_channel_inps[inp.shape[-1]].append(weights[i] * inp)\n else:\n per_channel_inps.update({inp.shape[-1]: [weights[i] * inp]})\n return per_channel_inps\n\n\ndef multi_connection_fusion(inputs: List[tf.Tensor],\n index: Optional[int] = None,\n use_5d_mode: bool = False,\n model_edge_weights: Optional[List[Any]] = None):\n \"\"\"Do weighted summation of multiple different sized tensors.\n\n A weight is assigned for each connection (i.e., each input tensor), and their\n summation weights are learned. Uses spatial max pooling and 1x1 conv.\n to match their sizes.\n\n Args:\n inputs: A `Tensor`. Either 4D or 5D, depending of use_5d_mode.\n index: `int` index of the block within the AssembleNet architecture. Used\n for summation weight initial loading.\n use_5d_mode: `bool` indicating whether the inputs are in 5D tensor or 4D.\n model_edge_weights: AssembleNet model structure connection weights in the\n string format.\n\n Returns:\n The output `Tensor` after concatenation.\n \"\"\"\n\n if use_5d_mode:\n h_channel_loc = 2\n conv_function = conv3d_same_padding\n else:\n h_channel_loc = 1\n conv_function = conv2d_fixed_padding\n\n # If only 1 input.\n if len(inputs) == 1:\n return inputs[0]\n\n # get smallest spatial size and largest channels\n sm_size = [10000, 10000]\n lg_channel = 0\n for inp in inputs:\n # assume batch X height x width x channels\n sm_size[0] = min(sm_size[0], inp.shape[h_channel_loc])\n sm_size[1] = min(sm_size[1], inp.shape[h_channel_loc + 1])\n lg_channel = max(lg_channel, inp.shape[-1])\n\n per_channel_inps = _ApplyEdgeWeight(\n weights_shape=[len(inputs)],\n index=index,\n use_5d_mode=use_5d_mode,\n model_edge_weights=model_edge_weights)(\n inputs)\n\n # Adding 1x1 conv layers (to match channel size) and fusing all inputs.\n # We add inputs with the same channels first before applying 1x1 conv to save\n # memory.\n inps = []\n for key, channel_inps in per_channel_inps.items():\n if len(channel_inps) < 1:\n continue\n if len(channel_inps) == 1:\n if key == lg_channel:\n inp = channel_inps[0]\n else:\n inp = conv_function(\n channel_inps[0], lg_channel, kernel_size=1, strides=1)\n inps.append(inp)\n else:\n if key == lg_channel:\n inp = tf.add_n(channel_inps)\n else:\n inp = conv_function(\n tf.add_n(channel_inps), lg_channel, kernel_size=1, strides=1)\n inps.append(inp)\n\n return tf.add_n(inps)\n\n\ndef rgb_conv_stem(inputs,\n num_frames,\n filters,\n temporal_dilation,\n bn_decay: float = rf.BATCH_NORM_DECAY,\n bn_epsilon: float = rf.BATCH_NORM_EPSILON,\n use_sync_bn: bool = False):\n \"\"\"Layers for a RGB stem.\n\n Args:\n inputs: A `Tensor` of size `[batch*time, height, width, channels]`.\n num_frames: `int` number of frames in the input tensor.\n filters: `int` number of filters in the convolution.\n temporal_dilation: `int` temporal dilatioin size for the 1D conv.\n bn_decay: `float` batch norm decay parameter to use.\n bn_epsilon: `float` batch norm epsilon parameter to 
use.\n use_sync_bn: use synchronized batch norm for TPU.\n\n Returns:\n The output `Tensor`.\n \"\"\"\n data_format = tf.keras.backend.image_data_format()\n assert data_format == 'channels_last'\n\n if temporal_dilation < 1:\n temporal_dilation = 1\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=7, strides=2)\n inputs = tf.identity(inputs, 'initial_conv')\n inputs = rf.build_batch_norm(\n bn_decay=bn_decay, bn_epsilon=bn_epsilon, use_sync_bn=use_sync_bn)(\n inputs)\n inputs = tf.nn.relu(inputs)\n\n inputs = reshape_temporal_conv1d_bn(\n inputs=inputs,\n filters=filters,\n kernel_size=5,\n num_frames=num_frames,\n temporal_dilation=temporal_dilation,\n bn_decay=bn_decay,\n bn_epsilon=bn_epsilon,\n use_sync_bn=use_sync_bn)\n\n inputs = tf.keras.layers.MaxPool2D(\n pool_size=3, strides=2, padding='SAME')(\n inputs=inputs)\n inputs = tf.identity(inputs, 'initial_max_pool')\n\n return inputs\n\n\ndef flow_conv_stem(inputs,\n filters,\n temporal_dilation,\n bn_decay: float = rf.BATCH_NORM_DECAY,\n bn_epsilon: float = rf.BATCH_NORM_EPSILON,\n use_sync_bn: bool = False):\n \"\"\"Layers for an optical flow stem.\n\n Args:\n inputs: A `Tensor` of size `[batch*time, height, width, channels]`.\n filters: `int` number of filters in the convolution.\n temporal_dilation: `int` temporal dilatioin size for the 1D conv.\n bn_decay: `float` batch norm decay parameter to use.\n bn_epsilon: `float` batch norm epsilon parameter to use.\n use_sync_bn: use synchronized batch norm for TPU.\n\n Returns:\n The output `Tensor`.\n \"\"\"\n\n if temporal_dilation < 1:\n temporal_dilation = 1\n\n inputs = conv2d_fixed_padding(\n inputs=inputs, filters=filters, kernel_size=7, strides=2)\n inputs = tf.identity(inputs, 'initial_conv')\n inputs = rf.build_batch_norm(\n bn_decay=bn_decay, bn_epsilon=bn_epsilon, use_sync_bn=use_sync_bn)(\n inputs)\n inputs = tf.nn.relu(inputs)\n\n inputs = tf.keras.layers.MaxPool2D(\n pool_size=2, strides=2, padding='SAME')(\n inputs=inputs)\n inputs = tf.identity(inputs, 'initial_max_pool')\n\n return inputs\n\n\ndef multi_stream_heads(streams,\n final_nodes,\n num_frames,\n num_classes,\n max_pool_predictions: bool = False):\n \"\"\"Layers for the classification heads.\n\n Args:\n streams: A list of 4D `Tensors` following the data_format.\n final_nodes: A list of `int` where classification heads will be added.\n num_frames: `int` number of frames in the input tensor.\n num_classes: `int` number of possible classes for video classification.\n max_pool_predictions: Use max-pooling on predictions instead of mean\n pooling on features. 
It helps if you have more than 32 frames.\n\n Returns:\n The output `Tensor`.\n \"\"\"\n inputs = streams[final_nodes[0]]\n num_channels = inputs.shape[-1]\n\n def _pool_and_reshape(net):\n # The activation is 7x7 so this is a global average pool.\n net = tf.keras.layers.GlobalAveragePooling2D()(inputs=net)\n net = tf.identity(net, 'final_avg_pool0')\n\n net = tf.reshape(net, [-1, num_frames, num_channels])\n if not max_pool_predictions:\n net = tf.reduce_mean(net, 1)\n return net\n\n outputs = _pool_and_reshape(inputs)\n\n for i in range(1, len(final_nodes)):\n inputs = streams[final_nodes[i]]\n\n inputs = _pool_and_reshape(inputs)\n\n outputs = outputs + inputs\n\n if len(final_nodes) > 1:\n outputs = outputs / len(final_nodes)\n\n outputs = tf.keras.layers.Dense(\n units=num_classes,\n kernel_initializer=tf.random_normal_initializer(stddev=.01))(\n inputs=outputs)\n outputs = tf.identity(outputs, 'final_dense0')\n if max_pool_predictions:\n pre_logits = outputs / np.sqrt(num_frames)\n acts = tf.nn.softmax(pre_logits, axis=1)\n outputs = tf.math.multiply(outputs, acts)\n\n outputs = tf.reduce_sum(outputs, 1)\n\n return outputs\n\n\nclass AssembleNet(tf.keras.Model):\n \"\"\"AssembleNet backbone.\"\"\"\n\n def __init__(\n self,\n block_fn,\n num_blocks: List[int],\n num_frames: int,\n model_structure: List[Any],\n input_specs: layers.InputSpec = layers.InputSpec(\n shape=[None, None, None, None, 3]),\n model_edge_weights: Optional[List[Any]] = None,\n bn_decay: float = rf.BATCH_NORM_DECAY,\n bn_epsilon: float = rf.BATCH_NORM_EPSILON,\n use_sync_bn: bool = False,\n combine_method: str = 'sigmoid',\n **kwargs):\n \"\"\"Generator for AssembleNet v1 models.\n\n Args:\n block_fn: `function` for the block to use within the model. Currently only\n has `bottleneck_block_interleave as its option`.\n num_blocks: list of 4 `int`s denoting the number of blocks to include in\n each of the 4 block groups. 
Each group consists of blocks that take\n inputs of the same resolution.\n num_frames: the number of frames in the input tensor.\n model_structure: AssembleNet model structure in the string format.\n input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.\n Dimension should be `[batch*time, height, width, channels]`.\n model_edge_weights: AssembleNet model structure connection weights in the\n string format.\n bn_decay: `float` batch norm decay parameter to use.\n bn_epsilon: `float` batch norm epsilon parameter to use.\n use_sync_bn: use synchronized batch norm for TPU.\n combine_method: 'str' for the weighted summation to fuse different blocks.\n **kwargs: pass through arguments.\n \"\"\"\n inputs = tf.keras.Input(shape=input_specs.shape[1:])\n data_format = tf.keras.backend.image_data_format()\n\n # Creation of the model graph.\n logging.info('model_structure=%r', model_structure)\n logging.info('model_structure=%r', model_structure)\n logging.info('model_edge_weights=%r', model_edge_weights)\n structure = model_structure\n\n original_num_frames = num_frames\n assert num_frames > 0, f'Invalid num_frames {num_frames}'\n\n grouping = {-3: [], -2: [], -1: [], 0: [], 1: [], 2: [], 3: []}\n for i in range(len(structure)):\n grouping[structure[i][0]].append(i)\n\n stem_count = len(grouping[-3]) + len(grouping[-2]) + len(grouping[-1])\n\n assert stem_count != 0\n stem_filters = 128 // stem_count\n\n original_inputs = inputs\n if len(input_specs.shape) == 5:\n first_dim = (\n input_specs.shape[0] * input_specs.shape[1]\n if input_specs.shape[0] and input_specs.shape[1] else -1)\n reshape_inputs = tf.reshape(inputs, (first_dim,) + input_specs.shape[2:])\n elif len(input_specs.shape) == 4:\n reshape_inputs = original_inputs\n else:\n raise ValueError(\n f'Expect input spec to be 4 or 5 dimensions {input_specs.shape}')\n if grouping[-2]:\n # Instead of loading optical flows as inputs from data pipeline, we are\n # applying the \"Representation Flow\" to RGB frames so that we can compute\n # the flow within TPU/GPU on fly. 
It's essentially optical flow since we\n # do it with RGBs.\n axis = 3 if data_format == 'channels_last' else 1\n flow_inputs = rf.RepresentationFlow(\n original_num_frames,\n depth=reshape_inputs.shape.as_list()[axis],\n num_iter=40,\n bottleneck=1)(\n reshape_inputs)\n streams = []\n\n for i in range(len(structure)):\n with tf.name_scope('Node_' + str(i)):\n if structure[i][0] == -1:\n inputs = rgb_conv_stem(\n reshape_inputs,\n original_num_frames,\n stem_filters,\n temporal_dilation=structure[i][1],\n bn_decay=bn_decay,\n bn_epsilon=bn_epsilon,\n use_sync_bn=use_sync_bn)\n streams.append(inputs)\n elif structure[i][0] == -2:\n inputs = flow_conv_stem(\n flow_inputs,\n stem_filters,\n temporal_dilation=structure[i][1],\n bn_decay=bn_decay,\n bn_epsilon=bn_epsilon,\n use_sync_bn=use_sync_bn)\n streams.append(inputs)\n\n else:\n num_frames = original_num_frames\n block_number = structure[i][0]\n\n combined_inputs = []\n if combine_method == 'concat':\n combined_inputs = [\n streams[structure[i][1][j]]\n for j in range(0, len(structure[i][1]))\n ]\n\n combined_inputs = spatial_resize_and_concat(combined_inputs)\n\n else:\n combined_inputs = [\n streams[structure[i][1][j]]\n for j in range(0, len(structure[i][1]))\n ]\n\n combined_inputs = multi_connection_fusion(\n combined_inputs, index=i, model_edge_weights=model_edge_weights)\n\n graph = block_group(\n inputs=combined_inputs,\n filters=structure[i][2],\n block_fn=block_fn,\n blocks=num_blocks[block_number],\n strides=structure[i][4],\n name='block_group' + str(i),\n block_level=structure[i][0],\n num_frames=num_frames,\n temporal_dilation=structure[i][3])\n\n streams.append(graph)\n\n super(AssembleNet, self).__init__(\n inputs=original_inputs, outputs=streams, **kwargs)\n\n\nclass AssembleNetModel(tf.keras.Model):\n \"\"\"An AssembleNet model builder.\"\"\"\n\n def __init__(self,\n backbone,\n num_classes,\n num_frames: int,\n model_structure: List[Any],\n input_specs: Optional[Mapping[str,\n tf.keras.layers.InputSpec]] = None,\n max_pool_predictions: bool = False,\n **kwargs):\n if not input_specs:\n input_specs = {\n 'image': layers.InputSpec(shape=[None, None, None, None, 3])\n }\n self._self_setattr_tracking = False\n self._config_dict = {\n 'backbone': backbone,\n 'num_classes': num_classes,\n 'num_frames': num_frames,\n 'input_specs': input_specs,\n 'model_structure': model_structure,\n }\n self._input_specs = input_specs\n self._backbone = backbone\n grouping = {-3: [], -2: [], -1: [], 0: [], 1: [], 2: [], 3: []}\n for i in range(len(model_structure)):\n grouping[model_structure[i][0]].append(i)\n\n inputs = {\n k: tf.keras.Input(shape=v.shape[1:]) for k, v in input_specs.items()\n }\n streams = self._backbone(inputs['image'])\n\n outputs = multi_stream_heads(\n streams,\n grouping[3],\n num_frames,\n num_classes,\n max_pool_predictions=max_pool_predictions)\n\n super(AssembleNetModel, self).__init__(\n inputs=inputs, outputs=outputs, **kwargs)\n\n @property\n def checkpoint_items(self):\n \"\"\"Returns a dictionary of items to be additionally checkpointed.\"\"\"\n return dict(backbone=self.backbone)\n\n @property\n def backbone(self):\n return self._backbone\n\n def get_config(self):\n return self._config_dict\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n return cls(**config)\n\n\nASSEMBLENET_SPECS = {\n 26: {\n 'block': bottleneck_block_interleave,\n 'num_blocks': [2, 2, 2, 2]\n },\n 38: {\n 'block': bottleneck_block_interleave,\n 'num_blocks': [2, 4, 4, 2]\n },\n 50: {\n 'block': 
bottleneck_block_interleave,\n 'num_blocks': [3, 4, 6, 3]\n },\n 68: {\n 'block': bottleneck_block_interleave,\n 'num_blocks': [3, 4, 12, 3]\n },\n 77: {\n 'block': bottleneck_block_interleave,\n 'num_blocks': [3, 4, 15, 3]\n },\n 101: {\n 'block': bottleneck_block_interleave,\n 'num_blocks': [3, 4, 23, 3]\n },\n}\n\n\ndef assemblenet_v1(assemblenet_depth: int,\n num_classes: int,\n num_frames: int,\n model_structure: List[Any],\n input_specs: layers.InputSpec = layers.InputSpec(\n shape=[None, None, None, None, 3]),\n model_edge_weights: Optional[List[Any]] = None,\n max_pool_predictions: bool = False,\n combine_method: str = 'sigmoid',\n **kwargs):\n \"\"\"Returns the AssembleNet model for a given size and number of output classes.\"\"\"\n\n data_format = tf.keras.backend.image_data_format()\n assert data_format == 'channels_last'\n\n if assemblenet_depth not in ASSEMBLENET_SPECS:\n raise ValueError('Not a valid assemblenet_depth:', assemblenet_depth)\n\n input_specs_dict = {'image': input_specs}\n params = ASSEMBLENET_SPECS[assemblenet_depth]\n backbone = AssembleNet(\n block_fn=params['block'],\n num_blocks=params['num_blocks'],\n num_frames=num_frames,\n model_structure=model_structure,\n input_specs=input_specs,\n model_edge_weights=model_edge_weights,\n combine_method=combine_method,\n **kwargs)\n return AssembleNetModel(\n backbone,\n num_classes=num_classes,\n num_frames=num_frames,\n model_structure=model_structure,\n input_specs=input_specs_dict,\n max_pool_predictions=max_pool_predictions,\n **kwargs)\n\n\n@backbone_factory.register_backbone_builder('assemblenet')\ndef build_assemblenet_v1(\n input_specs: tf.keras.layers.InputSpec,\n backbone_config: hyperparams.Config,\n norm_activation_config: hyperparams.Config,\n l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None\n) -> tf.keras.Model:\n \"\"\"Builds assemblenet backbone.\"\"\"\n del l2_regularizer\n\n backbone_type = backbone_config.type\n backbone_cfg = backbone_config.get()\n assert 'assemblenet' in backbone_type\n\n assemblenet_depth = int(backbone_cfg.model_id)\n if assemblenet_depth not in ASSEMBLENET_SPECS:\n raise ValueError('Not a valid assemblenet_depth:', assemblenet_depth)\n model_structure, model_edge_weights = cfg.blocks_to_flat_lists(\n backbone_cfg.blocks)\n params = ASSEMBLENET_SPECS[assemblenet_depth]\n block_fn = functools.partial(\n params['block'],\n use_sync_bn=norm_activation_config.use_sync_bn,\n bn_decay=norm_activation_config.norm_momentum,\n bn_epsilon=norm_activation_config.norm_epsilon)\n backbone = AssembleNet(\n block_fn=block_fn,\n num_blocks=params['num_blocks'],\n num_frames=backbone_cfg.num_frames,\n model_structure=model_structure,\n input_specs=input_specs,\n model_edge_weights=model_edge_weights,\n combine_method=backbone_cfg.combine_method,\n use_sync_bn=norm_activation_config.use_sync_bn,\n bn_decay=norm_activation_config.norm_momentum,\n bn_epsilon=norm_activation_config.norm_epsilon)\n logging.info('Number of parameters in AssembleNet backbone: %f M.',\n backbone.count_params() / 10.**6)\n return backbone\n\n\n@model_factory.register_model_builder('assemblenet')\ndef build_assemblenet_model(\n input_specs: tf.keras.layers.InputSpec,\n model_config: cfg.AssembleNetModel,\n num_classes: int,\n l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None):\n \"\"\"Builds assemblenet model.\"\"\"\n input_specs_dict = {'image': input_specs}\n backbone = build_assemblenet_v1(input_specs, model_config.backbone,\n model_config.norm_activation, l2_regularizer)\n 
backbone_cfg = model_config.backbone.get()\n model_structure, _ = cfg.blocks_to_flat_lists(backbone_cfg.blocks)\n model = AssembleNetModel(\n backbone,\n num_classes=num_classes,\n num_frames=backbone_cfg.num_frames,\n model_structure=model_structure,\n input_specs=input_specs_dict,\n max_pool_predictions=model_config.max_pool_predictions)\n return model\n",
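The AssembleNet builder above splits its `model_structure` nodes into level groups (stems at levels -3/-2/-1, blocks at 0..3) and divides 128 stem filters evenly across however many stems the structure declares. A minimal pure-Python sketch of that grouping step, using a made-up `model_structure` rather than one of the published AssembleNet structures:

```python
# Sketch of the structure-grouping logic in the AssembleNet constructor.
# The `model_structure` literal below is illustrative only.
model_structure = [
    [-1, 1],                 # RGB stem, temporal dilation 1
    [-2, 1],                 # flow stem, temporal dilation 1
    [0, [0, 1], 64, 1, 2],   # block fusing both stems
    [3, [2], 256, 1, 2],     # output-level block
]

grouping = {-3: [], -2: [], -1: [], 0: [], 1: [], 2: [], 3: []}
for i, node in enumerate(model_structure):
  grouping[node[0]].append(i)

stem_count = len(grouping[-3]) + len(grouping[-2]) + len(grouping[-1])
stem_filters = 128 // stem_count  # stem filters are split evenly

print(grouping)      # {-3: [], -2: [1], -1: [0], 0: [2], 1: [], 2: [], 3: [3]}
print(stem_filters)  # 64
```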
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for official.core.export_saved_model_lib.\"\"\"\n\nimport os\nfrom unittest import mock\n\nimport tensorflow as tf\n\nfrom official.core import export_base\nfrom official.vision.beta import configs\nfrom official.vision.beta.serving import export_saved_model_lib\n\n\nclass WriteModelFlopsAndParamsTest(tf.test.TestCase):\n\n def setUp(self):\n super().setUp()\n self.tempdir = self.create_tempdir()\n self.enter_context(\n mock.patch.object(export_base, 'export', autospec=True, spec_set=True))\n\n def _export_model_with_log_model_flops_and_params(self, params):\n export_saved_model_lib.export_inference_graph(\n input_type='image_tensor',\n batch_size=1,\n input_image_size=[64, 64],\n params=params,\n checkpoint_path=os.path.join(self.tempdir, 'unused-ckpt'),\n export_dir=self.tempdir,\n log_model_flops_and_params=True)\n\n def assertModelAnalysisFilesExist(self):\n self.assertTrue(\n tf.io.gfile.exists(os.path.join(self.tempdir, 'model_params.txt')))\n self.assertTrue(\n tf.io.gfile.exists(os.path.join(self.tempdir, 'model_flops.txt')))\n\n def test_retinanet_task(self):\n params = configs.retinanet.retinanet_resnetfpn_coco()\n params.task.model.backbone.resnet.model_id = 18\n params.task.model.num_classes = 2\n params.task.model.max_level = 6\n self._export_model_with_log_model_flops_and_params(params)\n self.assertModelAnalysisFilesExist()\n\n def test_maskrcnn_task(self):\n params = configs.maskrcnn.maskrcnn_resnetfpn_coco()\n params.task.model.backbone.resnet.model_id = 18\n params.task.model.num_classes = 2\n params.task.model.max_level = 6\n self._export_model_with_log_model_flops_and_params(params)\n self.assertModelAnalysisFilesExist()\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"TriviaQA training script.\"\"\"\nimport collections\nimport contextlib\nimport functools\nimport json\nimport operator\nimport os\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport gin\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\n\nimport sentencepiece as spm\nfrom official.nlp import optimization as nlp_optimization\nfrom official.nlp.configs import encoders\nfrom official.projects.triviaqa import evaluation\nfrom official.projects.triviaqa import inputs\nfrom official.projects.triviaqa import modeling\nfrom official.projects.triviaqa import prediction\n\nflags.DEFINE_string('data_dir', None, 'Data directory for TensorFlow Datasets.')\n\nflags.DEFINE_string(\n 'validation_gold_path', None,\n 'Path to golden validation. Usually, the wikipedia-dev.json file.')\n\nflags.DEFINE_string('model_dir', None,\n 'Directory for checkpoints and summaries.')\n\nflags.DEFINE_string('model_config_path', None,\n 'JSON file containing model coniguration.')\n\nflags.DEFINE_string('sentencepiece_model_path', None,\n 'Path to sentence piece model.')\n\nflags.DEFINE_enum('encoder', 'bigbird',\n ['bert', 'bigbird', 'albert', 'mobilebert'],\n 'Which transformer encoder model to use.')\n\nflags.DEFINE_integer('bigbird_block_size', 64,\n 'Size of blocks for sparse block attention.')\n\nflags.DEFINE_string('init_checkpoint_path', None,\n 'Path from which to initialize weights.')\n\nflags.DEFINE_integer('train_sequence_length', 4096,\n 'Maximum number of tokens for training.')\n\nflags.DEFINE_integer('train_global_sequence_length', 320,\n 'Maximum number of global tokens for training.')\n\nflags.DEFINE_integer('validation_sequence_length', 4096,\n 'Maximum number of tokens for validation.')\n\nflags.DEFINE_integer('validation_global_sequence_length', 320,\n 'Maximum number of global tokens for validation.')\n\nflags.DEFINE_integer('batch_size', 32, 'Size of batch.')\n\nflags.DEFINE_string('master', '', 'Address of the TPU master.')\n\nflags.DEFINE_integer('decode_top_k', 8,\n 'Maximum number of tokens to consider for begin/end.')\n\nflags.DEFINE_integer('decode_max_size', 16,\n 'Maximum number of sentence pieces in an answer.')\n\nflags.DEFINE_float('dropout_rate', 0.1, 'Dropout rate for hidden layers.')\n\nflags.DEFINE_float('attention_dropout_rate', 0.3,\n 'Dropout rate for attention layers.')\n\nflags.DEFINE_float('label_smoothing', 1e-1, 'Degree of label smoothing.')\n\nflags.DEFINE_multi_string(\n 'gin_bindings', [],\n 'Gin bindings to override the values set in the config files')\n\nFLAGS = flags.FLAGS\n\n\[email protected]\ndef worker_context():\n if FLAGS.master:\n with tf.device('/job:worker') as d:\n yield d\n else:\n yield\n\n\ndef read_sentencepiece_model(path):\n with tf.io.gfile.GFile(path, 'rb') as file:\n processor = spm.SentencePieceProcessor()\n processor.LoadFromSerializedProto(file.read())\n return processor\n\n\n# Rename old BERT 
v1 configuration parameters.\n_MODEL_CONFIG_REPLACEMENTS = {\n 'num_hidden_layers': 'num_layers',\n 'attention_probs_dropout_prob': 'attention_dropout_rate',\n 'hidden_dropout_prob': 'dropout_rate',\n 'hidden_act': 'hidden_activation',\n 'window_size': 'block_size',\n}\n\n\ndef read_model_config(encoder,\n path,\n bigbird_block_size=None) -> encoders.EncoderConfig:\n \"\"\"Merges the JSON configuration into the encoder configuration.\"\"\"\n with tf.io.gfile.GFile(path) as f:\n model_config = json.load(f)\n for key, value in _MODEL_CONFIG_REPLACEMENTS.items():\n if key in model_config:\n model_config[value] = model_config.pop(key)\n model_config['attention_dropout_rate'] = FLAGS.attention_dropout_rate\n model_config['dropout_rate'] = FLAGS.dropout_rate\n model_config['block_size'] = bigbird_block_size\n encoder_config = encoders.EncoderConfig(type=encoder)\n # Override the default config with those loaded from the JSON file.\n encoder_config_keys = encoder_config.get().as_dict().keys()\n overrides = {}\n for key, value in model_config.items():\n if key in encoder_config_keys:\n overrides[key] = value\n else:\n logging.warning('Ignoring config parameter %s=%s', key, value)\n encoder_config.get().override(overrides)\n return encoder_config\n\n\[email protected](denylist=[\n 'model',\n 'strategy',\n 'train_dataset',\n 'model_dir',\n 'init_checkpoint_path',\n 'evaluate_fn',\n])\ndef fit(model,\n strategy,\n train_dataset,\n model_dir,\n init_checkpoint_path=None,\n evaluate_fn=None,\n learning_rate=1e-5,\n learning_rate_polynomial_decay_rate=1.,\n weight_decay_rate=1e-1,\n num_warmup_steps=5000,\n num_decay_steps=51000,\n num_epochs=6):\n \"\"\"Train and evaluate.\"\"\"\n hparams = dict(\n learning_rate=learning_rate,\n num_decay_steps=num_decay_steps,\n num_warmup_steps=num_warmup_steps,\n num_epochs=num_epochs,\n weight_decay_rate=weight_decay_rate,\n dropout_rate=FLAGS.dropout_rate,\n attention_dropout_rate=FLAGS.attention_dropout_rate,\n label_smoothing=FLAGS.label_smoothing)\n logging.info(hparams)\n learning_rate_schedule = nlp_optimization.WarmUp(\n learning_rate,\n tf.keras.optimizers.schedules.PolynomialDecay(\n learning_rate,\n num_decay_steps,\n end_learning_rate=0.,\n power=learning_rate_polynomial_decay_rate), num_warmup_steps)\n with strategy.scope():\n optimizer = nlp_optimization.AdamWeightDecay(\n learning_rate_schedule,\n weight_decay_rate=weight_decay_rate,\n epsilon=1e-6,\n exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'])\n model.compile(optimizer, loss=modeling.SpanOrCrossEntropyLoss())\n\n def init_fn(init_checkpoint_path):\n ckpt = tf.train.Checkpoint(encoder=model.encoder)\n ckpt.restore(init_checkpoint_path).assert_existing_objects_matched()\n\n with worker_context():\n ckpt_manager = tf.train.CheckpointManager(\n tf.train.Checkpoint(model=model, optimizer=optimizer),\n model_dir,\n max_to_keep=None,\n init_fn=(functools.partial(init_fn, init_checkpoint_path)\n if init_checkpoint_path else None))\n with strategy.scope():\n ckpt_manager.restore_or_initialize()\n val_summary_writer = tf.summary.create_file_writer(\n os.path.join(model_dir, 'val'))\n best_exact_match = 0.\n for epoch in range(len(ckpt_manager.checkpoints), num_epochs):\n model.fit(\n train_dataset,\n callbacks=[\n tf.keras.callbacks.TensorBoard(model_dir, write_graph=False),\n ])\n ckpt_path = ckpt_manager.save()\n if evaluate_fn is None:\n continue\n metrics = evaluate_fn()\n logging.info('Epoch %d: %s', epoch + 1, metrics)\n if best_exact_match < metrics['exact_match']:\n best_exact_match 
= metrics['exact_match']\n model.save(os.path.join(model_dir, 'export'), include_optimizer=False)\n logging.info('Exporting %s as SavedModel.', ckpt_path)\n with val_summary_writer.as_default():\n for name, data in metrics.items():\n tf.summary.scalar(name, data, epoch + 1)\n\n\ndef evaluate(sp_processor, features_map_fn, labels_map_fn, logits_fn,\n decode_logits_fn, split_and_pad_fn, distribute_strategy,\n validation_dataset, ground_truth):\n \"\"\"Run evaluation.\"\"\"\n loss_metric = tf.keras.metrics.Mean()\n\n @tf.function\n def update_loss(y, logits):\n loss_fn = modeling.SpanOrCrossEntropyLoss(\n reduction=tf.keras.losses.Reduction.NONE)\n return loss_metric(loss_fn(y, logits))\n\n predictions = collections.defaultdict(list)\n for _, (features, labels) in validation_dataset.enumerate():\n token_ids = features['token_ids']\n y = labels_map_fn(token_ids, labels)\n x = split_and_pad_fn(features_map_fn(features))\n logits = tf.concat(\n distribute_strategy.experimental_local_results(logits_fn(x)), 0)\n logits = logits[:features['token_ids'].shape[0]]\n update_loss(y, logits)\n end_limit = token_ids.row_lengths() - 1 # inclusive\n begin, end, scores = decode_logits_fn(logits, end_limit)\n answers = prediction.decode_answer(features['context'], begin, end,\n features['token_offsets'],\n end_limit).numpy()\n for _, (qid, token_id, offset, score, answer) in enumerate(\n zip(features['qid'].numpy(),\n tf.gather(features['token_ids'], begin, batch_dims=1).numpy(),\n tf.gather(features['token_offsets'], begin, batch_dims=1).numpy(),\n scores, answers)):\n if not answer:\n continue\n if sp_processor.IdToPiece(int(token_id)).startswith('▁') and offset > 0:\n answer = answer[1:]\n predictions[qid.decode('utf-8')].append((score, answer.decode('utf-8')))\n predictions = {\n qid: evaluation.normalize_answer(\n sorted(answers, key=operator.itemgetter(0), reverse=True)[0][1])\n for qid, answers in predictions.items()\n }\n metrics = evaluation.evaluate_triviaqa(ground_truth, predictions, mute=True)\n metrics['loss'] = loss_metric.result().numpy()\n return metrics\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n gin.parse_config(FLAGS.gin_bindings)\n model_config = read_model_config(\n FLAGS.encoder,\n FLAGS.model_config_path,\n bigbird_block_size=FLAGS.bigbird_block_size)\n logging.info(model_config.get().as_dict())\n # Configure input processing.\n sp_processor = read_sentencepiece_model(FLAGS.sentencepiece_model_path)\n features_map_fn = functools.partial(\n inputs.features_map_fn,\n local_radius=FLAGS.bigbird_block_size,\n relative_pos_max_distance=24,\n use_hard_g2l_mask=True,\n padding_id=sp_processor.PieceToId('<pad>'),\n eos_id=sp_processor.PieceToId('</s>'),\n null_id=sp_processor.PieceToId('<empty>'),\n cls_id=sp_processor.PieceToId('<ans>'),\n sep_id=sp_processor.PieceToId('<sep_0>'))\n train_features_map_fn = tf.function(\n functools.partial(\n features_map_fn,\n sequence_length=FLAGS.train_sequence_length,\n global_sequence_length=FLAGS.train_global_sequence_length),\n autograph=False)\n train_labels_map_fn = tf.function(\n functools.partial(\n inputs.labels_map_fn, sequence_length=FLAGS.train_sequence_length))\n # Connect to TPU cluster.\n if FLAGS.master:\n resolver = tf.distribute.cluster_resolver.TPUClusterResolver(FLAGS.master)\n tf.config.experimental_connect_to_cluster(resolver)\n tf.tpu.experimental.initialize_tpu_system(resolver)\n strategy = tf.distribute.TPUStrategy(resolver)\n else:\n strategy = 
tf.distribute.MirroredStrategy()\n # Initialize datasets.\n with worker_context():\n _ = tf.random.get_global_generator()\n train_dataset = inputs.read_batches(\n FLAGS.data_dir,\n tfds.Split.TRAIN,\n FLAGS.batch_size,\n shuffle=True,\n drop_final_batch=True)\n validation_dataset = inputs.read_batches(FLAGS.data_dir,\n tfds.Split.VALIDATION,\n FLAGS.batch_size)\n\n def train_map_fn(x, y):\n features = train_features_map_fn(x)\n labels = modeling.smooth_labels(FLAGS.label_smoothing,\n train_labels_map_fn(x['token_ids'], y),\n features['question_lengths'],\n features['token_ids'])\n return features, labels\n\n train_dataset = train_dataset.map(train_map_fn, 16).prefetch(16)\n # Initialize model and compile.\n with strategy.scope():\n model = modeling.TriviaQaModel(model_config, FLAGS.train_sequence_length)\n logits_fn = tf.function(\n functools.partial(prediction.distributed_logits_fn, model))\n decode_logits_fn = tf.function(\n functools.partial(prediction.decode_logits, FLAGS.decode_top_k,\n FLAGS.decode_max_size))\n split_and_pad_fn = tf.function(\n functools.partial(prediction.split_and_pad, strategy, FLAGS.batch_size))\n # Evaluation strategy.\n with tf.io.gfile.GFile(FLAGS.validation_gold_path) as f:\n ground_truth = {\n datum['QuestionId']: datum['Answer'] for datum in json.load(f)['Data']\n }\n validation_features_map_fn = tf.function(\n functools.partial(\n features_map_fn,\n sequence_length=FLAGS.validation_sequence_length,\n global_sequence_length=FLAGS.validation_global_sequence_length),\n autograph=False)\n validation_labels_map_fn = tf.function(\n functools.partial(\n inputs.labels_map_fn,\n sequence_length=FLAGS.validation_sequence_length))\n evaluate_fn = functools.partial(\n evaluate,\n sp_processor=sp_processor,\n features_map_fn=validation_features_map_fn,\n labels_map_fn=validation_labels_map_fn,\n logits_fn=logits_fn,\n decode_logits_fn=decode_logits_fn,\n split_and_pad_fn=split_and_pad_fn,\n distribute_strategy=strategy,\n validation_dataset=validation_dataset,\n ground_truth=ground_truth)\n logging.info('Model initialized. Beginning training fit loop.')\n fit(model, strategy, train_dataset, FLAGS.model_dir,\n FLAGS.init_checkpoint_path, evaluate_fn)\n\n\nif __name__ == '__main__':\n flags.mark_flags_as_required([\n 'model_config_path', 'model_dir', 'sentencepiece_model_path',\n 'validation_gold_path'\n ])\n app.run(main)\n",
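The `fit()` function above wraps a polynomial decay schedule in a warmup (`nlp_optimization.WarmUp`) before handing it to `AdamWeightDecay`. A small sketch of a schedule with the same shape, with the linear warmup hand-rolled instead of using the project's `WarmUp` class; the step counts mirror the defaults in `fit()` but are otherwise illustrative:

```python
import tensorflow as tf

peak_lr = 1e-5
num_warmup_steps = 5000
num_decay_steps = 51000

decay = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=peak_lr,
    decay_steps=num_decay_steps,
    end_learning_rate=0.0,
    power=1.0)  # matches learning_rate_polynomial_decay_rate=1.


def learning_rate(step: int) -> float:
  """Linear warmup to peak_lr, then polynomial decay (hand-rolled sketch)."""
  if step < num_warmup_steps:
    return peak_lr * step / num_warmup_steps
  return float(decay(step))


for step in (0, 2500, 5000, 30000, 51000):
  print(step, learning_rate(step))
```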
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Misc for Transformer.\"\"\"\n\n# pylint: disable=g-bad-import-order\n\nfrom absl import flags\nimport tensorflow as tf\n\nfrom official.legacy.transformer import model_params\nfrom official.utils.flags import core as flags_core\nfrom official.utils.misc import keras_utils\n\nFLAGS = flags.FLAGS\n\nPARAMS_MAP = {\n 'tiny': model_params.TINY_PARAMS,\n 'base': model_params.BASE_PARAMS,\n 'big': model_params.BIG_PARAMS,\n}\n\n\ndef get_model_params(param_set, num_gpus):\n \"\"\"Gets predefined model params.\"\"\"\n if num_gpus > 1:\n if param_set == 'big':\n return model_params.BIG_MULTI_GPU_PARAMS.copy()\n elif param_set == 'base':\n return model_params.BASE_MULTI_GPU_PARAMS.copy()\n else:\n raise ValueError('Not valid params: param_set={} num_gpus={}'.format(\n param_set, num_gpus))\n\n return PARAMS_MAP[param_set].copy()\n\n\ndef define_transformer_flags():\n \"\"\"Add flags and flag validators for running transformer_main.\"\"\"\n # Add common flags (data_dir, model_dir, etc.).\n flags_core.define_base(num_gpu=True, distribution_strategy=True)\n flags_core.define_performance(\n num_parallel_calls=True,\n inter_op=False,\n intra_op=False,\n synthetic_data=True,\n max_train_steps=False,\n dtype=True,\n loss_scale=True,\n all_reduce_alg=True,\n num_packs=True,\n tf_gpu_thread_mode=True,\n datasets_num_private_threads=True,\n enable_xla=True,\n fp16_implementation=True)\n\n flags_core.define_benchmark()\n flags_core.define_device(tpu=True)\n\n flags.DEFINE_integer(\n name='train_steps',\n short_name='ts',\n default=300000,\n help=flags_core.help_wrap('The number of steps used to train.'))\n flags.DEFINE_integer(\n name='steps_between_evals',\n short_name='sbe',\n default=5000,\n help=flags_core.help_wrap(\n 'The Number of training steps to run between evaluations. This is '\n 'used if --train_steps is defined.'))\n flags.DEFINE_boolean(\n name='enable_time_history',\n default=True,\n help='Whether to enable TimeHistory callback.')\n flags.DEFINE_boolean(\n name='enable_tensorboard',\n default=False,\n help='Whether to enable Tensorboard callback.')\n flags.DEFINE_boolean(\n name='enable_metrics_in_training',\n default=False,\n help='Whether to enable metrics during training.')\n flags.DEFINE_boolean(\n name='enable_mlir_bridge',\n default=False,\n help='Whether to enable the TF to XLA bridge.')\n # Set flags from the flags_core module as 'key flags' so they're listed when\n # the '-h' flag is used. Without this line, the flags defined above are\n # only shown in the full `--helpful` help text.\n flags.adopt_module_key_flags(flags_core)\n\n # Add transformer-specific flags\n flags.DEFINE_enum(\n name='param_set',\n short_name='mp',\n default='big',\n enum_values=PARAMS_MAP.keys(),\n help=flags_core.help_wrap(\n 'Parameter set to use when creating and training the model. 
The '\n 'parameters define the input shape (batch size and max length), '\n 'model configuration (size of embedding, # of hidden layers, etc.), '\n 'and various other settings. The big parameter set increases the '\n 'default batch size, embedding/hidden size, and filter size. For a '\n 'complete list of parameters, please see model/model_params.py.'))\n\n flags.DEFINE_bool(\n name='static_batch',\n short_name='sb',\n default=False,\n help=flags_core.help_wrap(\n 'Whether the batches in the dataset should have static shapes. In '\n 'general, this setting should be False. Dynamic shapes allow the '\n 'inputs to be grouped so that the number of padding tokens is '\n 'minimized, and helps model training. In cases where the input shape '\n 'must be static (e.g. running on TPU), this setting will be ignored '\n 'and static batching will always be used.'))\n flags.DEFINE_integer(\n name='max_length',\n short_name='ml',\n default=256,\n help=flags_core.help_wrap(\n 'Max sentence length for Transformer. Default is 256. Note: Usually '\n 'it is more effective to use a smaller max length if static_batch is '\n 'enabled, e.g. 64.'))\n\n # Flags for training with steps (may be used for debugging)\n flags.DEFINE_integer(\n name='validation_steps',\n short_name='vs',\n default=64,\n help=flags_core.help_wrap('The number of steps used in validation.'))\n\n # BLEU score computation\n flags.DEFINE_string(\n name='bleu_source',\n short_name='bls',\n default=None,\n help=flags_core.help_wrap(\n 'Path to source file containing text translate when calculating the '\n 'official BLEU score. Both --bleu_source and --bleu_ref must be set. '\n ))\n flags.DEFINE_string(\n name='bleu_ref',\n short_name='blr',\n default=None,\n help=flags_core.help_wrap(\n 'Path to source file containing text translate when calculating the '\n 'official BLEU score. Both --bleu_source and --bleu_ref must be set. '\n ))\n flags.DEFINE_string(\n name='vocab_file',\n short_name='vf',\n default=None,\n help=flags_core.help_wrap(\n 'Path to subtoken vocabulary file. If data_download.py was used to '\n 'download and encode the training data, look in the data_dir to find '\n 'the vocab file.'))\n flags.DEFINE_string(\n name='mode',\n default='train',\n help=flags_core.help_wrap('mode: train, eval, or predict'))\n flags.DEFINE_bool(\n name='use_ctl',\n default=False,\n help=flags_core.help_wrap(\n 'Whether the model runs with custom training loop.'))\n flags.DEFINE_integer(\n name='decode_batch_size',\n default=32,\n help=flags_core.help_wrap(\n 'Global batch size used for Transformer autoregressive decoding on '\n 'TPU.'))\n flags.DEFINE_integer(\n name='decode_max_length',\n default=97,\n help=flags_core.help_wrap(\n 'Max sequence length of the decode/eval data. This is used by '\n 'Transformer autoregressive decoding on TPU to have minimum '\n 'paddings.'))\n flags.DEFINE_bool(\n name='padded_decode',\n default=False,\n help=flags_core.help_wrap(\n 'Whether the autoregressive decoding runs with input data padded to '\n 'the decode_max_length. For TPU/XLA-GPU runs, this flag has to be '\n 'set due the static shape requirement. Although CPU/GPU could also '\n 'use padded_decode, it has not been tested. In addition, this method '\n 'will introduce unnecessary overheads which grow quadratically with '\n 'the max sequence length.'))\n flags.DEFINE_bool(\n name='enable_checkpointing',\n default=True,\n help=flags_core.help_wrap(\n 'Whether to do checkpointing during training. 
When running under '\n 'benchmark harness, we will avoid checkpointing.'))\n flags.DEFINE_bool(\n name='save_weights_only',\n default=True,\n help=flags_core.help_wrap(\n 'Only used when above `enable_checkpointing` is True. '\n 'If True, then only the model\\'s weights will be saved '\n '(`model.save_weights(filepath)`), else the full model is saved '\n '(`model.save(filepath)`)'))\n\n flags_core.set_defaults(\n data_dir='/tmp/translate_ende',\n model_dir='/tmp/transformer_model',\n batch_size=None)\n\n # pylint: disable=unused-variable\n @flags.multi_flags_validator(\n ['bleu_source', 'bleu_ref'],\n message='Both or neither --bleu_source and --bleu_ref must be defined.')\n def _check_bleu_files(flags_dict):\n return (flags_dict['bleu_source'] is None) == (\n flags_dict['bleu_ref'] is None)\n\n @flags.multi_flags_validator(\n ['bleu_source', 'bleu_ref', 'vocab_file'],\n message='--vocab_file must be defined if --bleu_source and --bleu_ref '\n 'are defined.')\n def _check_bleu_vocab_file(flags_dict):\n if flags_dict['bleu_source'] and flags_dict['bleu_ref']:\n return flags_dict['vocab_file'] is not None\n return True\n\n # pylint: enable=unused-variable\n\n\ndef get_callbacks():\n \"\"\"Returns common callbacks.\"\"\"\n callbacks = []\n if FLAGS.enable_time_history:\n time_callback = keras_utils.TimeHistory(\n FLAGS.batch_size,\n FLAGS.log_steps,\n logdir=FLAGS.model_dir if FLAGS.enable_tensorboard else None)\n callbacks.append(time_callback)\n\n if FLAGS.enable_tensorboard:\n tensorboard_callback = tf.keras.callbacks.TensorBoard(\n log_dir=FLAGS.model_dir)\n callbacks.append(tensorboard_callback)\n\n return callbacks\n\n\ndef update_stats(history, stats, callbacks):\n \"\"\"Normalizes and updates dictionary of stats.\n\n Args:\n history: Results of the training step.\n stats: Dict with pre-existing training stats.\n callbacks: a list of callbacks which might include a time history callback\n used during keras.fit.\n \"\"\"\n\n if history and history.history:\n train_hist = history.history\n # Gets final loss from training.\n stats['loss'] = float(train_hist['loss'][-1])\n\n if not callbacks:\n return\n\n # Look for the time history callback which was used during keras.fit\n for callback in callbacks:\n if isinstance(callback, keras_utils.TimeHistory):\n timestamp_log = callback.timestamp_log\n stats['step_timestamp_log'] = timestamp_log\n stats['train_finish_time'] = callback.train_finish_time\n if len(timestamp_log) > 1:\n stats['avg_exp_per_second'] = (\n callback.batch_size * callback.log_steps *\n (len(callback.timestamp_log) - 1) /\n (timestamp_log[-1].timestamp - timestamp_log[0].timestamp))\n",
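`update_stats` above derives `avg_exp_per_second` from the TimeHistory callback's timestamp log: examples processed between the first and last log entry divided by the wall-clock time between them. A tiny self-contained sketch of that arithmetic with made-up numbers (the `BatchTimestamp` namedtuple stands in for the callback's log entries):

```python
import collections

# Stand-in for the entries keras_utils.TimeHistory records; only the
# `timestamp` field matters for this calculation.
BatchTimestamp = collections.namedtuple('BatchTimestamp',
                                        ['batch_index', 'timestamp'])

batch_size = 4096
log_steps = 100
timestamp_log = [
    BatchTimestamp(100, 10.0),
    BatchTimestamp(200, 12.0),
    BatchTimestamp(300, 14.5),
]

examples = batch_size * log_steps * (len(timestamp_log) - 1)
seconds = timestamp_log[-1].timestamp - timestamp_log[0].timestamp
print(examples / seconds)  # ~182044 examples/second for these numbers
```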
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Preprocessing ops.\"\"\"\nimport functools\nimport tensorflow as tf\n\nCROP_PROPORTION = 0.875 # Standard for ImageNet.\n\n\ndef random_apply(func, p, x):\n \"\"\"Randomly apply function func to x with probability p.\"\"\"\n return tf.cond(\n tf.less(\n tf.random.uniform([], minval=0, maxval=1, dtype=tf.float32),\n tf.cast(p, tf.float32)), lambda: func(x), lambda: x)\n\n\ndef random_brightness(image, max_delta, impl='simclrv2'):\n \"\"\"A multiplicative vs additive change of brightness.\"\"\"\n if impl == 'simclrv2':\n factor = tf.random.uniform([], tf.maximum(1.0 - max_delta, 0),\n 1.0 + max_delta)\n image = image * factor\n elif impl == 'simclrv1':\n image = tf.image.random_brightness(image, max_delta=max_delta)\n else:\n raise ValueError('Unknown impl {} for random brightness.'.format(impl))\n return image\n\n\ndef to_grayscale(image, keep_channels=True):\n image = tf.image.rgb_to_grayscale(image)\n if keep_channels:\n image = tf.tile(image, [1, 1, 3])\n return image\n\n\ndef color_jitter_nonrand(image,\n brightness=0,\n contrast=0,\n saturation=0,\n hue=0,\n impl='simclrv2'):\n \"\"\"Distorts the color of the image (jittering order is fixed).\n\n Args:\n image: The input image tensor.\n brightness: A float, specifying the brightness for color jitter.\n contrast: A float, specifying the contrast for color jitter.\n saturation: A float, specifying the saturation for color jitter.\n hue: A float, specifying the hue for color jitter.\n impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's\n version of random brightness.\n\n Returns:\n The distorted image tensor.\n \"\"\"\n with tf.name_scope('distort_color'):\n def apply_transform(i, x, brightness, contrast, saturation, hue):\n \"\"\"Apply the i-th transformation.\"\"\"\n if brightness != 0 and i == 0:\n x = random_brightness(x, max_delta=brightness, impl=impl)\n elif contrast != 0 and i == 1:\n x = tf.image.random_contrast(\n x, lower=1 - contrast, upper=1 + contrast)\n elif saturation != 0 and i == 2:\n x = tf.image.random_saturation(\n x, lower=1 - saturation, upper=1 + saturation)\n elif hue != 0:\n x = tf.image.random_hue(x, max_delta=hue)\n return x\n\n for i in range(4):\n image = apply_transform(i, image, brightness, contrast, saturation, hue)\n image = tf.clip_by_value(image, 0., 1.)\n return image\n\n\ndef color_jitter_rand(image,\n brightness=0,\n contrast=0,\n saturation=0,\n hue=0,\n impl='simclrv2'):\n \"\"\"Distorts the color of the image (jittering order is random).\n\n Args:\n image: The input image tensor.\n brightness: A float, specifying the brightness for color jitter.\n contrast: A float, specifying the contrast for color jitter.\n saturation: A float, specifying the saturation for color jitter.\n hue: A float, specifying the hue for color jitter.\n impl: 'simclrv1' or 'simclrv2'. 
Whether to use simclrv1 or simclrv2's\n version of random brightness.\n\n Returns:\n The distorted image tensor.\n \"\"\"\n with tf.name_scope('distort_color'):\n def apply_transform(i, x):\n \"\"\"Apply the i-th transformation.\"\"\"\n\n def brightness_foo():\n if brightness == 0:\n return x\n else:\n return random_brightness(x, max_delta=brightness, impl=impl)\n\n def contrast_foo():\n if contrast == 0:\n return x\n else:\n return tf.image.random_contrast(x, lower=1 - contrast,\n upper=1 + contrast)\n\n def saturation_foo():\n if saturation == 0:\n return x\n else:\n return tf.image.random_saturation(\n x, lower=1 - saturation, upper=1 + saturation)\n\n def hue_foo():\n if hue == 0:\n return x\n else:\n return tf.image.random_hue(x, max_delta=hue)\n\n x = tf.cond(tf.less(i, 2),\n lambda: tf.cond(tf.less(i, 1), brightness_foo, contrast_foo),\n lambda: tf.cond(tf.less(i, 3), saturation_foo, hue_foo))\n return x\n\n perm = tf.random.shuffle(tf.range(4))\n for i in range(4):\n image = apply_transform(perm[i], image)\n image = tf.clip_by_value(image, 0., 1.)\n return image\n\n\ndef color_jitter(image, strength, random_order=True, impl='simclrv2'):\n \"\"\"Distorts the color of the image.\n\n Args:\n image: The input image tensor.\n strength: the floating number for the strength of the color augmentation.\n random_order: A bool, specifying whether to randomize the jittering order.\n impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's\n version of random brightness.\n\n Returns:\n The distorted image tensor.\n \"\"\"\n brightness = 0.8 * strength\n contrast = 0.8 * strength\n saturation = 0.8 * strength\n hue = 0.2 * strength\n if random_order:\n return color_jitter_rand(\n image, brightness, contrast, saturation, hue, impl=impl)\n else:\n return color_jitter_nonrand(\n image, brightness, contrast, saturation, hue, impl=impl)\n\n\ndef random_color_jitter(image,\n p=1.0,\n color_jitter_strength=1.0,\n impl='simclrv2'):\n \"\"\"Perform random color jitter.\"\"\"\n def _transform(image):\n color_jitter_t = functools.partial(\n color_jitter, strength=color_jitter_strength, impl=impl)\n image = random_apply(color_jitter_t, p=0.8, x=image)\n return random_apply(to_grayscale, p=0.2, x=image)\n\n return random_apply(_transform, p=p, x=image)\n\n\ndef gaussian_blur(image, kernel_size, sigma, padding='SAME'):\n \"\"\"Blurs the given image with separable convolution.\n\n\n Args:\n image: Tensor of shape [height, width, channels] and dtype float to blur.\n kernel_size: Integer Tensor for the size of the blur kernel. This is should\n be an odd number. If it is an even number, the actual kernel size will be\n size + 1.\n sigma: Sigma value for gaussian operator.\n padding: Padding to use for the convolution. 
Typically 'SAME' or 'VALID'.\n\n Returns:\n A Tensor representing the blurred image.\n \"\"\"\n radius = tf.cast(kernel_size / 2, dtype=tf.int32)\n kernel_size = radius * 2 + 1\n x = tf.cast(tf.range(-radius, radius + 1), dtype=tf.float32)\n blur_filter = tf.exp(-tf.pow(x, 2.0) /\n (2.0 * tf.pow(tf.cast(sigma, dtype=tf.float32), 2.0)))\n blur_filter /= tf.reduce_sum(blur_filter)\n # One vertical and one horizontal filter.\n blur_v = tf.reshape(blur_filter, [kernel_size, 1, 1, 1])\n blur_h = tf.reshape(blur_filter, [1, kernel_size, 1, 1])\n num_channels = tf.shape(image)[-1]\n blur_h = tf.tile(blur_h, [1, 1, num_channels, 1])\n blur_v = tf.tile(blur_v, [1, 1, num_channels, 1])\n expand_batch_dim = image.shape.ndims == 3\n if expand_batch_dim:\n # Tensorflow requires batched input to convolutions, which we can fake with\n # an extra dimension.\n image = tf.expand_dims(image, axis=0)\n blurred = tf.nn.depthwise_conv2d(\n image, blur_h, strides=[1, 1, 1, 1], padding=padding)\n blurred = tf.nn.depthwise_conv2d(\n blurred, blur_v, strides=[1, 1, 1, 1], padding=padding)\n if expand_batch_dim:\n blurred = tf.squeeze(blurred, axis=0)\n return blurred\n\n\ndef random_blur(image, height, width, p=0.5):\n \"\"\"Randomly blur an image.\n\n Args:\n image: `Tensor` representing an image of arbitrary size.\n height: Height of output image.\n width: Width of output image.\n p: probability of applying this transformation.\n\n Returns:\n A preprocessed image `Tensor`.\n \"\"\"\n del width\n\n def _transform(image):\n sigma = tf.random.uniform([], 0.1, 2.0, dtype=tf.float32)\n return gaussian_blur(\n image, kernel_size=height // 10, sigma=sigma, padding='SAME')\n\n return random_apply(_transform, p=p, x=image)\n\n\ndef distorted_bounding_box_crop(image,\n bbox,\n min_object_covered=0.1,\n aspect_ratio_range=(0.75, 1.33),\n area_range=(0.05, 1.0),\n max_attempts=100,\n scope=None):\n \"\"\"Generates cropped_image using one of the bboxes randomly distorted.\n\n See `tf.image.sample_distorted_bounding_box` for more documentation.\n\n Args:\n image: `Tensor` of image data.\n bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]`\n where each coordinate is [0, 1) and the coordinates are arranged\n as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole\n image.\n min_object_covered: An optional `float`. Defaults to `0.1`. The cropped\n area of the image must contain at least this fraction of any bounding\n box supplied.\n aspect_ratio_range: An optional list of `float`s. The cropped area of the\n image must have an aspect ratio = width / height within this range.\n area_range: An optional list of `float`s. The cropped area of the image\n must contain a fraction of the supplied image within in this range.\n max_attempts: An optional `int`. Number of attempts at generating a cropped\n region of the image of the specified constraints. 
After `max_attempts`\n failures, return the entire image.\n scope: Optional `str` for name scope.\n Returns:\n (cropped image `Tensor`, distorted bbox `Tensor`).\n \"\"\"\n with tf.name_scope(scope or 'distorted_bounding_box_crop'):\n shape = tf.shape(image)\n sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(\n shape,\n bounding_boxes=bbox,\n min_object_covered=min_object_covered,\n aspect_ratio_range=aspect_ratio_range,\n area_range=area_range,\n max_attempts=max_attempts,\n use_image_if_no_bounding_boxes=True)\n bbox_begin, bbox_size, _ = sample_distorted_bounding_box\n\n # Crop the image to the specified bounding box.\n offset_y, offset_x, _ = tf.unstack(bbox_begin)\n target_height, target_width, _ = tf.unstack(bbox_size)\n image = tf.image.crop_to_bounding_box(\n image, offset_y, offset_x, target_height, target_width)\n\n return image\n\n\ndef crop_and_resize(image, height, width):\n \"\"\"Make a random crop and resize it to height `height` and width `width`.\n\n Args:\n image: Tensor representing the image.\n height: Desired image height.\n width: Desired image width.\n\n Returns:\n A `height` x `width` x channels Tensor holding a random crop of `image`.\n \"\"\"\n bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])\n aspect_ratio = width / height\n image = distorted_bounding_box_crop(\n image,\n bbox,\n min_object_covered=0.1,\n aspect_ratio_range=(3. / 4 * aspect_ratio, 4. / 3. * aspect_ratio),\n area_range=(0.08, 1.0),\n max_attempts=100,\n scope=None)\n return tf.image.resize([image], [height, width],\n method=tf.image.ResizeMethod.BICUBIC)[0]\n\n\ndef random_crop_with_resize(image, height, width, p=1.0):\n \"\"\"Randomly crop and resize an image.\n\n Args:\n image: `Tensor` representing an image of arbitrary size.\n height: Height of output image.\n width: Width of output image.\n p: Probability of applying this transformation.\n\n Returns:\n A preprocessed image `Tensor`.\n \"\"\"\n\n def _transform(image): # pylint: disable=missing-docstring\n image = crop_and_resize(image, height, width)\n return image\n\n return random_apply(_transform, p=p, x=image)\n",
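`gaussian_blur` above builds a 1-D Gaussian kernel and applies it twice with depthwise convolutions (once vertically, once horizontally), which is equivalent to a full 2-D Gaussian blur at much lower cost. A short NumPy sketch of just the kernel construction, with illustrative values:

```python
import numpy as np

kernel_size = 21   # e.g. height // 10 for a ~210-pixel crop
sigma = 1.5

radius = kernel_size // 2
x = np.arange(-radius, radius + 1, dtype=np.float32)
blur_filter = np.exp(-x**2 / (2.0 * sigma**2))
blur_filter /= blur_filter.sum()  # normalize so overall brightness is preserved

print(blur_filter.shape)                   # (21,)
print(round(float(blur_filter.sum()), 6))  # 1.0
# Separable blur: convolve rows with blur_filter, then columns with it.
```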
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for targets generations of centernet.\"\"\"\n\nfrom absl.testing import parameterized\nimport tensorflow as tf\n\nfrom official.vision.beta.projects.centernet.ops import target_assigner\nfrom official.vision.ops import preprocess_ops\n\n\nclass TargetAssignerTest(tf.test.TestCase, parameterized.TestCase):\n\n def check_labels_correct(self,\n boxes,\n classes,\n output_size,\n input_size):\n max_num_instances = 128\n num_detections = len(boxes)\n boxes = tf.constant(boxes, dtype=tf.float32)\n classes = tf.constant(classes, dtype=tf.float32)\n\n boxes = preprocess_ops.clip_or_pad_to_fixed_size(\n boxes, max_num_instances, 0)\n classes = preprocess_ops.clip_or_pad_to_fixed_size(\n classes, max_num_instances, 0)\n\n # pylint: disable=g-long-lambda\n labels = target_assigner.assign_centernet_targets(\n labels={\n 'boxes': boxes,\n 'classes': classes,\n 'groundtruths': {\n 'num_detections': num_detections,\n }\n },\n output_size=output_size,\n input_size=input_size)\n\n ct_heatmaps = labels['ct_heatmaps']\n ct_offset = labels['ct_offset']\n size = labels['size']\n box_mask = labels['box_mask']\n box_indices = labels['box_indices']\n\n boxes = tf.cast(boxes, tf.float32)\n classes = tf.cast(classes, tf.float32)\n height_ratio = output_size[0] / input_size[0]\n width_ratio = output_size[1] / input_size[1]\n\n # Shape checks\n self.assertEqual(ct_heatmaps.shape, (output_size[0], output_size[1], 90))\n\n self.assertEqual(ct_offset.shape, (max_num_instances, 2))\n\n self.assertEqual(size.shape, (max_num_instances, 2))\n self.assertEqual(box_mask.shape, (max_num_instances,))\n self.assertEqual(box_indices.shape, (max_num_instances, 2))\n\n self.assertAllInRange(ct_heatmaps, 0, 1)\n\n for i in range(len(boxes)):\n # Check sizes\n self.assertAllEqual(size[i],\n [(boxes[i][2] - boxes[i][0]) * height_ratio,\n (boxes[i][3] - boxes[i][1]) * width_ratio,\n ])\n\n # Check box indices\n y = tf.math.floor((boxes[i][0] + boxes[i][2]) / 2 * height_ratio)\n x = tf.math.floor((boxes[i][1] + boxes[i][3]) / 2 * width_ratio)\n self.assertAllEqual(box_indices[i], [y, x])\n\n # check offsets\n true_y = (boxes[i][0] + boxes[i][2]) / 2 * height_ratio\n true_x = (boxes[i][1] + boxes[i][3]) / 2 * width_ratio\n self.assertAllEqual(ct_offset[i], [true_y - y, true_x - x])\n\n for i in range(len(boxes), max_num_instances):\n # Make sure rest are zero\n self.assertAllEqual(size[i], [0, 0])\n self.assertAllEqual(box_indices[i], [0, 0])\n self.assertAllEqual(ct_offset[i], [0, 0])\n\n # Check mask indices\n self.assertAllEqual(tf.cast(box_mask[3:], tf.int32),\n tf.repeat(0, repeats=max_num_instances - 3))\n self.assertAllEqual(tf.cast(box_mask[:3], tf.int32),\n tf.repeat(1, repeats=3))\n\n def test_generate_targets_no_scale(self):\n boxes = [\n (10, 300, 15, 370),\n (100, 300, 150, 370),\n (15, 100, 200, 170),\n ]\n classes = (1, 2, 3)\n sizes = [512, 512]\n\n self.check_labels_correct(boxes=boxes,\n 
classes=classes,\n output_size=sizes,\n input_size=sizes)\n\n def test_generate_targets_stride_4(self):\n boxes = [\n (10, 300, 15, 370),\n (100, 300, 150, 370),\n (15, 100, 200, 170),\n ]\n classes = (1, 2, 3)\n output_size = [128, 128]\n input_size = [512, 512]\n\n self.check_labels_correct(boxes=boxes,\n classes=classes,\n output_size=output_size,\n input_size=input_size)\n\n def test_generate_targets_stride_8(self):\n boxes = [\n (10, 300, 15, 370),\n (100, 300, 150, 370),\n (15, 100, 200, 170),\n ]\n classes = (1, 2, 3)\n output_size = [128, 128]\n input_size = [1024, 1024]\n\n self.check_labels_correct(boxes=boxes,\n classes=classes,\n output_size=output_size,\n input_size=input_size)\n\n def test_batch_generate_targets(self):\n\n input_size = [512, 512]\n output_size = [128, 128]\n max_num_instances = 128\n\n boxes = tf.constant([\n (10, 300, 15, 370), # center (y, x) = (12, 335)\n (100, 300, 150, 370), # center (y, x) = (125, 335)\n (15, 100, 200, 170), # center (y, x) = (107, 135)\n ], dtype=tf.float32)\n\n classes = tf.constant((1, 1, 1), dtype=tf.float32)\n\n boxes = preprocess_ops.clip_or_pad_to_fixed_size(\n boxes, max_num_instances, 0)\n classes = preprocess_ops.clip_or_pad_to_fixed_size(\n classes, max_num_instances, 0)\n\n boxes = tf.stack([boxes, boxes], axis=0)\n classes = tf.stack([classes, classes], axis=0)\n\n # pylint: disable=g-long-lambda\n labels = tf.map_fn(\n fn=lambda x: target_assigner.assign_centernet_targets(\n labels=x,\n output_size=output_size,\n input_size=input_size),\n elems={\n 'boxes': boxes,\n 'classes': classes,\n 'groundtruths': {\n 'num_detections': tf.constant([3, 3]),\n }\n },\n dtype={\n 'ct_heatmaps': tf.float32,\n 'ct_offset': tf.float32,\n 'size': tf.float32,\n 'box_mask': tf.int32,\n 'box_indices': tf.int32\n }\n )\n\n ct_heatmaps = labels['ct_heatmaps']\n ct_offset = labels['ct_offset']\n size = labels['size']\n box_mask = labels['box_mask']\n box_indices = labels['box_indices']\n\n self.assertEqual(ct_heatmaps.shape, (2, output_size[0], output_size[1], 90))\n\n self.assertEqual(ct_offset.shape, (2, max_num_instances, 2))\n\n self.assertEqual(size.shape, (2, max_num_instances, 2))\n self.assertEqual(box_mask.shape, (2, max_num_instances))\n self.assertEqual(box_indices.shape, (2, max_num_instances, 2))\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
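The assertions in `check_labels_correct` above encode the target arithmetic: box sizes are scaled by the output/input ratio, the integer part of the scaled center gives the heatmap index, and the fractional remainder becomes the offset target. A plain-Python sketch of that arithmetic for one of the test boxes at stride 4:

```python
import math

input_size = (512, 512)
output_size = (128, 128)          # stride 4
box = (10.0, 300.0, 15.0, 370.0)  # (ymin, xmin, ymax, xmax) in input pixels

h_ratio = output_size[0] / input_size[0]
w_ratio = output_size[1] / input_size[1]

size = ((box[2] - box[0]) * h_ratio, (box[3] - box[1]) * w_ratio)
true_y = (box[0] + box[2]) / 2 * h_ratio
true_x = (box[1] + box[3]) / 2 * w_ratio
index = (math.floor(true_y), math.floor(true_x))
offset = (true_y - index[0], true_x - index[1])

print(size)    # (1.25, 17.5)
print(index)   # (3, 83)
print(offset)  # (0.125, 0.75)
```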
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for projects.nhnet.decoder.\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\nfrom official.nlp.modeling import layers\nfrom official.projects.nhnet import configs\nfrom official.projects.nhnet import decoder\nfrom official.projects.nhnet import utils\n\n\nclass DecoderTest(tf.test.TestCase):\n\n def setUp(self):\n super(DecoderTest, self).setUp()\n self._config = utils.get_test_params()\n\n def test_transformer_decoder(self):\n decoder_block = decoder.TransformerDecoder(\n num_hidden_layers=self._config.num_hidden_layers,\n hidden_size=self._config.hidden_size,\n num_attention_heads=self._config.num_attention_heads,\n intermediate_size=self._config.intermediate_size,\n intermediate_activation=self._config.hidden_act,\n hidden_dropout_prob=self._config.hidden_dropout_prob,\n attention_probs_dropout_prob=self._config.attention_probs_dropout_prob,\n initializer_range=self._config.initializer_range)\n decoder_block.build(None)\n self.assertEqual(len(decoder_block.layers), self._config.num_hidden_layers)\n\n def test_bert_decoder(self):\n seq_length = 10\n encoder_input_ids = tf.keras.layers.Input(\n shape=(seq_length,), name=\"encoder_input_ids\", dtype=tf.int32)\n target_ids = tf.keras.layers.Input(\n shape=(seq_length,), name=\"target_ids\", dtype=tf.int32)\n encoder_outputs = tf.keras.layers.Input(\n shape=(seq_length, self._config.hidden_size),\n name=\"all_encoder_outputs\",\n dtype=tf.float32)\n embedding_lookup = layers.OnDeviceEmbedding(\n vocab_size=self._config.vocab_size,\n embedding_width=self._config.hidden_size,\n initializer=tf.keras.initializers.TruncatedNormal(\n stddev=self._config.initializer_range),\n name=\"word_embeddings\")\n cross_attention_bias = decoder.AttentionBias(bias_type=\"single_cross\")(\n encoder_input_ids)\n self_attention_bias = decoder.AttentionBias(bias_type=\"decoder_self\")(\n target_ids)\n inputs = dict(\n attention_bias=cross_attention_bias,\n self_attention_bias=self_attention_bias,\n target_ids=target_ids,\n all_encoder_outputs=encoder_outputs)\n decoder_layer = decoder.Decoder(self._config, embedding_lookup)\n outputs = decoder_layer(inputs)\n model_inputs = dict(\n encoder_input_ids=encoder_input_ids,\n target_ids=target_ids,\n all_encoder_outputs=encoder_outputs)\n model = tf.keras.Model(inputs=model_inputs, outputs=outputs, name=\"test\")\n self.assertLen(decoder_layer.trainable_weights, 30)\n # Forward path.\n fake_inputs = {\n \"encoder_input_ids\": np.zeros((2, 10), dtype=np.int32),\n \"target_ids\": np.zeros((2, 10), dtype=np.int32),\n \"all_encoder_outputs\": np.zeros((2, 10, 16), dtype=np.float32),\n }\n output_tensor = model(fake_inputs)\n self.assertEqual(output_tensor.shape, (2, 10, 16))\n\n def test_multi_doc_decoder(self):\n self._config = utils.get_test_params(cls=configs.NHNetConfig)\n seq_length = 10\n num_docs = 5\n encoder_input_ids = tf.keras.layers.Input(\n shape=(num_docs, seq_length), 
name=\"encoder_input_ids\", dtype=tf.int32)\n target_ids = tf.keras.layers.Input(\n shape=(seq_length,), name=\"target_ids\", dtype=tf.int32)\n encoder_outputs = tf.keras.layers.Input(\n shape=(num_docs, seq_length, self._config.hidden_size),\n name=\"all_encoder_outputs\",\n dtype=tf.float32)\n embedding_lookup = layers.OnDeviceEmbedding(\n vocab_size=self._config.vocab_size,\n embedding_width=self._config.hidden_size,\n initializer=tf.keras.initializers.TruncatedNormal(\n stddev=self._config.initializer_range),\n name=\"word_embeddings\")\n doc_attention_probs = tf.keras.layers.Input(\n shape=(self._config.num_decoder_attn_heads, seq_length, num_docs),\n name=\"doc_attention_probs\",\n dtype=tf.float32)\n cross_attention_bias = decoder.AttentionBias(bias_type=\"multi_cross\")(\n encoder_input_ids)\n self_attention_bias = decoder.AttentionBias(bias_type=\"decoder_self\")(\n target_ids)\n\n inputs = dict(\n attention_bias=cross_attention_bias,\n self_attention_bias=self_attention_bias,\n target_ids=target_ids,\n all_encoder_outputs=encoder_outputs,\n doc_attention_probs=doc_attention_probs)\n\n decoder_layer = decoder.Decoder(self._config, embedding_lookup)\n outputs = decoder_layer(inputs)\n model_inputs = dict(\n encoder_input_ids=encoder_input_ids,\n target_ids=target_ids,\n all_encoder_outputs=encoder_outputs,\n doc_attention_probs=doc_attention_probs)\n model = tf.keras.Model(inputs=model_inputs, outputs=outputs, name=\"test\")\n self.assertLen(decoder_layer.trainable_weights, 30)\n # Forward path.\n fake_inputs = {\n \"encoder_input_ids\":\n np.zeros((2, num_docs, seq_length), dtype=np.int32),\n \"target_ids\":\n np.zeros((2, seq_length), dtype=np.int32),\n \"all_encoder_outputs\":\n np.zeros((2, num_docs, seq_length, 16), dtype=np.float32),\n \"doc_attention_probs\":\n np.zeros(\n (2, self._config.num_decoder_attn_heads, seq_length, num_docs),\n dtype=np.float32)\n }\n output_tensor = model(fake_inputs)\n self.assertEqual(output_tensor.shape, (2, seq_length, 16))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\n\nfrom official.vision.beta.projects.yolo.modeling.layers import nn_blocks\n\n\nclass CSPConnectTest(tf.test.TestCase, parameterized.TestCase):\n\n @parameterized.named_parameters(('same', 224, 224, 64, 1),\n ('downsample', 224, 224, 64, 2))\n def test_pass_through(self, width, height, filters, mod):\n x = tf.keras.Input(shape=(width, height, filters))\n test_layer = nn_blocks.CSPRoute(filters=filters, filter_scale=mod)\n test_layer2 = nn_blocks.CSPConnect(filters=filters, filter_scale=mod)\n outx, px = test_layer(x)\n outx = test_layer2([outx, px])\n print(outx)\n print(outx.shape.as_list())\n self.assertAllEqual(\n outx.shape.as_list(),\n [None, np.ceil(width // 2),\n np.ceil(height // 2), (filters)])\n\n @parameterized.named_parameters(('same', 224, 224, 64, 1),\n ('downsample', 224, 224, 128, 2))\n def test_gradient_pass_though(self, filters, width, height, mod):\n loss = tf.keras.losses.MeanSquaredError()\n optimizer = tf.keras.optimizers.SGD()\n test_layer = nn_blocks.CSPRoute(filters, filter_scale=mod)\n path_layer = nn_blocks.CSPConnect(filters, filter_scale=mod)\n\n init = tf.random_normal_initializer()\n x = tf.Variable(\n initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))\n y = tf.Variable(\n initial_value=init(\n shape=(1, int(np.ceil(width // 2)), int(np.ceil(height // 2)),\n filters),\n dtype=tf.float32))\n\n with tf.GradientTape() as tape:\n x_hat, x_prev = test_layer(x)\n x_hat = path_layer([x_hat, x_prev])\n grad_loss = loss(x_hat, y)\n grad = tape.gradient(grad_loss, test_layer.trainable_variables)\n optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))\n\n self.assertNotIn(None, grad)\n\n\nclass CSPRouteTest(tf.test.TestCase, parameterized.TestCase):\n\n @parameterized.named_parameters(('same', 224, 224, 64, 1),\n ('downsample', 224, 224, 64, 2))\n def test_pass_through(self, width, height, filters, mod):\n x = tf.keras.Input(shape=(width, height, filters))\n test_layer = nn_blocks.CSPRoute(filters=filters, filter_scale=mod)\n outx, _ = test_layer(x)\n print(outx)\n print(outx.shape.as_list())\n self.assertAllEqual(\n outx.shape.as_list(),\n [None, np.ceil(width // 2),\n np.ceil(height // 2), (filters / mod)])\n\n @parameterized.named_parameters(('same', 224, 224, 64, 1),\n ('downsample', 224, 224, 128, 2))\n def test_gradient_pass_though(self, filters, width, height, mod):\n loss = tf.keras.losses.MeanSquaredError()\n optimizer = tf.keras.optimizers.SGD()\n test_layer = nn_blocks.CSPRoute(filters, filter_scale=mod)\n path_layer = nn_blocks.CSPConnect(filters, filter_scale=mod)\n\n init = tf.random_normal_initializer()\n x = tf.Variable(\n initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))\n y = tf.Variable(\n initial_value=init(\n shape=(1, int(np.ceil(width // 2)), int(np.ceil(height // 2)),\n 
filters),\n dtype=tf.float32))\n\n with tf.GradientTape() as tape:\n x_hat, x_prev = test_layer(x)\n x_hat = path_layer([x_hat, x_prev])\n grad_loss = loss(x_hat, y)\n grad = tape.gradient(grad_loss, test_layer.trainable_variables)\n optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))\n\n self.assertNotIn(None, grad)\n\n\nclass ConvBNTest(tf.test.TestCase, parameterized.TestCase):\n\n @parameterized.named_parameters(\n ('valid', (3, 3), 'valid', (1, 1)), ('same', (3, 3), 'same', (1, 1)),\n ('downsample', (3, 3), 'same', (2, 2)), ('test', (1, 1), 'valid', (1, 1)))\n def test_pass_through(self, kernel_size, padding, strides):\n if padding == 'same':\n pad_const = 1\n else:\n pad_const = 0\n x = tf.keras.Input(shape=(224, 224, 3))\n test_layer = nn_blocks.ConvBN(\n filters=64,\n kernel_size=kernel_size,\n padding=padding,\n strides=strides,\n trainable=False)\n outx = test_layer(x)\n print(outx.shape.as_list())\n test = [\n None,\n int((224 - kernel_size[0] + (2 * pad_const)) / strides[0] + 1),\n int((224 - kernel_size[1] + (2 * pad_const)) / strides[1] + 1), 64\n ]\n print(test)\n self.assertAllEqual(outx.shape.as_list(), test)\n\n @parameterized.named_parameters(('filters', 3))\n def test_gradient_pass_though(self, filters):\n loss = tf.keras.losses.MeanSquaredError()\n optimizer = tf.keras.optimizers.SGD()\n with tf.device('/CPU:0'):\n test_layer = nn_blocks.ConvBN(filters, kernel_size=(3, 3), padding='same')\n\n init = tf.random_normal_initializer()\n x = tf.Variable(\n initial_value=init(shape=(1, 224, 224, 3), dtype=tf.float32))\n y = tf.Variable(\n initial_value=init(shape=(1, 224, 224, filters), dtype=tf.float32))\n\n with tf.GradientTape() as tape:\n x_hat = test_layer(x)\n grad_loss = loss(x_hat, y)\n grad = tape.gradient(grad_loss, test_layer.trainable_variables)\n optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))\n self.assertNotIn(None, grad)\n\n\nclass DarkResidualTest(tf.test.TestCase, parameterized.TestCase):\n\n @parameterized.named_parameters(('same', 224, 224, 64, False),\n ('downsample', 223, 223, 32, True),\n ('oddball', 223, 223, 32, False))\n def test_pass_through(self, width, height, filters, downsample):\n mod = 1\n if downsample:\n mod = 2\n x = tf.keras.Input(shape=(width, height, filters))\n test_layer = nn_blocks.DarkResidual(filters=filters, downsample=downsample)\n outx = test_layer(x)\n print(outx)\n print(outx.shape.as_list())\n self.assertAllEqual(\n outx.shape.as_list(),\n [None, np.ceil(width / mod),\n np.ceil(height / mod), filters])\n\n @parameterized.named_parameters(('same', 64, 224, 224, False),\n ('downsample', 32, 223, 223, True),\n ('oddball', 32, 223, 223, False))\n def test_gradient_pass_though(self, filters, width, height, downsample):\n loss = tf.keras.losses.MeanSquaredError()\n optimizer = tf.keras.optimizers.SGD()\n test_layer = nn_blocks.DarkResidual(filters, downsample=downsample)\n\n if downsample:\n mod = 2\n else:\n mod = 1\n\n init = tf.random_normal_initializer()\n x = tf.Variable(\n initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))\n y = tf.Variable(\n initial_value=init(\n shape=(1, int(np.ceil(width / mod)), int(np.ceil(height / mod)),\n filters),\n dtype=tf.float32))\n\n with tf.GradientTape() as tape:\n x_hat = test_layer(x)\n grad_loss = loss(x_hat, y)\n grad = tape.gradient(grad_loss, test_layer.trainable_variables)\n optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))\n\n self.assertNotIn(None, grad)\n\n\nclass DarkSppTest(tf.test.TestCase, 
parameterized.TestCase):\n\n @parameterized.named_parameters(('RouteProcessSpp', 224, 224, 3, [5, 9, 13]),\n ('test1', 300, 300, 10, [2, 3, 4, 5]),\n ('test2', 256, 256, 5, [10]))\n def test_pass_through(self, width, height, channels, sizes):\n x = tf.keras.Input(shape=(width, height, channels))\n test_layer = nn_blocks.SPP(sizes=sizes)\n outx = test_layer(x)\n self.assertAllEqual(outx.shape.as_list(),\n [None, width, height, channels * (len(sizes) + 1)])\n return\n\n @parameterized.named_parameters(('RouteProcessSpp', 224, 224, 3, [5, 9, 13]),\n ('test1', 300, 300, 10, [2, 3, 4, 5]),\n ('test2', 256, 256, 5, [10]))\n def test_gradient_pass_though(self, width, height, channels, sizes):\n loss = tf.keras.losses.MeanSquaredError()\n optimizer = tf.keras.optimizers.SGD()\n test_layer = nn_blocks.SPP(sizes=sizes)\n\n init = tf.random_normal_initializer()\n x = tf.Variable(\n initial_value=init(\n shape=(1, width, height, channels), dtype=tf.float32))\n y = tf.Variable(\n initial_value=init(\n shape=(1, width, height, channels * (len(sizes) + 1)),\n dtype=tf.float32))\n\n with tf.GradientTape() as tape:\n x_hat = test_layer(x)\n grad_loss = loss(x_hat, y)\n grad = tape.gradient(grad_loss, test_layer.trainable_variables)\n optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))\n\n self.assertNotIn(None, grad)\n return\n\n\nclass DarkRouteProcessTest(tf.test.TestCase, parameterized.TestCase):\n\n @parameterized.named_parameters(\n ('test1', 224, 224, 64, 7, False), ('test2', 223, 223, 32, 3, False),\n ('tiny', 223, 223, 16, 1, False), ('spp', 224, 224, 64, 7, False))\n def test_pass_through(self, width, height, filters, repetitions, spp):\n x = tf.keras.Input(shape=(width, height, filters))\n test_layer = nn_blocks.DarkRouteProcess(\n filters=filters, repetitions=repetitions, insert_spp=spp)\n outx = test_layer(x)\n self.assertLen(outx, 2, msg='len(outx) != 2')\n if repetitions == 1:\n filter_y1 = filters\n else:\n filter_y1 = filters // 2\n self.assertAllEqual(\n outx[1].shape.as_list(), [None, width, height, filter_y1])\n self.assertAllEqual(\n filters % 2,\n 0,\n msg='Output of a DarkRouteProcess layer has an odd number of filters')\n self.assertAllEqual(outx[0].shape.as_list(), [None, width, height, filters])\n\n @parameterized.named_parameters(\n ('test1', 224, 224, 64, 7, False), ('test2', 223, 223, 32, 3, False),\n ('tiny', 223, 223, 16, 1, False), ('spp', 224, 224, 64, 7, False))\n def test_gradient_pass_though(self, width, height, filters, repetitions, spp):\n loss = tf.keras.losses.MeanSquaredError()\n optimizer = tf.keras.optimizers.SGD()\n test_layer = nn_blocks.DarkRouteProcess(\n filters=filters, repetitions=repetitions, insert_spp=spp)\n\n if repetitions == 1:\n filter_y1 = filters\n else:\n filter_y1 = filters // 2\n\n init = tf.random_normal_initializer()\n x = tf.Variable(\n initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))\n y_0 = tf.Variable(\n initial_value=init(shape=(1, width, height, filters), dtype=tf.float32))\n y_1 = tf.Variable(\n initial_value=init(\n shape=(1, width, height, filter_y1), dtype=tf.float32))\n\n with tf.GradientTape() as tape:\n x_hat_0, x_hat_1 = test_layer(x)\n grad_loss_0 = loss(x_hat_0, y_0)\n grad_loss_1 = loss(x_hat_1, y_1)\n grad = tape.gradient([grad_loss_0, grad_loss_1],\n test_layer.trainable_variables)\n optimizer.apply_gradients(zip(grad, test_layer.trainable_variables))\n\n self.assertNotIn(None, grad)\n return\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Losses utilities for detection models.\"\"\"\n\nimport tensorflow as tf\n\n\ndef multi_level_flatten(multi_level_inputs, last_dim=None):\n \"\"\"Flattens a multi-level input.\n\n Args:\n multi_level_inputs: Ordered Dict with level to [batch, d1, ..., dm].\n last_dim: Whether the output should be [batch_size, None], or [batch_size,\n None, last_dim]. Defaults to `None`.\n\n Returns:\n Concatenated output [batch_size, None], or [batch_size, None, dm]\n \"\"\"\n flattened_inputs = []\n batch_size = None\n for level in multi_level_inputs.keys():\n single_input = multi_level_inputs[level]\n if batch_size is None:\n batch_size = single_input.shape[0] or tf.shape(single_input)[0]\n if last_dim is not None:\n flattened_input = tf.reshape(single_input, [batch_size, -1, last_dim])\n else:\n flattened_input = tf.reshape(single_input, [batch_size, -1])\n flattened_inputs.append(flattened_input)\n return tf.concat(flattened_inputs, axis=1)\n",
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Tests for semantic_segmentation.\"\"\"\n\n# pylint: disable=unused-import\nfrom absl.testing import parameterized\nimport tensorflow as tf\n\nfrom official.core import config_definitions as cfg\nfrom official.core import exp_factory\nfrom official.vision import beta\nfrom official.vision.beta.configs import semantic_segmentation as exp_cfg\n\n\nclass ImageSegmentationConfigTest(tf.test.TestCase, parameterized.TestCase):\n\n @parameterized.parameters(('seg_deeplabv3_pascal',),\n ('seg_deeplabv3plus_pascal',))\n def test_semantic_segmentation_configs(self, config_name):\n config = exp_factory.get_exp_config(config_name)\n self.assertIsInstance(config, cfg.ExperimentConfig)\n self.assertIsInstance(config.task, exp_cfg.SemanticSegmentationTask)\n self.assertIsInstance(config.task.model,\n exp_cfg.SemanticSegmentationModel)\n self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)\n config.validate()\n config.task.train_data.is_training = None\n with self.assertRaises(KeyError):\n config.validate()\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Classification decoder and parser.\"\"\"\n# Import libraries\nimport tensorflow as tf\n\nfrom official.vision.dataloaders import classification_input\nfrom official.vision.ops import preprocess_ops\n\nMEAN_RGB = (0.5 * 255, 0.5 * 255, 0.5 * 255)\nSTDDEV_RGB = (0.5 * 255, 0.5 * 255, 0.5 * 255)\n\n\ndef random_crop_image(image,\n aspect_ratio_range=(0.75, 1.33),\n area_range=(0.05, 1.0),\n max_attempts=100):\n \"\"\"Randomly crop an arbitrary shaped slice from the input image.\n\n Args:\n image: a Tensor of shape [height, width, 3] representing the input image.\n aspect_ratio_range: a list of floats. The cropped area of the image must\n have an aspect ratio = width / height within this range.\n area_range: a list of floats. The cropped reas of the image must contain\n a fraction of the input image within this range.\n max_attempts: the number of attempts at generating a cropped region of the\n image of the specified constraints. After max_attempts failures, return\n the entire image.\n\n Returns:\n cropped_image: a Tensor representing the random cropped image. Can be the\n original image if max_attempts is exhausted.\n \"\"\"\n with tf.name_scope('random_crop_image'):\n crop_offset, crop_size, _ = tf.image.sample_distorted_bounding_box(\n tf.shape(image),\n tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]),\n min_object_covered=0.1,\n aspect_ratio_range=aspect_ratio_range,\n area_range=area_range,\n max_attempts=max_attempts,\n use_image_if_no_bounding_boxes=True)\n cropped_image = tf.slice(image, crop_offset, crop_size)\n return cropped_image\n\n\ndef random_crop_image_v2(image_bytes,\n image_shape,\n aspect_ratio_range=(0.75, 1.33),\n area_range=(0.05, 1.0),\n max_attempts=100):\n \"\"\"Randomly crop an arbitrary shaped slice from the input image.\n\n This is a faster version of `random_crop_image` which takes the original\n image bytes and image size as the inputs, and partially decode the JPEG\n bytes according to the generated crop.\n\n Args:\n image_bytes: a Tensor of type string representing the raw image bytes.\n image_shape: a Tensor specifying the shape of the raw image.\n aspect_ratio_range: a list of floats. The cropped area of the image must\n have an aspect ratio = width / height within this range.\n area_range: a list of floats. The cropped reas of the image must contain\n a fraction of the input image within this range.\n max_attempts: the number of attempts at generating a cropped region of the\n image of the specified constraints. After max_attempts failures, return\n the entire image.\n\n Returns:\n cropped_image: a Tensor representing the random cropped image. 
Can be the\n original image if max_attempts is exhausted.\n \"\"\"\n with tf.name_scope('random_crop_image_v2'):\n crop_offset, crop_size, _ = tf.image.sample_distorted_bounding_box(\n image_shape,\n tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]),\n min_object_covered=0.1,\n aspect_ratio_range=aspect_ratio_range,\n area_range=area_range,\n max_attempts=max_attempts,\n use_image_if_no_bounding_boxes=True)\n offset_y, offset_x, _ = tf.unstack(crop_offset)\n crop_height, crop_width, _ = tf.unstack(crop_size)\n crop_window = tf.stack([offset_y, offset_x, crop_height, crop_width])\n cropped_image = tf.image.decode_and_crop_jpeg(\n image_bytes, crop_window, channels=3)\n return cropped_image\n\n\nclass Decoder(classification_input.Decoder):\n \"\"\"A tf.Example decoder for classification task.\"\"\"\n pass\n\n\nclass Parser(classification_input.Parser):\n \"\"\"Parser to parse an image and its annotations into a dictionary of tensors.\"\"\"\n\n def _parse_train_image(self, decoded_tensors):\n \"\"\"Parses image data for training.\"\"\"\n image_bytes = decoded_tensors[self._image_field_key]\n\n if self._decode_jpeg_only:\n image_shape = tf.image.extract_jpeg_shape(image_bytes)\n\n # Crops image.\n cropped_image = random_crop_image_v2(\n image_bytes, image_shape)\n image = tf.cond(\n tf.reduce_all(tf.equal(tf.shape(cropped_image), image_shape)),\n lambda: preprocess_ops.center_crop_image_v2(image_bytes, image_shape),\n lambda: cropped_image)\n else:\n # Decodes image.\n image = tf.io.decode_image(image_bytes, channels=3)\n image.set_shape([None, None, 3])\n\n # Crops image.\n cropped_image = random_crop_image(image)\n\n image = tf.cond(\n tf.reduce_all(tf.equal(tf.shape(cropped_image), tf.shape(image))),\n lambda: preprocess_ops.center_crop_image(image),\n lambda: cropped_image)\n\n if self._aug_rand_hflip:\n image = tf.image.random_flip_left_right(image)\n\n # Resizes image.\n image = tf.image.resize(\n image, self._output_size, method=tf.image.ResizeMethod.BILINEAR)\n\n # Apply autoaug or randaug.\n if self._augmenter is not None:\n image = self._augmenter.distort(image)\n\n # Normalizes image with mean and std pixel values.\n image = preprocess_ops.normalize_image(image,\n offset=MEAN_RGB,\n scale=STDDEV_RGB)\n\n # Convert image to self._dtype.\n image = tf.image.convert_image_dtype(image, self._dtype)\n\n return image\n\n def _parse_eval_image(self, decoded_tensors):\n \"\"\"Parses image data for evaluation.\"\"\"\n image_bytes = decoded_tensors[self._image_field_key]\n\n if self._decode_jpeg_only:\n image_shape = tf.image.extract_jpeg_shape(image_bytes)\n\n # Center crops.\n image = preprocess_ops.center_crop_image_v2(image_bytes, image_shape)\n else:\n # Decodes image.\n image = tf.io.decode_image(image_bytes, channels=3)\n image.set_shape([None, None, 3])\n\n # Center crops.\n image = preprocess_ops.center_crop_image(image)\n\n image = tf.image.resize(\n image, self._output_size, method=tf.image.ResizeMethod.BILINEAR)\n\n # Normalizes image with mean and std pixel values.\n image = preprocess_ops.normalize_image(image,\n offset=MEAN_RGB,\n scale=STDDEV_RGB)\n\n # Convert image to self._dtype.\n image = tf.image.convert_image_dtype(image, self._dtype)\n\n return image\n",
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Gaussian error linear unit.\"\"\"\n\nimport tensorflow as tf\n\n\[email protected]_keras_serializable(package='Text')\ndef gelu(x):\n \"\"\"Gaussian Error Linear Unit.\n\n This is a smoother version of the RELU.\n Original paper: https://arxiv.org/abs/1606.08415\n Args:\n x: float Tensor to perform activation.\n\n Returns:\n `x` with the GELU activation applied.\n \"\"\"\n return tf.keras.activations.gelu(x, approximate=True)\n",
"# Copyright 2022 The Orbit Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Provides the `ExportSavedModel` action and associated helper classes.\"\"\"\n\nimport re\n\nfrom typing import Callable, Optional\n\nimport tensorflow as tf\n\n\ndef _id_key(filename):\n _, id_num = filename.rsplit('-', maxsplit=1)\n return int(id_num)\n\n\ndef _find_managed_files(base_name):\n r\"\"\"Returns all files matching '{base_name}-\\d+', in sorted order.\"\"\"\n managed_file_regex = re.compile(rf'{re.escape(base_name)}-\\d+$')\n filenames = tf.io.gfile.glob(f'{base_name}-*')\n filenames = filter(managed_file_regex.match, filenames)\n return sorted(filenames, key=_id_key)\n\n\nclass _CounterIdFn:\n \"\"\"Implements a counter-based ID function for `ExportFileManager`.\"\"\"\n\n def __init__(self, base_name: str):\n managed_files = _find_managed_files(base_name)\n self.value = _id_key(managed_files[-1]) + 1 if managed_files else 0\n\n def __call__(self):\n output = self.value\n self.value += 1\n return output\n\n\nclass ExportFileManager:\n \"\"\"Utility class that manages a group of files with a shared base name.\n\n For actions like SavedModel exporting, there are potentially many different\n file naming and cleanup strategies that may be desirable. This class provides\n a basic interface allowing SavedModel export to be decoupled from these\n details, and a default implementation that should work for many basic\n scenarios. Users may subclass this class to alter behavior and define more\n customized naming and cleanup strategies.\n \"\"\"\n\n def __init__(self,\n base_name: str,\n max_to_keep: int = 5,\n next_id_fn: Optional[Callable[[], int]] = None):\n \"\"\"Initializes the instance.\n\n Args:\n base_name: A shared base name for file names generated by this class.\n max_to_keep: The maximum number of files matching `base_name` to keep\n after each call to `cleanup`. The most recent (as determined by file\n modification time) `max_to_keep` files are preserved; the rest are\n deleted. If < 0, all files are preserved.\n next_id_fn: An optional callable that returns integer IDs to append to\n base name (formatted as `'{base_name}-{id}'`). 
The order of integers is\n used to sort files to determine the oldest ones deleted by `clean_up`.\n If not supplied, a default ID based on an incrementing counter is used.\n One common alternative maybe be to use the current global step count,\n for instance passing `next_id_fn=global_step.numpy`.\n \"\"\"\n self._base_name = base_name\n self._max_to_keep = max_to_keep\n self._next_id_fn = next_id_fn or _CounterIdFn(base_name)\n\n @property\n def managed_files(self):\n \"\"\"Returns all files managed by this instance, in sorted order.\n\n Returns:\n The list of files matching the `base_name` provided when constructing this\n `ExportFileManager` instance, sorted in increasing integer order of the\n IDs returned by `next_id_fn`.\n \"\"\"\n return _find_managed_files(self._base_name)\n\n def clean_up(self):\n \"\"\"Cleans up old files matching `{base_name}-*`.\n\n The most recent `max_to_keep` files are preserved.\n \"\"\"\n if self._max_to_keep < 0:\n return\n\n for filename in self.managed_files[:-self._max_to_keep]:\n tf.io.gfile.rmtree(filename)\n\n def next_name(self) -> str:\n \"\"\"Returns a new file name based on `base_name` and `next_id_fn()`.\"\"\"\n return f'{self._base_name}-{self._next_id_fn()}'\n\n\nclass ExportSavedModel:\n \"\"\"Action that exports the given model as a SavedModel.\"\"\"\n\n def __init__(self,\n model: tf.Module,\n file_manager: ExportFileManager,\n signatures,\n options: Optional[tf.saved_model.SaveOptions] = None):\n \"\"\"Initializes the instance.\n\n Args:\n model: The model to export.\n file_manager: An instance of `ExportFileManager` (or a subclass), that\n provides file naming and cleanup functionality.\n signatures: The signatures to forward to `tf.saved_model.save()`.\n options: Optional options to forward to `tf.saved_model.save()`.\n \"\"\"\n self.model = model\n self.file_manager = file_manager\n self.signatures = signatures\n self.options = options\n\n def __call__(self, _):\n \"\"\"Exports the SavedModel.\"\"\"\n export_dir = self.file_manager.next_name()\n tf.saved_model.save(self.model, export_dir, self.signatures, self.options)\n self.file_manager.clean_up()\n"
] | [
[
"tensorflow.keras.mixed_precision.set_global_policy",
"tensorflow.keras.Input",
"tensorflow.test.main",
"tensorflow.keras.Model",
"tensorflow.keras.activations.get",
"tensorflow.keras.initializers.get",
"numpy.random.randint"
],
[
"tensorflow.io.gfile.GFile"
],
[
"tensorflow.zeros",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.keras.losses.sparse_categorical_crossentropy",
"tensorflow.distribute.get_strategy",
"tensorflow.name_scope",
"tensorflow.data.Dataset.range",
"tensorflow.keras.initializers.TruncatedNormal",
"tensorflow.keras.metrics.SparseCategoricalAccuracy",
"tensorflow.math.divide_no_nan",
"tensorflow.add_n",
"tensorflow.keras.metrics.Mean",
"tensorflow.GradientTape"
],
[
"tensorflow.data.TFRecordDataset",
"tensorflow.io.parse_single_example",
"tensorflow.io.gfile.glob",
"tensorflow.test.main",
"tensorflow.io.FixedLenFeature"
],
[
"tensorflow.maximum",
"tensorflow.minimum",
"tensorflow.cast",
"tensorflow.keras.utils.register_keras_serializable",
"tensorflow.logical_and"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.summary.create_noop_writer",
"tensorflow.config.list_physical_devices",
"tensorflow.io.gfile.mkdir",
"tensorflow.nest.flatten",
"tensorflow.summary.scalar",
"tensorflow.keras.callbacks.CallbackList",
"tensorflow.io.gfile.GFile",
"tensorflow.python.util.deprecation.deprecated",
"tensorflow.keras.metrics.Mean",
"tensorflow.executing_eagerly",
"tensorflow.io.gfile.exists",
"tensorflow.train.Checkpoint",
"tensorflow.config.list_logical_devices",
"tensorflow.function",
"tensorflow.io.gfile.rmtree",
"tensorflow.GradientTape",
"tensorflow.train.latest_checkpoint",
"tensorflow.range",
"tensorflow.clip_by_global_norm",
"tensorflow.nest.map_structure"
],
[
"tensorflow.io.gfile.isdir",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.train.latest_checkpoint",
"tensorflow.keras.metrics.SparseCategoricalAccuracy",
"tensorflow.train.Checkpoint",
"tensorflow.keras.regularizers.l2",
"tensorflow.reduce_sum",
"tensorflow.keras.losses.categorical_crossentropy",
"tensorflow.cast",
"tensorflow.keras.losses.sparse_categorical_crossentropy",
"tensorflow.keras.metrics.AUC",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.add_n",
"tensorflow.one_hot",
"tensorflow.keras.metrics.CategoricalAccuracy",
"tensorflow.distribute.get_strategy",
"tensorflow.keras.layers.InputSpec",
"tensorflow.GradientTape"
],
[
"tensorflow.test.main"
],
[
"tensorflow.train.Checkpoint",
"tensorflow.random.uniform",
"tensorflow.transpose",
"tensorflow.reduce_mean"
],
[
"tensorflow.keras.mixed_precision.set_global_policy",
"tensorflow.keras.Input",
"tensorflow.keras.initializers.RandomUniform",
"tensorflow.zeros",
"numpy.random.random_sample",
"tensorflow.test.main",
"tensorflow.keras.Model",
"numpy.ones",
"numpy.random.rand",
"tensorflow.keras.initializers.TruncatedNormal",
"numpy.random.randint"
],
[
"tensorflow.concat",
"numpy.sqrt",
"numpy.asarray",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.pad",
"tensorflow.keras.initializers.VarianceScaling",
"tensorflow.add_n",
"tensorflow.keras.Input",
"tensorflow.keras.initializers.TruncatedNormal",
"tensorflow.random_normal_initializer",
"numpy.log",
"tensorflow.keras.backend.image_data_format",
"tensorflow.identity",
"tensorflow.math.multiply",
"tensorflow.keras.layers.MaxPool3D",
"tensorflow.nn.relu",
"tensorflow.nn.softmax",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.keras.layers.MaxPool2D",
"tensorflow.constant_initializer"
],
[
"tensorflow.test.main"
],
[
"tensorflow.device",
"tensorflow.keras.optimizers.schedules.PolynomialDecay",
"tensorflow.random.get_global_generator",
"tensorflow.train.Checkpoint",
"tensorflow.io.gfile.GFile",
"tensorflow.tpu.experimental.initialize_tpu_system",
"tensorflow.distribute.cluster_resolver.TPUClusterResolver",
"tensorflow.distribute.TPUStrategy",
"tensorflow.gather",
"tensorflow.config.experimental_connect_to_cluster",
"tensorflow.keras.callbacks.TensorBoard",
"tensorflow.summary.scalar",
"tensorflow.keras.metrics.Mean",
"tensorflow.distribute.MirroredStrategy"
],
[
"tensorflow.keras.callbacks.TensorBoard"
],
[
"tensorflow.image.random_contrast",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.image.random_saturation",
"tensorflow.image.sample_distorted_bounding_box",
"tensorflow.nn.depthwise_conv2d",
"tensorflow.image.random_hue",
"tensorflow.squeeze",
"tensorflow.image.rgb_to_grayscale",
"tensorflow.name_scope",
"tensorflow.tile",
"tensorflow.image.random_brightness",
"tensorflow.unstack",
"tensorflow.shape",
"tensorflow.less",
"tensorflow.pow",
"tensorflow.random.uniform",
"tensorflow.clip_by_value",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.image.crop_to_bounding_box",
"tensorflow.maximum",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.image.resize"
],
[
"tensorflow.constant",
"tensorflow.stack",
"tensorflow.cast",
"tensorflow.test.main",
"tensorflow.repeat",
"tensorflow.math.floor"
],
[
"tensorflow.test.main",
"tensorflow.keras.Model",
"tensorflow.keras.initializers.TruncatedNormal",
"numpy.zeros",
"tensorflow.keras.layers.Input"
],
[
"tensorflow.device",
"tensorflow.keras.Input",
"tensorflow.keras.losses.MeanSquaredError",
"tensorflow.test.main",
"numpy.ceil",
"tensorflow.GradientTape",
"tensorflow.random_normal_initializer",
"tensorflow.keras.optimizers.SGD"
],
[
"tensorflow.reshape",
"tensorflow.concat",
"tensorflow.shape"
],
[
"tensorflow.test.main"
],
[
"tensorflow.constant",
"tensorflow.image.random_flip_left_right",
"tensorflow.unstack",
"tensorflow.shape",
"tensorflow.slice",
"tensorflow.stack",
"tensorflow.io.decode_image",
"tensorflow.image.extract_jpeg_shape",
"tensorflow.image.decode_and_crop_jpeg",
"tensorflow.image.resize",
"tensorflow.name_scope",
"tensorflow.image.convert_image_dtype"
],
[
"tensorflow.keras.activations.gelu",
"tensorflow.keras.utils.register_keras_serializable"
],
[
"tensorflow.saved_model.save",
"tensorflow.io.gfile.glob",
"tensorflow.io.gfile.rmtree"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.4",
"2.5",
"2.6",
"2.7"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kouroshHakha/circuit-fewshot-code | [
"32007e119da30632736868a3f643027624bf08d2",
"32007e119da30632736868a3f643027624bf08d2",
"32007e119da30632736868a3f643027624bf08d2"
] | [
"configs/opamp/biased_pmos_gain/15-layer-ft-all-pool-0.5/config.py",
"configs/opamp/biased_pmos/15-layer-scratch-5/config.py",
"scripts/test_rdiv_topology_generalization.py"
] | [
"import time\n\nimport hashlib\nimport torch\nfrom torch_geometric.data import DataLoader\n\nfrom cgl.utils.params import ParamDict\nfrom cgl.data.graph_data import CircuitInMemDataset, CircuitGraphDataset\n\n# from cgl.models.gnn import DeepGENNet\n\ns = time.time()\nprint('Loading the dataset ...')\nroot = '/store/nosnap/results/ngspice_biased_pmos_gain/two_stage_biased_pmos'\ncir_dset = CircuitGraphDataset(root=root, mode='train', circuit_type='opamp_biased_pmos')\nnode_output_idx = next(iter(cir_dset.graph_nodes.values()))['V_net6']\nvout_idx = torch.where((torch.where(cir_dset[0].output_node_mask)[0] == node_output_idx))[0].item()\n\n# gain mean and variance\ngmean, gstd = -1.1057, 0.6559\n\ndef transform_fn(data):\n data.gain = (data.vac_mag[vout_idx, 0].float() - gmean) / gstd\n return data\n\ndset = CircuitInMemDataset(root=root, mode='train', transform=transform_fn)\nprint(f'Dataset was loaded in {time.time() - s:.6f} seconds.')\n\nsample_data = dset[0]\n\nfract = 0.05\nsplits = dset.splits\ntrain_idx = int(fract * len(splits['train']))\ntrain_dset = dset[splits['train'][:train_idx]]\nvalid_dset = dset[splits['valid']]\ntest_dset = dset[splits['test']]\n\nbackbone_config = 'configs/opamp/dc/deep_gen_net/15-layer/config.py'\nbb_id = hashlib.sha256(backbone_config.encode('utf-8')).hexdigest()[:6]\n\n\nlr = 1e-3\nactivation = 'relu'\nhidden_channels = 128\nnum_layers = 15\ntrain_batch_size = min(256, len(train_dset))\nvalid_batch_size = min(256, len(valid_dset)) \ntest_batch_size = min(256, len(test_dset)) \n\nexp_name = f'GAIN_PMOS_FT_Pool_{fract*10:.1f}_DeepGEN_h{hidden_channels}_nl{num_layers}_bs{train_batch_size}_lr{lr:.0e}_{activation}'\n\nmdl_config = ParamDict(\n exp_name=exp_name,\n num_nodes=sample_data.vdc.shape[0],\n in_channels=sample_data.x.shape[-1] + sample_data.type_tens.shape[-1],\n hidden_channels=hidden_channels,\n num_layers=num_layers,\n dropout=0,\n activation=activation,\n bins=50,\n lr=lr,\n freeze_backbone=False,\n use_pooling=True,\n output_label='gain',\n output_sigmoid=False,\n lr_warmup={'peak_lr': lr, 'weight_decay': 0, \n 'warmup_updates': 50, 'tot_updates': 20000, 'end_lr': 5e-5},\n)\n\ntrain_dloader = DataLoader(train_dset, batch_size=train_batch_size, shuffle=True, num_workers=0)\nvalid_dloader = DataLoader(valid_dset, batch_size=valid_batch_size, num_workers=0)\ntest_dloader = DataLoader(test_dset, batch_size=test_batch_size, num_workers=0)\n\n# .to converts the weight dtype to match input\n# model = DeepGENNet(mdl_config).to(sample_data.x.dtype)\n\n",
"import time\n\nimport torch\nfrom torch_geometric.data import DataLoader\n\nfrom cgl.utils.params import ParamDict\nfrom cgl.data.graph_data import CircuitInMemDataset, CircuitGraphDataset\n\nfrom cgl.models.gnn import DeepGENNet\n\ns = time.time()\nprint('Loading the dataset ...')\nroot = '/store/nosnap/results/ngspice_biased_pmos_gain/two_stage_biased_pmos'\ncir_dset = CircuitGraphDataset(root=root, mode='train', circuit_type='opamp_biased_pmos')\nnode_output_idx = next(iter(cir_dset.graph_nodes.values()))['V_net6']\nvout_idx = torch.where((torch.where(cir_dset[0].output_node_mask)[0] == node_output_idx))[0].item()\n\ndef transform_fn(data):\n data.gain = data.vac_mag[vout_idx, 0]\n return data\n\ndset = CircuitInMemDataset(root=root, mode='train', transform=transform_fn)\nprint(f'Dataset was loaded in {time.time() - s:.6f} seconds.')\n\n# gains = []\n# for idx in range(len(dset)):\n# dset[idx].gain = dset[idx].vac_mag[vout_idx, 0]\n\nsample_data = dset[0]\n\nfract = 0.5\nsplits = dset.splits\ntrain_idx = int(fract * len(splits['train']))\ntrain_dset = dset[splits['train'][:train_idx]]\nvalid_dset = dset[splits['valid']]\ntest_dset = dset[splits['test']]\n\n\nlr = 3e-4\nactivation = 'relu'\nhidden_channels = 128\nnum_layers = 15\ntrain_batch_size = min(256, len(train_dset))\nvalid_batch_size = min(256, len(valid_dset)) \ntest_batch_size = min(256, len(test_dset)) \n\nexp_name = f'BIASED_PMOS_Scratch_5_DeepGEN_h{hidden_channels}_nl{num_layers}_bs{train_batch_size}_lr{lr:.0e}_{activation}'\n\nmdl_config = ParamDict(\n exp_name=exp_name,\n num_nodes=sample_data.vdc.shape[0],\n in_channels=sample_data.x.shape[-1] + sample_data.type_tens.shape[-1],\n hidden_channels=hidden_channels,\n num_layers=num_layers,\n dropout=0,\n lr=lr,\n activation=activation,\n bins=200,\n freeze_backbone=False,\n # lr_warmup={'peak_lr': lr, 'weight_decay': 0, \n # 'warmup_updates': 100, 'tot_updates': 600, 'end_lr': 1e-4},\n lr_warmup={'warmup': 0, 'max_iters': 1000},\n output_labels={'vdc': 1},\n proj_n_layers=3,\n)\n\ntrain_dloader = DataLoader(train_dset, batch_size=train_batch_size, shuffle=True, num_workers=0)\nvalid_dloader = DataLoader(valid_dset, batch_size=valid_batch_size, num_workers=0)\ntest_dloader = DataLoader(test_dset, batch_size=test_batch_size, num_workers=0)\n\n# .to converts the weight dtype to match input\nmodel = DeepGENNet(mdl_config).to(sample_data.x.dtype)\n\n",
"\n\n# load the config file and the model\n# load weights on to the model\n# run validation error checker\n# run test on new topology\n# create new topology graphs (locally first and then in another repo)\n\n# Note: methodologically generate diverse topologies for rdiv\n\n\nfrom collections import defaultdict\nfrom pathlib import Path\nimport argparse\nimport imp\nimport numpy as np\nfrom pprint import pprint\nimport matplotlib.pyplot as plt\n\nfrom tqdm import tqdm \n\nimport torch\nfrom torch_geometric.data import DataLoader\nfrom torch_geometric.utils import to_networkx\n\nimport pytorch_lightning as pl\n\n# This should be imported since the checkpoint loader needs it to reconstruct the state_dict\nfrom scripts.pretrain import ModelCheckpointNoOverride\nfrom cgl.utils.pdb import register_pdb_hook\nfrom cgl.utils.params import ParamDict\n\nfrom rdiv.gen_test_data import get_dataset\nfrom cgl.eval.evaluator import NodeEvaluator\n\n\nregister_pdb_hook()\n\nclass Tester:\n\n def __init__(self, pargs) -> None:\n \n pl.seed_everything(10)\n self.pargs = pargs\n conf = self.get_conf(pargs)\n model = conf.model\n\n ckpt_path = pargs.ckpt\n\n # test_dset = get_dataset(1000)\n # tdloader = DataLoader(test_dset, batch_size=100, drop_last=False, num_workers=0)\n tdloader = None\n\n evaluator = NodeEvaluator(bins=conf.mdl_conf.bins)\n\n if not ckpt_path:\n raise ValueError('Checkpoint path is not given.')\n else:\n print(f'Loading the checkpoint {Path(ckpt_path).absolute()} ...')\n model = model.load_from_checkpoint(ckpt_path).to(device=model.device, dtype=model.dtype)\n print('Checkpoint Loaded.')\n # HACK\n model.config = conf.mdl_conf\n\n # loaders = dict(valid=conf.valid_dloader, test=tdloader)\n loaders = dict(train=conf.train_dloader, valid=conf.valid_dloader, test=conf.test_dloader)#, valid=conf.valid_dloader, test=tdloader)\n acc_dict = defaultdict(lambda: [])\n results_loader = {}\n for mode, loader in loaders.items():\n if loader is None:\n continue\n results_batch = ParamDict({'vdc_pred': [], 'vdc_target': [], 'graph_id': [], 'graph_list': []})\n for batch in tqdm(loader):\n device = torch.device(pargs.device)\n model = model.to(device)\n if isinstance(batch, dict):\n batch = ParamDict({k: v.to(device) for k, v in batch.items()})\n else:\n batch = batch.to(device)\n results = model.predict(batch, compute_loss=True)\n \n mask = results.input.data.output_node_mask\n batch_id = results.input.data.batch[mask]\n\n results_batch['graph_id'].append(batch_id.cpu().numpy() + len(results_batch['graph_list']))\n results_batch['vdc_pred'].append(results.output.vdc.cpu().numpy())\n results_batch['vdc_target'].append(results.input.data.vdc.cpu().numpy())\n\n results_batch['graph_list'] += results.input.data.to_data_list()\n acc_dict[mode].append(results['eval']['vdc_acc'])\n \n results_loader[mode] = ParamDict()\n for k, v in results_batch.items():\n if k not in ['graph_list']:\n results_loader[mode][k] = np.concatenate(v, axis=0)\n else:\n results_loader[mode][k] = v\n \n\n train_dict = {'y_true': results_loader['train'].vdc_target, 'y_pred': results_loader['train'].vdc_pred}\n labels = evaluator.eval(train_dict, return_cond=True).flatten()\n print(f'Train Label accuracy: {labels.sum() / labels.flatten().shape[0]}')\n \n for k in acc_dict:\n acc_dict[k] = np.mean(acc_dict[k])\n acc_dict = dict(acc_dict)\n\n # pprint(acc_dict)\n print(f'{acc_dict[\"train\"]:.4f}/{acc_dict[\"valid\"]:.4f}/{acc_dict[\"test\"]:.4f}')\n\n\n #################################### Plotting pre-training insights\n\n # ## Plot 
histogram of target and predicted values and their distribution over wrong data points\n # min_val, max_val = min(results_loader['train'].vdc_target), max(results_loader['train'].vdc_target)\n # min_val, max_val = float(min_val), float(max_val)\n \n # plt.close()\n # _, (ax1, ax2) = plt.subplots(2, 1, figsize=(15, 10))\n # ax1.hist(results_loader['train'].vdc_target.flatten(), range=[min_val, max_val], bins=conf.mdl_conf.bins)\n # ax1.set_title('Histogram of the ground truth for all data points')\n\n # ax2.hist(results_loader['train'].vdc_target[~labels].flatten(), range=[min_val, max_val], bins=conf.mdl_conf.bins)\n # ax2.hist(results_loader['train'].vdc_pred[~labels].flatten(), range=[min_val, max_val], bins=conf.mdl_conf.bins, color='orange', alpha=0.2)\n # ax2.set_title('Histogram of the ground truth for False data points')\n\n # plt.savefig('rladder_assets/train_hist_neg.png')\n # # plt.savefig('opamp_train_hist_neg.png') # testing the opamp results\n\n # ## Plot histogram of distance from ground trough for wrong data points\n # plt.close()\n\n # _, ax = plt.subplots(1, 1, figsize=(8, 6))\n # threshold = (max_val - min_val) / conf.mdl_conf.bins\n # norm_dist = (results_loader['train'].vdc_pred - results_loader['train'].vdc_target) / threshold\n\n # bins = conf.mdl_conf.bins\n # min_range, max_range = -bins/2, bins/2\n # ax.hist(norm_dist[~labels].flatten(), range=[min_range, max_range], bins=bins)\n # ax.set_title('Histogram of normalized distance for False data points')\n\n # plt.savefig('rladder_assets/train_hist_dist_neg.png')\n\n # ## For those graphs that have False predictions, what are the False nodes, what is the # nodes, and the percentage of False nodes?\n # graph_to_false_node_map = defaultdict(lambda: [])\n # for node_idx, graph_idx in enumerate(results_loader['train']['graph_id'][~labels]):\n # graph_to_false_node_map[graph_idx].append(node_idx)\n \n # n_branch_map = {x: 0 for x in range(11, 12)}\n # for graph_idx in graph_to_false_node_map:\n # graph_data = results_loader['train']['graph_list'][graph_idx]\n # # this relationship is true only for rladder\n # n_branch = (len(graph_data.vdc) - 1) // 2\n # n_branch_map[n_branch] += 1\n\n # plt.close()\n # plt.pie(list(n_branch_map.values()), labels=n_branch_map.keys())\n # plt.savefig('rladder_assets/train_false_graph_pie.png')\n\n\n #################################### Plotting Validation set insights\n\n # result_key = 'train'\n # for node_id in [0, 1, 2]:\n # vdc_target = results_loader[result_key].vdc_target[node_id::3]\n # vdc_pred = results_loader[result_key].vdc_pred[node_id::3]\n # valid_dict = {'y_true': vdc_target, 'y_pred': vdc_pred}\n # labels = evaluator.eval(valid_dict, return_cond=True).flatten()\n # acc = labels.sum() / labels.flatten().shape[0]\n # if node_id == 1:\n # print(f'[{node_id}] {result_key} Label accuracy: {acc}')\n # min_val, max_val = min(vdc_target), max(vdc_target)\n # min_val, max_val = float(min_val), float(max_val)\n\n # plt.close()\n # ax = plt.gca()\n # ax.scatter(vdc_target.flatten(), vdc_pred.flatten(), s=5)\n # ax.set_ylabel('Prediciton')\n # ax.set_xlabel('Target')\n # ax.plot(np.arange(0, 1, 0.01), np.arange(0, 1, 0.01), linestyle='--')\n # plt.savefig(f'rladder_assets/{result_key}_pred_vs_target.png')\n\n \n \n # plt.close()\n # _, (ax1, ax2) = plt.subplots(2, 1, figsize=(15, 10))\n # ax1.hist(vdc_target.flatten(), range=[min_val, max_val], bins=conf.mdl_conf.bins)\n # ax1.set_title('Histogram of the ground truth for all data points')\n\n # ax2.hist(vdc_target[~labels].flatten(), 
range=[min_val, max_val], bins=conf.mdl_conf.bins)\n # ax2.hist(vdc_pred[~labels].flatten(), range=[min_val, max_val], bins=conf.mdl_conf.bins, color='orange', alpha=0.2)\n # ax2.set_title('Histogram of the ground truth for False data points')\n\n # plt.savefig(f'rladder_assets/{result_key}_hist_neg_node_{node_id}.png')\n\n # ## Plot histogram of distance from ground trough for wrong data points\n # plt.close()\n\n # _, ax = plt.subplots(1, 1, figsize=(8, 6))\n # threshold = (max_val - min_val) / conf.mdl_conf.bins\n # norm_dist = (vdc_pred - vdc_target) / threshold\n\n # bins = conf.mdl_conf.bins\n # min_range, max_range = -bins/2, bins/2\n # ax.hist(norm_dist[~labels].flatten(), range=[min_range, max_range], bins=bins)\n # ax.set_title('Histogram of normalized distance for False data points')\n\n # plt.savefig(f'rladder_assets/{result_key}_hist_dist_neg_node_{node_id}.png')\n\n\n \n def get_conf(self, pargs):\n conf_path = pargs.path\n print(f'Loading from the config file {conf_path}')\n conf_module = imp.load_source('conf', conf_path)\n\n conf = ParamDict(\n exp_name=conf_module.exp_name, \n model=conf_module.model,\n train_dloader=conf_module.train_dloader,\n valid_dloader=conf_module.valid_dloader,\n test_dloader=conf_module.test_dloader,\n mdl_conf=conf_module.mdl_config,\n )\n\n return conf\n\ndef parse_args():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--path', type=str, help='config file path')\n parser.add_argument('--device', type=str, default='cuda')\n # parser.add_argument('--output', type=str, default='/store/nosnap/results/cgl', \n # help='The output directory')\n # parser.add_argument('--max_epochs', type=int, default=None, \n # help='The maximum number of training epochs (if earlier than max_steps)')\n # parser.add_argument('--max_steps', type=int, default=None, \n # help='The maximum number of training steps (if earlier than max_epochs)')\n parser.add_argument('--ckpt', type=str, help='Resume from this checkpoint if valid.')\n # parser.add_argument('--seed', type=int, default=0, help='Random seed')\n # parser.add_argument('--project', type=str, default='cgl-PT', help='project name for wandb')\n # parser.add_argument('--wandb_id', type=str, help='Wandb id to allow for resuming')\n # parser.add_argument('--run_name', type=str, default=None, help='Wandb run name, if not provided the deafult of wandb is used')\n # parser.add_argument('--profile', type=int, default=0, help='Set to 1 to profile individual steps during training')\n # parser.add_argument('--test', type=int, default=1, help='Set to 0 to skip running the test scripts')\n # parser.add_argument('--log_freq', type=int, default=10, help='Wandb log every n steps')\n # parser.add_argument('--ckpt_intervel_steps', type=int, default=100, \n # help='The frequency in terms of steps, at which checkpoint callback run at')\n # parser.add_argument('--val_intervel_steps', type=int, default=100, \n # help='The frequency in terms of steps, at which validaiton loop run at')\n \n return parser.parse_args()\n\n\nif __name__ == '__main__':\n Tester(parse_args())"
] | [
[
"torch.where"
],
[
"torch.where"
],
[
"torch.device",
"numpy.mean",
"numpy.concatenate"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Zamwell/pandapower | [
"ce51946342109e969b87b60c8883d7eec02d3060",
"630e3278ca012535f78282ae73f1b86f3fe932fc",
"630e3278ca012535f78282ae73f1b86f3fe932fc"
] | [
"pandapower/plotting/plotly/traces.py",
"pandapower/pypower/pqcost.py",
"pandapower/pypower/polycost.py"
] | [
"# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2019 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\n\nimport math\n\nimport numpy as np\nimport pandas as pd\nfrom packaging import version\nfrom collections.abc import Iterable\n\nfrom pandapower.plotting.plotly.get_colors import get_plotly_color, get_plotly_cmap\nfrom pandapower.plotting.plotly.mapbox_plot import _on_map_test, _get_mapbox_token, MapboxTokenMissing\n\ntry:\n import pplog as logging\nexcept ImportError:\n import logging\nlogger = logging.getLogger(__name__)\n\ntry:\n from plotly import __version__ as plotly_version\n from plotly.graph_objs.scatter.marker import ColorBar\n from plotly.graph_objs import Figure, Layout\n from plotly.graph_objs.layout import XAxis, YAxis\n from plotly.graph_objs.scatter import Line, Marker\n from plotly.graph_objs.scattermapbox import Line as scmLine\n from plotly.graph_objs.scattermapbox import Marker as scmMarker\nexcept ImportError:\n logger.info(\"Failed to import plotly - interactive plotting will not be available\")\n\n\ndef version_check():\n if version.parse(plotly_version) < version.parse(\"3.1.1\"):\n raise UserWarning(\"Your plotly version {} is no longer supported.\\r\\n\"\n \"Please upgrade your python-plotly installation, \"\n \"e.g., via pip install --upgrade plotly\".format(__version__))\n\n\ndef _in_ipynb():\n \"\"\"\n an auxiliary function which checks if plot is called from a jupyter-notebook or not\n \"\"\"\n import __main__ as main\n return not hasattr(main, '__file__')\n\n\ndef sum_line_length(pts):\n pt_diff = lambda p: (p[0][0] - p[1][0], p[0][1] - p[1][1])\n diffs = map(pt_diff, zip(pts[:-1], pts[1:]))\n line_length = sum(math.hypot(d1, d2) for d1, d2 in diffs)\n return line_length\n\n\ndef get_line_neutral(coord):\n if len(coord) == 1:\n return coord[0]\n half_length = sum_line_length(coord) / 2.0\n length = 0.0\n ind = 0\n while length < half_length:\n ind += 1\n length = sum_line_length(coord[:ind])\n\n start_coord = coord[ind - 2]\n end_coord = coord[ind - 1]\n mid = [(a1 + a2) / 2.0 for a1, a2 in zip(start_coord, end_coord)]\n\n return mid\n\n\ndef create_edge_center_trace(line_trace, size=1, patch_type=\"circle\", color=\"white\", infofunc=None,\n trace_name='edge_center', use_line_geodata=False):\n \"\"\"\n Creates a plotly trace of pandapower buses.\n\n INPUT:\n **line traces** (from pandapowerNet) - The already generated line traces with center geodata\n\n OPTIONAL:\n\n **size** (int, 5) - patch size\n\n **patch_type** (str, \"circle\") - patch type, can be\n\n - \"circle\" for a circle\n - \"square\" for a rectangle\n - \"diamond\" for a diamond\n - much more pathc types at https://plot.ly/python/reference/#scatter-marker\n\n **infofunc** (pd.Series, None) - hoverinfo for each trace element. 
Indices should correspond to the pandapower element indices\n\n **trace_name** (String, \"buses\") - name of the trace which will appear in the legend\n\n **color** (String, \"blue\") - color of buses in the trace\n\n \"\"\"\n # color = get_plotly_color(color)\n\n center_trace = dict(type='scatter', text=[], mode='markers', hoverinfo='text', name=trace_name,\n marker=dict(color=color, size=size, symbol=patch_type))\n\n if not use_line_geodata:\n center_trace['x'], center_trace['y'] = (line_trace[0][\"x\"][1::4], line_trace[0][\"y\"][1::4])\n else:\n x, y = [], []\n for trace in line_trace:\n coord = list(zip(trace[\"x\"], trace[\"y\"]))\n mid_coord = get_line_neutral(coord)\n x.append(mid_coord[0])\n y.append(mid_coord[1])\n\n center_trace['x'], center_trace['y'] = (x, y)\n\n center_trace['text'] = infofunc\n\n return center_trace\n\n\ndef create_bus_trace(net, buses=None, size=5, patch_type=\"circle\", color=\"blue\", infofunc=None,\n trace_name='buses', legendgroup=None, cmap=None, cmap_vals=None,\n cbar_title=None, cmin=None, cmax=None, cpos=1.0, colormap_column=\"vm_pu\"):\n \"\"\"\n Creates a plotly trace of pandapower buses.\n\n INPUT:\n **net** (pandapowerNet) - The pandapower network\n\n OPTIONAL:\n **buses** (list, None) - The buses for which the collections are created.\n If None, all buses in the network are considered.\n\n **size** (int, 5) - patch size\n\n **patch_type** (str, \"circle\") - patch type, can be\n\n - \"circle\" for a circle\n - \"square\" for a rectangle\n - \"diamond\" for a diamond\n - much more pathc types at https://plot.ly/python/reference/#scatter-marker\n\n **infofunc** (pd.Series, None) - hoverinfo for bus elements. Indices should correspond to the pandapower element indices\n\n **trace_name** (String, \"buses\") - name of the trace which will appear in the legend\n\n **color** (String, \"blue\") - color of buses in the trace\n\n **cmap** (String, None) - name of a colormap which exists within plotly (Greys, YlGnBu, Greens, YlOrRd,\n Bluered, RdBu, Reds, Blues, Picnic, Rainbow, Portland, Jet, Hot, Blackbody, Earth, Electric, Viridis)\n alternatively a custom discrete colormap can be used\n\n **cmap_vals** (list, None) - values used for coloring using colormap\n\n **cbar_title** (String, None) - title for the colorbar\n\n **cmin** (float, None) - colorbar range minimum\n\n **cmax** (float, None) - colorbar range maximum\n\n **cpos** (float, 1.1) - position of the colorbar\n\n **colormap_column** (str, \"vm_pu\") - set color of bus according to this variable\n\n \"\"\"\n color = get_plotly_color(color)\n\n bus_trace = dict(type='scatter', text=[], mode='markers', hoverinfo='text', name=trace_name,\n marker=dict(color=color, size=size, symbol=patch_type))\n\n buses = net.bus.index.tolist() if buses is None else list(buses)\n bus_plot_index = [b for b in buses if b in list(set(buses) & set(net.bus_geodata.index))]\n\n bus_trace['x'], bus_trace['y'] = (net.bus_geodata.loc[bus_plot_index, 'x'].tolist(),\n net.bus_geodata.loc[bus_plot_index, 'y'].tolist())\n\n if not isinstance(infofunc, pd.Series) and isinstance(infofunc, Iterable) and len(infofunc) == len(buses):\n infofunc = pd.Series(index=buses, data=infofunc)\n\n bus_trace['text'] = net.bus.loc[bus_plot_index, 'name'] if infofunc is None else infofunc.loc[buses]\n\n if legendgroup:\n bus_trace['legendgroup'] = legendgroup\n\n # if color map is set\n if cmap is not None:\n # TODO introduce discrete colormaps (see contour plots in plotly)\n # if cmap_vals are not given\n\n cmap = 'Jet' if cmap is True else 
cmap\n\n if cmap_vals is not None:\n cmap_vals = cmap_vals\n else:\n if net.res_line.shape[0] == 0:\n logger.error(\"There are no power flow results for buses voltage magnitudes which are default for bus \"\n \"colormap coloring...\"\n \"set cmap_vals input argument if you want colormap according to some specific values...\")\n cmap_vals = net.res_bus.loc[bus_plot_index, colormap_column].values\n\n cmap_vals = net.res_bus.loc[bus_plot_index, colormap_column] if cmap_vals is None else cmap_vals\n\n cmin = cmin if cmin else cmap_vals.min()\n cmax = cmax if cmax else cmap_vals.max()\n\n bus_trace['marker'] = Marker(size=size,\n color=cmap_vals, cmin=cmin, cmax=cmax,\n colorscale=cmap,\n colorbar=ColorBar(thickness=10,\n x=cpos),\n symbol=patch_type\n )\n\n if cbar_title:\n bus_trace['marker']['colorbar']['title'] = cbar_title\n\n bus_trace['marker']['colorbar']['title']['side'] = 'right'\n\n return [bus_trace]\n\n\ndef _get_line_geodata_plotly(net, lines, use_line_geodata):\n xs = []\n ys = []\n if use_line_geodata:\n for line_ind, _ in lines.iterrows():\n line_coords = net.line_geodata.loc[line_ind, 'coords']\n linex, liney = list(zip(*line_coords))\n xs += linex\n xs += [None]\n ys += liney\n ys += [None]\n else:\n # getting x and y values from bus_geodata for from and to side of each line\n\n from_bus = net.bus_geodata.loc[lines.from_bus, 'x'].tolist()\n to_bus = net.bus_geodata.loc[lines.to_bus, 'x'].tolist()\n # center point added because of the hovertool\n center = (np.array(from_bus) + np.array(to_bus)) / 2\n none_list = [None] * len(from_bus)\n xs = np.array([from_bus, center, to_bus, none_list]).T.flatten().tolist()\n\n from_bus = net.bus_geodata.loc[lines.from_bus, 'y'].tolist()\n to_bus = net.bus_geodata.loc[lines.to_bus, 'y'].tolist()\n # center point added because of the hovertool\n center = (np.array(from_bus) + np.array(to_bus)) / 2\n none_list = [None] * len(from_bus)\n ys = np.array([from_bus, center, to_bus, none_list]).T.flatten().tolist()\n\n # [:-1] is because the trace will not appear on maps if None is at the end\n return xs[:-1], ys[:-1]\n\n\ndef create_line_trace(net, lines=None, use_line_geodata=True, respect_switches=False, width=1.0,\n color='grey', infofunc=None, trace_name='lines', legendgroup=None,\n cmap=None, cbar_title=None, show_colorbar=True, cmap_vals=None, cmin=None,\n cmax=None, cpos=1.1):\n \"\"\"\n Creates a plotly trace of pandapower lines.\n\n INPUT:\n **net** (pandapowerNet) - The pandapower network\n\n OPTIONAL:\n **lines** (list, None) - The lines for which the collections are created.\n If None, all lines in the network are considered.\n\n **width** (int, 1) - line width\n\n **respect_switches** (bool, False) - flag for consideration of disconnected lines\n\n **infofunc** (pd.Series, None) - hoverinfo for line elements. Indices should correspond to the pandapower element indices\n\n **trace_name** (String, \"lines\") - name of the trace which will appear in the legend\n\n **color** (String, \"grey\") - color of lines in the trace\n\n **legendgroup** (String, None) - defines groups of layers that will be displayed in a legend\n e.g. 
groups according to voltage level (as used in `vlevel_plotly`)\n\n **cmap** (String, None) - name of a colormap which exists within plotly if set to True default `Jet`\n colormap is used, alternative colormaps : Greys, YlGnBu, Greens, YlOrRd,\n Bluered, RdBu, Reds, Blues, Picnic, Rainbow, Portland, Jet, Hot, Blackbody, Earth, Electric, Viridis\n\n **cmap_vals** (list, None) - values used for coloring using colormap\n\n **show_colorbar** (bool, False) - flag for showing or not corresponding colorbar\n\n **cbar_title** (String, None) - title for the colorbar\n\n **cmin** (float, None) - colorbar range minimum\n\n **cmax** (float, None) - colorbar range maximum\n\n **cpos** (float, 1.1) - position of the colorbar\n\n \"\"\"\n\n color = get_plotly_color(color)\n\n # defining lines to be plot\n lines = net.line.index.tolist() if lines is None else list(lines)\n if len(lines) == 0:\n return []\n\n if infofunc is not None:\n if not isinstance(infofunc, pd.Series) and isinstance(infofunc, Iterable) and len(infofunc) == len(lines):\n infofunc = pd.Series(index=lines, data=infofunc)\n if len(infofunc) != len(lines) and len(infofunc) != len(net.line):\n raise UserWarning(\"Different amount of hover info than lines to plot\")\n assert isinstance(infofunc, pd.Series), \\\n \"infofunc should be a pandas series with the net.line.index to the infofunc contents\"\n\n no_go_lines = set()\n if respect_switches:\n no_go_lines = set(lines) & set(net.switch.element[(net.switch.et == \"l\") & (net.switch.closed == 0)])\n\n lines_to_plot = net.line.loc[set(net.line.index) & (set(lines) - no_go_lines)]\n no_go_lines_to_plot = None\n use_line_geodata = use_line_geodata if net.line_geodata.shape[0] > 0 else False\n\n if use_line_geodata:\n lines_to_plot = lines_to_plot.loc[set(lines_to_plot.index) & set(net.line_geodata.index)]\n else:\n lines_with_geodata = lines_to_plot.from_bus.isin(net.bus_geodata.index) & \\\n lines_to_plot.to_bus.isin(net.bus_geodata.index)\n lines_to_plot = lines_to_plot.loc[lines_with_geodata]\n\n cmap_lines = None\n if cmap is not None:\n # workaround: if colormap plot is used, each line need to be separate scatter object because\n # plotly still doesn't support appropriately colormap for line objects\n # TODO correct this when plotly solves existing github issue about Line colorbar\n\n cmap = 'jet' if cmap is True else cmap\n\n if cmap_vals is not None:\n if not isinstance(cmap_vals, np.ndarray):\n cmap_vals = np.asarray(cmap_vals)\n else:\n if net.res_line.shape[0] == 0:\n logger.error(\"There are no power flow results for lines which are default for line colormap coloring...\"\n \"set cmap_vals input argument if you want colormap according to some specific values...\")\n cmap_vals = net.res_line.loc[lines_to_plot.index, 'loading_percent'].values\n\n cmap_lines = get_plotly_cmap(cmap_vals, cmap_name=cmap, cmin=cmin, cmax=cmax)\n if len(cmap_lines) == len(net.line):\n # some lines are not plotted although cmap_value were provided for all lines\n line_idx_map = dict(zip(net.line.loc[lines].index.tolist(), range(len(lines))))\n cmap_lines = [cmap_lines[line_idx_map[idx]] for idx in lines_to_plot.index]\n else:\n assert len(cmap_lines) == len(lines_to_plot), \\\n \"Different amounts of cmap values and lines to plot were supplied\"\n\n line_traces = []\n for col_i, (idx, line) in enumerate(lines_to_plot.iterrows()):\n line_color = color\n line_info = line['name']\n if cmap is not None:\n try:\n line_color = cmap_lines[col_i]\n line_info = line['name'] if infofunc is None else 
infofunc.loc[idx]\n except IndexError:\n logger.warning(\"No color and info for line {:d} (name: {}) available\".format(idx, line['name']))\n\n line_trace = dict(type='scatter', text=[], hoverinfo='text', mode='lines', name=trace_name,\n line=Line(width=width, color=color))\n\n line_trace['x'], line_trace['y'] = _get_line_geodata_plotly(net, lines_to_plot.loc[idx:idx], use_line_geodata)\n\n line_trace['line']['color'] = line_color\n\n line_trace['text'] = line_info\n\n line_traces.append(line_trace)\n\n if show_colorbar and cmap is not None:\n\n cmin = cmin if cmin else cmap_vals.min()\n cmax = cmax if cmax else cmap_vals.max()\n try:\n # TODO for custom colormaps\n cbar_cmap_name = 'Jet' if cmap is 'jet' else cmap\n # workaround to get colorbar for lines (an unvisible node is added)\n lines_cbar = dict(type='scatter', x=[net.bus_geodata.x[0]], y=[net.bus_geodata.y[0]], mode='markers',\n marker=Marker(size=0, cmin=cmin, cmax=cmax,\n color='rgb(255,255,255)',\n colorscale=cbar_cmap_name,\n colorbar=ColorBar(thickness=10,\n x=cpos),\n ))\n if cbar_title:\n lines_cbar['marker']['colorbar']['title'] = cbar_title\n\n lines_cbar['marker']['colorbar']['title']['side'] = 'right'\n\n line_traces.append(lines_cbar)\n except:\n pass\n\n if len(no_go_lines) > 0:\n no_go_lines_to_plot = net.line.loc[no_go_lines]\n for idx, line in no_go_lines_to_plot.iterrows():\n line_color = color\n line_trace = dict(type='scatter',\n text=[], hoverinfo='text', mode='lines', name='disconnected lines',\n line=Line(width=width / 2, color='grey', dash='dot'))\n\n line_trace['x'], line_trace['y'] = _get_line_geodata_plotly(net, no_go_lines_to_plot.loc[idx:idx], use_line_geodata)\n\n line_trace['line']['color'] = line_color\n try:\n line_trace['text'] = infofunc.loc[idx]\n except (KeyError, IndexError):\n line_trace[\"text\"] = line['name']\n\n line_traces.append(line_trace)\n\n if legendgroup:\n line_trace['legendgroup'] = legendgroup\n\n # sort infofunc so that it is the correct order lines_to_plot + no_go_lines_to_plot\n if infofunc is not None:\n if not isinstance(infofunc, pd.Series) and isinstance(infofunc, Iterable) and len(infofunc) == len(net.line):\n infofunc = pd.Series(index=net.line.index, data=infofunc)\n assert isinstance(infofunc, pd.Series), \\\n \"infofunc should be a pandas series with the net.line.index to the infofunc contents\"\n sorted_idx = lines_to_plot.index.tolist()\n if no_go_lines_to_plot is not None:\n sorted_idx += no_go_lines_to_plot.index.tolist()\n infofunc = infofunc.loc[sorted_idx]\n\n center_trace = create_edge_center_trace(line_traces, color=color, infofunc=infofunc,\n use_line_geodata=use_line_geodata)\n line_traces.append(center_trace)\n return line_traces\n\n\ndef create_trafo_trace(net, trafos=None, color='green', width=5, infofunc=None, cmap=None,\n trace_name='trafos', cmin=None, cmax=None, cmap_vals=None, use_line_geodata=None):\n \"\"\"\n Creates a plotly trace of pandapower trafos.\n\n INPUT:\n **net** (pandapowerNet) - The pandapower network\n\n OPTIONAL:\n **trafos** (list, None) - The trafos for which the collections are created.\n If None, all trafos in the network are considered.\n\n **width** (int, 5) - line width\n\n **infofunc** (pd.Series, None) - hoverinfo for trafo elements. 
Indices should correspond to the pandapower element indices\n\n **trace_name** (String, \"lines\") - name of the trace which will appear in the legend\n\n **color** (String, \"green\") - color of lines in the trace\n\n **cmap** (bool, False) - name of a colormap which exists within plotly (Greys, YlGnBu, Greens, YlOrRd,\n Bluered, RdBu, Reds, Blues, Picnic, Rainbow, Portland, Jet, Hot, Blackbody, Earth, Electric, Viridis)\n\n **cmap_vals** (list, None) - values used for coloring using colormap\n\n **cbar_title** (String, None) - title for the colorbar\n\n **cmin** (float, None) - colorbar range minimum\n\n **cmax** (float, None) - colorbar range maximum\n\n\n \"\"\"\n color = get_plotly_color(color)\n\n # defining lines to be plot\n trafos = net.trafo.index.tolist() if trafos is None else list(trafos)\n if len(trafos) == 0:\n return []\n\n trafo_buses_with_geodata = net.trafo.hv_bus.isin(net.bus_geodata.index) & \\\n net.trafo.lv_bus.isin(net.bus_geodata.index)\n\n trafos_mask = net.trafo.index.isin(trafos)\n trafos_to_plot = net.trafo[trafo_buses_with_geodata & trafos_mask]\n\n if infofunc is not None:\n if not isinstance(infofunc, pd.Series) and isinstance(infofunc, Iterable) and len(infofunc) == len(trafos):\n infofunc = pd.Series(index=trafos, data=infofunc)\n assert isinstance(infofunc, pd.Series), \\\n \"infofunc should be a pandas series with the net.trafo.index to the infofunc contents\"\n infofunc = infofunc.loc[trafos_to_plot.index]\n\n cmap_colors = []\n if cmap is not None:\n cmap = 'jet' if cmap is None else cmap\n\n cmin = 0 if cmin is None else cmin\n cmax = 100 if cmin is None else cmax\n\n if cmap_vals is not None:\n cmap_vals = cmap_vals\n else:\n if net.res_trafo.shape[0] == 0:\n logger.error(\"There are no power flow results for lines which are default for line colormap coloring...\"\n \"set cmap_vals input argument if you want colormap according to some specific values...\")\n cmap_vals = net.res_trafo.loc[trafos_to_plot.index, 'loading_percent'].values\n\n cmap_colors = get_plotly_cmap(cmap_vals, cmap_name=cmap, cmin=cmin, cmax=cmax)\n\n trafo_traces = []\n for col_i, (idx, trafo) in enumerate(trafos_to_plot.iterrows()):\n if cmap is not None:\n color = cmap_colors[col_i]\n\n trafo_trace = dict(type='scatter', text=[], line=Line(width=width, color=color),\n hoverinfo='text', mode='lines', name=trace_name)\n\n trafo_trace['text'] = trafo['name'] if infofunc is None else infofunc.loc[idx]\n\n from_bus = net.bus_geodata.loc[trafo.hv_bus, 'x']\n to_bus = net.bus_geodata.loc[trafo.lv_bus, 'x']\n trafo_trace['x'] = [from_bus, (from_bus + to_bus) / 2, to_bus]\n\n from_bus = net.bus_geodata.loc[trafo.hv_bus, 'y']\n to_bus = net.bus_geodata.loc[trafo.lv_bus, 'y']\n trafo_trace['y'] = [from_bus, (from_bus + to_bus) / 2, to_bus]\n\n trafo_traces.append(trafo_trace)\n\n center_trace = create_edge_center_trace(trafo_traces, color=color, infofunc=infofunc,\n use_line_geodata=use_line_geodata)\n trafo_traces.append(center_trace)\n return trafo_traces\n\n\ndef draw_traces(traces, on_map=False, map_style='basic', showlegend=True, figsize=1,\n aspectratio='auto', filename=\"temp-plot.html\"):\n \"\"\"\n plots all the traces (which can be created using :func:`create_bus_trace`, :func:`create_line_trace`,\n :func:`create_trafo_trace`)\n to PLOTLY (see https://plot.ly/python/)\n\n INPUT:\n **traces** - list of dicts which correspond to plotly traces\n generated using: `create_bus_trace`, `create_line_trace`, `create_trafo_trace`\n\n OPTIONAL:\n **on_map** (bool, False) - enables using 
mapbox plot in plotly\n\n **map_style** (str, 'basic') - enables using mapbox plot in plotly\n\n - 'streets'\n - 'bright'\n - 'light'\n - 'dark'\n - 'satellite'\n\n **showlegend** (bool, 'True') - enables legend display\n\n **figsize** (float, 1) - aspectratio is multiplied by it in order to get final image size\n\n **aspectratio** (tuple, 'auto') - when 'auto' it preserves original aspect ratio of the network geodata\n any custom aspectration can be given as a tuple, e.g. (1.2, 1)\n\n **filename** (str, \"temp-plot.html\") - plots to a html file called filename\n\n \"\"\"\n\n if on_map:\n try:\n on_map = _on_map_test(traces[0]['x'][0], traces[0]['y'][0])\n except:\n logger.warning(\"Test if geo-data are in lat/long cannot be performed using geopy -> \"\n \"eventual plot errors are possible.\")\n\n if on_map is False:\n logger.warning(\"Existing geodata are not real lat/lon geographical coordinates. -> \"\n \"plot on maps is not possible.\\n\"\n \"Use geo_data_to_latlong(net, projection) to transform geodata from specific projection.\")\n\n if on_map:\n # change traces for mapbox\n # change trace_type to scattermapbox and rename x to lat and y to lon\n for trace in traces:\n trace['lat'] = trace.pop('x')\n trace['lon'] = trace.pop('y')\n trace['type'] = 'scattermapbox'\n if \"line\" in trace and isinstance(trace[\"line\"], Line):\n # scattermapboxplot lines do not support dash for some reason, make it a red line instead\n if \"dash\" in trace[\"line\"]._props:\n _prps = dict(trace[\"line\"]._props)\n _prps.pop(\"dash\", None)\n _prps[\"color\"] = \"red\"\n trace[\"line\"] = scmLine(_prps)\n else:\n trace[\"line\"] = scmLine(dict(trace[\"line\"]._props))\n elif \"marker\" in trace and isinstance(trace[\"marker\"], Marker):\n trace[\"marker\"] = scmMarker(trace[\"marker\"]._props)\n\n # setting Figure object\n fig = Figure(data=traces, # edge_trace\n layout=Layout(\n titlefont=dict(size=16),\n showlegend=showlegend,\n autosize=True if aspectratio is 'auto' else False,\n hovermode='closest',\n margin=dict(b=5, l=5, r=5, t=5),\n # annotations=[dict(\n # text=\"\",\n # showarrow=False,\n # xref=\"paper\", yref=\"paper\",\n # x=0.005, y=-0.002)],\n xaxis=XAxis(showgrid=False, zeroline=False, showticklabels=False),\n yaxis=YAxis(showgrid=False, zeroline=False, showticklabels=False),\n # legend=dict(x=0, y=1.0)\n ), )\n\n # check if geodata are real geographycal lat/lon coordinates using geopy\n\n if on_map:\n try:\n mapbox_access_token = _get_mapbox_token()\n except Exception:\n logger.exception('mapbox token required for map plots. 
'\n 'Get Mapbox token by signing in to https://www.mapbox.com/.\\n'\n 'After getting a token, set it to pandapower using:\\n'\n 'pandapower.plotting.plotly.mapbox_plot.set_mapbox_token(\\'<token>\\')')\n raise MapboxTokenMissing\n\n fig['layout']['mapbox'] = dict(accesstoken=mapbox_access_token,\n bearing=0,\n center=dict(lat=pd.Series(traces[0]['lat']).dropna().mean(),\n lon=pd.Series(traces[0]['lon']).dropna().mean()),\n style=map_style,\n pitch=0,\n zoom=11)\n\n # default aspectratio: if on_map use auto, else use 'original'\n aspectratio = 'original' if not on_map and aspectratio is 'auto' else aspectratio\n\n if aspectratio is not 'auto':\n if aspectratio is 'original':\n # TODO improve this workaround for getting original aspectratio\n xs = []\n ys = []\n for trace in traces:\n xs += trace['x']\n ys += trace['y']\n x_dropna = pd.Series(xs).dropna()\n y_dropna = pd.Series(ys).dropna()\n xrange = x_dropna.max() - x_dropna.min()\n yrange = y_dropna.max() - y_dropna.min()\n ratio = xrange / yrange\n if ratio < 1:\n aspectratio = (ratio, 1.)\n else:\n aspectratio = (1., 1 / ratio)\n\n aspectratio = np.array(aspectratio) / max(aspectratio)\n fig['layout']['width'], fig['layout']['height'] = ([ar * figsize * 700 for ar in aspectratio])\n\n # check if called from ipynb or not in order to consider appropriate plot function\n if _in_ipynb():\n from plotly.offline import init_notebook_mode, iplot as plot\n init_notebook_mode()\n else:\n from plotly.offline import plot as plot\n\n plot(fig, filename=filename)\n",
"# Copyright (c) 1996-2015 PSERC. All rights reserved.\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file.\n\n\"\"\"Splits the gencost variable into two pieces if costs are given for Qg.\n\"\"\"\n\nfrom sys import stderr\n\nfrom numpy import array, arange\n\n\ndef pqcost(gencost, ng, on=None):\n \"\"\"Splits the gencost variable into two pieces if costs are given for Qg.\n\n Checks whether C{gencost} has cost information for reactive power\n generation (rows C{ng+1} to C{2*ng}). If so, it returns the first C{ng}\n rows in C{pcost} and the last C{ng} rows in C{qcost}. Otherwise, leaves\n C{qcost} empty. Also does some error checking.\n If C{on} is specified (list of indices of generators which are on line)\n it only returns the rows corresponding to these generators.\n\n @author: Ray Zimmerman (PSERC Cornell)\n \"\"\"\n if on is None:\n on = arange(ng)\n\n if gencost.shape[0] == ng:\n pcost = gencost[on, :]\n qcost = array([])\n elif gencost.shape[0] == 2 * ng:\n pcost = gencost[on, :]\n qcost = gencost[on + ng, :]\n else:\n stderr.write('pqcost: gencost has wrong number of rows\\n')\n\n return pcost, qcost\n",
"# Copyright (c) 1996-2015 PSERC. All rights reserved.\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file.\n\n\"\"\"Evaluates polynomial generator cost & derivatives.\n\"\"\"\n\nimport sys\n\nfrom numpy import zeros, arange, flatnonzero as find\n\nfrom pandapower.pypower.idx_cost import MODEL, NCOST, PW_LINEAR, COST\n\n\ndef polycost(gencost, Pg, der=0):\n \"\"\"Evaluates polynomial generator cost & derivatives.\n\n C{f = polycost(gencost, Pg)} returns the vector of costs evaluated at C{Pg}\n\n C{df = polycost(gencost, Pg, 1)} returns the vector of first derivatives\n of costs evaluated at C{Pg}\n\n C{d2f = polycost(gencost, Pg, 2)} returns the vector of second derivatives\n of costs evaluated at C{Pg}\n\n C{gencost} must contain only polynomial costs\n C{Pg} is in MW, not p.u. (works for C{Qg} too)\n\n @author: Ray Zimmerman (PSERC Cornell)\n \"\"\"\n if any(gencost[:, MODEL] == PW_LINEAR):\n sys.stderr.write('polycost: all costs must be polynomial\\n')\n\n ng = len(Pg)\n maxN = max( gencost[:, NCOST].astype(int) )\n minN = min( gencost[:, NCOST].astype(int) )\n\n ## form coefficient matrix where 1st column is constant term, 2nd linear, etc.\n c = zeros((ng, maxN))\n for n in arange(minN, maxN + 1):\n k = find(gencost[:, NCOST] == n) ## cost with n coefficients\n c[k, :n] = gencost[k, (COST + n - 1):COST - 1:-1]\n\n ## do derivatives\n for d in range(1, der + 1):\n if c.shape[1] >= 2:\n c = c[:, 1:maxN - d + 1]\n else:\n c = zeros((ng, 1))\n break\n\n for k in range(2, maxN - d + 1):\n c[:, k-1] = c[:, k-1] * k\n\n ## evaluate polynomial\n if len(c) == 0:\n f = zeros(Pg.shape)\n else:\n f = c[:, :1].flatten() ## constant term\n for k in range(1, c.shape[1]):\n f = f + c[:, k] * Pg**k\n\n return f\n"
] | [
[
"numpy.asarray",
"numpy.array",
"pandas.Series"
],
[
"numpy.arange",
"numpy.array"
],
[
"numpy.arange",
"numpy.zeros",
"numpy.flatnonzero"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
stefan-de/lifelines | [
"519bd3abe6051bd9fb5da0dfffce24ab86171f3f"
] | [
"lifelines/tests/utils/test_utils.py"
] | [
"# -*- coding: utf-8 -*-\n\n\nimport pytest\nimport os\nimport numpy as np\nimport pandas as pd\nfrom pandas.testing import assert_frame_equal, assert_series_equal\nimport numpy.testing as npt\nfrom numpy.linalg import norm, lstsq\nfrom numpy.random import randn\nfrom flaky import flaky\n\nfrom lifelines import CoxPHFitter, WeibullAFTFitter, KaplanMeierFitter, ExponentialFitter\nfrom lifelines.datasets import load_regression_dataset, load_larynx, load_waltons, load_rossi\nfrom lifelines import utils\nfrom lifelines import exceptions\nfrom lifelines.utils.sklearn_adapter import sklearn_adapter\nfrom lifelines.utils.safe_exp import safe_exp\n\n\ndef test_format_p_values():\n assert utils.format_p_value(2)(0.004) == \"<0.005\"\n assert utils.format_p_value(3)(0.004) == \"0.004\"\n\n assert utils.format_p_value(3)(0.000) == \"<0.0005\"\n assert utils.format_p_value(3)(0.005) == \"0.005\"\n assert utils.format_p_value(3)(0.2111) == \"0.211\"\n assert utils.format_p_value(3)(0.2119) == \"0.212\"\n\n\ndef test_ridge_regression_with_penalty_is_less_than_without_penalty():\n X = randn(2, 2)\n Y = randn(2)\n assert norm(utils.ridge_regression(X, Y, c1=2.0)[0]) <= norm(utils.ridge_regression(X, Y)[0])\n assert norm(utils.ridge_regression(X, Y, c1=1.0, c2=1.0)[0]) <= norm(utils.ridge_regression(X, Y)[0])\n\n\ndef test_ridge_regression_with_extreme_c1_penalty_equals_close_to_zero_vector():\n c1 = 10e8\n c2 = 0.0\n offset = np.ones(2)\n X = randn(2, 2)\n Y = randn(2)\n assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0]) < 10e-4\n\n\ndef test_ridge_regression_with_extreme_c2_penalty_equals_close_to_offset():\n c1 = 0.0\n c2 = 10e8\n offset = np.ones(2)\n X = randn(2, 2)\n Y = randn(2)\n assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0] - offset) < 10e-4\n\n\ndef test_lstsq_returns_similar_values_to_ridge_regression():\n X = randn(2, 2)\n Y = randn(2)\n expected = lstsq(X, Y, rcond=None)[0]\n assert norm(utils.ridge_regression(X, Y)[0] - expected) < 10e-4\n\n\ndef test_lstsq_returns_correct_values():\n X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])\n y = [1, 1, 1, -1, -1]\n beta, V = utils.ridge_regression(X, y)\n expected_beta = [-0.98684211, -0.07894737]\n expected_v = [\n [-0.03289474, -0.49342105, 0.06578947, 0.03289474, 0.49342105],\n [-0.30263158, 0.46052632, -0.39473684, 0.30263158, -0.46052632],\n ]\n assert norm(beta - expected_beta) < 10e-4\n for V_row, e_v_row in zip(V, expected_v):\n assert norm(V_row - e_v_row) < 1e-4\n\n\ndef test_unnormalize():\n df = load_larynx()\n m = df.mean(0)\n s = df.std(0)\n\n ndf = utils.normalize(df)\n\n npt.assert_almost_equal(df.values, utils.unnormalize(ndf, m, s).values)\n\n\ndef test_normalize():\n df = load_larynx()\n n, d = df.shape\n npt.assert_almost_equal(utils.normalize(df).mean(0).values, np.zeros(d))\n npt.assert_almost_equal(utils.normalize(df).std(0).values, np.ones(d))\n\n\ndef test_median():\n sv = pd.DataFrame(1 - np.linspace(0, 1, 1000))\n assert utils.median_survival_times(sv) == 500\n\n\ndef test_median_accepts_series():\n sv = pd.Series(1 - np.linspace(0, 1, 1000))\n assert utils.median_survival_times(sv) == 500\n\n\ndef test_qth_survival_times_with_varying_datatype_inputs():\n sf_list = [1.0, 0.75, 0.5, 0.25, 0.0]\n sf_array = np.array([1.0, 0.75, 0.5, 0.25, 0.0])\n sf_df_no_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0])\n sf_df_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])\n sf_series_index = pd.Series([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 
50])\n sf_series_no_index = pd.Series([1.0, 0.75, 0.5, 0.25, 0.0])\n\n q = 0.5\n\n assert utils.qth_survival_times(q, sf_list) == 2\n assert utils.qth_survival_times(q, sf_array) == 2\n assert utils.qth_survival_times(q, sf_df_no_index) == 2\n assert utils.qth_survival_times(q, sf_df_index) == 30\n assert utils.qth_survival_times(q, sf_series_index) == 30\n assert utils.qth_survival_times(q, sf_series_no_index) == 2\n\n\ndef test_qth_survival_times_multi_dim_input():\n sf = np.linspace(1, 0, 50)\n sf_multi_df = pd.DataFrame({\"sf\": sf, \"sf**2\": sf ** 2})\n medians = utils.qth_survival_times(0.5, sf_multi_df)\n assert medians[\"sf\"].loc[0.5] == 25\n assert medians[\"sf**2\"].loc[0.5] == 15\n\n\ndef test_qth_survival_time_returns_inf():\n sf = pd.Series([1.0, 0.7, 0.6])\n assert utils.qth_survival_time(0.5, sf) == np.inf\n\n\ndef test_qth_survival_time_accepts_a_model():\n kmf = KaplanMeierFitter().fit([1.0, 0.7, 0.6])\n assert utils.qth_survival_time(0.8, kmf) > 0\n\n\ndef test_qth_survival_time_with_dataframe():\n sf_df_no_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0])\n sf_df_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])\n sf_df_too_many_columns = pd.DataFrame([[1, 2], [3, 4]])\n\n assert utils.qth_survival_time(0.5, sf_df_no_index) == 2\n assert utils.qth_survival_time(0.5, sf_df_index) == 30\n\n with pytest.raises(ValueError):\n utils.qth_survival_time(0.5, sf_df_too_many_columns)\n\n\ndef test_qth_survival_times_with_multivariate_q():\n sf = np.linspace(1, 0, 50)\n sf_multi_df = pd.DataFrame({\"sf\": sf, \"sf**2\": sf ** 2})\n\n assert_frame_equal(\n utils.qth_survival_times([0.2, 0.5], sf_multi_df),\n pd.DataFrame([[40, 28], [25, 15]], index=[0.2, 0.5], columns=[\"sf\", \"sf**2\"]),\n )\n assert_frame_equal(\n utils.qth_survival_times([0.2, 0.5], sf_multi_df[\"sf\"]), pd.DataFrame([40, 25], index=[0.2, 0.5], columns=[\"sf\"])\n )\n assert_frame_equal(utils.qth_survival_times(0.5, sf_multi_df), pd.DataFrame([[25, 15]], index=[0.5], columns=[\"sf\", \"sf**2\"]))\n assert utils.qth_survival_times(0.5, sf_multi_df[\"sf\"]) == 25\n\n\ndef test_qth_survival_times_with_duplicate_q_returns_valid_index_and_shape():\n sf = pd.DataFrame(np.linspace(1, 0, 50))\n\n q = pd.Series([0.5, 0.5, 0.2, 0.0, 0.0])\n actual = utils.qth_survival_times(q, sf)\n assert actual.shape[0] == len(q)\n assert actual.index[0] == actual.index[1]\n assert_series_equal(actual.iloc[0], actual.iloc[1])\n\n npt.assert_almost_equal(actual.index.values, q.values)\n\n\ndef test_datetimes_to_durations_with_different_frequencies():\n # days\n start_date = [\"2013-10-10 0:00:00\", \"2013-10-09\", \"2012-10-10\"]\n end_date = [\"2013-10-13\", \"2013-10-10 0:00:00\", \"2013-10-15\"]\n T, C = utils.datetimes_to_durations(start_date, end_date)\n npt.assert_almost_equal(T, np.array([3, 1, 5 + 365]))\n npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))\n\n # years\n start_date = [\"2013-10-10\", \"2013-10-09\", \"2012-10-10\"]\n end_date = [\"2013-10-13\", \"2013-10-10\", \"2013-10-15\"]\n T, C = utils.datetimes_to_durations(start_date, end_date, freq=\"Y\")\n npt.assert_almost_equal(T, np.array([0, 0, 1]))\n npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))\n\n # hours\n start_date = [\"2013-10-10 17:00:00\", \"2013-10-09 0:00:00\", \"2013-10-10 23:00:00\"]\n end_date = [\"2013-10-10 18:00:00\", \"2013-10-10 0:00:00\", \"2013-10-11 2:00:00\"]\n T, C = utils.datetimes_to_durations(start_date, end_date, freq=\"h\")\n npt.assert_almost_equal(T, np.array([1, 24, 3]))\n 
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))\n\n\ndef test_datetimes_to_durations_will_handle_dates_above_fill_date():\n start_date = [\"2013-10-08\", \"2013-10-09\", \"2013-10-10\"]\n end_date = [\"2013-10-10\", \"2013-10-12\", \"2013-10-15\"]\n T, C = utils.datetimes_to_durations(start_date, end_date, freq=\"Y\", fill_date=\"2013-10-12\")\n npt.assert_almost_equal(C, np.array([1, 1, 0], dtype=bool))\n\n\ndef test_datetimes_to_durations_will_handle_dates_above_multi_fill_date():\n start_date = [\"2013-10-08\", \"2013-10-09\", \"2013-10-10\"]\n end_date = [\"2013-10-10\", None, None]\n last_observation = [\"2013-10-10\", \"2013-10-12\", \"2013-10-14\"]\n T, E = utils.datetimes_to_durations(start_date, end_date, freq=\"D\", fill_date=last_observation)\n npt.assert_almost_equal(E, np.array([1, 0, 0], dtype=bool))\n npt.assert_almost_equal(T, np.array([2, 3, 4]))\n\n\ndef test_datetimes_to_durations_will_handle_dates_above_multi_fill_date():\n start_date = [\"2013-10-08\", \"2013-10-09\", \"2013-10-10\"]\n end_date = [\"2013-10-10\", None, None]\n last_observation = [\"2013-10-10\", \"2013-10-12\", \"2013-10-14\"]\n T, E = utils.datetimes_to_durations(start_date, end_date, freq=\"D\", fill_date=last_observation)\n npt.assert_almost_equal(E, np.array([1, 0, 0], dtype=bool))\n npt.assert_almost_equal(T, np.array([2, 3, 4]))\n\n\ndef test_datetimes_to_durations_censor():\n start_date = [\"2013-10-10\", \"2013-10-09\", \"2012-10-10\"]\n end_date = [\"2013-10-13\", None, \"\"]\n T, C = utils.datetimes_to_durations(start_date, end_date, freq=\"Y\")\n npt.assert_almost_equal(C, np.array([1, 0, 0], dtype=bool))\n\n\ndef test_datetimes_to_durations_custom_censor():\n start_date = [\"2013-10-10\", \"2013-10-09\", \"2012-10-10\"]\n end_date = [\"2013-10-13\", \"NaT\", \"\"]\n T, C = utils.datetimes_to_durations(start_date, end_date, freq=\"Y\", na_values=[\"NaT\", \"\"])\n npt.assert_almost_equal(C, np.array([1, 0, 0], dtype=bool))\n\n\ndef test_survival_events_from_table_no_ties():\n T, C = np.array([1, 2, 3, 4, 4, 5]), np.array([1, 0, 1, 1, 0, 1])\n d = utils.survival_table_from_events(T, C)\n T_, C_, W_ = utils.survival_events_from_table(d[[\"censored\", \"observed\"]])\n npt.assert_array_equal(T, T_)\n npt.assert_array_equal(C, C_)\n npt.assert_array_equal(W_, np.ones_like(T))\n\n\ndef test_survival_events_from_table_with_ties():\n T, C = np.array([1, 2, 3, 4, 4, 5]), np.array([1, 0, 1, 1, 1, 1])\n d = utils.survival_table_from_events(T, C)\n T_, C_, W_ = utils.survival_events_from_table(d[[\"censored\", \"observed\"]])\n npt.assert_array_equal([1, 2, 3, 4, 5], T_)\n npt.assert_array_equal([1, 0, 1, 1, 1], C_)\n npt.assert_array_equal([1, 1, 1, 2, 1], W_)\n\n\ndef test_survival_table_from_events_with_non_trivial_censorship_column():\n T = np.random.exponential(5, size=50)\n malformed_C = np.random.binomial(2, p=0.8) # set to 2 on purpose!\n proper_C = malformed_C > 0 # (proper \"boolean\" array)\n table1 = utils.survival_table_from_events(T, malformed_C, np.zeros_like(T))\n table2 = utils.survival_table_from_events(T, proper_C, np.zeros_like(T))\n\n assert_frame_equal(table1, table2)\n\n\ndef test_group_survival_table_from_events_on_waltons_data():\n df = load_waltons()\n first_obs = np.zeros(df.shape[0])\n g, removed, observed, censored = utils.group_survival_table_from_events(df[\"group\"], df[\"T\"], df[\"E\"], first_obs)\n assert len(g) == 2\n assert all(removed.columns == [\"removed:miR-137\", \"removed:control\"])\n assert all(removed.index == observed.index)\n assert 
all(removed.index == censored.index)\n\n\ndef test_survival_table_from_events_binned_with_empty_bin():\n df = load_waltons()\n ix = df[\"group\"] == \"miR-137\"\n event_table = utils.survival_table_from_events(df.loc[ix][\"T\"], df.loc[ix][\"E\"], intervals=[0, 10, 20, 30, 40, 50])\n assert not pd.isnull(event_table).any().any()\n\n\ndef test_survival_table_from_events_at_risk_column():\n df = load_waltons()\n # from R\n expected = [\n 163.0,\n 162.0,\n 160.0,\n 157.0,\n 154.0,\n 152.0,\n 151.0,\n 148.0,\n 144.0,\n 139.0,\n 134.0,\n 133.0,\n 130.0,\n 128.0,\n 126.0,\n 119.0,\n 118.0,\n 108.0,\n 107.0,\n 99.0,\n 96.0,\n 89.0,\n 87.0,\n 69.0,\n 65.0,\n 49.0,\n 38.0,\n 36.0,\n 27.0,\n 24.0,\n 14.0,\n 1.0,\n ]\n df = utils.survival_table_from_events(df[\"T\"], df[\"E\"])\n assert list(df[\"at_risk\"][1:]) == expected # skip the first event as that is the birth time, 0.\n\n\ndef test_survival_table_to_events_casts_to_float():\n T, C = (np.array([1, 2, 3, 4, 4, 5]), np.array([True, False, True, True, True, True]))\n d = utils.survival_table_from_events(T, C, np.zeros_like(T))\n npt.assert_array_equal(d[\"censored\"].values, np.array([0.0, 0.0, 1.0, 0.0, 0.0, 0.0]))\n npt.assert_array_equal(d[\"removed\"].values, np.array([0.0, 1.0, 1.0, 1.0, 2.0, 1.0]))\n\n\ndef test_group_survival_table_from_events_works_with_series():\n df = pd.DataFrame([[1, True, 3], [1, True, 3], [4, False, 2]], columns=[\"duration\", \"E\", \"G\"])\n ug, _, _, _ = utils.group_survival_table_from_events(df.G, df.duration, df.E, np.array([[0, 0, 0]]))\n npt.assert_array_equal(ug, np.array([3, 2]))\n\n\ndef test_survival_table_from_events_will_collapse_if_asked():\n T, C = np.array([1, 3, 4, 5]), np.array([True, True, True, True])\n table = utils.survival_table_from_events(T, C, collapse=True)\n assert table.index.tolist() == [\n pd.Interval(-0.001, 3.5089999999999999, closed=\"right\"),\n pd.Interval(3.5089999999999999, 7.0179999999999998, closed=\"right\"),\n ]\n\n\ndef test_survival_table_from_events_will_collapse_to_desired_bins():\n T, C = np.array([1, 3, 4, 5]), np.array([True, True, True, True])\n table = utils.survival_table_from_events(T, C, collapse=True, intervals=[0, 4, 8])\n assert table.index.tolist() == [pd.Interval(-0.001, 4, closed=\"right\"), pd.Interval(4, 8, closed=\"right\")]\n\n\ndef test_cross_validator_returns_k_results():\n cf = CoxPHFitter()\n results = utils.k_fold_cross_validation(cf, load_regression_dataset(), duration_col=\"T\", event_col=\"E\", k=3)\n assert len(results) == 3\n\n results = utils.k_fold_cross_validation(cf, load_regression_dataset(), duration_col=\"T\", event_col=\"E\", k=5)\n assert len(results) == 5\n\n\ndef test_cross_validator_returns_fitters_k_results():\n cf = CoxPHFitter()\n fitters = [cf, cf]\n results = utils.k_fold_cross_validation(fitters, load_regression_dataset(), duration_col=\"T\", event_col=\"E\", k=3)\n assert len(results) == 2\n assert len(results[0]) == len(results[1]) == 3\n\n results = utils.k_fold_cross_validation(fitters, load_regression_dataset(), duration_col=\"T\", event_col=\"E\", k=5)\n assert len(results) == 2\n assert len(results[0]) == len(results[1]) == 5\n\n\ndef test_cross_validator_with_predictor():\n cf = CoxPHFitter()\n results = utils.k_fold_cross_validation(cf, load_regression_dataset(), duration_col=\"T\", event_col=\"E\", k=3)\n assert len(results) == 3\n\n\ndef test_cross_validator_with_stratified_cox_model():\n cf = CoxPHFitter(strata=[\"race\"])\n utils.k_fold_cross_validation(cf, load_rossi(), duration_col=\"week\", 
event_col=\"arrest\")\n\n\ndef test_cross_validator_with_specific_loss_function():\n cf = CoxPHFitter()\n results_sq = utils.k_fold_cross_validation(\n cf, load_regression_dataset(), scoring_method=\"concordance_index\", duration_col=\"T\", event_col=\"E\"\n )\n\n\ndef test_concordance_index():\n size = 1000\n T = np.random.normal(size=size)\n P = np.random.normal(size=size)\n C = np.random.choice([0, 1], size=size)\n Z = np.zeros_like(T)\n\n # Zeros is exactly random\n assert utils.concordance_index(T, Z) == 0.5\n assert utils.concordance_index(T, Z, C) == 0.5\n\n # Itself is 1\n assert utils.concordance_index(T, T) == 1.0\n assert utils.concordance_index(T, T, C) == 1.0\n\n # Random is close to 0.5\n assert abs(utils.concordance_index(T, P) - 0.5) < 0.05\n assert abs(utils.concordance_index(T, P, C) - 0.5) < 0.05\n\n\ndef test_survival_table_from_events_with_non_negative_T_and_no_lagged_births():\n n = 10\n T = np.arange(n)\n C = [True] * n\n min_obs = [0] * n\n df = utils.survival_table_from_events(T, C, min_obs)\n assert df.iloc[0][\"entrance\"] == n\n assert df.index[0] == T.min()\n assert df.index[-1] == T.max()\n\n\ndef test_survival_table_from_events_with_negative_T_and_no_lagged_births():\n n = 10\n T = np.arange(-n / 2, n / 2)\n C = [True] * n\n min_obs = None\n df = utils.survival_table_from_events(T, C, min_obs)\n assert df.iloc[0][\"entrance\"] == n\n assert df.index[0] == T.min()\n assert df.index[-1] == T.max()\n\n\ndef test_survival_table_from_events_with_non_negative_T_and_lagged_births():\n n = 10\n T = np.arange(n)\n C = [True] * n\n min_obs = np.linspace(0, 2, n)\n df = utils.survival_table_from_events(T, C, min_obs)\n assert df.iloc[0][\"entrance\"] == 1\n assert df.index[0] == T.min()\n assert df.index[-1] == T.max()\n\n\ndef test_survival_table_from_events_with_negative_T_and_lagged_births():\n n = 10\n T = np.arange(-n / 2, n / 2)\n C = [True] * n\n min_obs = np.linspace(-n / 2, 2, n)\n df = utils.survival_table_from_events(T, C, min_obs)\n assert df.iloc[0][\"entrance\"] == 1\n assert df.index[0] == T.min()\n assert df.index[-1] == T.max()\n\n\ndef test_survival_table_from_events_raises_value_error_if_too_early_births():\n n = 10\n T = np.arange(0, n)\n C = [True] * n\n min_obs = T.copy()\n min_obs[1] = min_obs[1] + 10\n with pytest.raises(ValueError):\n utils.survival_table_from_events(T, C, min_obs)\n\n\nclass TestLongDataFrameUtils(object):\n @pytest.fixture\n def seed_df(self):\n df = pd.DataFrame.from_records([{\"id\": 1, \"var1\": 0.1, \"T\": 10, \"E\": 1}, {\"id\": 2, \"var1\": 0.5, \"T\": 12, \"E\": 0}])\n return utils.to_long_format(df, \"T\")\n\n @pytest.fixture\n def cv1(self):\n return pd.DataFrame.from_records(\n [\n {\"id\": 1, \"t\": 0, \"var2\": 1.4},\n {\"id\": 1, \"t\": 4, \"var2\": 1.2},\n {\"id\": 1, \"t\": 8, \"var2\": 1.5},\n {\"id\": 2, \"t\": 0, \"var2\": 1.6},\n ]\n )\n\n @pytest.fixture\n def cv2(self):\n return pd.DataFrame.from_records(\n [{\"id\": 1, \"t\": 0, \"var3\": 0}, {\"id\": 1, \"t\": 6, \"var3\": 1}, {\"id\": 2, \"t\": 0, \"var3\": 0}]\n )\n\n def test_order_of_adding_covariates_doesnt_matter(self, seed_df, cv1, cv2):\n df12 = seed_df.pipe(utils.add_covariate_to_timeline, cv1, \"id\", \"t\", \"E\").pipe(\n utils.add_covariate_to_timeline, cv2, \"id\", \"t\", \"E\"\n )\n\n df21 = seed_df.pipe(utils.add_covariate_to_timeline, cv2, \"id\", \"t\", \"E\").pipe(\n utils.add_covariate_to_timeline, cv1, \"id\", \"t\", \"E\"\n )\n\n assert_frame_equal(df21, df12, check_like=True)\n\n def 
test_order_of_adding_covariates_doesnt_matter_in_cumulative_sum(self, seed_df, cv1, cv2):\n df12 = seed_df.pipe(utils.add_covariate_to_timeline, cv1, \"id\", \"t\", \"E\", cumulative_sum=True).pipe(\n utils.add_covariate_to_timeline, cv2, \"id\", \"t\", \"E\", cumulative_sum=True\n )\n\n df21 = seed_df.pipe(utils.add_covariate_to_timeline, cv2, \"id\", \"t\", \"E\", cumulative_sum=True).pipe(\n utils.add_covariate_to_timeline, cv1, \"id\", \"t\", \"E\", cumulative_sum=True\n )\n\n assert_frame_equal(df21, df12, check_like=True)\n\n def test_adding_cvs_with_the_same_column_name_will_insert_appropriately(self, seed_df):\n seed_df = seed_df[seed_df[\"id\"] == 1]\n\n cv = pd.DataFrame.from_records([{\"id\": 1, \"t\": 1, \"var1\": 1.0}, {\"id\": 1, \"t\": 2, \"var1\": 2.0}])\n df = seed_df.pipe(utils.add_covariate_to_timeline, cv, \"id\", \"t\", \"E\")\n expected = pd.DataFrame.from_records(\n [\n {\"E\": False, \"id\": 1, \"stop\": 1.0, \"start\": 0, \"var1\": 0.1},\n {\"E\": False, \"id\": 1, \"stop\": 2.0, \"start\": 1, \"var1\": 1.0},\n {\"E\": True, \"id\": 1, \"stop\": 10.0, \"start\": 2, \"var1\": 2.0},\n ]\n )\n assert_frame_equal(df, expected, check_like=True)\n\n def test_adding_cvs_with_the_same_column_name_will_sum_update_appropriately(self, seed_df):\n seed_df = seed_df[seed_df[\"id\"] == 1]\n\n new_value_at_time_0 = 1.0\n old_value_at_time_0 = seed_df[\"var1\"].iloc[0]\n cv = pd.DataFrame.from_records([{\"id\": 1, \"t\": 0, \"var1\": new_value_at_time_0, \"var2\": 2.0}])\n\n df = seed_df.pipe(utils.add_covariate_to_timeline, cv, \"id\", \"t\", \"E\", overwrite=False)\n\n expected = pd.DataFrame.from_records(\n [{\"E\": True, \"id\": 1, \"stop\": 10.0, \"start\": 0, \"var1\": new_value_at_time_0 + old_value_at_time_0, \"var2\": 2.0}]\n )\n assert_frame_equal(df, expected, check_like=True)\n\n def test_adding_cvs_with_the_same_column_name_will_overwrite_update_appropriately(self, seed_df):\n seed_df = seed_df[seed_df[\"id\"] == 1]\n\n new_value_at_time_0 = 1.0\n cv = pd.DataFrame.from_records([{\"id\": 1, \"t\": 0, \"var1\": new_value_at_time_0}])\n\n df = seed_df.pipe(utils.add_covariate_to_timeline, cv, \"id\", \"t\", \"E\", overwrite=True)\n\n expected = pd.DataFrame.from_records([{\"E\": True, \"id\": 1, \"stop\": 10.0, \"start\": 0, \"var1\": new_value_at_time_0}])\n assert_frame_equal(df, expected, check_like=True)\n\n def test_enum_flag(self, seed_df, cv1, cv2):\n df = seed_df.pipe(utils.add_covariate_to_timeline, cv1, \"id\", \"t\", \"E\", add_enum=True).pipe(\n utils.add_covariate_to_timeline, cv2, \"id\", \"t\", \"E\", add_enum=True\n )\n\n idx = df[\"id\"] == 1\n n = idx.sum()\n try:\n assert_series_equal(df[\"enum\"].loc[idx], pd.Series(np.arange(1, n + 1)), check_names=False)\n except AssertionError as e:\n # Windows Numpy and Pandas sometimes have int32 or int64 as default dtype\n if os.name == \"nt\" and \"int32\" in str(e) and \"int64\" in str(e):\n assert_series_equal(\n df[\"enum\"].loc[idx], pd.Series(np.arange(1, n + 1), dtype=df[\"enum\"].loc[idx].dtypes), check_names=False\n )\n else:\n raise e\n\n def test_event_col_is_properly_inserted(self, seed_df, cv2):\n df = seed_df.pipe(utils.add_covariate_to_timeline, cv2, \"id\", \"t\", \"E\")\n assert df.groupby(\"id\").last()[\"E\"].tolist() == [1, 0]\n\n def test_redundant_cv_columns_are_dropped(self, seed_df):\n seed_df = seed_df[seed_df[\"id\"] == 1]\n cv = pd.DataFrame.from_records(\n [\n {\"id\": 1, \"t\": 0, \"var3\": 0, \"var4\": 1},\n {\"id\": 1, \"t\": 1, \"var3\": 0, \"var4\": 1}, # redundant, as nothing 
changed during the interval\n {\"id\": 1, \"t\": 3, \"var3\": 0, \"var4\": 1}, # redundant, as nothing changed during the interval\n {\"id\": 1, \"t\": 6, \"var3\": 1, \"var4\": 1},\n {\"id\": 1, \"t\": 9, \"var3\": 1, \"var4\": 1}, # redundant, as nothing changed during the interval\n ]\n )\n\n df = seed_df.pipe(utils.add_covariate_to_timeline, cv, \"id\", \"t\", \"E\")\n assert df.shape[0] == 2\n\n def test_will_convert_event_column_to_bools(self, seed_df, cv1):\n seed_df[\"E\"] = seed_df[\"E\"].astype(int)\n\n df = seed_df.pipe(utils.add_covariate_to_timeline, cv1, \"id\", \"t\", \"E\")\n assert df.dtypes[\"E\"] == bool\n\n def test_if_cvs_include_a_start_time_after_the_final_time_it_is_excluded(self, seed_df):\n max_T = seed_df[\"stop\"].max()\n cv = pd.DataFrame.from_records(\n [\n {\"id\": 1, \"t\": 0, \"var3\": 0},\n {\"id\": 1, \"t\": max_T + 10, \"var3\": 1}, # will be excluded\n {\"id\": 2, \"t\": 0, \"var3\": 0},\n ]\n )\n\n df = seed_df.pipe(utils.add_covariate_to_timeline, cv, \"id\", \"t\", \"E\")\n assert df.shape[0] == 2\n\n def test_if_cvs_include_a_start_time_before_it_is_included(self, seed_df):\n min_T = seed_df[\"start\"].min()\n cv = pd.DataFrame.from_records(\n [{\"id\": 1, \"t\": 0, \"var3\": 0}, {\"id\": 1, \"t\": min_T - 1, \"var3\": 1}, {\"id\": 2, \"t\": 0, \"var3\": 0}]\n )\n\n df = seed_df.pipe(utils.add_covariate_to_timeline, cv, \"id\", \"t\", \"E\")\n assert df.shape[0] == 3\n\n def test_cvs_with_null_values_are_dropped(self, seed_df):\n seed_df = seed_df[seed_df[\"id\"] == 1]\n cv = pd.DataFrame.from_records(\n [{\"id\": None, \"t\": 0, \"var3\": 0}, {\"id\": 1, \"t\": None, \"var3\": 1}, {\"id\": 2, \"t\": 0, \"var3\": None}]\n )\n\n df = seed_df.pipe(utils.add_covariate_to_timeline, cv, \"id\", \"t\", \"E\")\n assert df.shape[0] == 1\n\n def test_a_new_row_is_not_created_if_start_times_are_the_same(self, seed_df):\n seed_df = seed_df[seed_df[\"id\"] == 1]\n cv1 = pd.DataFrame.from_records([{\"id\": 1, \"t\": 0, \"var3\": 0}, {\"id\": 1, \"t\": 5, \"var3\": 1}])\n\n cv2 = pd.DataFrame.from_records(\n [{\"id\": 1, \"t\": 0, \"var4\": 0}, {\"id\": 1, \"t\": 5, \"var4\": 1.5}, {\"id\": 1, \"t\": 6, \"var4\": 1.7}]\n )\n\n df = seed_df.pipe(utils.add_covariate_to_timeline, cv1, \"id\", \"t\", \"E\").pipe(\n utils.add_covariate_to_timeline, cv2, \"id\", \"t\", \"E\"\n )\n assert df.shape[0] == 3\n\n def test_error_is_raised_if_columns_are_missing_in_seed_df(self, seed_df, cv1):\n del seed_df[\"start\"]\n with pytest.raises(IndexError):\n utils.add_covariate_to_timeline(seed_df, cv1, \"id\", \"t\", \"E\")\n\n def test_cumulative_sum(self):\n seed_df = pd.DataFrame.from_records([{\"id\": 1, \"start\": 0, \"stop\": 5, \"E\": 1}])\n cv = pd.DataFrame.from_records([{\"id\": 1, \"t\": 0, \"var4\": 1}, {\"id\": 1, \"t\": 1, \"var4\": 1}, {\"id\": 1, \"t\": 3, \"var4\": 1}])\n\n df = seed_df.pipe(utils.add_covariate_to_timeline, cv, \"id\", \"t\", \"E\", cumulative_sum=True)\n expected = pd.DataFrame.from_records(\n [\n {\"id\": 1, \"start\": 0, \"stop\": 1.0, \"cumsum_var4\": 1, \"E\": False},\n {\"id\": 1, \"start\": 1, \"stop\": 3.0, \"cumsum_var4\": 2, \"E\": False},\n {\"id\": 1, \"start\": 3, \"stop\": 5.0, \"cumsum_var4\": 3, \"E\": True},\n ]\n )\n assert_frame_equal(expected, df, check_like=True)\n\n def test_delay(self, cv2):\n seed_df = pd.DataFrame.from_records([{\"id\": 1, \"start\": 0, \"stop\": 50, \"E\": 1}])\n\n cv3 = pd.DataFrame.from_records(\n [{\"id\": 1, \"t\": 0, \"varA\": 2}, {\"id\": 1, \"t\": 10, \"varA\": 4}, {\"id\": 1, \"t\": 20, \"varA\": 
6}]\n )\n\n df = seed_df.pipe(utils.add_covariate_to_timeline, cv3, \"id\", \"t\", \"E\", delay=2).fillna(0)\n\n expected = pd.DataFrame.from_records(\n [\n {\"start\": 0, \"stop\": 2.0, \"varA\": 0.0, \"id\": 1, \"E\": False},\n {\"start\": 2, \"stop\": 12.0, \"varA\": 2.0, \"id\": 1, \"E\": False},\n {\"start\": 12, \"stop\": 22.0, \"varA\": 4.0, \"id\": 1, \"E\": False},\n {\"start\": 22, \"stop\": 50.0, \"varA\": 6.0, \"id\": 1, \"E\": True},\n ]\n )\n assert_frame_equal(expected, df, check_like=True)\n\n def test_covariates_from_event_matrix_with_simple_addition(self):\n\n base_df = pd.DataFrame([[1, 0, 5, 1], [2, 0, 4, 1], [3, 0, 8, 1], [4, 0, 4, 1]], columns=[\"id\", \"start\", \"stop\", \"e\"])\n\n event_df = pd.DataFrame([[1, 1], [2, 2], [3, 3], [4, None]], columns=[\"id\", \"poison\"])\n cv = utils.covariates_from_event_matrix(event_df, \"id\")\n ldf = utils.add_covariate_to_timeline(base_df, cv, \"id\", \"duration\", \"e\", cumulative_sum=True)\n assert pd.notnull(ldf).all().all()\n\n expected = pd.DataFrame(\n [\n (0.0, 0.0, 1.0, 1, False),\n (1.0, 1.0, 5.0, 1, True),\n (0.0, 0.0, 2.0, 2, False),\n (2.0, 1.0, 4.0, 2, True),\n (0.0, 0.0, 3.0, 3, False),\n (3.0, 1.0, 8.0, 3, True),\n (0.0, 0.0, 4.0, 4, True),\n ],\n columns=[\"start\", \"cumsum_poison\", \"stop\", \"id\", \"e\"],\n )\n assert_frame_equal(expected, ldf, check_dtype=False, check_like=True)\n\n def test_covariates_from_event_matrix(self):\n\n base_df = pd.DataFrame([[1, 0, 5, 1], [2, 0, 4, 1], [3, 0, 8, 1], [4, 0, 4, 1]], columns=[\"id\", \"start\", \"stop\", \"e\"])\n\n event_df = pd.DataFrame(\n [[1, 1, None, 2], [2, None, 5, None], [3, 3, 3, 7]], columns=[\"id\", \"promotion\", \"movement\", \"raise\"]\n )\n\n cv = utils.covariates_from_event_matrix(event_df, \"id\")\n ldf = utils.add_covariate_to_timeline(base_df, cv, \"id\", \"duration\", \"e\", cumulative_sum=True)\n expected = pd.DataFrame.from_records(\n [\n {\n \"cumsum_movement\": 0.0,\n \"cumsum_promotion\": 0.0,\n \"cumsum_raise\": 0.0,\n \"e\": 0.0,\n \"id\": 1.0,\n \"start\": 0.0,\n \"stop\": 1.0,\n },\n {\n \"cumsum_movement\": 0.0,\n \"cumsum_promotion\": 1.0,\n \"cumsum_raise\": 0.0,\n \"e\": 0.0,\n \"id\": 1.0,\n \"start\": 1.0,\n \"stop\": 2.0,\n },\n {\n \"cumsum_movement\": 0.0,\n \"cumsum_promotion\": 1.0,\n \"cumsum_raise\": 1.0,\n \"e\": 1.0,\n \"id\": 1.0,\n \"start\": 2.0,\n \"stop\": 5.0,\n },\n {\n \"cumsum_movement\": 0.0,\n \"cumsum_promotion\": 0.0,\n \"cumsum_raise\": 0.0,\n \"e\": 1.0,\n \"id\": 2.0,\n \"start\": 0.0,\n \"stop\": 4.0,\n },\n {\n \"cumsum_movement\": 0.0,\n \"cumsum_promotion\": 0.0,\n \"cumsum_raise\": 0.0,\n \"e\": 0.0,\n \"id\": 3.0,\n \"start\": 0.0,\n \"stop\": 3.0,\n },\n {\n \"cumsum_movement\": 1.0,\n \"cumsum_promotion\": 1.0,\n \"cumsum_raise\": 0.0,\n \"e\": 0.0,\n \"id\": 3.0,\n \"start\": 3.0,\n \"stop\": 7.0,\n },\n {\n \"cumsum_movement\": 1.0,\n \"cumsum_promotion\": 1.0,\n \"cumsum_raise\": 1.0,\n \"e\": 1.0,\n \"id\": 3.0,\n \"start\": 7.0,\n \"stop\": 8.0,\n },\n {\n \"cumsum_movement\": None,\n \"cumsum_promotion\": None,\n \"cumsum_raise\": None,\n \"e\": 1.0,\n \"id\": 4.0,\n \"start\": 0.0,\n \"stop\": 4.0,\n },\n ]\n )\n\n assert_frame_equal(expected, ldf, check_dtype=False, check_like=True)\n\n def test_to_episodic_format_with_long_time_gap_is_identical(self):\n rossi = load_rossi()\n rossi[\"id\"] = np.arange(rossi.shape[0])\n\n long_rossi = utils.to_episodic_format(rossi, duration_col=\"week\", event_col=\"arrest\", id_col=\"id\", time_gaps=1000.0)\n\n # using astype(int) would fail on 
Windows because int32 and int64 are used as dtype\n long_rossi[\"week\"] = long_rossi[\"stop\"].astype(rossi[\"week\"].dtype)\n del long_rossi[\"start\"]\n del long_rossi[\"stop\"]\n\n assert_frame_equal(long_rossi, rossi, check_like=True)\n\n def test_to_episodic_format_preserves_outcome(self):\n E = [1, 1, 0, 0]\n df = pd.DataFrame({\"T\": [1, 3, 1, 3], \"E\": E, \"id\": [1, 2, 3, 4]})\n long_df = utils.to_episodic_format(df, \"T\", \"E\", id_col=\"id\").sort_values([\"id\", \"stop\"])\n assert long_df.shape[0] == 1 + 3 + 1 + 3\n\n assert long_df.groupby(\"id\").last()[\"E\"].tolist() == E\n\n def test_to_episodic_format_handles_floating_durations(self):\n df = pd.DataFrame({\"T\": [0.1, 3.5], \"E\": [1, 1], \"id\": [1, 2]})\n long_df = utils.to_episodic_format(df, \"T\", \"E\", id_col=\"id\").sort_values([\"id\", \"stop\"])\n assert long_df.shape[0] == 1 + 4\n assert long_df[\"stop\"].tolist() == [0.1, 1, 2, 3, 3.5]\n\n def test_to_episodic_format_handles_floating_durations_with_time_gaps(self):\n df = pd.DataFrame({\"T\": [0.1, 3.5], \"E\": [1, 1], \"id\": [1, 2]})\n long_df = utils.to_episodic_format(df, \"T\", \"E\", id_col=\"id\", time_gaps=2.0).sort_values([\"id\", \"stop\"])\n assert long_df[\"stop\"].tolist() == [0.1, 2, 3.5]\n\n def test_to_episodic_format_handles_floating_durations_and_preserves_events(self):\n df = pd.DataFrame({\"T\": [0.1, 3.5], \"E\": [1, 0], \"id\": [1, 2]})\n long_df = utils.to_episodic_format(df, \"T\", \"E\", id_col=\"id\", time_gaps=2.0).sort_values([\"id\", \"stop\"])\n assert long_df.groupby(\"id\").last()[\"E\"].tolist() == [1, 0]\n\n def test_to_episodic_format_handles_floating_durations_and_preserves_events(self):\n df = pd.DataFrame({\"T\": [0.1, 3.5], \"E\": [1, 0], \"id\": [1, 2]})\n long_df = utils.to_episodic_format(df, \"T\", \"E\", id_col=\"id\", time_gaps=2.0).sort_values([\"id\", \"stop\"])\n assert long_df.groupby(\"id\").last()[\"E\"].tolist() == [1, 0]\n\n def test_to_episodic_format_adds_id_col(self):\n df = pd.DataFrame({\"T\": [1, 3], \"E\": [1, 0]})\n long_df = utils.to_episodic_format(df, \"T\", \"E\")\n assert \"id\" in long_df.columns\n\n def test_to_episodic_format_uses_custom_index_as_id(self):\n df = pd.DataFrame({\"T\": [1, 3], \"E\": [1, 0]}, index=[\"A\", \"B\"])\n long_df = utils.to_episodic_format(df, \"T\", \"E\")\n assert long_df[\"id\"].tolist() == [\"A\", \"B\", \"B\", \"B\"]\n\n\nclass TestStepSizer:\n def test_StepSizer_step_will_decrease_if_unstable(self):\n start = 0.95\n ss = utils.StepSizer(start)\n assert ss.next() == start\n ss.update(1.0)\n ss.update(2.0)\n ss.update(1.0)\n ss.update(2.0)\n\n assert ss.next() < start\n\n def test_StepSizer_step_will_increase_if_stable(self):\n start = 0.5\n ss = utils.StepSizer(start)\n assert ss.next() == start\n ss.update(1.0)\n ss.update(0.5)\n ss.update(0.4)\n ss.update(0.1)\n\n assert ss.next() > start\n\n def test_StepSizer_step_will_decrease_if_explodes(self):\n start = 0.5\n ss = utils.StepSizer(start)\n assert ss.next() == start\n ss.update(20.0)\n assert ss.next() < start\n\n\nclass TestSklearnAdapter:\n @pytest.fixture\n def X(self):\n return load_regression_dataset().drop(\"T\", axis=1)\n\n @pytest.fixture\n def Y(self):\n return load_regression_dataset().pop(\"T\")\n\n def test_model_has_correct_api(self, X, Y):\n base_model = sklearn_adapter(CoxPHFitter, event_col=\"E\")\n cph = base_model()\n assert hasattr(cph, \"fit\")\n cph.fit(X, Y)\n assert hasattr(cph, \"predict\")\n cph.predict(X)\n assert hasattr(cph, \"score\")\n cph.score(X, Y)\n\n def 
test_sklearn_cross_val_score_accept_model(self, X, Y):\n from sklearn.model_selection import cross_val_score\n from sklearn.model_selection import GridSearchCV\n\n base_model = sklearn_adapter(WeibullAFTFitter, event_col=\"E\")\n wf = base_model(penalizer=1.0)\n assert len(cross_val_score(wf, X, Y, cv=3)) == 3\n\n def test_sklearn_GridSearchCV_accept_model(self, X, Y):\n from sklearn.model_selection import cross_val_score\n from sklearn.model_selection import GridSearchCV\n\n base_model = sklearn_adapter(WeibullAFTFitter, event_col=\"E\")\n\n grid_params = {\"penalizer\": 10.0 ** np.arange(-2, 3), \"model_ancillary\": [True, False]}\n clf = GridSearchCV(base_model(), grid_params, cv=4)\n clf.fit(X, Y)\n\n assert clf.best_params_ == {\"model_ancillary\": True, \"penalizer\": 100.0}\n assert clf.predict(X).shape[0] == X.shape[0]\n\n def test_model_can_accept_things_like_strata(self, X, Y):\n X[\"strata\"] = np.random.randint(0, 2, size=X.shape[0])\n base_model = sklearn_adapter(CoxPHFitter, event_col=\"E\")\n cph = base_model(strata=\"strata\")\n cph.fit(X, Y)\n\n def test_we_can_user_other_prediction_methods(self, X, Y):\n\n base_model = sklearn_adapter(WeibullAFTFitter, event_col=\"E\", predict_method=\"predict_median\")\n wf = base_model(strata=\"strata\")\n wf.fit(X, Y)\n assert wf.predict(X).shape[0] == X.shape[0]\n\n @pytest.mark.xfail\n def test_dill(self, X, Y):\n import dill\n\n base_model = sklearn_adapter(CoxPHFitter, event_col=\"E\")\n cph = base_model()\n cph.fit(X, Y)\n\n s = dill.dumps(cph)\n s = dill.loads(s)\n assert cph.predict(X).shape[0] == X.shape[0]\n\n @pytest.mark.xfail\n def test_pickle(self, X, Y):\n import pickle\n\n base_model = sklearn_adapter(CoxPHFitter, event_col=\"E\")\n cph = base_model()\n cph.fit(X, Y)\n\n s = pickle.dumps(cph, protocol=-1)\n s = pickle.loads(s)\n assert cph.predict(X).shape[0] == X.shape[0]\n\n def test_isinstance(self):\n from sklearn.base import BaseEstimator, RegressorMixin, MetaEstimatorMixin, MultiOutputMixin\n\n base_model = sklearn_adapter(CoxPHFitter, event_col=\"E\")\n assert isinstance(base_model(), BaseEstimator)\n assert isinstance(base_model(), RegressorMixin)\n assert isinstance(base_model(), MetaEstimatorMixin)\n\n @pytest.mark.xfail\n def test_sklearn_GridSearchCV_accept_model_with_parallelization(self, X, Y):\n from sklearn.model_selection import cross_val_score\n from sklearn.model_selection import GridSearchCV\n\n base_model = sklearn_adapter(WeibullAFTFitter, event_col=\"E\")\n\n grid_params = {\"penalizer\": 10.0 ** np.arange(-2, 3), \"l1_ratio\": [0.05, 0.5, 0.95], \"model_ancillary\": [True, False]}\n # note the n_jobs\n clf = GridSearchCV(base_model(), grid_params, cv=4, n_jobs=-1)\n clf.fit(X, Y)\n\n assert clf.best_params_ == {\"l1_ratio\": 0.5, \"model_ancillary\": False, \"penalizer\": 0.01}\n assert clf.predict(X).shape[0] == X.shape[0]\n\n @pytest.mark.xfail\n def test_joblib(self, X, Y):\n from joblib import dump, load\n\n base_model = sklearn_adapter(WeibullAFTFitter, event_col=\"E\")\n\n clf = base_model()\n clf.fit(X, Y)\n dump(clf, \"filename.joblib\")\n clf = load(\"filename.joblib\")\n\n @pytest.mark.xfail\n def test_sklearn_check():\n from sklearn.utils.estimator_checks import check_estimator\n\n base_model = sklearn_adapter(WeibullAFTFitter, event_col=\"E\")\n check_estimator(base_model())\n\n\ndef test_rmst_works_at_kaplan_meier_edge_case():\n\n T = [1, 2, 3, 4, 10]\n kmf = KaplanMeierFitter().fit(T)\n\n # when S(t)=0, doesn't matter about extending past\n assert 
utils.restricted_mean_survival_time(kmf, t=10) == utils.restricted_mean_survival_time(kmf, t=10.001)\n\n assert utils.restricted_mean_survival_time(kmf, t=9.9) <= utils.restricted_mean_survival_time(kmf, t=10.0)\n\n assert abs((utils.restricted_mean_survival_time(kmf, t=4) - (1.0 + 0.8 + 0.6 + 0.4))) < 0.0001\n assert abs((utils.restricted_mean_survival_time(kmf, t=4 + 0.1) - (1.0 + 0.8 + 0.6 + 0.4 + 0.2 * 0.1))) < 0.0001\n\n\ndef test_rmst_exactely_with_known_solution():\n T = np.random.exponential(2, 100)\n exp = ExponentialFitter().fit(T)\n lambda_ = exp.lambda_\n\n assert abs(utils.restricted_mean_survival_time(exp) - lambda_) < 0.001\n assert abs(utils.restricted_mean_survival_time(exp, t=lambda_) - lambda_ * (np.e - 1) / np.e) < 0.001\n\n\n@flaky\ndef test_rmst_approximate_solution():\n T = np.random.exponential(2, 4000)\n exp = ExponentialFitter().fit(T, timeline=np.linspace(0, T.max(), 10000))\n lambda_ = exp.lambda_\n\n with pytest.warns(exceptions.ApproximationWarning) as w:\n\n assert (\n abs(\n utils.restricted_mean_survival_time(exp, t=lambda_)\n - utils.restricted_mean_survival_time(exp.survival_function_, t=lambda_)\n )\n < 0.001\n )\n\n\ndef test_rmst_variance():\n\n T = np.random.exponential(2, 1000)\n expf = ExponentialFitter().fit(T)\n hazard = 1 / expf.lambda_\n t = 1\n\n sq = 2 / hazard ** 2 * (1 - np.exp(-hazard * t) * (1 + hazard * t))\n actual_mean = 1 / hazard * (1 - np.exp(-hazard * t))\n actual_var = sq - actual_mean ** 2\n\n assert abs(utils.restricted_mean_survival_time(expf, t=t, return_variance=True)[0] - actual_mean) < 0.001\n assert abs(utils.restricted_mean_survival_time(expf, t=t, return_variance=True)[1] - actual_var) < 0.001\n\n\ndef test_find_best_parametric_model():\n T = np.random.exponential(2, 1000)\n E = np.ones_like(T)\n\n model, score = utils.find_best_parametric_model(T, E)\n assert True\n\n\ndef test_find_best_parametric_model_can_accept_other_models():\n T = np.random.exponential(2, 1000)\n model, score = utils.find_best_parametric_model(T, additional_models=[ExponentialFitter(), ExponentialFitter()])\n assert True\n\n\ndef test_find_best_parametric_model_with_BIC():\n T = np.random.exponential(2, 1000)\n model, score = utils.find_best_parametric_model(T, scoring_method=\"BIC\")\n assert True\n\n\ndef test_find_best_parametric_model_works_for_left_censoring():\n T = np.random.exponential(2, 100)\n model, score = utils.find_best_parametric_model(T, censoring_type=\"left\", show_progress=True)\n assert True\n\n\ndef test_find_best_parametric_model_works_for_interval_censoring():\n T_1 = np.random.exponential(2, 100)\n T_2 = T_1 + 1\n model, score = utils.find_best_parametric_model((T_1, T_2), censoring_type=\"interval\", show_progress=True)\n assert True\n\n\ndef test_find_best_parametric_model_works_with_weights_and_entry():\n T = np.random.exponential(5, 100)\n W = np.random.randint(1, 5, size=100)\n entry = np.random.exponential(0.01, 100)\n model, score = utils.find_best_parametric_model(T, weights=W, entry=entry, show_progress=True)\n assert True\n\n\ndef test_safe_exp():\n from lifelines.utils.safe_exp import MAX\n\n assert safe_exp(4.0) == np.exp(4.0)\n assert safe_exp(MAX) == np.exp(MAX)\n assert safe_exp(MAX + 1) == np.exp(MAX)\n\n from autograd import grad\n\n assert grad(safe_exp)(4.0) == np.exp(4.0)\n assert grad(safe_exp)(MAX) == np.exp(MAX)\n assert grad(safe_exp)(MAX + 1) == np.exp(MAX)\n"
] | [
[
"pandas.testing.assert_series_equal",
"pandas.Series",
"numpy.linspace",
"pandas.DataFrame",
"numpy.random.randn",
"pandas.testing.assert_frame_equal",
"numpy.zeros_like",
"pandas.DataFrame.from_records",
"numpy.exp",
"numpy.random.randint",
"numpy.ones_like",
"numpy.arange",
"numpy.testing.assert_almost_equal",
"numpy.zeros",
"pandas.notnull",
"numpy.random.choice",
"numpy.linalg.lstsq",
"pandas.Interval",
"numpy.random.binomial",
"numpy.array",
"sklearn.model_selection.cross_val_score",
"pandas.isnull",
"numpy.random.exponential",
"numpy.linalg.norm",
"numpy.ones",
"numpy.testing.assert_array_equal",
"numpy.random.normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
roozhou/botty | [
"a67a87845687cdf6900af10a13dc7170684faa9a",
"a67a87845687cdf6900af10a13dc7170684faa9a"
] | [
"src/char/trapsin.py",
"src/char/bone_necro.py"
] | [
"import keyboard\nfrom utils.custom_mouse import mouse\nfrom char import IChar\nfrom pather import Pather\nfrom logger import Logger\nfrom screen import convert_abs_to_monitor, convert_screen_to_abs, grab\nfrom config import Config\nfrom utils.misc import wait, rotate_vec, unit_vector\nimport random\nfrom pather import Location, Pather\nimport numpy as np\n\n\nclass Trapsin(IChar):\n def __init__(self, skill_hotkeys: dict, pather: Pather):\n Logger.info(\"Setting up Trapsin\")\n super().__init__(skill_hotkeys)\n self._pather = pather\n\n def pre_buff(self):\n if Config().char[\"cta_available\"]:\n self._pre_buff_cta()\n if self._skill_hotkeys[\"fade\"]:\n keyboard.send(self._skill_hotkeys[\"fade\"])\n wait(0.1, 0.13)\n mouse.click(button=\"right\")\n wait(self._cast_duration)\n if self._skill_hotkeys[\"shadow_warrior\"]:\n keyboard.send(self._skill_hotkeys[\"shadow_warrior\"])\n wait(0.1, 0.13)\n mouse.click(button=\"right\")\n wait(self._cast_duration)\n if self._skill_hotkeys[\"burst_of_speed\"]:\n keyboard.send(self._skill_hotkeys[\"burst_of_speed\"])\n wait(0.1, 0.13)\n mouse.click(button=\"right\")\n wait(self._cast_duration)\n\n def _left_attack(self, cast_pos_abs: tuple[float, float], spray: int = 10):\n keyboard.send(Config().char[\"stand_still\"], do_release=False)\n if self._skill_hotkeys[\"skill_left\"]:\n keyboard.send(self._skill_hotkeys[\"skill_left\"])\n for _ in range(4):\n x = cast_pos_abs[0] + (random.random() * 2*spray - spray)\n y = cast_pos_abs[1] + (random.random() * 2*spray - spray)\n cast_pos_monitor = convert_abs_to_monitor((x, y))\n mouse.move(*cast_pos_monitor)\n mouse.press(button=\"left\")\n wait(0.2, 0.3)\n mouse.release(button=\"left\")\n keyboard.send(Config().char[\"stand_still\"], do_press=False)\n\n\n def _right_attack(self, cast_pos_abs: tuple[float, float], spray: float = 10):\n keyboard.send(self._skill_hotkeys[\"lightning_sentry\"])\n x = cast_pos_abs[0] + (random.random() * 2 * spray - spray)\n y = cast_pos_abs[1] + (random.random() * 2 * spray - spray)\n cast_pos_monitor = convert_abs_to_monitor((x, y))\n mouse.move(*cast_pos_monitor)\n def atk(num: int):\n for _ in range(num):\n mouse.press(button=\"right\")\n wait(0.20)\n mouse.release(button=\"right\")\n wait(0.15)\n atk(4)\n keyboard.send(self._skill_hotkeys[\"death_sentry\"])\n atk(1)\n\n def kill_pindle(self) -> bool:\n atk_len = max(1, int(Config().char[\"atk_len_pindle\"] / 2))\n pindle_pos_abs = convert_screen_to_abs(Config().path[\"pindle_end\"][0])\n cast_pos_abs = [pindle_pos_abs[0] * 0.9, pindle_pos_abs[1] * 0.9]\n for _ in range(atk_len):\n self._right_attack(cast_pos_abs, 11)\n self._left_attack(cast_pos_abs, 11)\n # Move to items\n wait(self._cast_duration, self._cast_duration + 0.2)\n if self.capabilities.can_teleport_natively:\n self._pather.traverse_nodes_fixed(\"pindle_end\", self)\n else:\n self._pather.traverse_nodes((Location.A5_PINDLE_SAFE_DIST, Location.A5_PINDLE_END), self, force_tp=True)\n return True\n\n def kill_eldritch(self) -> bool:\n atk_len = max(1, int(Config().char[\"atk_len_eldritch\"] / 2))\n eld_pos_abs = convert_screen_to_abs(Config().path[\"eldritch_end\"][0])\n cast_pos_abs = [eld_pos_abs[0] * 0.9, eld_pos_abs[1] * 0.9]\n for _ in range(atk_len):\n self._right_attack(cast_pos_abs, 90)\n self._left_attack(cast_pos_abs, 90)\n # Move to items\n wait(self._cast_duration, self._cast_duration + 0.2)\n if self.capabilities.can_teleport_natively:\n self._pather.traverse_nodes_fixed(\"eldritch_end\", self)\n else:\n 
self._pather.traverse_nodes((Location.A5_ELDRITCH_SAFE_DIST, Location.A5_ELDRITCH_END), self, timeout=0.6, force_tp=True)\n return True\n\n def kill_shenk(self) -> bool:\n atk_len = max(1, int(Config().char[\"atk_len_shenk\"] / 2))\n shenk_pos_abs = self._pather.find_abs_node_pos(149, grab())\n if shenk_pos_abs is None:\n shenk_pos_abs = convert_screen_to_abs(Config().path[\"shenk_end\"][0])\n cast_pos_abs = [shenk_pos_abs[0] * 0.9, shenk_pos_abs[1] * 0.9]\n for _ in range(atk_len):\n self._right_attack(cast_pos_abs, 90)\n self._left_attack(cast_pos_abs, 90)\n # Move to items\n wait(self._cast_duration, self._cast_duration + 0.2)\n self._pather.traverse_nodes((Location.A5_SHENK_SAFE_DIST, Location.A5_SHENK_END), self, timeout=1.4, force_tp=True)\n return True\n\n def kill_nihlathak(self, end_nodes: list[int]) -> bool:\n # Find nilhlatak position\n atk_len = max(1, int(Config().char[\"atk_len_nihlathak\"] / 2))\n for i in range(atk_len):\n nihlathak_pos_abs = self._pather.find_abs_node_pos(end_nodes[-1], grab())\n if nihlathak_pos_abs is None:\n return False\n cast_pos_abs = np.array([nihlathak_pos_abs[0] * 0.9, nihlathak_pos_abs[1] * 0.9])\n self._left_attack(cast_pos_abs, 90)\n self._right_attack(cast_pos_abs, 90)\n # Do some tele \"dancing\" after each sequence\n if i < atk_len - 1:\n rot_deg = random.randint(-10, 10) if i % 2 == 0 else random.randint(170, 190)\n tele_pos_abs = unit_vector(rotate_vec(cast_pos_abs, rot_deg)) * 100\n pos_m = convert_abs_to_monitor(tele_pos_abs)\n self.pre_move()\n self.move(pos_m)\n # Move to items\n wait(self._cast_duration, self._cast_duration + 0.2)\n self._pather.traverse_nodes(end_nodes, self, timeout=0.8)\n return True\n\n\nif __name__ == \"__main__\":\n import os\n import keyboard\n keyboard.add_hotkey('f12', lambda: Logger.info('Force Exit (f12)') or os._exit(1))\n keyboard.wait(\"f11\")\n from config import Config\n from char import Trapsin\n pather = Pather()\n char = Trapsin(Config().trapsin, Config().char, pather)",
"import keyboard\nfrom utils.custom_mouse import mouse\nfrom char import IChar\nimport template_finder\nfrom pather import Pather\nfrom logger import Logger\nfrom screen import grab, convert_abs_to_monitor, convert_screen_to_abs\nfrom config import Config\nfrom utils.misc import wait, rotate_vec, unit_vector\nimport random\nfrom pather import Location, Pather\nimport screen as screen\nimport numpy as np\nimport time\nimport os\nfrom ui_manager import ScreenObjects\n\nclass Bone_Necro(IChar):\n def __init__(self, skill_hotkeys: dict, pather: Pather):\n Logger.info(\"Setting up Bone Necro\")\n super().__init__(skill_hotkeys)\n self._pather = pather\n if \"damage_scaling\" in Config().bone_necro:\n self.damage_scaling = float(Config().bone_necro[\"damage_scaling\"])\n\n def move_to(self, x, y):\n pos_m = convert_abs_to_monitor((x, y))\n self.pre_move()\n self.move(pos_m, force_move=True)\n\n def bone_wall(self, cast_pos_abs: tuple[float, float], spray: int):\n if not self._skill_hotkeys[\"bone_wall\"]:\n raise ValueError(\"You did not set bone_wall hotkey!\")\n keyboard.send(Config().char[\"stand_still\"], do_release=False)\n keyboard.send(self._skill_hotkeys[\"bone_wall\"])\n wait(0.02, 0.08)\n x = cast_pos_abs[0] + (random.random() * 2*spray - spray)\n y = cast_pos_abs[1] + (random.random() * 2*spray - spray)\n cast_pos_monitor = convert_abs_to_monitor((x, y))\n mouse.move(*cast_pos_monitor)\n mouse.press(button=\"right\")\n wait(self._cast_duration+.04, self._cast_duration+.08)\n mouse.release(button=\"right\")\n keyboard.send(Config().char[\"stand_still\"], do_press=False)\n\n def pre_buff(self):\n self.bone_armor()\n #only CTA if pre trav\n if Config().char[\"cta_available\"]:\n self._pre_buff_cta()\n Logger.info(\"prebuff/cta\")\n\n def _clay_golem(self):\n Logger.debug('Casting clay golem')\n keyboard.send(self._skill_hotkeys[\"clay_golem\"])\n wait(0.05, 0.2)\n mouse.click(button=\"right\")\n wait(self._cast_duration)\n\n def bone_armor(self):\n if self._skill_hotkeys[\"bone_armor\"]:\n keyboard.send(self._skill_hotkeys[\"bone_armor\"])\n wait(0.04, 0.1)\n mouse.click(button=\"right\")\n wait(self._cast_duration)\n if self._skill_hotkeys[\"clay_golem\"]:\n keyboard.send(self._skill_hotkeys[\"clay_golem\"])\n wait(0.04, 0.1)\n mouse.click(button=\"right\")\n wait(self._cast_duration)\n\n def _bone_armor(self):\n if self._skill_hotkeys[\"bone_armor\"]:\n keyboard.send(self._skill_hotkeys[\"bone_armor\"])\n wait(0.04, 0.1)\n mouse.click(button=\"right\")\n wait(self._cast_duration)\n\n def _corpse_explosion(self, cast_pos_abs: tuple[float, float], spray: int = 10,cast_count: int = 8):\n keyboard.send(Config().char[\"stand_still\"], do_release=False)\n Logger.debug(f'casting corpse explosion {cast_count} times with spray = {spray}')\n for _ in range(cast_count):\n if self._skill_hotkeys[\"corpse_explosion\"]:\n keyboard.send(self._skill_hotkeys[\"corpse_explosion\"])\n x = cast_pos_abs[0] + (random.random() * 2*spray - spray)\n y = cast_pos_abs[1] + (random.random() * 2*spray - spray)\n cast_pos_monitor = convert_abs_to_monitor((x, y))\n mouse.move(*cast_pos_monitor)\n mouse.press(button=\"right\")\n wait(0.075, 0.1)\n mouse.release(button=\"right\")\n keyboard.send(Config().char[\"stand_still\"], do_press=False)\n\n def _cast_circle(self, cast_dir: tuple[float,float],cast_start_angle: float=0.0, cast_end_angle: float=90.0,cast_div: int = 10,cast_spell: str='raise_skeleton',delay: float=1.0, radius=120, hold_duration: float = 3, hold=True):\n if hold:\n Logger.info(f'Circle cast 
{cast_spell} from {cast_start_angle}º to {cast_end_angle}º over {hold_duration}s')\n else:\n Logger.info(f'Circle cast {cast_spell} from {cast_start_angle}º to {cast_end_angle}º over {cast_div} casts')\n\n keyboard.send(Config().char[\"stand_still\"], do_release=False)\n keyboard.send(self._skill_hotkeys[cast_spell])\n if hold:\n mouse.press(button=\"right\")\n\n for i in range(cast_div):\n angle = self._lerp(cast_start_angle,cast_end_angle,float(i)/cast_div)\n target = unit_vector(rotate_vec(cast_dir, angle))\n Logger.debug(f\"Circle cast - current angle: {angle}º\")\n circle_pos_screen = self._pather._adjust_abs_range_to_screen(target*radius)\n circle_pos_monitor = convert_abs_to_monitor(circle_pos_screen)\n start = time.time()\n mouse.move(*circle_pos_monitor,delay_factor=[0.95*delay, 1.05*delay])\n duration = time.time() - start\n\n if not hold:\n mouse.press(button=\"right\")\n wait(.04, .08)\n mouse.release(button=\"right\")\n wait(self._cast_duration)\n else:\n #adjust the speed so we finish in approximately the time requested\n expected = (hold_duration/cast_div)\n delay = delay*(expected/duration)\n if hold:\n mouse.release(button=\"right\")\n keyboard.send(Config().char[\"stand_still\"], do_press=False)\n\n\n def kill_pindle(self) -> bool:\n for pos in [[200,-100], [-150,100] ]:\n self.bone_wall(pos, spray=10)\n self.cast_in_arc(ability='bone_spear', cast_pos_abs=[110,-50], spread_deg=15, time_in_s=5)\n self._corpse_explosion([165,-75], spray=100, cast_count=5)\n self.cast_in_arc(ability='bone_spirit', cast_pos_abs=[110,-50], spread_deg=15, time_in_s=2.5)\n self._pather.traverse_nodes_fixed(\"pindle_end\", self)\n return True\n\n def kill_eldritch(self) -> bool:\n #build an arc of bone walls\n for pos in [[50,-200], [-200,-175], [-350,50]]:\n self.bone_wall(pos, spray=10)\n self.cast_in_arc(ability='teeth', cast_pos_abs=[-20,-150], spread_deg=15, time_in_s=3)\n self.cast_in_arc(ability='bone_spear', cast_pos_abs=[-20,-150], spread_deg=15, time_in_s=2)\n self._corpse_explosion([-20,-240], spray=100, cast_count=5)\n self.cast_in_arc(ability='bone_spirit', cast_pos_abs=[0,-80], spread_deg=60, time_in_s=2.5)\n self._pather.traverse_nodes((Location.A5_ELDRITCH_SAFE_DIST, Location.A5_ELDRITCH_END), self, timeout=0.6, force_tp=True)\n self.bone_armor()\n return True\n\n\n def kill_shenk(self) -> bool:\n self._cast_circle(cast_dir=[1,1],cast_start_angle=0,cast_end_angle=360,cast_div=5,cast_spell='bone_wall',delay=.8,radius=100, hold=False)\n self.cast_in_arc(ability='teeth', cast_pos_abs=[160,75], spread_deg=360, time_in_s=6)\n self.cast_in_arc(ability='teeth', cast_pos_abs=[160,75], spread_deg=30, time_in_s=2)\n self._corpse_explosion([0,0], spray=200, cast_count=4)\n self.cast_in_arc(ability='bone_spear', cast_pos_abs=[160,75], spread_deg=30, time_in_s=3)\n self._corpse_explosion([240,112], spray=200, cast_count=8)\n self.cast_in_arc(ability='bone_spirit', cast_pos_abs=[80,37], spread_deg=60, time_in_s=3)\n self._pather.traverse_nodes((Location.A5_SHENK_SAFE_DIST, Location.A5_SHENK_END), self, timeout=1.0)\n return True\n\n\n def kill_council(self) -> bool:\n #move down adjacent to the right moat\n self.move_to(-150,150)\n self.move_to(-150,150)\n\n #moat on right side, encircle with bone walls on the other 3 sides\n for pos in [[100,-100], [-125,-25], [-50,100]]:\n self.bone_wall(pos, spray=10)\n self.cast_in_arc(ability='teeth', cast_pos_abs=[40,-100], spread_deg=180, time_in_s=5)\n self.cast_in_arc(ability='bone_spear', cast_pos_abs=[40,-100], spread_deg=120, time_in_s=8)\n\n 
self._corpse_explosion([40,-100], spray=200, cast_count=8)\n self.cast_in_arc(ability='bone_spirit', cast_pos_abs=[20,-50], spread_deg=180, time_in_s=5)\n self._corpse_explosion([40,-100], spray=200, cast_count=8)\n self.cast_in_arc(ability='bone_spirit', cast_pos_abs=[20,-50], spread_deg=360, time_in_s=4)\n\n return True\n\n\n def kill_nihlathak(self, end_nodes: list[int]) -> bool:\n # Find nilhlatak position\n nihlathak_pos_abs = self._pather.find_abs_node_pos(end_nodes[-1], grab())\n if nihlathak_pos_abs is None:\n return False\n\n cast_pos_abs = np.array(nihlathak_pos_abs)*.2\n self._cast_circle(cast_dir=[1,1],cast_start_angle=0,cast_end_angle=360,cast_div=5,cast_spell='bone_wall',delay=.8,radius=100, hold=False)\n self._bone_armor()\n self.cast_in_arc(ability='teeth', cast_pos_abs=cast_pos_abs, spread_deg=150, time_in_s=5)\n self._bone_armor()\n self._corpse_explosion(cast_pos_abs, spray=200, cast_count=8)\n self.cast_in_arc(ability='bone_spear', cast_pos_abs=cast_pos_abs, spread_deg=10, time_in_s=5)\n self._bone_armor()\n\n self._corpse_explosion(np.array(nihlathak_pos_abs)*.75, spray=200, cast_count=10)\n self.cast_in_arc(ability='bone_spirit', cast_pos_abs=cast_pos_abs, spread_deg=30, time_in_s=2.5)\n\n # Move to items\n wait(self._cast_duration, self._cast_duration + 0.2)\n self._pather.traverse_nodes(end_nodes, self, timeout=0.8)\n return True\n\n def kill_summoner(self) -> bool:\n # Attack\n self.cast_in_arc(ability='teeth', cast_pos_abs=[30,30], spread_deg=360, time_in_s=3)\n self.cast_in_arc(ability='bone_spirit', cast_pos_abs=[30,30], spread_deg=360, time_in_s=2)\n self._corpse_explosion([0,0], spray=200, cast_count=8)\n self.cast_in_arc(ability='bone_spirit', cast_pos_abs=[30,30], spread_deg=360, time_in_s=2)\n\n return True\n\nif __name__ == \"__main__\":\n import os\n import keyboard\n keyboard.add_hotkey('f12', lambda: Logger.info('Force Exit (f12)') or os._exit(1))\n keyboard.wait(\"f11\")\n from config import Config\n from char import Necro\n pather = Pather()\n char = Necro(Config().necro, Config().char, pather)\n"
] | [
[
"numpy.array"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TD21forever/QoS-Predcition-Algorithm-library | [
"f4503462887d719a39c9ccddd6cc55546e783fd5"
] | [
"models/IPCC/model.py"
] | [
"import copy\nimport math\nimport numpy as np\nfrom tqdm import tqdm\nfrom utils.model_util import triad_to_matrix, nonzero_user_mean, nonzero_item_mean\n\n# 相似度计算库\nfrom scipy.stats import pearsonr\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n\nclass IPCCModel(object):\n def __init__(self) -> None:\n super().__init__()\n self.matrix = None # QoS矩阵\n self.u_mean = None # 每个用户的评分均值(用于计算修正的余弦相似度)\n self.i_mean = None # 每个项目的评分均值\n self.similarity_matrix = None # 项目相似度矩阵\n self._nan_symbol = -1 # 缺失项标记(数据集中使用-1表示缺失项)\n\n def _get_similarity_matrix(self, matrix, metric):\n \"\"\"获取项目相似度矩阵\n\n Args:\n matrix (): QoS矩阵\n metric (): 相似度计算方法, 可选参数: PCC(皮尔逊相关系数), COS(余弦相似度), ACOS(修正的余弦相似度)\n\n \"\"\"\n _m = copy.deepcopy(matrix)\n _m[_m == self._nan_symbol] = 0 # 将缺失项用0代替,以便之后计算\n n_items = matrix.shape[1]\n similarity_matrix = np.zeros((n_items, n_items))\n\n # 计算相似度矩阵\n for i in tqdm(range(n_items), desc=\"生成相似度矩阵\"):\n for j in range(i + 1, n_items):\n col_i = _m[:, i]\n col_j = _m[:, j]\n nonzero_i = np.nonzero(col_i)[0] # 非0元素对应的下标\n nonzero_j = np.nonzero(col_j)[0]\n intersect = np.intersect1d(nonzero_i,\n nonzero_j) # 对项目i,j同时有评分的用户集合\n\n if len(intersect) == 0:\n sim = 0\n else:\n # 依据指定的相似度计算方法计算项目i,j的相似度\n try:\n if metric == 'PCC':\n # 如果一个项目的评分向量中所有值都相等,则无法计算皮尔逊相关系数\n if len(set(col_i[intersect])) == 1 or len(\n set(col_j[intersect])) == 1:\n sim = 0\n else:\n sim = pearsonr(col_i[intersect],\n col_j[intersect])[0]\n elif metric == 'COS':\n sim = cosine_similarity(col_i[intersect],\n col_j[intersect])\n elif metric == 'ACOS':\n sim = adjusted_cosine_similarity(\n col_i, col_j, intersect, self.u_mean)\n except Exception as e:\n sim = 0\n\n similarity_matrix[i][j] = similarity_matrix[j][i] = sim\n\n return similarity_matrix\n\n def _get_similarity_items(self, iid, topk=-1):\n \"\"\"获取相似用户\n\n Args:\n iid (): 当前项目\n topk (): 相似项目数量, -1表示不限制数量\n\n Returns:\n 依照相似度从大到小排序, 与当前项目最为相似的前topk个相似项目\n\n \"\"\"\n assert isinstance(topk, int)\n ordered_sim_iid = (\n -self.similarity_matrix[iid]).argsort() # 按相似度从大到小排序后, 相似用户对应的索引\n if topk == -1:\n return ordered_sim_iid\n else:\n assert topk > 0\n return ordered_sim_iid[:topk]\n\n def get_similarity(self, iid_a, iid_b):\n \"\"\"传入两个uid,获取这两个用户的相似度\n \"\"\"\n if iid_a == iid_b:\n return float(1)\n if iid_a + 1 > self.matrix.shape[1] or iid_b + 1 > self.matrix.shape[1]:\n return 0\n if self.similarity_matrix is None:\n assert self.matrix is not None, \"Please fit first e.g. model.fit()\"\n self._get_similarity_matrix(self.matrix)\n\n return self.similarity_matrix[iid_a][iid_b]\n\n def fit(self, triad, metric='PCC'):\n \"\"\"训练模型\n\n Args:\n triad (): 数据三元组: (uid, iid, rating)\n metric (): 相似度计算方法, 可选参数: PCC(皮尔逊相关系数), COS(余弦相似度), ACOS(修正的余弦相似度)\n \"\"\"\n self.matrix = triad_to_matrix(triad, self._nan_symbol) # 数据三元组转QoS矩阵\n self.u_mean = nonzero_user_mean(self.matrix,\n self._nan_symbol) # 根据QoS矩阵计算每个用户的评分均值\n # FIXME 考虑i_mean为0的情况\n self.i_mean = nonzero_item_mean(self.matrix,\n self._nan_symbol) # 根据QoS矩阵计算每个项目的评分均值\n self.similarity_matrix = self._get_similarity_matrix(\n self.matrix, metric) # 根据QoS矩阵获取项目相似矩阵\n\n def predict(self, triad, topK=-1):\n y_list = [] # 真实评分\n y_pred_list = [] # 预测评分\n cold_boot_cnt = 0 # 冷启动统计\n\n for row in tqdm(triad, desc=\"Predict... 
\"):\n uid, iid, rate = int(row[0]), int(row[1]), float(row[2])\n # 冷启动: 新用户因为没有计算过相似用户, 因此无法预测评分\n if iid + 1 > self.matrix.shape[1]:\n cold_boot_cnt += 1\n continue\n i_mean = self.i_mean[iid]\n similarity_items = self._get_similarity_items(iid, topK)\n up = 0 # 分子\n down = 0 # 分母\n # 对于当前项目的每一个相似项目\n for sim_iid in similarity_items:\n sim_item_rate = self.matrix[uid][sim_iid] # 当前用户对相似项目的评分\n similarity = self.get_similarity(iid, sim_iid)\n # 如果当前用户对相似项目没有评分,则不进行计算\n if sim_item_rate == self._nan_symbol:\n continue\n up += similarity * (sim_item_rate - self.i_mean[sim_iid]\n ) # 相似度 * (相似项目评分 - 相似项目评分均值)\n down += similarity # 相似度的绝对值\n\n if down != 0:\n y_pred = i_mean + up / down\n else:\n y_pred = 0\n\n y_pred_list.append(y_pred)\n y_list.append(rate)\n\n print(f\"cold boot :{cold_boot_cnt / len(triad) * 100:4f}%\")\n return y_list, y_pred_list\n\n\ndef adjusted_cosine_similarity(x, y, intersect, u_mean):\n \"\"\"修正的余弦相似度\n\n Returns:\n\n \"\"\"\n n = len(x)\n if n != len(y):\n raise ValueError('x and y must have the same length.')\n if n < 2:\n raise ValueError('x and y must have length at least 2.')\n if len(intersect) < 2:\n raise ValueError('there must be at least two non-zero entries')\n\n x = np.asarray(x)\n y = np.asarray(y)\n\n multiply_sum = sum(\n (x[i] - u_mean[i]) * (y[i] - u_mean[i]) for i in intersect)\n pow_sum_x = sum(math.pow(x[i] - u_mean[i], 2) for i in intersect)\n pow_sum_y = sum(math.pow(y[i] - u_mean[i], 2) for i in intersect)\n\n return multiply_sum / math.sqrt(pow_sum_x * pow_sum_y)\n\n\nif __name__ == \"__main__\":\n triad = np.array([\n [0, 0, 0],\n [0, 1, 0],\n [1, 0, 1],\n [1, 1, 3],\n [1, 2, 4],\n [2, 0, 2],\n [2, 1, 3],\n [2, 2, 5],\n ])\n\n test = np.array([[0, 2, 3]])\n\n ipcc = IPCCModel()\n ipcc.fit(triad)\n ipcc.predict(test, 20)\n"
] | [
[
"numpy.nonzero",
"numpy.asarray",
"scipy.stats.pearsonr",
"sklearn.metrics.pairwise.cosine_similarity",
"numpy.intersect1d",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
XiaoSong9905/tvm | [
"48940f697e15d5b50fa1f032003e6c700ae1e423",
"48940f697e15d5b50fa1f032003e6c700ae1e423",
"48940f697e15d5b50fa1f032003e6c700ae1e423",
"48940f697e15d5b50fa1f032003e6c700ae1e423",
"48940f697e15d5b50fa1f032003e6c700ae1e423",
"48940f697e15d5b50fa1f032003e6c700ae1e423",
"48940f697e15d5b50fa1f032003e6c700ae1e423",
"48940f697e15d5b50fa1f032003e6c700ae1e423",
"48940f697e15d5b50fa1f032003e6c700ae1e423",
"48940f697e15d5b50fa1f032003e6c700ae1e423",
"48940f697e15d5b50fa1f032003e6c700ae1e423",
"48940f697e15d5b50fa1f032003e6c700ae1e423",
"48940f697e15d5b50fa1f032003e6c700ae1e423",
"48940f697e15d5b50fa1f032003e6c700ae1e423"
] | [
"tests/python/relay/test_op_qnn_subtract.py",
"python/tvm/autotvm/feature.py",
"gallery/tutorial/autotvm_relay_x86.py",
"tests/python/contrib/test_ethosn/test_conv2d.py",
"python/tvm/relay/backend/contrib/ethosu/te/inline.py",
"tests/python/contrib/test_rpc_server_device.py",
"python/tvm/relay/backend/contrib/ethosu/te/depthwise.py",
"tests/python/topi/python/test_topi_space_to_batch_nd.py",
"python/tvm/_ffi/runtime_ctypes.py",
"tests/python/relay/test_pass_partition_graph.py",
"python/tvm/relay/op/contrib/ethosu.py",
"gallery/how_to/tune_with_autoscheduler/tune_network_cuda.py",
"python/tvm/topi/testing/roi_pool_python.py",
"tests/python/unittest/test_meta_schedule_feature_extractor_per_store_feature.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport tvm\nimport numpy as np\nfrom tvm import relay\n\n\ndef qnn_subtract_driver(x_datas, y_datas, golden_outputs, scale_and_zp, data_dtype=\"uint8\"):\n # all x, y and golden outputs should be of the same length\n assert len(x_datas) == len(y_datas)\n assert len(y_datas) == len(golden_outputs)\n\n x = relay.var(\"x\", shape=(1, 4), dtype=data_dtype)\n y = relay.var(\"y\", shape=(1, 4), dtype=data_dtype)\n lhs_scale = relay.const(scale_and_zp[\"lhs_scale\"], \"float32\")\n lhs_zp = relay.const(scale_and_zp[\"lhs_zp\"], \"int32\")\n rhs_scale = relay.const(scale_and_zp[\"rhs_scale\"], \"float32\")\n rhs_zp = relay.const(scale_and_zp[\"rhs_zp\"], \"int32\")\n output_scale = relay.const(scale_and_zp[\"output_scale\"], \"float32\")\n output_zp = relay.const(scale_and_zp[\"output_zp\"], \"int32\")\n z = relay.qnn.op.subtract(\n lhs=x,\n rhs=y,\n lhs_scale=lhs_scale,\n lhs_zero_point=lhs_zp,\n rhs_scale=rhs_scale,\n rhs_zero_point=rhs_zp,\n output_scale=output_scale,\n output_zero_point=output_zp,\n )\n func = relay.Function([x, y], z)\n mod = tvm.IRModule.from_expr(func)\n mod = relay.transform.InferType()(mod)\n mod = relay.qnn.transform.CanonicalizeOps()(mod)\n func = mod[\"main\"]\n for i in range(0, len(x_datas)):\n x_data = x_datas[i]\n y_data = y_datas[i]\n golden_output = golden_outputs[i]\n op_res = relay.create_executor(\"graph\", device=tvm.cpu(0), target=\"llvm\").evaluate(func)(\n x_data, y_data\n )\n np.testing.assert_equal(op_res.numpy(), golden_output)\n\n\ndef test_tflite_same_io_qnn_params():\n scale_and_zp = {\n \"lhs_scale\": 0.00784314,\n \"lhs_zp\": 127,\n \"rhs_scale\": 0.00784314,\n \"rhs_zp\": 127,\n \"output_scale\": 0.00784314,\n \"output_zp\": 127,\n }\n x_datas = [\n np.array((140, 153, 165, 178)).reshape((1, 4)),\n np.array((25, 153, 178, 216)).reshape((1, 4)),\n np.array((25, 153, 216, 165)).reshape((1, 4)),\n ]\n y_datas = [\n np.array((204, 178, 165, 140)).reshape((1, 4)),\n np.array((204, 178, 191, 25)).reshape((1, 4)),\n np.array((204, 178, 25, 191)).reshape((1, 4)),\n ]\n golden_outputs = [\n np.array((63, 102, 127, 165)).reshape((1, 4)),\n np.array((0, 102, 114, 255)).reshape((1, 4)),\n np.array((0, 102, 255, 101)).reshape((1, 4)),\n ]\n qnn_subtract_driver(x_datas, y_datas, golden_outputs, scale_and_zp)\n\n\ndef test_tflite_different_io_qnn_params():\n scale_and_zp = {\n \"lhs_scale\": 0.0156863,\n \"lhs_zp\": 127,\n \"rhs_scale\": 0.0117647,\n \"rhs_zp\": 85,\n \"output_scale\": 0.0235294,\n \"output_zp\": 128,\n }\n x_datas = [\n np.array((76, 140, 153, 172)).reshape((1, 4)),\n np.array((133, 140, 146, 153)).reshape((1, 4)),\n np.array((76, 140, 172, 146)).reshape((1, 4)),\n ]\n y_datas = [\n np.array((136, 119, 128, 17)).reshape((1, 4)),\n np.array((136, 119, 111, 
94)).reshape((1, 4)),\n np.array((136, 119, 17, 128)).reshape((1, 4)),\n ]\n golden_outputs = [\n np.array((68, 120, 123, 192)).reshape((1, 4)),\n np.array((106, 120, 128, 140)).reshape((1, 4)),\n np.array((68, 120, 192, 119)).reshape((1, 4)),\n ]\n qnn_subtract_driver(x_datas, y_datas, golden_outputs, scale_and_zp)\n\n\ndef test_saturation():\n # Same params\n scale_and_zp = {\n \"lhs_scale\": 0.125,\n \"lhs_zp\": 0,\n \"rhs_scale\": 0.125,\n \"rhs_zp\": 0,\n \"output_scale\": 0.125,\n \"output_zp\": 0,\n }\n x_data = [np.array((255, 1, 1, 0)).reshape((1, 4))]\n y_data = [np.array((255, 255, 128, 0)).reshape((1, 4))]\n golden_output = [np.array((0, 0, 0, 0)).reshape((1, 4))]\n qnn_subtract_driver(x_data, y_data, golden_output, scale_and_zp)\n\n # Same params, different scale\n scale_and_zp = {\n \"lhs_scale\": 0.125,\n \"lhs_zp\": 0,\n \"rhs_scale\": 0.125,\n \"rhs_zp\": 0,\n \"output_scale\": 0.25,\n \"output_zp\": 0,\n }\n x_data = [np.array((255, 1, 200, 0)).reshape((1, 4))]\n y_data = [np.array((255, 255, 127, 0)).reshape((1, 4))]\n golden_output = [np.array((0, 0, 36, 0)).reshape((1, 4))]\n qnn_subtract_driver(x_data, y_data, golden_output, scale_and_zp)\n\n # All params different\n scale_and_zp = {\n \"lhs_scale\": 0.5,\n \"lhs_zp\": 0,\n \"rhs_scale\": 0.25,\n \"rhs_zp\": 0,\n \"output_scale\": 0.125,\n \"output_zp\": 0,\n }\n x_data = [np.array((255, 0, 1, 0)).reshape((1, 4))]\n y_data = [np.array((0, 128, 64, 0)).reshape((1, 4))]\n golden_output = [np.array((255, 0, 0, 0)).reshape((1, 4))]\n qnn_subtract_driver(x_data, y_data, golden_output, scale_and_zp)\n\n\nif __name__ == \"__main__\":\n test_tflite_same_io_qnn_params()\n test_tflite_different_io_qnn_params()\n test_saturation()\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=invalid-name,\n\"\"\"Extract feature of iter vars\n\nThere are two types of feature\n1) Itervar feature\n This feature is extracted based on loop variables.\n Different loop structures will result in different shapes of feature\n2) Curve sample feature (relation feature)\n This feature is extracted by sampling relation curve.\n This feature is invariant of loop structure.\n\"\"\"\n\nimport struct\nimport numpy as np\nimport tvm._ffi\n\nfrom tvm.target import Target\nfrom tvm.driver import build_module\n\n\ndef ana_lower(sch, args, binds=None, simple_mode=True):\n \"\"\"Do lower while keeping all axes in IR\n i.e. Do not eliminate loop with extent of 1, do not vectorize, unroll or inject virtual threads\n \"\"\"\n sch = sch.normalize()\n # Phase 0\n context = tvm.transform.PassContext(config={\"tir.debug_keep_trivial_loop\": True})\n with context:\n mod = build_module.schedule_to_module(sch, args, binds=binds)\n\n mod = tvm.tir.transform.StorageFlatten(64)(mod._move())\n mod = tvm.tir.transform.Simplify()(mod._move())\n assert simple_mode\n return mod[\"main\"].body\n\n\ntry:\n _get_buffer_curve_sample_flatten = tvm._ffi.get_global_func(\n \"autotvm.feature.GetCurveSampleFeatureFlatten\"\n )\n _get_itervar_feature = tvm._ffi.get_global_func(\"autotvm.feature.GetItervarFeature\")\n _get_itervar_feature_flatten = tvm._ffi.get_global_func(\n \"autotvm.feature.GetItervarFeatureFlatten\"\n )\nexcept ValueError as e:\n\n def raise_error(*args, **kwargs): # pylint: disable=unused-argument\n raise RuntimeError(\"Cannot load autotvm c++ API\")\n\n _get_buffer_curve_sample_flatten = (\n _get_itervar_feature\n ) = _get_itervar_feature_flatten = raise_error\n\n\ndef get_itervar_feature(sch, args, take_log=False):\n \"\"\"get features of iter vars\n\n Parameters\n ----------\n sch: tvm.te.schedule.Schedule\n args: Array of te.tensor.Tensor\n the buffer args for lower\n take_log: bool\n whether take log of numerical statics\n\n Returns\n -------\n features of every axis in the IR, see doc/features.md for detail\n \"\"\"\n stmt = ana_lower(sch, args, simple_mode=True)\n feas = _get_itervar_feature(stmt, take_log)\n\n # convert tvm node to python type\n ret = []\n for row in feas:\n tmp = []\n tmp.append([row[0][0].value, row[0][1]])\n for item in row[1:]:\n tmp.append([item[0].value] + [x.value for x in item[1:]])\n ret.append(tmp)\n return ret\n\n\ndef flatten_itervar_feature(fea):\n \"\"\"flatten features into one-dimensional feature vectors\n\n Parameters\n ----------\n fea: list\n return value of get_itervar_feature\n\n Returns\n -------\n flatten_feature: np.ndarray\n one-dimensional vector\n \"\"\"\n flatten = []\n for axis in fea:\n for pair in axis[1:]:\n flatten.append(pair[1:])\n return 
np.concatenate(flatten)\n\n\ndef get_itervar_feature_flatten(sch, args, take_log=True):\n \"\"\"get flatten features of iter vars\n this is equivalent to get_itervar_feature + flatten_itervar_feature, but much faster.\n\n Parameters\n ----------\n sch: tvm.te.schedule.Schedule\n args: Array of te.tensor.Tensor\n the buffer args for lower\n take_log: bool\n whether take log of numerical statics\n\n Returns\n -------\n flatten_feature: np.ndarray\n one-dimensional vector\n \"\"\"\n stmt = ana_lower(sch, args, simple_mode=True)\n feas = _get_itervar_feature_flatten(stmt, take_log)\n feas = struct.unpack(\"%df\" % (len(feas) // 4), feas)\n return feas\n\n\ndef get_flatten_name(fea):\n \"\"\"Get names of feature after flatten.\n\n Parameters\n ----------\n fea: list or str\n return value of get_itervar_feature or a line of logfile\n\n Returns\n -------\n feature_names: Array of str\n \"\"\"\n\n feature_name = {\n \"_attr_\": [\"length\", \"nest_level\", \"topdown\", \"bottomup\"]\n + [\"ann_%d\" % i for i in range(20)],\n \"_arith_\": [\"add\", \"mul\", \"div\"],\n \"buf_touch\": [\"stride\", \"mod\", \"count\", \"reuse\", \"T_count\", \"T_reuse\"],\n }\n\n if isinstance(fea, str):\n # pylint: disable=import-outside-toplevel\n from .record import decode\n\n # flatten line to feature\n line = fea\n ret = decode(line)\n if ret is None:\n raise ValueError(\"Unsupported AutoTVM log format\")\n inp, _ = ret\n target = Target(inp.target)\n with target:\n s, args = inp.template.instantiate(inp.config)\n fea = get_itervar_feature(s, args)\n\n names = []\n ct = 0\n for row in fea:\n var_name = str(row[0][1])\n for pair in row[1:]:\n key = pair[0]\n if key in feature_name:\n name_list = feature_name[key]\n else:\n name_list = feature_name[\"buf_touch\"]\n\n for i in range(len((pair[1:]))):\n names.append(\".\".join([\"f%d\" % ct, var_name, key, name_list[i]]))\n ct += 1\n return names\n\n\ndef get_buffer_curve_sample_flatten(sch, args, sample_n=30):\n \"\"\"\n Get flatten curve sample feature (relation feature)\n\n Parameters\n ----------\n sch: tvm.te.schedule.Schedule\n args: Array of te.tensor.Tensor\n the buffer args for lower\n sample_n: int\n number of sample points along one dimension\n\n Returns\n -------\n flatten_feature: np.ndarray\n one-dimensional vector\n \"\"\"\n stmt = ana_lower(sch, args, simple_mode=True)\n feas = _get_buffer_curve_sample_flatten(stmt, sample_n, False)\n feas = struct.unpack(\"%df\" % (len(feas) // 4), feas)\n return feas\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"\nCompiling and Optimizing a Model with the Python Interface (AutoTVM)\n====================================================================\n**Author**:\n`Chris Hoge <https://github.com/hogepodge>`_\n\nIn the `TVMC Tutorial <tvmc_command_line_driver>`_, we covered how to compile, run, and tune a\npre-trained vision model, ResNet-50 v2 using the command line interface for\nTVM, TVMC. TVM is more that just a command-line tool though, it is an\noptimizing framework with APIs available for a number of different languages\nthat gives you tremendous flexibility in working with machine learning models.\n\nIn this tutorial we will cover the same ground we did with TVMC, but show how\nit is done with the Python API. Upon completion of this section, we will have\nused the Python API for TVM to accomplish the following tasks:\n\n* Compile a pre-trained ResNet-50 v2 model for the TVM runtime.\n* Run a real image through the compiled model, and interpret the output and model\n performance.\n* Tune the model that model on a CPU using TVM.\n* Re-compile an optimized model using the tuning data collected by TVM.\n* Run the image through the optimized model, and compare the output and model\n performance.\n\nThe goal of this section is to give you an overview of TVM's capabilites and\nhow to use them through the Python API.\n\"\"\"\n\n################################################################################\n# TVM is a deep learning compiler framework, with a number of different modules\n# available for working with deep learning models and operators. In this\n# tutorial we will work through how to load, compile, and optimize a model\n# using the Python API.\n#\n# We begin by importing a number of dependencies, including ``onnx`` for\n# loading and converting the model, helper utilities for downloading test data,\n# the Python Image Library for working with the image data, ``numpy`` for pre\n# and post-processing of the image data, the TVM Relay framework, and the TVM\n# Graph Executor.\n\nimport onnx\nfrom tvm.contrib.download import download_testdata\nfrom PIL import Image\nimport numpy as np\nimport tvm.relay as relay\nimport tvm\nfrom tvm.contrib import graph_executor\n\n################################################################################\n# Downloading and Loading the ONNX Model\n# --------------------------------------\n#\n# For this tutorial, we will be working with ResNet-50 v2. ResNet-50 is a\n# convolutional neural network that is 50 layers deep and designed to classify\n# images. The model we will be using has been pre-trained on more than a\n# million images with 1000 different classifications. The network has an input\n# image size of 224x224. 
If you are interested exploring more of how the\n# ResNet-50 model is structured, we recommend downloading\n# `Netron <https://netron.app>`_, a freely available ML model viewer.\n#\n# TVM provides a helper library to download pre-trained models. By providing a\n# model URL, file name, and model type through the module, TVM will download\n# the model and save it to disk. For the instance of an ONNX model, you can\n# then load it into memory using the ONNX runtime.\n#\n# .. admonition:: Working with Other Model Formats\n#\n# TVM supports many popular model formats. A list can be found in the\n# :ref:`Compile Deep Learning Models <tutorial-frontend>` section of the TVM\n# Documentation.\n\nmodel_url = (\n \"https://github.com/onnx/models/raw/main/\"\n \"vision/classification/resnet/model/\"\n \"resnet50-v2-7.onnx\"\n)\n\nmodel_path = download_testdata(model_url, \"resnet50-v2-7.onnx\", module=\"onnx\")\nonnx_model = onnx.load(model_path)\n\n################################################################################\n# Downloading, Preprocessing, and Loading the Test Image\n# ------------------------------------------------------\n#\n# Each model is particular when it comes to expected tensor shapes, formats and\n# data types. For this reason, most models require some pre and\n# post-processing, to ensure the input is valid and to interpret the output.\n# TVMC has adopted NumPy's ``.npz`` format for both input and output data.\n#\n# As input for this tutorial, we will use the image of a cat, but you can feel\n# free to substitute this image for any of your choosing.\n#\n# .. image:: https://s3.amazonaws.com/model-server/inputs/kitten.jpg\n# :height: 224px\n# :width: 224px\n# :align: center\n#\n# Download the image data, then convert it to a numpy array to use as an input to the model.\n\nimg_url = \"https://s3.amazonaws.com/model-server/inputs/kitten.jpg\"\nimg_path = download_testdata(img_url, \"imagenet_cat.png\", module=\"data\")\n\n# Resize it to 224x224\nresized_image = Image.open(img_path).resize((224, 224))\nimg_data = np.asarray(resized_image).astype(\"float32\")\n\n# Our input image is in HWC layout while ONNX expects CHW input, so convert the array\nimg_data = np.transpose(img_data, (2, 0, 1))\n\n# Normalize according to the ImageNet input specification\nimagenet_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))\nimagenet_stddev = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))\nnorm_img_data = (img_data / 255 - imagenet_mean) / imagenet_stddev\n\n# Add the batch dimension, as we are expecting 4-dimensional input: NCHW.\nimg_data = np.expand_dims(norm_img_data, axis=0)\n\n###############################################################################\n# Compile the Model With Relay\n# ----------------------------\n#\n# The next step is to compile the ResNet model. We begin by importing the model\n# to relay using the `from_onnx` importer. We then build the model, with\n# standard optimizations, into a TVM library. Finally, we create a TVM graph\n# runtime module from the library.\n\ntarget = \"llvm\"\n\n######################################################################\n# .. admonition:: Defining the Correct Target\n#\n# Specifying the correct target can have a huge impact on the performance of\n# the compiled module, as it can take advantage of hardware features\n# available on the target. 
For more information, please refer to\n# :ref:`Auto-tuning a convolutional network for x86 CPU <tune_relay_x86>`.\n# We recommend identifying which CPU you are running, along with optional\n# features, and set the target appropriately. For example, for some\n# processors ``target = \"llvm -mcpu=skylake\"``, or ``target = \"llvm\n# -mcpu=skylake-avx512\"`` for processors with the AVX-512 vector instruction\n# set.\n#\n\n# The input name may vary across model types. You can use a tool\n# like Netron to check input names\ninput_name = \"data\"\nshape_dict = {input_name: img_data.shape}\n\nmod, params = relay.frontend.from_onnx(onnx_model, shape_dict)\n\nwith tvm.transform.PassContext(opt_level=3):\n lib = relay.build(mod, target=target, params=params)\n\ndev = tvm.device(str(target), 0)\nmodule = graph_executor.GraphModule(lib[\"default\"](dev))\n\n######################################################################\n# Execute on the TVM Runtime\n# --------------------------\n# Now that we've compiled the model, we can use the TVM runtime to make\n# predictions with it. To use TVM to run the model and make predictions, we\n# need two things:\n#\n# - The compiled model, which we just produced.\n# - Valid input to the model to make predictions on.\n\ndtype = \"float32\"\nmodule.set_input(input_name, img_data)\nmodule.run()\noutput_shape = (1, 1000)\ntvm_output = module.get_output(0, tvm.nd.empty(output_shape)).numpy()\n\n################################################################################\n# Collect Basic Performance Data\n# ------------------------------\n# We want to collect some basic performance data associated with this\n# unoptimized model and compare it to a tuned model later. To help account for\n# CPU noise, we run the computation in multiple batches in multiple\n# repetitions, then gather some basis statistics on the mean, median, and\n# standard deviation.\nimport timeit\n\ntiming_number = 10\ntiming_repeat = 10\nunoptimized = (\n np.array(timeit.Timer(lambda: module.run()).repeat(repeat=timing_repeat, number=timing_number))\n * 1000\n / timing_number\n)\nunoptimized = {\n \"mean\": np.mean(unoptimized),\n \"median\": np.median(unoptimized),\n \"std\": np.std(unoptimized),\n}\n\nprint(unoptimized)\n\n################################################################################\n# Postprocess the output\n# ----------------------\n#\n# As previously mentioned, each model will have its own particular way of\n# providing output tensors.\n#\n# In our case, we need to run some post-processing to render the outputs from\n# ResNet-50 v2 into a more human-readable form, using the lookup-table provided\n# for the model.\n\nfrom scipy.special import softmax\n\n# Download a list of labels\nlabels_url = \"https://s3.amazonaws.com/onnx-model-zoo/synset.txt\"\nlabels_path = download_testdata(labels_url, \"synset.txt\", module=\"data\")\n\nwith open(labels_path, \"r\") as f:\n labels = [l.rstrip() for l in f]\n\n# Open the output and read the output tensor\nscores = softmax(tvm_output)\nscores = np.squeeze(scores)\nranks = np.argsort(scores)[::-1]\nfor rank in ranks[0:5]:\n print(\"class='%s' with probability=%f\" % (labels[rank], scores[rank]))\n\n################################################################################\n# This should produce the following output:\n#\n# .. 
code-block:: bash\n#\n# # class='n02123045 tabby, tabby cat' with probability=0.610553\n# # class='n02123159 tiger cat' with probability=0.367179\n# # class='n02124075 Egyptian cat' with probability=0.019365\n# # class='n02129604 tiger, Panthera tigris' with probability=0.001273\n# # class='n04040759 radiator' with probability=0.000261\n\n################################################################################\n# Tune the model\n# --------------\n# The previous model was compiled to work on the TVM runtime, but did not\n# include any platform specific optimization. In this section, we will show you\n# how to build an optimized model using TVM to target your working platform.\n#\n# In some cases, we might not get the expected performance when running\n# inferences using our compiled module. In cases like this, we can make use of\n# the auto-tuner, to find a better configuration for our model and get a boost\n# in performance. Tuning in TVM refers to the process by which a model is\n# optimized to run faster on a given target. This differs from training or\n# fine-tuning in that it does not affect the accuracy of the model, but only\n# the runtime performance. As part of the tuning process, TVM will try running\n# many different operator implementation variants to see which perform best.\n# The results of these runs are stored in a tuning records file.\n#\n# In the simplest form, tuning requires you to provide three things:\n#\n# - the target specification of the device you intend to run this model on\n# - the path to an output file in which the tuning records will be stored\n# - a path to the model to be tuned.\n#\n\nimport tvm.auto_scheduler as auto_scheduler\nfrom tvm.autotvm.tuner import XGBTuner\nfrom tvm import autotvm\n\n################################################################################\n# Set up some basic parameters for the runner. The runner takes compiled code\n# that is generated with a specific set of parameters and measures the\n# performance of it. ``number`` specifies the number of different\n# configurations that we will test, while ``repeat`` specifies how many\n# measurements we will take of each configuration. ``min_repeat_ms`` is a value\n# that specifies how long need to run configuration test. If the number of\n# repeats falls under this time, it will be increased. This option is necessary\n# for accurate tuning on GPUs, and is not required for CPU tuning. Setting this\n# value to 0 disables it. The ``timeout`` places an upper limit on how long to\n# run training code for each tested configuration.\n\nnumber = 10\nrepeat = 1\nmin_repeat_ms = 0 # since we're tuning on a CPU, can be set to 0\ntimeout = 10 # in seconds\n\n# create a TVM runner\nrunner = autotvm.LocalRunner(\n number=number,\n repeat=repeat,\n timeout=timeout,\n min_repeat_ms=min_repeat_ms,\n enable_cpu_cache_flush=True,\n)\n\n################################################################################\n# Create a simple structure for holding tuning options. We use an XGBoost\n# algorithim for guiding the search. For a production job, you will want to set\n# the number of trials to be larger than the value of 10 used here. For CPU we\n# recommend 1500, for GPU 3000-4000. The number of trials required can depend\n# on the particular model and processor, so it's worth spending some time\n# evaluating performance across a range of values to find the best balance\n# between tuning time and model optimization. 
Because running tuning is time\n# intensive we set number of trials to 10, but do not recommend a value this\n# small. The ``early_stopping`` parameter is the minimum number of trails to\n# run before a condition that stops the search early can be applied. The\n# measure option indicates where trial code will be built, and where it will be\n# run. In this case, we're using the ``LocalRunner`` we just created and a\n# ``LocalBuilder``. The ``tuning_records`` option specifies a file to write\n# the tuning data to.\n\ntuning_option = {\n \"tuner\": \"xgb\",\n \"trials\": 10,\n \"early_stopping\": 100,\n \"measure_option\": autotvm.measure_option(\n builder=autotvm.LocalBuilder(build_func=\"default\"), runner=runner\n ),\n \"tuning_records\": \"resnet-50-v2-autotuning.json\",\n}\n\n################################################################################\n# .. admonition:: Defining the Tuning Search Algorithm\n#\n# By default this search is guided using an `XGBoost Grid` algorithm.\n# Depending on your model complexity and amount of time available, you might\n# want to choose a different algorithm.\n\n\n################################################################################\n# .. admonition:: Setting Tuning Parameters\n#\n# In this example, in the interest of time, we set the number of trials and\n# early stopping to 10. You will likely see more performance improvements if\n# you set these values to be higher but this comes at the expense of time\n# spent tuning. The number of trials required for convergence will vary\n# depending on the specifics of the model and the target platform.\n\n# begin by extracting the tasks from the onnx model\ntasks = autotvm.task.extract_from_program(mod[\"main\"], target=target, params=params)\n\n# Tune the extracted tasks sequentially.\nfor i, task in enumerate(tasks):\n prefix = \"[Task %2d/%2d] \" % (i + 1, len(tasks))\n tuner_obj = XGBTuner(task, loss_type=\"rank\")\n tuner_obj.tune(\n n_trial=min(tuning_option[\"trials\"], len(task.config_space)),\n early_stopping=tuning_option[\"early_stopping\"],\n measure_option=tuning_option[\"measure_option\"],\n callbacks=[\n autotvm.callback.progress_bar(tuning_option[\"trials\"], prefix=prefix),\n autotvm.callback.log_to_file(tuning_option[\"tuning_records\"]),\n ],\n )\n\n################################################################################\n# The output from this tuning process will look something like this:\n#\n# .. 
code-block:: bash\n#\n# # [Task 1/24] Current/Best: 10.71/ 21.08 GFLOPS | Progress: (60/1000) | 111.77 s Done.\n# # [Task 1/24] Current/Best: 9.32/ 24.18 GFLOPS | Progress: (192/1000) | 365.02 s Done.\n# # [Task 2/24] Current/Best: 22.39/ 177.59 GFLOPS | Progress: (960/1000) | 976.17 s Done.\n# # [Task 3/24] Current/Best: 32.03/ 153.34 GFLOPS | Progress: (800/1000) | 776.84 s Done.\n# # [Task 4/24] Current/Best: 11.96/ 156.49 GFLOPS | Progress: (960/1000) | 632.26 s Done.\n# # [Task 5/24] Current/Best: 23.75/ 130.78 GFLOPS | Progress: (800/1000) | 739.29 s Done.\n# # [Task 6/24] Current/Best: 38.29/ 198.31 GFLOPS | Progress: (1000/1000) | 624.51 s Done.\n# # [Task 7/24] Current/Best: 4.31/ 210.78 GFLOPS | Progress: (1000/1000) | 701.03 s Done.\n# # [Task 8/24] Current/Best: 50.25/ 185.35 GFLOPS | Progress: (972/1000) | 538.55 s Done.\n# # [Task 9/24] Current/Best: 50.19/ 194.42 GFLOPS | Progress: (1000/1000) | 487.30 s Done.\n# # [Task 10/24] Current/Best: 12.90/ 172.60 GFLOPS | Progress: (972/1000) | 607.32 s Done.\n# # [Task 11/24] Current/Best: 62.71/ 203.46 GFLOPS | Progress: (1000/1000) | 581.92 s Done.\n# # [Task 12/24] Current/Best: 36.79/ 224.71 GFLOPS | Progress: (1000/1000) | 675.13 s Done.\n# # [Task 13/24] Current/Best: 7.76/ 219.72 GFLOPS | Progress: (1000/1000) | 519.06 s Done.\n# # [Task 14/24] Current/Best: 12.26/ 202.42 GFLOPS | Progress: (1000/1000) | 514.30 s Done.\n# # [Task 15/24] Current/Best: 31.59/ 197.61 GFLOPS | Progress: (1000/1000) | 558.54 s Done.\n# # [Task 16/24] Current/Best: 31.63/ 206.08 GFLOPS | Progress: (1000/1000) | 708.36 s Done.\n# # [Task 17/24] Current/Best: 41.18/ 204.45 GFLOPS | Progress: (1000/1000) | 736.08 s Done.\n# # [Task 18/24] Current/Best: 15.85/ 222.38 GFLOPS | Progress: (980/1000) | 516.73 s Done.\n# # [Task 19/24] Current/Best: 15.78/ 203.41 GFLOPS | Progress: (1000/1000) | 587.13 s Done.\n# # [Task 20/24] Current/Best: 30.47/ 205.92 GFLOPS | Progress: (980/1000) | 471.00 s Done.\n# # [Task 21/24] Current/Best: 46.91/ 227.99 GFLOPS | Progress: (308/1000) | 219.18 s Done.\n# # [Task 22/24] Current/Best: 13.33/ 207.66 GFLOPS | Progress: (1000/1000) | 761.74 s Done.\n# # [Task 23/24] Current/Best: 53.29/ 192.98 GFLOPS | Progress: (1000/1000) | 799.90 s Done.\n# # [Task 24/24] Current/Best: 25.03/ 146.14 GFLOPS | Progress: (1000/1000) | 1112.55 s Done.\n\n################################################################################\n# Compiling an Optimized Model with Tuning Data\n# ----------------------------------------------\n#\n# As an output of the tuning process above, we obtained the tuning records\n# stored in ``resnet-50-v2-autotuning.json``. 
The compiler will use the results to\n# generate high performance code for the model on your specified target.\n#\n# Now that tuning data for the model has been collected, we can re-compile the\n# model using optimized operators to speed up our computations.\n\nwith autotvm.apply_history_best(tuning_option[\"tuning_records\"]):\n with tvm.transform.PassContext(opt_level=3, config={}):\n lib = relay.build(mod, target=target, params=params)\n\ndev = tvm.device(str(target), 0)\nmodule = graph_executor.GraphModule(lib[\"default\"](dev))\n\n################################################################################\n# Verify that the optimized model runs and produces the same results:\n\ndtype = \"float32\"\nmodule.set_input(input_name, img_data)\nmodule.run()\noutput_shape = (1, 1000)\ntvm_output = module.get_output(0, tvm.nd.empty(output_shape)).numpy()\n\nscores = softmax(tvm_output)\nscores = np.squeeze(scores)\nranks = np.argsort(scores)[::-1]\nfor rank in ranks[0:5]:\n print(\"class='%s' with probability=%f\" % (labels[rank], scores[rank]))\n\n################################################################################\n# Verifying that the predictions are the same:\n#\n# .. code-block:: bash\n#\n# # class='n02123045 tabby, tabby cat' with probability=0.610550\n# # class='n02123159 tiger cat' with probability=0.367181\n# # class='n02124075 Egyptian cat' with probability=0.019365\n# # class='n02129604 tiger, Panthera tigris' with probability=0.001273\n# # class='n04040759 radiator' with probability=0.000261\n\n################################################################################\n# Comparing the Tuned and Untuned Models\n# --------------------------------------\n# We want to collect some basic performance data associated with this optimized\n# model to compare it to the unoptimized model. Depending on your underlying\n# hardware, number of iterations, and other factors, you should see a performance\n# improvement in comparing the optimized model to the unoptimized model.\n\nimport timeit\n\ntiming_number = 10\ntiming_repeat = 10\noptimized = (\n np.array(timeit.Timer(lambda: module.run()).repeat(repeat=timing_repeat, number=timing_number))\n * 1000\n / timing_number\n)\noptimized = {\"mean\": np.mean(optimized), \"median\": np.median(optimized), \"std\": np.std(optimized)}\n\n\nprint(\"optimized: %s\" % (optimized))\nprint(\"unoptimized: %s\" % (unoptimized))\n\n################################################################################\n# Final Remarks\n# -------------\n#\n# In this tutorial, we gave a short example of how to use the TVM Python API\n# to compile, run, and tune a model. We also discussed the need for pre and\n# post-processing of inputs and outputs. After the tuning process, we\n# demonstrated how to compare the performance of the unoptimized and optimize\n# models.\n#\n# Here we presented a simple example using ResNet-50 v2 locally. However, TVM\n# supports many more features including cross-compilation, remote execution and\n# profiling/benchmarking.\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Arm(R) Ethos(TM)-N integration conv2d tests\"\"\"\n\nimport numpy as np\nimport pytest\nimport math\nimport tvm\nfrom tvm import relay\nfrom tvm.testing import requires_ethosn\nfrom . import infrastructure as tei\n\n\ndef _get_same_padding(data, kernel, dilation, stride):\n dilated_kernel_h = dilation[0] * (kernel[0] - 1) + 1\n dilated_kernel_w = dilation[1] * (kernel[1] - 1) + 1\n out = int(math.ceil(float(data[0]) / float(stride[0])))\n pad = max(0, (out - 1) * stride[0] + dilated_kernel_h - data[0])\n pad_top = pad // 2\n pad_bottom = pad - pad_top\n\n out = int(math.ceil(float(data[1]) / float(stride[1])))\n pad = max(0, (out - 1) * stride[1] + dilated_kernel_w - data[1])\n pad_left = pad // 2\n pad_right = pad - pad_left\n return [pad_top, pad_left, pad_bottom, pad_right]\n\n\ndef _get_model(\n shape,\n kernel_h,\n kernel_w,\n input_zp,\n input_sc,\n kernel_zp,\n kernel_sc,\n output_zp,\n output_sc,\n pad,\n strides,\n dilation,\n groups,\n dtype,\n out_channels,\n weight_format,\n):\n \"\"\"Return a model and any parameters it may have\"\"\"\n a = relay.var(\"a\", shape=shape, dtype=dtype)\n if pad == \"op\" or pad == \"both\":\n p = _get_same_padding((shape[1], shape[2]), (kernel_h, kernel_w), dilation, strides)\n a = relay.nn.pad(\n a,\n pad_width=[(0, 0), (p[0], p[2]), (p[1], p[3]), (0, 0)],\n pad_value=input_zp,\n pad_mode=\"constant\",\n )\n shape = (shape[0], shape[1] + p[0] + p[2], shape[2] + p[1] + p[3], shape[3])\n\n p = _get_same_padding((shape[1], shape[2]), (kernel_h, kernel_w), dilation, strides)\n if weight_format == \"HWIO\":\n weight_shape = (kernel_h, kernel_w, shape[3] // groups, out_channels)\n else:\n weight_shape = (kernel_h, kernel_w, out_channels, 1)\n w = tvm.nd.array(\n np.random.randint(\n np.iinfo(dtype).min, high=np.iinfo(dtype).max + 1, size=weight_shape, dtype=dtype\n )\n )\n weights = relay.const(w, dtype)\n conv = relay.qnn.op.conv2d(\n a,\n weights,\n input_zero_point=relay.const(input_zp, \"int32\"),\n kernel_zero_point=relay.const(kernel_zp, \"int32\"),\n input_scale=relay.const(input_sc, \"float32\"),\n kernel_scale=relay.const(kernel_sc, \"float32\"),\n kernel_size=(kernel_h, kernel_w),\n data_layout=\"NHWC\",\n kernel_layout=weight_format,\n dilation=dilation,\n strides=strides,\n groups=groups,\n channels=out_channels,\n padding=p if pad == \"attr\" or pad == \"both\" else (0, 0, 0, 0),\n out_dtype=\"int32\",\n )\n b = tvm.nd.array(\n np.random.randint(\n np.iinfo(dtype).min, high=np.iinfo(dtype).max + 1, size=(out_channels,), dtype=\"int32\"\n )\n )\n biasc = relay.const(b, \"int32\")\n bias = relay.nn.bias_add(conv, biasc, axis=3)\n if isinstance(kernel_sc, tvm.runtime.ndarray.NDArray):\n req_input_sc = [sc * input_sc for sc in kernel_sc.numpy()]\n 
else:\n req_input_sc = input_sc * kernel_sc\n req = relay.qnn.op.requantize(\n bias,\n relay.const(req_input_sc, \"float32\"), # input zero scale\n relay.const(0, \"int32\"), # input zero point\n relay.const(output_sc, \"float32\"), # output zero scale\n relay.const(output_zp, \"int32\"), # output zero point\n out_dtype=dtype,\n )\n params = {\"w\": w, \"b\": b}\n return req, params\n\n\n@requires_ethosn\[email protected](\"depthwise\", [False, True])\[email protected](\"dtype\", [\"uint8\", \"int8\"])\ndef test_conv2d(dtype, depthwise):\n trials = [\n [(1, 17, 20, 26), 4, 3, 1, \"attr\", (2, 2), (1, 1), False],\n [(1, 30, 27, 30), 5, 5, 3, \"none\", (1, 1), (1, 1), False],\n [(1, 30, 27, 30), 5, 5, 3, \"none\", (1, 1), (1, 1), dtype == \"int8\"],\n [(1, 14, 28, 11), 6, 2, 2, \"op\", (2, 2), (1, 1), False],\n [(1, 9, 20, 30), 7, 1, 5, \"none\", (1, 1), (1, 1), False],\n [(1, 21, 21, 22), 8, 5, 1, \"attr\", (2, 2), (1, 1), False],\n [(1, 21, 21, 22), 8, 5, 1, \"attr\", (2, 2), (1, 1), dtype == \"int8\"],\n [(1, 21, 25, 29), 9, 2, 5, \"op\", (1, 1), (1, 1), False],\n [(1, 21, 25, 29), 9, 2, 5, \"op\", (1, 1), (1, 1), dtype == \"int8\"],\n [(1, 31, 28, 15), 10, 1, 2, \"attr\", (2, 2), (1, 1), False],\n [(1, 21, 21, 8), 11, 3, 3, \"none\", (1, 1), (1, 1), False],\n [(1, 5, 11, 6), 12, 5, 2, \"op\", (2, 2), (1, 1), False],\n [(1, 12, 7, 18), 13, 1, 3, \"op\", (1, 1), (1, 1), False],\n [(1, 24, 6, 26), 14, 3, 5, \"none\", (2, 2), (1, 1), False],\n [(1, 19, 24, 16), 15, 2, 1, \"attr\", (1, 1), (1, 1), False],\n ]\n\n np.random.seed(0)\n for shape, out_channels, kernel_h, kernel_w, pad, stride, dilation, qnn_per_channel in trials:\n if depthwise:\n out_channels = shape[3]\n groups = out_channels\n kernel_w = kernel_h\n weight_format = \"HWOI\"\n stride = (1, 1) if kernel_w == 1 else (2, 2)\n else:\n groups = 1\n weight_format = \"HWIO\"\n\n outputs = []\n inputs = {\n \"a\": tvm.nd.array(\n np.random.randint(\n np.iinfo(dtype).min,\n np.iinfo(dtype).max + 1,\n size=shape,\n dtype=dtype,\n )\n ),\n }\n input_zp = np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max)\n input_sc = np.random.random() * 2\n if qnn_per_channel:\n kernel_sc = tvm.nd.array(\n np.random.uniform(low=0, high=2, size=(out_channels,)).astype(np.float32)\n )\n else:\n kernel_sc = np.random.random() * 2\n kernel_zp = (\n 0 if dtype == \"int8\" else np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max)\n )\n output_zp, output_sc = tei.get_conv2d_qnn_params(\n dtype, input_zp, input_sc, kernel_zp, kernel_sc, kernel_h, kernel_w, shape[3]\n )\n model, params = _get_model(\n shape,\n kernel_h,\n kernel_w,\n input_zp,\n input_sc,\n kernel_zp,\n kernel_sc,\n output_zp,\n output_sc,\n pad,\n stride,\n dilation,\n groups,\n dtype,\n out_channels,\n weight_format,\n )\n for npu in [False, True]:\n mod = tei.make_module(model, params)\n outputs.append(tei.build_and_run(mod, inputs, 1, params, npu=npu))\n\n tei.verify(outputs, dtype, 1)\n\n\n@requires_ethosn\ndef test_conv2d_failure():\n lb = \"2.328306e-10\" if tei.get_ethosn_api_version() > 2102 else \"0\"\n trials = [\n (\n (1, 4, 4, 4),\n 1,\n 1,\n 0,\n 1,\n 0,\n 1,\n 0,\n 1,\n \"none\",\n (1, 1),\n (1, 1),\n 1,\n \"uint8\",\n 8,\n \"HWIO\",\n f\"Overall scale (of the input * weights / output) should be in the range [{lb}, 1)\",\n ),\n (\n (1, 4, 4, 4),\n 2,\n 2,\n 0,\n 1,\n 0,\n 1,\n 0,\n 2,\n \"both\",\n (1, 1),\n (1, 1),\n 1,\n \"uint8\",\n 8,\n \"HWIO\",\n \"both op and attr padding exist, must be either op/attr only or no padding\",\n ),\n (\n (1, 4, 4, 4),\n 1,\n 1,\n 
0,\n 1,\n 0,\n 1,\n 0,\n 2,\n \"none\",\n (1, 1, 1),\n (1, 1),\n 1,\n \"uint8\",\n 8,\n \"HWIO\",\n \"stride size=3, stride size must = 2\",\n ),\n (\n (1, 4, 4, 4),\n 1,\n 1,\n 0,\n 1,\n 0,\n 1,\n 0,\n 2,\n \"none\",\n (1, 1),\n (2, 1),\n 1,\n \"uint8\",\n 8,\n \"HWIO\",\n \"dilation=[2, 1], dilation must = [1, 1]\",\n ),\n (\n (2, 4, 4, 4),\n 1,\n 1,\n 0,\n 1,\n 0,\n 1,\n 0,\n 2,\n \"none\",\n (1, 1),\n (1, 1),\n 1,\n \"uint8\",\n 8,\n \"HWIO\",\n \"batch size=2, batch size must = 1\",\n ),\n ]\n\n np.random.seed(0)\n for (\n shape,\n kernel_h,\n kernel_w,\n input_zp,\n input_sc,\n kernel_zp,\n kernel_sc,\n output_zp,\n output_sc,\n pad,\n stride,\n dilation,\n groups,\n dtype,\n out_channels,\n weight_format,\n err_msg,\n ) in trials:\n model, params = _get_model(\n shape,\n kernel_h,\n kernel_w,\n input_zp,\n input_sc,\n kernel_zp,\n kernel_sc,\n output_zp,\n output_sc,\n pad,\n stride,\n dilation,\n groups,\n dtype,\n out_channels,\n weight_format,\n )\n model = tei.make_ethosn_composite(model, \"ethos-n.qnn_conv2d\")\n mod = tei.make_ethosn_partition(model)\n tei.test_error(mod, {}, err_msg)\n",
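The _get_model helper above ends by requantizing the bias-added int32 accumulator from the combined scale input_sc * kernel_sc down to output_sc with zero point output_zp. A minimal numpy sketch of that affine requantization step, assuming the standard QNN quantization model (the real op's rounding and saturation details may differ):

import numpy as np

def requantize(acc, input_sc, kernel_sc, output_sc, output_zp, dtype="uint8"):
    # acc: int32 accumulator of the quantized convolution, bias already added.
    # Rescale by (input_sc * kernel_sc) / output_sc, shift by the output zero point,
    # then saturate to the output dtype range.
    scale = (input_sc * kernel_sc) / output_sc
    rescaled = np.round(acc.astype(np.float64) * scale) + output_zp
    info = np.iinfo(dtype)
    return np.clip(rescaled, info.min, info.max).astype(dtype)

acc = np.array([-40, 0, 37, 125], dtype=np.int32)
print(requantize(acc, input_sc=0.5, kernel_sc=0.25, output_sc=0.125, output_zp=3))
# -> [  0   3  40 128]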
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=unused-argument\n\"\"\"Tensor Expressions for operations that will be inlined\"\"\"\nimport numpy as np # type: ignore\n\nfrom tvm.contrib.ethosu.cascader import TESubgraph, InlinePart, Propagator, register_matcher\n\n\nINLINE_OPS = {\"T_reshape\", \"T_strided_slice\"}\n\n\n@register_matcher\ndef match_ethosu_inline(output_tensor, device_config):\n \"\"\"Match a Tensor Expression corresponding to an operator that will be inlined.\n\n If the Tensor Expression matches, an InlinePart will be created that models the\n matched Tensor Expression. Otherwise, None will be returned. This matcher is\n naive and assumes nothing about the compute of the Tensor Expression. Therefore,\n the resulting InlinePart will have full-tensor dependencies (i.e. each output\n element depends on every input element).\n\n Parameters\n ----------\n output_tensor : tvm.te.Tensor\n The tensor to attempt to match with.\n device_config : EthosuDeviceConfig\n Target device configuration\n\n Returns\n -------\n Union[None, InlinePart]\n The created InlinePart if there was a match, otherwise None.\n\n \"\"\"\n if output_tensor.op.name not in INLINE_OPS:\n return None\n\n input_tensors = output_tensor.op.input_tensors\n propagators = []\n output_dims = len(output_tensor.shape)\n for input_tensor in input_tensors:\n input_dims = len(input_tensor.shape)\n transform_matrix = np.zeros((input_dims + 1, output_dims + 1))\n for i, axis in enumerate(input_tensor.shape):\n transform_matrix[i, output_dims] = int(axis)\n transform_matrix[input_dims, output_dims] = 1\n offset_vector = np.zeros(input_dims, dtype=\"int64\")\n propagators.append(\n Propagator(\n transform_matrix.tolist(),\n offset_vector.tolist(),\n )\n )\n\n subgraph = TESubgraph(input_tensors, output_tensor)\n return InlinePart(\n subgraph,\n propagators,\n )\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"iOS RPC Server tests.\"\"\"\n# pylint: disable=invalid-name, no-value-for-parameter, missing-function-docstring, import-error\nimport sys\nimport multiprocessing\nimport pytest\nimport numpy as np\n\nimport tvm.testing\nimport tvm.relay.testing\nfrom tvm import te\nfrom tvm import rpc\nfrom tvm import relay, auto_scheduler\nfrom tvm.contrib import utils, xcode, graph_executor\nfrom tvm.autotvm.measure import request_remote\nfrom tvm.auto_scheduler.measure_record import load_records\nfrom tvm.auto_scheduler.measure import MeasureErrorNo\nfrom tvm.auto_scheduler.utils import call_func_with_timeout\nfrom tvm.contrib.popen_pool import PopenWorker, StatusKind\nfrom tvm.rpc import tracker, proxy, server_ios_launcher\n\n\nHOST_URL = \"0.0.0.0\"\nHOST_PORT = 9190\nDEVICE_KEY = \"ios_mobile_device\"\n\n\nTEMPORARY_DIRECTORY = utils.tempdir()\nARCH = \"x86_64\"\nSDK = \"iphonesimulator\"\nDSO_NAME = \"lib.dylib\"\nDTYPE = \"float32\"\n\n\nnp.random.seed(0)\n\n\nios_rpc_bundle_description_required = pytest.mark.skipif(\n not server_ios_launcher.ServerIOSLauncher.is_compatible_environment(),\n reason=\"To run this test, you need to set environment variables required in ServerIOSLauncher.\",\n)\n\n\[email protected](scope=\"session\", autouse=True)\ndef setup_and_teardown_actions():\n \"\"\"Setup and teardown actions for pytest.\"\"\"\n\n # No setup actions\n yield\n # Teardown actions:\n server_ios_launcher.ServerIOSLauncher.shutdown_booted_devices()\n\n\ndef setup_rpc_standalone_configuration(f):\n \"\"\"\n Host -- RPC server\n \"\"\"\n\n def wrapper():\n with server_ios_launcher.ServerIOSContextManager(\n mode=server_ios_launcher.RPCServerMode.standalone.value,\n host=HOST_URL,\n port=HOST_PORT,\n key=DEVICE_KEY,\n ) as ios_server:\n f(host=ios_server.host, port=ios_server.port)\n\n return wrapper\n\n\ndef setup_rpc_proxy_configuration(f):\n \"\"\"\n Host -- Proxy -- RPC server\n \"\"\"\n\n def wrapper():\n proxy_server = proxy.Proxy(host=HOST_URL, port=HOST_PORT)\n with server_ios_launcher.ServerIOSContextManager(\n mode=server_ios_launcher.RPCServerMode.proxy.value,\n host=proxy_server.host,\n port=proxy_server.port,\n key=DEVICE_KEY,\n ):\n f(host=proxy_server.host, port=proxy_server.port)\n proxy_server.terminate()\n\n return wrapper\n\n\ndef setup_rpc_tracker_configuration(f):\n \"\"\"\n tracker\n / \\\n Host -- RPC server\n \"\"\"\n\n def wrapper():\n tracker_server = tracker.Tracker(host=HOST_URL, port=HOST_PORT, silent=True)\n with server_ios_launcher.ServerIOSContextManager(\n mode=server_ios_launcher.RPCServerMode.tracker.value,\n host=tracker_server.host,\n port=tracker_server.port,\n key=DEVICE_KEY,\n ):\n f(host=tracker_server.host, port=tracker_server.port)\n tracker_server.terminate()\n\n 
return wrapper\n\n\ndef setup_rpc_tracker_via_proxy_configuration(f):\n \"\"\"\n tracker\n / \\\n Host -- Proxy -- RPC server\n \"\"\"\n\n def wrapper():\n tracker_server = tracker.Tracker(host=HOST_URL, port=HOST_PORT, silent=True)\n proxy_server_tracker = proxy.Proxy(\n host=HOST_URL, port=8888, tracker_addr=(tracker_server.host, tracker_server.port)\n )\n with server_ios_launcher.ServerIOSContextManager(\n mode=server_ios_launcher.RPCServerMode.proxy.value,\n host=proxy_server_tracker.host,\n port=proxy_server_tracker.port,\n key=DEVICE_KEY,\n ):\n f(host=tracker_server.host, port=tracker_server.port)\n proxy_server_tracker.terminate()\n tracker_server.terminate()\n\n return wrapper\n\n\ndef wrapper_for_call_function_with_timeout(timeout, func, args=(), kwargs=None):\n \"\"\"Wrapper for call_func_with_timeout.\"\"\"\n\n def wrapper(*_args, **_kwargs):\n \"\"\"\n This wrapper is needed because the cloudpicle\n cannot serialize objects that contain pointers (RPCSession)\n \"\"\"\n func(*_args, **_kwargs)\n return StatusKind.COMPLETE\n\n worker = PopenWorker()\n ret = call_func_with_timeout(worker, timeout=timeout, func=wrapper, args=args, kwargs=kwargs)\n if isinstance(ret, Exception):\n raise ret\n return ret\n\n\ndef try_create_remote_session(session_factory, args=(), kwargs=None):\n \"\"\"Deadlock-safe RPC Session creation.\"\"\"\n\n try:\n successful_attempt = True\n results = []\n for _ in range(2):\n ret = wrapper_for_call_function_with_timeout(\n timeout=10, func=session_factory, args=args, kwargs=kwargs\n )\n results.append(ret)\n if not np.all(np.array(results) == StatusKind.COMPLETE):\n raise ValueError(\"One or more sessions ended incorrectly.\")\n except Exception as e: # pylint: disable=broad-except\n successful_attempt = False\n print(e)\n return successful_attempt\n\n\ndef ios_create_dylib(output, objects, **kwargs): # pylint: disable=unused-argument\n xcode.create_dylib(output, objects, arch=ARCH, sdk=SDK)\n\n\nios_create_dylib.output_format = \"dylib\"\n\n\ndef export_lib(lib):\n \"\"\"Export lib to temporary directory.\"\"\"\n\n path_dso = TEMPORARY_DIRECTORY.relpath(DSO_NAME)\n lib.export_library(path_dso, fcompile=ios_create_dylib)\n return path_dso\n\n\ndef get_add_relay_module(a_numpy, b_numpy):\n \"\"\"Get simple relay module that add two tensors.\"\"\"\n\n a = relay.var(\"a\", shape=a_numpy.shape, dtype=DTYPE)\n b = relay.var(\"b\", shape=b_numpy.shape, dtype=DTYPE)\n params = {}\n out = tvm.IRModule.from_expr(relay.add(a, b))\n return out, params\n\n\ndef get_add_module(target):\n \"\"\"Get simple module that add two tensors.\"\"\"\n\n n = te.var(\"n\")\n A = te.placeholder((n,), name=\"A\")\n B = te.placeholder((n,), name=\"B\")\n C = te.compute(A.shape, lambda i: A[i] + B[i], name=\"C\")\n s = te.create_schedule(C.op)\n return tvm.build(s, [A, B, C], target=target, target_host=target, name=\"simple_add\")\n\n\[email protected]()\n@ios_rpc_bundle_description_required\n@setup_rpc_standalone_configuration\ndef test_rpc_standalone(host, port):\n status_ok = try_create_remote_session(session_factory=rpc.connect, args=(host, port))\n assert status_ok\n\n\[email protected]()\n@ios_rpc_bundle_description_required\n@setup_rpc_proxy_configuration\ndef test_rpc_proxy(host, port):\n status_ok = try_create_remote_session(\n session_factory=rpc.connect, args=(host, port, DEVICE_KEY)\n )\n assert status_ok\n\n\[email protected]()\n@ios_rpc_bundle_description_required\n@setup_rpc_tracker_configuration\ndef test_rpc_tracker(host, port):\n status_ok = try_create_remote_session(\n 
session_factory=request_remote, args=(DEVICE_KEY, host, port)\n )\n assert status_ok\n\n\[email protected]()\n@ios_rpc_bundle_description_required\n@setup_rpc_tracker_via_proxy_configuration\ndef test_rpc_tracker_via_proxy(host, port):\n status_ok = try_create_remote_session(\n session_factory=request_remote, args=(DEVICE_KEY, host, port)\n )\n assert status_ok\n\n\[email protected](depends=[\"test_rpc_standalone\"])\n@ios_rpc_bundle_description_required\n@setup_rpc_standalone_configuration\ndef test_can_call_remote_function_with_rpc_standalone(host, port):\n remote_session = rpc.connect(host, port)\n f = remote_session.get_function(\"runtime.GetFFIString\")\n assert f(\"hello\") == \"hello\"\n\n\[email protected](depends=[\"test_rpc_proxy\"])\n@ios_rpc_bundle_description_required\n@setup_rpc_proxy_configuration\ndef test_can_call_remote_function_with_rpc_proxy(host, port):\n remote_session = rpc.connect(host, port, key=DEVICE_KEY)\n f = remote_session.get_function(\"runtime.GetFFIString\")\n assert f(\"hello\") == \"hello\"\n\n\[email protected](depends=[\"test_rpc_tracker\"])\n@ios_rpc_bundle_description_required\n@setup_rpc_tracker_configuration\ndef test_can_call_remote_function_with_rpc_tracker(host, port):\n remote_session = request_remote(DEVICE_KEY, host, port)\n f = remote_session.get_function(\"runtime.GetFFIString\")\n assert f(\"hello\") == \"hello\"\n\n\[email protected](depends=[\"test_rpc_tracker_via_proxy\"])\n@ios_rpc_bundle_description_required\n@setup_rpc_tracker_via_proxy_configuration\ndef test_can_call_remote_function_with_rpc_tracker_via_proxy(host, port):\n remote_session = request_remote(DEVICE_KEY, host, port)\n f = remote_session.get_function(\"runtime.GetFFIString\")\n assert f(\"hello\") == \"hello\"\n\n\[email protected](depends=[\"test_rpc_standalone\"])\n@ios_rpc_bundle_description_required\n@setup_rpc_standalone_configuration\ndef test_basic_functionality_of_rpc_session(host, port):\n remote_session = rpc.connect(host, port)\n device = remote_session.cpu(0)\n\n target = tvm.target.Target(target=f\"llvm -mtriple={ARCH}-apple-darwin\")\n lib = get_add_module(target)\n path_dso = export_lib(lib)\n\n # Check correct upload\n remote_session.upload(path_dso)\n\n # Check correct download\n downloaded_lib = remote_session.download(DSO_NAME)\n with open(path_dso, \"rb\") as source_lib_file:\n assert downloaded_lib == bytearray(\n source_lib_file.read()\n ), \"The downloaded module does not match the loaded module\"\n\n # Check correct remote computing\n lib = remote_session.load_module(DSO_NAME)\n n = 100\n a = tvm.nd.array(np.random.uniform(size=n).astype(DTYPE), device)\n b = tvm.nd.array(np.random.uniform(size=n).astype(DTYPE), device)\n c = tvm.nd.array(np.zeros(n, dtype=DTYPE), device)\n lib(a, b, c)\n tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())\n\n # Check correct remove\n remote_session.remove(DSO_NAME)\n\n\[email protected](depends=[\"test_rpc_standalone\"])\[email protected](reason=\"Not implemented functionality\")\n@ios_rpc_bundle_description_required\n@setup_rpc_standalone_configuration\ndef test_cleanup_workspace_after_session_end(host, port):\n # Arrange\n remote_session = rpc.connect(host, port)\n target = tvm.target.Target(target=f\"llvm -mtriple={ARCH}-apple-darwin\")\n lib = get_add_module(target)\n path_dso = export_lib(lib)\n remote_session.upload(path_dso)\n\n # Act\n del remote_session\n remote_session = rpc.connect(host, port)\n try:\n remote_session.download(DSO_NAME)\n status_ok = False\n except Exception as _: # pylint: 
disable=broad-except\n status_ok = True\n\n # Assert\n assert status_ok, \"Workspace not cleared after RPC Session termination.\"\n\n\[email protected](depends=[\"test_rpc_standalone\"])\n@ios_rpc_bundle_description_required\n@setup_rpc_standalone_configuration\ndef test_graph_executor_remote_run(host, port):\n remote_session = rpc.connect(host, port)\n target = tvm.target.Target(target=f\"llvm -mtriple={ARCH}-apple-darwin\")\n device = remote_session.cpu(0)\n\n size = 100\n a = np.random.uniform(size=size).astype(DTYPE)\n b = np.random.uniform(size=size).astype(DTYPE)\n mod, params = get_add_relay_module(a, b)\n with tvm.transform.PassContext(opt_level=3):\n lib = relay.build(mod, target=target, target_host=target, params=params)\n\n path_dso = export_lib(lib)\n remote_session.upload(path_dso)\n lib = remote_session.load_module(DSO_NAME)\n\n gen_module = graph_executor.GraphModule(lib[\"default\"](device))\n\n # Check set input\n gen_module.set_input(\"a\", tvm.nd.array(a))\n gen_module.set_input(\"b\", tvm.nd.array(b))\n tvm.testing.assert_allclose(gen_module.get_input(0).numpy(), a)\n tvm.testing.assert_allclose(gen_module.get_input(1).numpy(), b)\n\n # Check run\n gen_module.run()\n out = gen_module.get_output(0)\n tvm.testing.assert_allclose(out.numpy(), a + b)\n\n\[email protected](\n strict=False, reason=\"flaky test (see https://github.com/apache/tvm/issues/9824)\"\n)\[email protected](depends=[\"test_rpc_tracker\"])\n@ios_rpc_bundle_description_required\n@setup_rpc_tracker_configuration\ndef test_check_auto_schedule_tuning(host, port): # pylint: disable=too-many-locals\n log_file = TEMPORARY_DIRECTORY.relpath(\"ios_tuning_stat.log\")\n target = tvm.target.Target(target=f\"llvm -mtriple={ARCH}-apple-darwin\")\n mod, params = relay.testing.mlp.get_workload(batch_size=4, image_shape=(1, 4, 4))\n\n try:\n status_ok = True\n measure_runner = auto_scheduler.RPCRunner(\n DEVICE_KEY,\n host,\n port,\n min_repeat_ms=1,\n timeout=10,\n n_parallel=multiprocessing.cpu_count(),\n )\n builder = auto_scheduler.LocalBuilder(timeout=10, build_func=ios_create_dylib)\n tune_option = auto_scheduler.TuningOptions(\n builder=builder,\n num_measure_trials=2,\n num_measures_per_round=1,\n runner=measure_runner,\n measure_callbacks=[auto_scheduler.RecordToFile(log_file)],\n verbose=0,\n )\n\n tasks, task_weights = auto_scheduler.extract_tasks(mod[\"main\"], params, target)\n tasks, task_weights = tasks[:2], task_weights[:2]\n tuner = auto_scheduler.TaskScheduler(tasks, task_weights)\n tuner.tune(tune_option, search_policy=\"sketch.random\")\n\n # Check tuning log\n tuning_statistic = list(load_records(log_file))\n for _, measure_result in tuning_statistic:\n if measure_result.error_no != MeasureErrorNo.NO_ERROR:\n raise ValueError(\n f\"Error for MeasureResult. Error code: {measure_result.error_no},\"\n f\" for details see MeasureErrorNO.\"\n )\n\n except Exception as e: # pylint: disable=broad-except\n status_ok = False\n print(e)\n\n assert status_ok, \"Tuning failed, see logs.\"\n\n\nif __name__ == \"__main__\":\n sys.exit(pytest.main([__file__] + sys.argv[1:]))\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=invalid-name,unused-argument\n\"\"\"Tensor Expressions for depthwise convolutions\"\"\"\nfrom typing import Tuple, Union, List\nimport numpy as np\n\nfrom tvm import te\nfrom tvm.contrib.ethosu.cascader import TESubgraph, EthosuPart, Propagator, register_matcher\n\nfrom .dma import dma_ofm_compute, dma_ifm_compute\n\n\ndef depthwise_conv2d_compute(\n ifm: te.Tensor,\n weight: te.Tensor,\n scale_bias: te.Tensor,\n lut: te.Tensor,\n ifm_scale: float,\n ifm_zero_point: int,\n weight_zero_point: int,\n ofm_scale: float,\n ofm_zero_point: int,\n strides: Tuple[int, int],\n padding: Tuple[int, int, int, int],\n dilation: Union[Tuple[int, int], List[int]],\n activation: str,\n clip_min: int,\n clip_max: int,\n rounding_mode: str,\n upscale: str,\n ifm_layout: str,\n ofm_layout: str,\n ofm_dtype: str,\n) -> te.Tensor:\n \"\"\"A compute operator representing the capabilities of 2D convolution for the NPU.\n\n Parameters\n ----------\n ifm : te.Tensor\n The Input Feature Map tensor (IFM).\n weight : te.Tensor\n The weight tensor.\n scale_bias : te.Tensor\n The packed per-channel weight scale and bias tensor.\n lut : te.Tensor\n The look-up table of values to use if activation = \"LUT\".\n ifm_scale : float\n The quantization scale for the Input Feature Map tensor.\n ifm_zero_point : int\n The quantization zero point for the Input Feature Map tensor.\n weight_zero_point : int\n The quantization zero point for the weight tensor.\n ofm_scale : float\n The quantization scale for the Output Feature Map tensor.\n ofm_zero_point : int\n The quantization zero point for the Output Feature Map tensor.\n strides : tuple\n The 2 dimensional strides as (stride_height, stride_width).\n padding : tuple\n The 4 dimensional padding as (pad_top, pad_left, pad_bottom, pad_right).\n dilation : Union[int, tuple, list]\n The 2 dimensional dilation as (dilation_height, dilation_width).\n activation : str\n The activation function to use.\n \"NONE\" - no activation function.\n \"CLIP\" - clip the output between clip_min and clip_max.\n \"TANH\" - tanh activation function.\n \"SIGMOID\" - sigmoid activation function.\n \"LUT\" - use a look-up table to perform the activation function.\n clip_min : int\n The minimum clipping value if activation = \"CLIP\".\n clip_max : int\n The maximum clipping value if activation = \"CLIP\".\n rounding_mode : str\n The rounding mode to apply to the Output Feature Map tensor.\n \"TFL\" - Tensorflow Lite rounding scheme.\n \"TRUNCATE\" - Truncate towards zero.\n \"NATURAL\" - Round to nearest value, with x.5 rounded up towards +infinity.\n upscale : str\n The 2x2 upscaling mode to apply to the Input Feature Map tensor.\n \"NONE\" - no upscaling.\n \"NEAREST\" - upscale using nearest 
neighbour.\n \"ZEROS\" - upscale using zeros.\n ifm_layout : str\n The layout of the Input Feature Map tensor. Can be \"NHWC\" or \"NHCWB16\".\n ofm_layout : str\n The layout of the Output Feature Map tensor. Can be \"NHWC\" or \"NHCWB16\".\n ofm_dtype : str, optional\n The Output Feature Map tensor data type. Can be 'int8', 'uint8' or 'int16'.\n\n Returns\n -------\n te.Tensor\n The OFM tensor.\n\n \"\"\"\n assert ifm.shape[0] == 1, f\"Only batch size 1 is supported\"\n assert ifm_layout in {\"NHWC\", \"NHCWB16\"}\n assert ofm_layout in {\"NHWC\", \"NHCWB16\"}\n\n padding = [int(v) for v in padding]\n stride_h, stride_w = [int(v) for v in strides]\n dilation_h, dilation_w = [int(v) for v in dilation]\n channels, kernel_h, kernel_w, _ = [int(v) for v in weight.shape]\n\n # Compute operation for the IFM DMA pipeline\n dmaed_ifm = dma_ifm_compute(ifm, ifm_layout, ifm_zero_point, ifm_scale, channels, padding)\n\n # 2D Depthwise Convolution compute operation\n dilated_kernel_h = (kernel_h - 1) * dilation_h + 1\n dilated_kernel_w = (kernel_w - 1) * dilation_w + 1\n ofm_height = (dmaed_ifm.shape[1] - dilated_kernel_h) // stride_h + 1\n ofm_width = (dmaed_ifm.shape[2] - dilated_kernel_w) // stride_w + 1\n rh = te.reduce_axis((0, kernel_h), name=\"ry\")\n rw = te.reduce_axis((0, kernel_w), name=\"rx\")\n\n depthwise_conv2d_attrs = {\n \"op\": \"ethosu_depthwise_conv2d\",\n \"weight_zero_point\": weight_zero_point,\n \"activation\": activation,\n \"clip_min\": clip_min,\n \"clip_max\": clip_max,\n \"rounding_mode\": rounding_mode,\n \"upscale\": upscale,\n \"stride_h\": stride_h,\n \"stride_w\": stride_w,\n \"dilation_h\": dilation_h,\n \"dilation_w\": dilation_w,\n }\n\n has_lut = activation in (\"TANH\", \"LUT\", \"SIGMOID\")\n\n # This is a trick to insert the LUT tensor into the TE graph if LUT is present\n lut_expr = (lut[0] + lut[255]).astype(ifm.dtype) if has_lut else 0\n\n # Add the LUT tensor to the attributes to be able to later tell which tensor is the LUT\n if has_lut:\n depthwise_conv2d_attrs[\"lut\"] = lut\n\n depthwise = te.compute(\n (1, ofm_height, ofm_width, channels),\n lambda nn, hh, ww, cc: te.sum(\n (\n dmaed_ifm(\n nn, hh * stride_h + rh * dilation_h, ww * stride_w + rw * dilation_w, cc\n ).astype(ifm.dtype)\n * weight[cc, rh, rw, 0].astype(ifm.dtype)\n # This is a trick to load 10 elements of the scale_bias at once, not accurate maths\n + (scale_bias[cc, 0] * scale_bias[cc, 9] + lut_expr).astype(ifm.dtype)\n ).astype(ofm_dtype),\n axis=[rh, rw],\n ),\n name=\"ethosu_depthwise_conv2d\",\n attrs=depthwise_conv2d_attrs,\n )\n\n nhwc_to_nhcwb16 = [\n [1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 0, 0, 1 / 16, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 0, 0, 16],\n [0, 0, 0, 0, 1],\n ]\n nhcwb16_to_nhwc = [\n [1, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0],\n [0, 0, 16, 0, 1, -16],\n [0, 0, 0, 0, 0, 1],\n ]\n ifm_matrix = [\n [1, 0, 0, 0, 0],\n [0, stride_h, 0, 0, (dilated_kernel_h - stride_h)],\n [0, 0, stride_w, 0, (dilated_kernel_w - stride_w)],\n [0, 0, 0, 1, 0],\n [0, 0, 0, 0, 1],\n ]\n weights_matrix = [\n [0, 0, 0, 1, 0],\n [0, 0, 0, 0, kernel_h],\n [0, 0, 0, 0, kernel_w],\n [0, 0, 0, 0, 1],\n [0, 0, 0, 0, 1],\n ]\n bias_matrix = [\n [0, 0, 0, 1, 0],\n [0, 0, 0, 0, 10],\n [0, 0, 0, 0, 1],\n ]\n if ofm_layout == \"NHCWB16\":\n ifm_matrix = np.matmul(ifm_matrix, nhcwb16_to_nhwc).tolist()\n weights_matrix = np.matmul(weights_matrix, nhcwb16_to_nhwc).tolist()\n bias_matrix = np.matmul(bias_matrix, nhcwb16_to_nhwc).tolist()\n if ifm_layout == \"NHCWB16\":\n ifm_matrix = 
np.matmul(nhwc_to_nhcwb16, ifm_matrix).tolist()\n ifm_propagator = Propagator(\n ifm_matrix,\n [0, -padding[0], -padding[1], 0]\n if ifm_layout == \"NHWC\"\n else [0, -padding[0], 0, -padding[1], 0],\n )\n weights_propagator = Propagator(\n weights_matrix,\n [0, 0, 0, 0],\n )\n bias_propagator = Propagator(\n bias_matrix,\n [0, 0],\n )\n propagator_attrs = {\n \"ifm_propagator\": ifm_propagator,\n \"weights_propagator\": weights_propagator,\n \"bias_propagator\": bias_propagator,\n }\n\n # Compute operation for the OFM DMA pipeline\n return dma_ofm_compute(\n depthwise, ofm_layout, ofm_zero_point, ofm_scale, channels, attrs=propagator_attrs\n )\n\n\n@register_matcher\ndef match_ethosu_depthwise_conv2d(output_tensor, device_config):\n \"\"\"Match a Tensor Expression corresponding to an NPU Depthwise Conv2D.\n\n If the Tensor Expression matches, an EthosuPart will be created that models the\n matched Tensor Expression. Otherwise, None will be returned.\n\n Parameters\n ----------\n output_tensor : tvm.te.Tensor\n The tensor to attempt to match with.\n device_config : EthosuDeviceConfig\n Target device configuration.\n\n Returns\n -------\n Union[None, EthosuPart]\n The created EthosuPart if there was a match, otherwise None.\n\n \"\"\"\n write = output_tensor\n if write.op.name != \"ethosu_write\":\n return None\n convert_to_nhcwb16 = write.op.input_tensors[0]\n if convert_to_nhcwb16.op.name != \"ethosu_convert_to_nhcwb16\":\n return None\n depthwise2d = convert_to_nhcwb16.op.input_tensors[0]\n if depthwise2d.op.name != \"ethosu_depthwise_conv2d\":\n return None\n pad = depthwise2d.op.input_tensors[0]\n if pad.op.name != \"ethosu_pad\":\n return None\n upscale = pad.op.input_tensors[0]\n if upscale.op.name != \"ethosu_upscale\":\n return None\n convert_to_nhwc = upscale.op.input_tensors[0]\n if convert_to_nhwc.op.name != \"ethosu_convert_to_nhwc\":\n return None\n read = convert_to_nhwc.op.input_tensors[0]\n if read.op.name != \"ethosu_read\":\n return None\n\n input_tensors = [\n read.op.input_tensors[0],\n depthwise2d.op.input_tensors[1],\n depthwise2d.op.input_tensors[2],\n ]\n subgraph = TESubgraph(input_tensors, output_tensor)\n propagators = [\n write.op.attrs[\"ifm_propagator\"],\n write.op.attrs[\"weights_propagator\"],\n write.op.attrs[\"bias_propagator\"],\n ]\n ifm_dtype = input_tensors[0].dtype\n ofm_dtype = output_tensor.dtype\n\n ifm_channels = int(input_tensors[0].shape[3])\n ofm_channels, kernel_height, kernel_width = (int(axis) for axis in input_tensors[1].shape[0:3])\n\n subkernels = len(\n device_config.get_kernel_steps(depthwise2d.op.name, kernel_height, kernel_width, ifm_dtype)\n )\n\n output_layout = convert_to_nhcwb16.op.attrs[\"layout\"]\n input_layout = convert_to_nhwc.op.attrs[\"layout\"]\n output_quantum = device_config.get_output_quantum(output_layout)\n\n valid_block_configs = device_config.get_valid_block_configs(\n propagators[0],\n depthwise2d.op.attrs,\n output_tensor.shape,\n ofm_channels,\n ifm_channels,\n output_layout,\n input_layout,\n ifm_dtype,\n ofm_dtype,\n kernel_height,\n kernel_width,\n )\n\n return EthosuPart(\n subgraph,\n propagators,\n output_quantum,\n subkernels,\n valid_block_configs,\n 1,\n )\n",
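The ifm_matrix built above is the affine map from an OFM region to the IFM region it requires: in homogeneous form [n, h, w, c, 1], the height row [0, stride_h, 0, 0, dilated_kernel_h - stride_h] yields (h - 1) * stride_h + dilated_kernel_h, the usual receptive-field extent. A small worked example with made-up kernel and stride values:

import numpy as np

# Illustrative NHWC depthwise case: 3x3 kernel, stride 2, dilation 1.
stride_h = stride_w = 2
dilated_kernel_h = dilated_kernel_w = 3
ifm_matrix = np.array(
    [
        [1, 0, 0, 0, 0],
        [0, stride_h, 0, 0, dilated_kernel_h - stride_h],
        [0, 0, stride_w, 0, dilated_kernel_w - stride_w],
        [0, 0, 0, 1, 0],
        [0, 0, 0, 0, 1],
    ]
)

# An OFM stripe of shape (1, 4, 8, 16) in homogeneous form [n, h, w, c, 1]:
ofm_region = np.array([1, 4, 8, 16, 1])
ifm_region = ifm_matrix @ ofm_region
# height: (4 - 1) * 2 + 3 = 9, width: (8 - 1) * 2 + 3 = 17
print(ifm_region)  # [ 1  9 17 16  1]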
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Test code for space to batch\"\"\"\nimport numpy as np\nimport tvm\nfrom tvm import te\nfrom tvm import topi\nimport tvm.testing\nimport tvm.topi.testing\n\n\ndef verify_space_to_batch_nd(input_shape, block_shape, pad_before, pad_after, pad_value=0):\n out_shape = []\n out_shape.append(int((input_shape[0] * np.prod(block_shape))))\n for i in range(1, len(block_shape) + 1):\n pad = pad_before[i - 1] + pad_after[i - 1]\n out_shape.append(int((input_shape[i] + pad) // block_shape[i - 1]))\n for i in range(len(block_shape) + 1, len(input_shape)):\n out_shape.append(input_shape[i])\n\n A = te.placeholder(input_shape, name=\"A\", dtype=\"float32\")\n dtype = A.dtype\n a_np = np.random.uniform(size=input_shape).astype(dtype)\n\n B = topi.nn.space_to_batch_nd(A, block_shape, pad_before, pad_after, pad_value)\n\n b_np = tvm.topi.testing.space_to_batch_nd_python(\n a_np, block_shape, pad_before, pad_after, pad_value\n )\n\n def check_target(target, dev):\n print(\"Running on target: %s\" % target)\n with tvm.target.create(target):\n s = tvm.topi.testing.get_injective_schedule(target)(B)\n a = tvm.nd.array(a_np, dev)\n b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev)\n f = tvm.build(s, [A, B], target)\n f(a, b)\n tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3, atol=1e-3)\n\n for target, dev in tvm.testing.enabled_targets():\n check_target(target, dev)\n\n\[email protected]_gpu\ndef test_space_to_batch():\n # Without paddings\n verify_space_to_batch_nd([3, 3, 2, 1], [3], [0], [0])\n # With paddings\n verify_space_to_batch_nd([3, 3, 2, 1], [3], [1], [2])\n # Multiple spatial dims\n verify_space_to_batch_nd([3, 3, 4, 5, 2], [3, 4, 2], [1, 0, 3], [2, 0, 0])\n # No remaining dims\n verify_space_to_batch_nd([3, 3, 4, 5, 2], [3, 4, 2, 2], [1, 4, 0, 0], [2, 0, 1, 0])\n\n\nif __name__ == \"__main__\":\n test_space_to_batch()\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Common runtime ctypes.\"\"\"\n# pylint: disable=invalid-name\nimport ctypes\nimport json\nimport numpy as np\nfrom .base import _LIB, check_call\n\ntvm_shape_index_t = ctypes.c_int64\n\n\nclass ArgTypeCode(object):\n \"\"\"Type code used in API calls\"\"\"\n\n INT = 0\n UINT = 1\n FLOAT = 2\n HANDLE = 3\n NULL = 4\n TVM_TYPE = 5\n DLDEVICE = 6\n DLTENSOR_HANDLE = 7\n OBJECT_HANDLE = 8\n MODULE_HANDLE = 9\n PACKED_FUNC_HANDLE = 10\n STR = 11\n BYTES = 12\n NDARRAY_HANDLE = 13\n OBJECT_RVALUE_REF_ARG = 14\n EXT_BEGIN = 15\n\n\nclass TVMByteArray(ctypes.Structure):\n \"\"\"Temp data structure for byte array.\"\"\"\n\n _fields_ = [(\"data\", ctypes.POINTER(ctypes.c_byte)), (\"size\", ctypes.c_size_t)]\n\n\nclass DataTypeCode(object):\n \"\"\"DataType code in DLTensor.\"\"\"\n\n INT = 0\n UINT = 1\n FLOAT = 2\n HANDLE = 3\n BFLOAT = 4\n\n\nclass DataType(ctypes.Structure):\n \"\"\"TVM datatype structure\"\"\"\n\n _fields_ = [(\"type_code\", ctypes.c_uint8), (\"bits\", ctypes.c_uint8), (\"lanes\", ctypes.c_uint16)]\n CODE2STR = {\n DataTypeCode.INT: \"int\",\n DataTypeCode.UINT: \"uint\",\n DataTypeCode.FLOAT: \"float\",\n DataTypeCode.HANDLE: \"handle\",\n DataTypeCode.BFLOAT: \"bfloat\",\n }\n NUMPY2STR = {\n np.dtype(np.bool_): \"bool\",\n np.dtype(np.int8): \"int8\",\n np.dtype(np.int16): \"int16\",\n np.dtype(np.int32): \"int32\",\n np.dtype(np.int64): \"int64\",\n np.dtype(np.uint8): \"uint8\",\n np.dtype(np.uint16): \"uint16\",\n np.dtype(np.uint32): \"uint32\",\n np.dtype(np.uint64): \"uint64\",\n np.dtype(np.float16): \"float16\",\n np.dtype(np.float32): \"float32\",\n np.dtype(np.float64): \"float64\",\n np.dtype(np.float_): \"float64\",\n }\n STR2DTYPE = {\n \"bool\": {\"type_code\": DataTypeCode.UINT, \"bits\": 1, \"lanes\": 1},\n \"int8\": {\"type_code\": DataTypeCode.INT, \"bits\": 8, \"lanes\": 1},\n \"int16\": {\"type_code\": DataTypeCode.INT, \"bits\": 16, \"lanes\": 1},\n \"int32\": {\"type_code\": DataTypeCode.INT, \"bits\": 32, \"lanes\": 1},\n \"int64\": {\"type_code\": DataTypeCode.INT, \"bits\": 64, \"lanes\": 1},\n \"uint8\": {\"type_code\": DataTypeCode.UINT, \"bits\": 8, \"lanes\": 1},\n \"uint16\": {\"type_code\": DataTypeCode.UINT, \"bits\": 16, \"lanes\": 1},\n \"uint32\": {\"type_code\": DataTypeCode.UINT, \"bits\": 32, \"lanes\": 1},\n \"uint64\": {\"type_code\": DataTypeCode.UINT, \"bits\": 64, \"lanes\": 1},\n \"float16\": {\"type_code\": DataTypeCode.FLOAT, \"bits\": 16, \"lanes\": 1},\n \"float32\": {\"type_code\": DataTypeCode.FLOAT, \"bits\": 32, \"lanes\": 1},\n \"float64\": {\"type_code\": DataTypeCode.FLOAT, \"bits\": 64, \"lanes\": 1},\n }\n\n def __init__(self, type_str):\n super(DataType, self).__init__()\n numpy_str_map = DataType.NUMPY2STR\n if type_str in 
numpy_str_map:\n type_str = numpy_str_map[type_str]\n elif isinstance(type_str, np.dtype):\n type_str = str(type_str)\n\n assert isinstance(type_str, str)\n\n str_dtype_map = DataType.STR2DTYPE\n if type_str in str_dtype_map:\n dtype_map = str_dtype_map[type_str]\n self.bits = dtype_map[\"bits\"]\n self.type_code = dtype_map[\"type_code\"]\n self.lanes = dtype_map[\"lanes\"]\n return\n\n arr = type_str.split(\"x\")\n head = arr[0]\n self.lanes = int(arr[1]) if len(arr) > 1 else 1\n bits = 32\n\n if head.startswith(\"int\"):\n self.type_code = DataTypeCode.INT\n head = head[3:]\n elif head.startswith(\"uint\"):\n self.type_code = DataTypeCode.UINT\n head = head[4:]\n elif head.startswith(\"float\"):\n self.type_code = DataTypeCode.FLOAT\n head = head[5:]\n elif head.startswith(\"handle\"):\n self.type_code = DataTypeCode.HANDLE\n bits = 64\n head = \"\"\n elif head.startswith(\"bfloat\"):\n self.type_code = DataTypeCode.BFLOAT\n head = head[6:]\n elif head.startswith(\"custom\"):\n # pylint: disable=import-outside-toplevel\n import tvm.runtime._ffi_api\n\n low, high = head.find(\"[\"), head.find(\"]\")\n if not low or not high or low >= high:\n raise ValueError(\"Badly formatted custom type string %s\" % type_str)\n type_name = head[low + 1 : high]\n self.type_code = tvm.runtime._ffi_api._datatype_get_type_code(type_name)\n head = head[high + 1 :]\n else:\n raise ValueError(\"Do not know how to handle type %s\" % type_str)\n bits = int(head) if head else bits\n self.bits = bits\n\n def __repr__(self):\n # pylint: disable=import-outside-toplevel\n if self.bits == 1 and self.lanes == 1:\n return \"bool\"\n if self.type_code in DataType.CODE2STR:\n type_name = DataType.CODE2STR[self.type_code]\n else:\n import tvm.runtime._ffi_api\n\n type_name = \"custom[%s]\" % tvm.runtime._ffi_api._datatype_get_type_name(self.type_code)\n x = \"%s%d\" % (type_name, self.bits)\n if self.lanes != 1:\n x += \"x%d\" % self.lanes\n return x\n\n def __eq__(self, other):\n return (\n self.bits == other.bits\n and self.type_code == other.type_code\n and self.lanes == other.lanes\n )\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n\nRPC_SESS_MASK = 128\n\n\nclass Device(ctypes.Structure):\n \"\"\"TVM device strucure.\n\n Typically constructed using convenience function\n :meth:`tvm.runtime.device`.\n\n Exposes uniform interface to device-specific APIs such as CUDA or\n OpenCL. 
Some properties may return None depending on whether an\n API exposes that particular property.\n\n \"\"\"\n\n _fields_ = [(\"device_type\", ctypes.c_int), (\"device_id\", ctypes.c_int)]\n MASK2STR = {\n 1: \"cpu\",\n 2: \"cuda\",\n 4: \"opencl\",\n 5: \"aocl\",\n 7: \"vulkan\",\n 8: \"metal\",\n 9: \"vpi\",\n 10: \"rocm\",\n 12: \"ext_dev\",\n 14: \"hexagon\",\n 15: \"webgpu\",\n }\n STR2MASK = {\n \"llvm\": 1,\n \"stackvm\": 1,\n \"cpu\": 1,\n \"c\": 1,\n \"hybrid\": 1,\n \"composite\": 1,\n \"cuda\": 2,\n \"nvptx\": 2,\n \"cl\": 4,\n \"opencl\": 4,\n \"sdaccel\": 4,\n \"aocl\": 5,\n \"aocl_sw_emu\": 5,\n \"vulkan\": 7,\n \"metal\": 8,\n \"vpi\": 9,\n \"rocm\": 10,\n \"ext_dev\": 12,\n \"hexagon\": 14,\n \"webgpu\": 15,\n }\n\n def __init__(self, device_type, device_id):\n super(Device, self).__init__()\n self.device_type = int(device_type)\n self.device_id = device_id\n\n def _GetDeviceAttr(self, device_type, device_id, attr_id):\n \"\"\"Internal helper function to invoke runtime.GetDeviceAttr\"\"\"\n # pylint: disable=import-outside-toplevel\n import tvm.runtime._ffi_api\n\n return tvm.runtime._ffi_api.GetDeviceAttr(device_type, device_id, attr_id)\n\n @property\n def exist(self):\n \"\"\"Whether this device exists.\n\n Returns True if TVM has support for the device, if the\n physical device is present, and the device is accessible\n through appropriate drivers (e.g. cuda/vulkan).\n\n Returns\n -------\n exist : bool\n True if the device exists\n\n \"\"\"\n return self._GetDeviceAttr(self.device_type, self.device_id, 0) != 0\n\n @property\n def max_threads_per_block(self):\n \"\"\"Maximum number of threads on each block.\n\n Returns device value for cuda, metal, rocm, opencl, and vulkan\n devices. Returns remote device value for RPC devices.\n Returns None for all other devices.\n\n Returns\n -------\n max_threads_per_block : int or None\n The number of threads on each block\n\n \"\"\"\n return self._GetDeviceAttr(self.device_type, self.device_id, 1)\n\n @property\n def warp_size(self):\n \"\"\"Number of threads that execute concurrently.\n\n Returns device value for for cuda, rocm, and vulkan. Returns\n 1 for metal and opencl devices, regardless of the physical\n device. Returns remote device value for RPC devices. Returns\n None for all other devices.\n\n Returns\n -------\n warp_size : int or None\n Number of threads that execute concurrently\n\n \"\"\"\n return self._GetDeviceAttr(self.device_type, self.device_id, 2)\n\n @property\n def max_shared_memory_per_block(self):\n \"\"\"Total amount of shared memory per block in bytes.\n\n Returns device value for cuda, rocm, opencl, and vulkan.\n Returns remote device value for RPC devices. Returns None for\n all other devices.\n\n Returns\n -------\n max_shared_memory_per_block : int or None\n Total amount of shared memory per block in bytes\n\n \"\"\"\n return self._GetDeviceAttr(self.device_type, self.device_id, 3)\n\n @property\n def compute_version(self):\n \"\"\"Get compute version number as string.\n\n Returns maximum API version (e.g. CUDA/OpenCL/Vulkan)\n supported by the device.\n\n Returns device value for cuda, rocm, opencl, and\n vulkan. Returns remote device value for RPC devices. 
Returns\n None for all other devices.\n\n Returns\n -------\n version : str or None\n The version string in `major.minor` format.\n\n \"\"\"\n return self._GetDeviceAttr(self.device_type, self.device_id, 4)\n\n @property\n def device_name(self):\n \"\"\"Return the vendor-specific name of device.\n\n Returns device value for cuda, rocm, opencl, and vulkan.\n Returns remote device value for RPC devices. Returns None for\n all other devices.\n\n Returns\n -------\n device_name : str or None\n The name of the device.\n\n \"\"\"\n return self._GetDeviceAttr(self.device_type, self.device_id, 5)\n\n @property\n def max_clock_rate(self):\n \"\"\"Return the max clock frequency of device (kHz).\n\n Returns device value for cuda, rocm, and opencl. Returns\n remote device value for RPC devices. Returns None for all\n other devices.\n\n Returns\n -------\n max_clock_rate : int or None\n The maximum clock frequency of the device (kHz)\n\n \"\"\"\n return self._GetDeviceAttr(self.device_type, self.device_id, 6)\n\n @property\n def multi_processor_count(self):\n \"\"\"Return the number of compute units in the device.\n\n Returns device value for cuda, rocm, and opencl. Returns\n remote device value for RPC devices. Returns None for all\n other devices.\n\n Returns\n -------\n multi_processor_count : int or None\n Thee number of compute units in the device\n\n \"\"\"\n return self._GetDeviceAttr(self.device_type, self.device_id, 7)\n\n @property\n def max_thread_dimensions(self):\n \"\"\"Return the maximum size of each thread axis\n\n Returns device value for cuda, rocm, opencl, and vulkan.\n Returns remote device value for RPC devices. Returns None for\n all other devices.\n\n Returns\n -------\n dims: List of int, or None\n The maximum length of threadIdx.x, threadIdx.y, threadIdx.z\n\n \"\"\"\n return json.loads(self._GetDeviceAttr(self.device_type, self.device_id, 8))\n\n @property\n def api_version(self):\n \"\"\"Returns version number of the SDK used to compile TVM.\n\n For example, CUDA_VERSION for cuda or VK_HEADER_VERSION for\n Vulkan.\n\n Returns device value for cuda, rocm, opencl, and vulkan.\n Returns remote device value for RPC devices. Returns None for\n all other devices.\n\n Returns\n -------\n version : int or None\n The version of the SDK\n\n \"\"\"\n return self._GetDeviceAttr(self.device_type, self.device_id, 11)\n\n @property\n def driver_version(self):\n \"\"\"Returns version number of the driver\n\n Returns driver vendor's internal version number.\n (e.g. \"450.408.256\" for nvidia-driver-450)\n\n Returns device value for opencl and vulkan. Returns remote\n device value for RPC devices. 
Returns None for all other\n devices.\n\n Returns\n -------\n version : str or None\n The version string in `major.minor.patch` format.\n\n \"\"\"\n return self._GetDeviceAttr(self.device_type, self.device_id, 12)\n\n def create_raw_stream(self):\n \"\"\"Create a new runtime stream at the context.\n\n User should free the stream after use.\n\n Returns\n -------\n stream : TVMStreamHandle\n The created runtime stream.\n \"\"\"\n stream = ctypes.c_void_p()\n check_call(_LIB.TVMStreamCreate(self.device_type, self.device_id, ctypes.byref(stream)))\n return stream\n\n def free_raw_stream(self, stream):\n \"\"\"Free a created stream handle.\n\n Parameters\n ----------\n stream : TVMStreamHandle\n The stream which should to be released.\n \"\"\"\n check_call(_LIB.TVMStreamFree(self.device_type, self.device_id, stream))\n\n def set_raw_stream(self, stream):\n \"\"\"Set a created stream handle.\n\n Parameters\n ----------\n stream : TVMStreamHandle\n The stream which should to be set to the device.\n \"\"\"\n check_call(_LIB.TVMSetStream(self.device_type, self.device_id, stream))\n\n def sync(self, stream=None):\n \"\"\"Synchronize until jobs finished at the context.\n\n Parameters\n ----------\n stream : TVMStreamHandle\n Jobs in this stream should be finished.\n \"\"\"\n check_call(_LIB.TVMSynchronize(self.device_type, self.device_id, stream))\n\n def __eq__(self, other):\n return (\n isinstance(other, Device)\n and self.device_id == other.device_id\n and self.device_type == other.device_type\n )\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash(str(self))\n\n def __repr__(self):\n if self.device_type >= RPC_SESS_MASK:\n tbl_id = self.device_type / RPC_SESS_MASK - 1\n dev_type = self.device_type % RPC_SESS_MASK\n return \"remote[%d]:%s(%d)\" % (tbl_id, Device.MASK2STR[dev_type], self.device_id)\n return \"%s(%d)\" % (Device.MASK2STR[self.device_type], self.device_id)\n\n\nclass TVMArray(ctypes.Structure):\n \"\"\"TVMValue in C API\"\"\"\n\n _fields_ = [\n (\"data\", ctypes.c_void_p),\n (\"device\", Device),\n (\"ndim\", ctypes.c_int),\n (\"dtype\", DataType),\n (\"shape\", ctypes.POINTER(tvm_shape_index_t)),\n (\"strides\", ctypes.POINTER(tvm_shape_index_t)),\n (\"byte_offset\", ctypes.c_uint64),\n ]\n\n\nclass ObjectRValueRef:\n \"\"\"Represent an RValue ref to an object that can be moved.\n\n Parameters\n ----------\n obj : tvm.runtime.Object\n The object that this value refers to\n \"\"\"\n\n __slots__ = [\"obj\"]\n\n def __init__(self, obj):\n self.obj = obj\n\n\nTVMArrayHandle = ctypes.POINTER(TVMArray)\n",
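DataType above parses strings of the form <base><bits>[x<lanes>] into (type_code, bits, lanes). A simplified stand-alone mirror of that parsing, with handle and custom types omitted, just to show the mapping:

# Expected mappings, per the STR2DTYPE table and the string parser above:
#   "int8"      -> (INT=0,   bits=8,  lanes=1)
#   "float32x4" -> (FLOAT=2, bits=32, lanes=4)   # 4-lane vector type
#   "uint1"     -> (UINT=1,  bits=1,  lanes=1)   # how "bool" is stored
def parse_dtype(type_str):
    head, _, lanes = type_str.partition("x")
    lanes = int(lanes) if lanes else 1
    for prefix, code in (("uint", 1), ("int", 0), ("float", 2), ("bfloat", 4)):
        if head.startswith(prefix):
            bits = int(head[len(prefix):] or 32)  # default bit width is 32
            return code, bits, lanes
    raise ValueError("unhandled type string %s" % type_str)

print(parse_dtype("int8"), parse_dtype("float32x4"), parse_dtype("uint1"))
# -> (0, 8, 1) (2, 32, 4) (1, 1, 1)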
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Unit tests for graph partitioning.\"\"\"\n# pylint: disable=not-callable\nimport os\nimport sys\n\nimport numpy as np\n\nimport tvm\nfrom tvm.relay.backend import te_compiler\nfrom tvm.relay.backend.runtime import Runtime\nimport tvm.relay.testing\nimport tvm.relay.op as reg\nfrom tvm import relay\nfrom tvm import runtime as tvm_runtime\nfrom tvm.relay import transform\nfrom tvm.relay.testing import byoc\nfrom tvm.contrib import utils\nfrom tvm.relay.expr_functor import ExprMutator\nfrom tvm.relay.op.annotation import compiler_begin, compiler_end\nfrom tvm.relay.op.contrib.register import get_pattern_table\nfrom tvm.relay.build_module import bind_params_by_name\n\n\n# Leverage the pass manager to write a simple white list based annotator\[email protected]_pass(opt_level=0)\nclass WhiteListAnnotator:\n def __init__(self, op_list, compiler):\n assert isinstance(op_list, (list, tuple, set))\n self.op_list = op_list\n self.compiler = compiler\n\n def transform_function(self, func, mod, dev):\n\n annotator = self\n\n class Annotator(tvm.relay.ExprMutator):\n def visit_call(self, call):\n op_name = call.op.name\n if op_name in annotator.op_list:\n new_args = []\n for arg in call.args:\n ann = compiler_begin(super().visit(arg), annotator.compiler)\n new_args.append(ann)\n new_call = relay.Call(call.op, new_args, call.attrs, call.type_args)\n return compiler_end(new_call, annotator.compiler)\n else:\n return super().visit_call(call)\n\n return Annotator().visit(func)\n\n\nclass WholeGraphAnnotator(ExprMutator):\n \"\"\"\n An annotator that creates a compiler for an entire graph.\n \"\"\"\n\n def __init__(self, compiler):\n super(WholeGraphAnnotator, self).__init__()\n self.compiler = compiler\n self.last_call = True\n\n def visit_call(self, call):\n curr_last = self.last_call\n self.last_call = False\n\n params = []\n for arg in call.args:\n param = super().visit(arg)\n if isinstance(param, relay.expr.Var):\n param = compiler_begin(param, self.compiler)\n params.append(param)\n\n new_call = relay.Call(call.op, params, call.attrs)\n if curr_last:\n new_call = compiler_end(new_call, self.compiler)\n return new_call\n\n\nclass MobileNetAnnotator(ExprMutator):\n \"\"\"\n Annotate mobilenet until global_avg_pool.\n \"\"\"\n\n def __init__(self, compiler):\n super(MobileNetAnnotator, self).__init__()\n self.compiler = compiler\n self.compiler_open = False\n\n def visit_call(self, call):\n\n if call.op.name == \"nn.global_avg_pool2d\":\n self.compiler_open = True\n compiler_open = self.compiler_open\n\n params = []\n for arg in call.args:\n param = super().visit(arg)\n if call.op.name == \"nn.global_avg_pool2d\":\n param = compiler_end(param, self.compiler)\n if compiler_open and isinstance(param, 
relay.expr.Var):\n param = compiler_begin(param, self.compiler)\n params.append(param)\n\n new_call = relay.Call(call.op, params, call.attrs)\n return new_call\n\n\ndef check_result(\n mod,\n map_inputs,\n out_shape,\n result,\n tol=1e-5,\n target=\"llvm\",\n device=tvm.cpu(),\n params=None,\n runtime=Runtime(\"cpp\"),\n):\n if sys.platform == \"win32\":\n print(\"Skip test on Windows for now\")\n return\n\n def update_lib(lib):\n test_dir = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))\n source_dir = os.path.join(test_dir, \"..\", \"..\", \"..\")\n contrib_path = os.path.join(source_dir, \"src\", \"runtime\", \"contrib\")\n\n kwargs = {}\n kwargs[\"options\"] = [\"-O2\", \"-std=c++14\", \"-I\" + contrib_path]\n tmp_path = utils.tempdir()\n lib_name = \"lib.so\"\n lib_path = tmp_path.relpath(lib_name)\n lib.export_library(lib_path, fcompile=False, **kwargs)\n lib = tvm_runtime.load_module(lib_path)\n\n return lib\n\n def check_vm_result():\n te_compiler.get().clear()\n with tvm.transform.PassContext(opt_level=3):\n exe = relay.vm.compile(mod, target=target, params=params)\n code, lib = exe.save()\n lib = update_lib(lib)\n exe = tvm_runtime.vm.Executable.load_exec(code, lib)\n vm = tvm_runtime.vm.VirtualMachine(exe, device)\n outs = vm.run(**map_inputs)\n outs = outs if isinstance(outs, tvm_runtime.container.ADT) else [outs]\n results = result if isinstance(result, list) else [result]\n for out, ref in zip(outs, results):\n tvm.testing.assert_allclose(out.numpy(), ref, rtol=tol, atol=tol)\n\n def check_graph_executor_result():\n te_compiler.get().clear()\n with tvm.transform.PassContext(opt_level=3):\n json, lib, param = relay.build(mod, target=target, params=params, runtime=runtime)\n lib = update_lib(lib)\n rt_mod = tvm.contrib.graph_executor.create(json, lib, device)\n\n for name, data in map_inputs.items():\n rt_mod.set_input(name, data)\n rt_mod.set_input(**param)\n rt_mod.run()\n\n out_shapes = out_shape if isinstance(out_shape, list) else [out_shape]\n results = result if isinstance(result, list) else [result]\n\n for idx, shape in enumerate(out_shapes):\n out = tvm.nd.empty(shape, device=device)\n out = rt_mod.get_output(idx, out)\n tvm.testing.assert_allclose(out.numpy(), results[idx], rtol=tol, atol=tol)\n\n check_vm_result()\n check_graph_executor_result()\n\n\ndef test_multi_node_compiler():\n x = relay.var(\"x\", shape=(10, 10))\n w0 = relay.var(\"w0\", shape=(10, 10))\n w1 = relay.var(\"w1\", shape=(10, 10))\n w2 = relay.var(\"w2\", shape=(10, 10))\n w3 = relay.var(\"w3\", shape=(10, 10))\n w4 = relay.var(\"w4\", shape=(10, 10))\n w5 = relay.var(\"w5\", shape=(10, 10))\n w6 = relay.var(\"w6\", shape=(10, 10))\n w7 = relay.var(\"w7\", shape=(10, 10))\n\n # C compiler\n # FIXME: We generate two compilers for this case but they should be merged to one\n # due to the common input (x).\n z0 = relay.add(x, w0)\n p0 = relay.subtract(z0, w1)\n q0 = relay.multiply(p0, w2)\n\n z1 = relay.add(x, w3)\n p1 = relay.subtract(z1, w4)\n q1 = relay.multiply(p1, w5)\n\n # Other parts on TVM\n z2 = relay.add(x, w6)\n q2 = relay.subtract(z2, w7)\n\n r = relay.concatenate((q0, q1, q2), axis=0)\n f = relay.Function([x, w0, w1, w2, w3, w4, w5, w6, w7], r)\n mod = tvm.IRModule()\n ann = byoc.CcompilerAnnotator()\n mod[\"main\"] = ann.visit(f)\n mod = transform.PartitionGraph()(mod)\n mod = transform.InferType()(mod)\n\n x_data = np.random.rand(10, 10).astype(\"float32\")\n w_data = []\n for _ in range(8):\n w_data.append(np.random.rand(10, 10).astype(\"float32\"))\n\n map_inputs = 
{\"w{}\".format(i): w_data[i] for i in range(8)}\n map_inputs[\"x\"] = x_data\n\n targets = [(\"llvm\", Runtime(\"cpp\")), (\"c\", Runtime(\"crt\", {\"system-lib\": True}))]\n for tgt, rt in targets:\n check_result(\n mod,\n map_inputs,\n (30, 10),\n np.concatenate(\n (\n ((x_data + w_data[0]) - w_data[1]) * w_data[2],\n ((x_data + w_data[3]) - w_data[4]) * w_data[5],\n x_data + w_data[6] - w_data[7],\n ),\n axis=0,\n ),\n target=tgt,\n runtime=rt,\n )\n\n\ndef test_extern_ccompiler_single_op():\n @transform.function_pass(opt_level=0)\n class MyAnnotator:\n def transform_function(self, func, mod, dev):\n class Annotator(tvm.relay.ExprMutator):\n def visit_call(self, call):\n new_args = []\n for arg in call.args:\n ann = compiler_begin(self.visit(arg), \"ccompiler\")\n new_args.append(ann)\n new_call = relay.Call(call.op, new_args)\n return compiler_end(new_call, \"ccompiler\")\n\n return Annotator().visit(func)\n\n x = relay.var(\"x\", shape=(8, 8))\n y = relay.var(\"y\", shape=(8, 8))\n z = x + y\n f = relay.Function([x, y], z)\n x_data = np.random.rand(8, 8).astype(\"float32\")\n y_data = np.random.rand(8, 8).astype(\"float32\")\n mod = tvm.IRModule()\n mod[\"main\"] = f\n mod = MyAnnotator()(mod)\n mod = transform.PartitionGraph()(mod)\n\n check_result(mod, {\"x\": x_data, \"y\": y_data}, (8, 8), x_data + y_data)\n\n\ndef set_func_attr(func, compile_name, symbol_name):\n func = func.with_attr(\"Primitive\", tvm.tir.IntImm(\"int32\", 1))\n func = func.with_attr(\"Inline\", tvm.tir.IntImm(\"int32\", 1))\n func = func.with_attr(\"Compiler\", compile_name)\n func = func.with_attr(\"global_symbol\", symbol_name)\n return func\n\n\ndef test_extern_ccompiler_default_ops():\n def expected():\n mod = tvm.IRModule()\n x = relay.var(\"x\", shape=(8, 8))\n y = relay.var(\"y\", shape=(8, 8))\n x0 = relay.var(\"x0\", shape=(8, 8))\n y0 = relay.var(\"y0\", shape=(8, 8))\n add = x0 + y0\n # Function that uses C compiler\n func = relay.Function([x0, y0], add)\n func = set_func_attr(func, \"ccompiler\", \"tvmgen_default_ccompiler_main_0\")\n glb_0 = relay.GlobalVar(\"tvmgen_default_ccompiler_main_0\")\n mod[glb_0] = func\n add_call = relay.Call(glb_0, [x, y])\n # Function that uses default compiler. 
Ops are fused in this function.\n p0 = relay.var(\"p0\", shape=(8, 8))\n log = relay.log(p0)\n exp = relay.exp(p0)\n concat = relay.concatenate([log, exp], axis=0)\n fused_func = relay.Function([p0], concat)\n fused_func = fused_func.with_attr(\"Primitive\", tvm.tir.IntImm(\"int32\", 1))\n fused_call = relay.Call(fused_func, [add_call])\n main = relay.Function([x, y], fused_call)\n mod[\"main\"] = main\n mod = transform.InferType()(mod)\n return mod\n\n x = relay.var(\"x\", shape=(8, 8))\n y = relay.var(\"y\", shape=(8, 8))\n add = x + y\n log = relay.log(add)\n exp = relay.exp(add)\n concat = relay.concatenate([log, exp], axis=0)\n f = relay.Function([x, y], concat)\n mod = tvm.IRModule()\n mod[\"main\"] = f\n mod = WhiteListAnnotator([\"add\", \"subtract\", \"multiply\"], \"ccompiler\")(mod)\n mod = transform.PartitionGraph()(mod)\n fused_mod = transform.FuseOps(2)(mod)\n expected_mod = expected()\n assert tvm.ir.structural_equal(fused_mod, expected_mod, map_free_vars=True)\n\n x_data = np.random.rand(8, 8).astype(\"float32\")\n y_data = np.random.rand(8, 8).astype(\"float32\")\n np_add = x_data + y_data\n res = np.concatenate([np.log(np_add), np.exp(np_add)])\n check_result(mod, {\"x\": x_data, \"y\": y_data}, (16, 8), res)\n\n\ndef test_extern_compiler_sanitized_ops():\n def expected():\n mod = tvm.IRModule()\n x = relay.var(\"x\", shape=(8, 8))\n y = relay.var(\"y\", shape=(8, 8))\n x0 = relay.var(\"x0\", shape=(8, 8))\n y0 = relay.var(\"y0\", shape=(8, 8))\n add = x0 + y0\n # Function that uses C compiler\n func = relay.Function([x0, y0], add)\n func = set_func_attr(func, \"unsanitary-name++\", \"tvmgen_default_unsanitary_name___main_0\")\n glb_0 = relay.GlobalVar(\"tvmgen_default_unsanitary_name___main_0\")\n mod[glb_0] = func\n add_call = relay.Call(glb_0, [x, y])\n # Function that uses default compiler. Ops are fused in this function.\n p0 = relay.var(\"p0\", shape=(8, 8))\n log = relay.log(p0)\n exp = relay.exp(p0)\n concat = relay.concatenate([log, exp], axis=0)\n fused_func = relay.Function([p0], concat)\n fused_func = fused_func.with_attr(\"Primitive\", tvm.tir.IntImm(\"int32\", 1))\n fused_call = relay.Call(fused_func, [add_call])\n main = relay.Function([x, y], fused_call)\n mod[\"main\"] = main\n mod = transform.InferType()(mod)\n return mod\n\n x = relay.var(\"x\", shape=(8, 8))\n y = relay.var(\"y\", shape=(8, 8))\n add = x + y\n log = relay.log(add)\n exp = relay.exp(add)\n concat = relay.concatenate([log, exp], axis=0)\n f = relay.Function([x, y], concat)\n mod = tvm.IRModule()\n mod[\"main\"] = f\n mod = WhiteListAnnotator([\"add\", \"subtract\", \"multiply\"], \"unsanitary-name++\")(mod)\n mod = transform.PartitionGraph()(mod)\n fused_mod = transform.FuseOps(2)(mod)\n expected_mod = expected()\n assert tvm.ir.structural_equal(fused_mod, expected_mod, map_free_vars=True)\n\n\ndef test_extern_ccompiler_multiple_functions():\n def expected():\n mod = tvm.IRModule()\n x = relay.var(\"x\", shape=(8, 8))\n y = relay.var(\"y\", shape=(8, 8))\n x0 = relay.var(\"x0\", shape=(8, 8))\n y0 = relay.var(\"y0\", shape=(8, 8))\n add = x0 + y0\n # Function that uses C compiler\n func = relay.Function([x0, y0], add)\n func = set_func_attr(func, \"ccompiler\", \"tvmgen_default_ccompiler_main_0\")\n glb_0 = relay.GlobalVar(\"tvmgen_default_ccompiler_main_0\")\n mod[glb_0] = func\n add_call = relay.Call(glb_0, [x, y])\n # Function that uses default compiler. 
Ops are fused in this function.\n p0 = relay.var(\"p0\", shape=(8, 8))\n log = relay.log(p0)\n exp = relay.exp(p0)\n concat = relay.concatenate([log, exp], axis=0)\n fused_func = relay.Function([p0], concat)\n fused_func = fused_func.with_attr(\"Primitive\", tvm.tir.IntImm(\"int32\", 1))\n fused_call = relay.Call(fused_func, [add_call])\n main = relay.Function([x, y], fused_call)\n mod[\"main\"] = main\n # define the second one\n a = relay.var(\"a\", shape=(16, 16))\n b = relay.var(\"b\", shape=(16, 16))\n a0 = relay.var(\"a0\", shape=(16, 16))\n b0 = relay.var(\"b0\", shape=(16, 16))\n add = a0 + b0\n # Function that uses C compiler\n func = relay.Function([a0, b0], add)\n func = set_func_attr(func, \"ccompiler\", \"tvmgen_default_ccompiler_subfunction_0\")\n glb_0 = relay.GlobalVar(\"tvmgen_default_ccompiler_subfunction_0\")\n mod[glb_0] = func\n add_call = relay.Call(glb_0, [a, b])\n # Function that uses default compiler. Ops are fused in this function.\n p0 = relay.var(\"p0\", shape=(16, 16))\n log = relay.log(p0)\n exp = relay.exp(p0)\n concat = relay.concatenate([log, exp], axis=0)\n fused_func = relay.Function([p0], concat)\n fused_func = fused_func.with_attr(\"Primitive\", tvm.tir.IntImm(\"int32\", 1))\n fused_call = relay.Call(fused_func, [add_call])\n sunfunction = relay.Function([a, b], fused_call)\n mod[\"subfunction\"] = sunfunction\n mod = transform.InferType()(mod)\n return mod\n\n x = relay.var(\"x\", shape=(8, 8))\n y = relay.var(\"y\", shape=(8, 8))\n add = x + y\n log = relay.log(add)\n exp = relay.exp(add)\n concat = relay.concatenate([log, exp], axis=0)\n f = relay.Function([x, y], concat)\n mod = tvm.IRModule()\n mod[\"main\"] = f\n # define second function\n a = relay.var(\"a\", shape=(16, 16))\n b = relay.var(\"b\", shape=(16, 16))\n add = a + b\n log = relay.log(add)\n exp = relay.exp(add)\n concat = relay.concatenate([log, exp], axis=0)\n f2 = relay.Function([a, b], concat)\n mod[\"subfunction\"] = f2\n mod = WhiteListAnnotator([\"add\", \"subtract\", \"multiply\"], \"ccompiler\")(mod)\n mod = transform.PartitionGraph()(mod)\n\n fused_mod = transform.FuseOps(2)(mod)\n expected_mod = expected()\n assert tvm.ir.structural_equal(fused_mod, expected_mod, map_free_vars=True)\n\n x_data = np.random.rand(8, 8).astype(\"float32\")\n y_data = np.random.rand(8, 8).astype(\"float32\")\n np_add = x_data + y_data\n res = np.concatenate([np.log(np_add), np.exp(np_add)])\n check_result(mod, {\"x\": x_data, \"y\": y_data}, (16, 8), res)\n\n\ndef test_extern_ccompiler():\n x = relay.var(\"x\", shape=(2, 2))\n y = relay.var(\"y\", shape=(2, 2))\n z = x + x\n p = y * y\n f = relay.Function([x, y], p - z)\n x_data = np.random.rand(2, 2).astype(\"float32\")\n y_data = np.random.rand(2, 2).astype(\"float32\")\n mod = tvm.IRModule()\n mod[\"main\"] = f\n mod = WhiteListAnnotator([\"add\", \"subtract\", \"multiply\"], \"ccompiler\")(mod)\n mod = transform.PartitionGraph()(mod)\n\n check_result(mod, {\"x\": x_data, \"y\": y_data}, (2, 2), (y_data * y_data) - (x_data + x_data))\n\n\ndef test_extern_dnnl():\n if not tvm.get_global_func(\"relay.ext.dnnl\", True):\n print(\"skip because DNNL codegen is not available\")\n return\n\n dtype = \"float32\"\n ishape = (1, 32, 14, 14)\n w1shape = (32, 1, 3, 3)\n\n def expected():\n data0 = relay.var(\"data\", shape=(ishape), dtype=dtype)\n input0 = relay.var(\"input\", shape=(w1shape), dtype=dtype)\n depthwise_conv2d_1 = relay.nn.conv2d(\n data0, input0, kernel_size=(3, 3), padding=(1, 1), groups=32\n )\n depthwise_conv2d_2 = relay.nn.conv2d(\n 
depthwise_conv2d_1, input0, kernel_size=(3, 3), padding=(1, 1), groups=32\n )\n out = relay.add(depthwise_conv2d_1, depthwise_conv2d_2)\n\n func = relay.Function([data0, input0], out)\n func = set_func_attr(func, \"dnnl\", \"tvmgen_default_dnnl_main_0\")\n glb_var = relay.GlobalVar(\"tvmgen_default_dnnl_main_0\")\n mod = tvm.IRModule()\n mod[glb_var] = func\n mod = transform.InferType()(mod)\n\n data = relay.var(\"data\", shape=(ishape), dtype=dtype)\n weight = relay.var(\"input\", shape=(w1shape), dtype=dtype)\n main_f = relay.Function([data, weight], glb_var(data, weight))\n mod[\"main\"] = main_f\n mod = transform.InferType()(mod)\n\n return mod\n\n def get_func():\n data = relay.var(\"data\", shape=(ishape), dtype=dtype)\n weight1 = relay.var(\"weight1\", shape=(w1shape), dtype=dtype)\n depthwise_conv2d_1 = relay.nn.conv2d(\n data, weight1, kernel_size=(3, 3), padding=(1, 1), groups=32\n )\n depthwise_conv2d_2 = relay.nn.conv2d(\n depthwise_conv2d_1, weight1, kernel_size=(3, 3), padding=(1, 1), groups=32\n )\n out = relay.add(depthwise_conv2d_1, depthwise_conv2d_2)\n\n return relay.Function([data, weight1], out)\n\n mod = tvm.IRModule()\n mod[\"main\"] = WholeGraphAnnotator(\"dnnl\").visit(get_func())\n mod = transform.PartitionGraph()(mod)\n mod = transform.InferType()(mod)\n\n assert tvm.ir.structural_equal(mod, expected(), map_free_vars=True)\n\n ref_mod = tvm.IRModule()\n ref_mod[\"main\"] = get_func()\n\n i_data = np.random.uniform(0, 1, ishape).astype(dtype)\n w1_data = np.random.uniform(0, 1, w1shape).astype(dtype)\n\n ref_res = relay.create_executor(\"graph\", mod=ref_mod, device=tvm.cpu()).evaluate()(\n i_data, w1_data\n )\n check_result(\n mod, {\"data\": i_data, \"weight1\": w1_data}, (1, 32, 14, 14), ref_res.numpy(), tol=1e-5\n )\n\n\ndef test_extern_dnnl_mobilenet():\n if not tvm.get_global_func(\"relay.ext.dnnl\", True):\n print(\"skip because DNNL codegen is not available\")\n return\n\n dtype = \"float32\"\n ishape = (1, 3, 224, 224)\n ref_mod, params = relay.testing.mobilenet.get_workload(batch_size=1, dtype=\"float32\")\n mod = transform.AnnotateTarget([\"dnnl\"])(ref_mod)\n mod = transform.MergeCompilerRegions()(mod)\n mod = transform.PartitionGraph()(mod)\n i_data = np.random.uniform(0, 1, ishape).astype(dtype)\n\n ref_res = relay.create_executor(\"graph\", mod=ref_mod, device=tvm.cpu(0)).evaluate()(\n i_data, **params\n )\n te_compiler.get().clear()\n\n check_result(mod, {\"data\": i_data}, (1, 1000), ref_res.numpy(), tol=1e-5, params=params)\n\n\ndef test_function_lifting():\n def partition():\n data = relay.var(\"data\", relay.TensorType((1, 3, 224, 224), \"float32\"))\n weight = relay.var(\"weight\", relay.TensorType((16, 3, 3, 3), \"float32\"))\n bn_gamma = relay.var(\"bn_gamma\", relay.TensorType((16,), \"float32\"))\n bn_beta = relay.var(\"bn_beta\", relay.TensorType((16,), \"float32\"))\n bn_mmean = relay.var(\"bn_mean\", relay.TensorType((16,), \"float32\"))\n bn_mvar = relay.var(\"bn_var\", relay.TensorType((16,), \"float32\"))\n\n conv = relay.nn.conv2d(\n data=data, weight=weight, kernel_size=(3, 3), channels=16, padding=(1, 1)\n )\n bn_output = relay.nn.batch_norm(conv, bn_gamma, bn_beta, bn_mmean, bn_mvar)\n\n func = relay.Function(\n [data, weight, bn_gamma, bn_beta, bn_mmean, bn_mvar], bn_output.astuple()\n )\n mod = tvm.IRModule()\n mod[\"main\"] = func\n mod = relay.transform.InferType()(mod)\n op_list = [\"nn.batch_norm\", \"nn.conv2d\"]\n mod = WhiteListAnnotator(op_list, \"test_compiler\")(mod)\n\n opt_pass = tvm.transform.Sequential(\n [\n 
transform.InferType(),\n transform.PartitionGraph(),\n transform.SimplifyInference(),\n transform.FoldConstant(),\n transform.AlterOpLayout(),\n ]\n )\n\n with tvm.transform.PassContext(opt_level=3):\n mod = opt_pass(mod)\n\n return mod\n\n def expected():\n # function for batch_norm\n data0 = relay.var(\"data0\", relay.TensorType((1, 16, 224, 224), \"float32\"))\n mod = tvm.IRModule()\n bn_gamma = relay.var(\"bn_gamma1\", relay.TensorType((16,), \"float32\"))\n bn_beta = relay.var(\"bn_beta1\", relay.TensorType((16,), \"float32\"))\n bn_mmean = relay.var(\"bn_mean1\", relay.TensorType((16,), \"float32\"))\n bn_mvar = relay.var(\"bn_var1\", relay.TensorType((16,), \"float32\"))\n\n bn = relay.nn.batch_norm(data0, bn_gamma, bn_beta, bn_mmean, bn_mvar)\n func0 = relay.Function([data0, bn_gamma, bn_beta, bn_mmean, bn_mvar], bn.astuple())\n func0 = set_func_attr(func0, \"test_compiler\", \"tvmgen_default_test_compiler_main_2\")\n gv0 = relay.GlobalVar(\"tvmgen_default_test_compiler_main_2\")\n mod[gv0] = func0\n mod = transform.InferType()(mod)\n\n # function for conv2d\n data1 = relay.var(\"data1\", relay.TensorType((1, 3, 224, 224), \"float32\"))\n weight1 = relay.var(\"weight1\", relay.TensorType((16, 3, 3, 3), \"float32\"))\n conv = relay.nn.conv2d(\n data=data1, weight=weight1, kernel_size=(3, 3), channels=16, padding=(1, 1)\n )\n func1 = relay.Function([data1, weight1], conv)\n func1 = set_func_attr(func1, \"test_compiler\", \"tvmgen_default_test_compiler_main_0\")\n gv1 = relay.GlobalVar(\"tvmgen_default_test_compiler_main_0\")\n mod[gv1] = func1\n mod = transform.InferType()(mod)\n\n # main function\n data = relay.var(\"data\", relay.TensorType((1, 3, 224, 224), \"float32\"))\n weight = relay.var(\"weight\", relay.TensorType((16, 3, 3, 3), \"float32\"))\n bn_gamma0 = relay.var(\"bn_gamma\", relay.TensorType((16,), \"float32\"))\n bn_beta0 = relay.var(\"bn_beta\", relay.TensorType((16,), \"float32\"))\n bn_mmean0 = relay.var(\"bn_mean\", relay.TensorType((16,), \"float32\"))\n bn_mvar0 = relay.var(\"bn_var\", relay.TensorType((16,), \"float32\"))\n\n call1 = gv1(data, weight)\n call0 = gv0(call1, bn_gamma0, bn_beta0, bn_mmean0, bn_mvar0)\n mod[\"main\"] = relay.Function(\n [data, weight, bn_gamma0, bn_beta0, bn_mmean0, bn_mvar0], call0\n )\n mod = transform.InferType()(mod)\n return mod\n\n partitioned = partition()\n ref_mod = expected()\n assert tvm.ir.structural_equal(partitioned, ref_mod, map_free_vars=True)\n\n\ndef test_function_lifting_inline():\n def partition():\n data = relay.var(\"data\", relay.TensorType((1, 16, 224, 224), \"float32\"))\n bn_gamma = relay.var(\"bn_gamma\", relay.TensorType((16,), \"float32\"))\n bn_beta = relay.var(\"bn_beta\", relay.TensorType((16,), \"float32\"))\n bn_mmean = relay.var(\"bn_mean\", relay.TensorType((16,), \"float32\"))\n bn_mvar = relay.var(\"bn_var\", relay.TensorType((16,), \"float32\"))\n\n bn_output = relay.nn.batch_norm(data, bn_gamma, bn_beta, bn_mmean, bn_mvar)\n\n func = relay.Function([data, bn_gamma, bn_beta, bn_mmean, bn_mvar], bn_output.astuple())\n mod = tvm.IRModule()\n mod[\"main\"] = func\n op_list = [\"nn.batch_norm\", \"nn.conv2d\"]\n mod = WhiteListAnnotator(op_list, \"test_compiler\")(mod)\n\n opt_pass = tvm.transform.Sequential(\n [\n transform.InferType(),\n transform.PartitionGraph(),\n transform.SimplifyInference(),\n transform.FoldConstant(),\n transform.AlterOpLayout(),\n transform.Inline(),\n ]\n )\n\n with tvm.transform.PassContext(opt_level=3):\n mod = opt_pass(mod)\n\n return mod\n\n def expected():\n # 
function for batch_norm\n data0 = relay.var(\"data0\", relay.TensorType((1, 16, 224, 224), \"float32\"))\n mod = tvm.IRModule()\n bn_gamma = relay.var(\"bn_gamma1\", relay.TensorType((16,), \"float32\"))\n bn_beta = relay.var(\"bn_beta1\", relay.TensorType((16,), \"float32\"))\n bn_mmean = relay.var(\"bn_mean1\", relay.TensorType((16,), \"float32\"))\n bn_mvar = relay.var(\"bn_var1\", relay.TensorType((16,), \"float32\"))\n\n bn = relay.nn.batch_norm(data0, bn_gamma, bn_beta, bn_mmean, bn_mvar)\n func0 = relay.Function([data0, bn_gamma, bn_beta, bn_mmean, bn_mvar], bn.astuple())\n func0 = set_func_attr(func0, \"test_compiler\", \"tvmgen_default_test_compiler_main_0\")\n\n # main function\n data = relay.var(\"data\", relay.TensorType((1, 16, 224, 224), \"float32\"))\n bn_gamma0 = relay.var(\"bn_gamma\", relay.TensorType((16,), \"float32\"))\n bn_beta0 = relay.var(\"bn_beta\", relay.TensorType((16,), \"float32\"))\n bn_mmean0 = relay.var(\"bn_mean\", relay.TensorType((16,), \"float32\"))\n bn_mvar0 = relay.var(\"bn_var\", relay.TensorType((16,), \"float32\"))\n\n call0 = func0(data, bn_gamma0, bn_beta0, bn_mmean0, bn_mvar0)\n mod[\"main\"] = relay.Function([data, bn_gamma0, bn_beta0, bn_mmean0, bn_mvar0], call0)\n mod = transform.InferType()(mod)\n return mod\n\n partitioned = partition()\n ref_mod = expected()\n assert tvm.ir.structural_equal(partitioned, ref_mod, map_free_vars=True)\n\n\ndef test_constant_propagation():\n ones = np.ones(shape=(8, 8), dtype=\"float32\")\n\n def expected():\n mod = tvm.IRModule()\n y = relay.var(\"y\", shape=(8, 8))\n x0 = relay.const(ones)\n y0 = relay.var(\"y0\", shape=(8, 8))\n add = x0 + y0\n # Function that uses C compiler\n func = relay.Function([y0], add)\n func = set_func_attr(func, \"ccompiler\", \"tvmgen_default_ccompiler_main_0\")\n glb_0 = relay.GlobalVar(\"tvmgen_default_ccompiler_main_0\")\n mod[glb_0] = func\n mod = relay.transform.InferType()(mod)\n add_call = relay.Call(glb_0, [y])\n log = relay.log(add_call)\n main = relay.Function([y], log)\n mod[\"main\"] = main\n mod = relay.transform.InferType()(mod)\n return mod\n\n x = relay.var(\"x\", shape=(8, 8))\n y = relay.var(\"y\", shape=(8, 8))\n add = x + y\n log = relay.log(add)\n f = relay.Function([x, y], log)\n f = bind_params_by_name(f, {\"x\": tvm.nd.array(ones)})\n mod = tvm.IRModule()\n mod[\"main\"] = f\n mod = WhiteListAnnotator([\"add\"], \"ccompiler\")(mod)\n mod = transform.PartitionGraph()(mod)\n mod = relay.transform.InferType()(mod)\n\n expected_mod = expected()\n expected_mod = relay.transform.InferType()(expected_mod)\n assert tvm.ir.structural_equal(mod, expected_mod, map_free_vars=True)\n\n y_data = np.random.rand(8, 8).astype(\"float32\")\n np_add = ones + y_data\n check_result(mod, {\"y\": y_data}, (8, 8), np.log(np_add))\n\n\ndef test_multiple_outputs():\n def create_graph():\n data = relay.var(\"data\", relay.TensorType((1, 3, 224, 224), \"float32\"))\n weight = relay.var(\"weight\", relay.TensorType((16, 3, 3, 3), \"float32\"))\n bn_gamma = relay.var(\"bn_gamma\", relay.TensorType((16,), \"float32\"))\n bn_beta = relay.var(\"bn_beta\", relay.TensorType((16,), \"float32\"))\n bn_mean = relay.var(\"bn_mean\", relay.TensorType((16,), \"float32\"))\n bn_var = relay.var(\"bn_var\", relay.TensorType((16,), \"float32\"))\n\n data_cb = compiler_begin(data, \"test_target\")\n weight_cb = compiler_begin(weight, \"test_target\")\n bn_gamma_cb = compiler_begin(bn_gamma, \"test_target\")\n bn_beta_cb = compiler_begin(bn_beta, \"test_target\")\n bn_mean_cb = compiler_begin(bn_mean, 
\"test_target\")\n bn_var_cb = compiler_begin(bn_var, \"test_target\")\n\n conv_o = relay.nn.conv2d(\n data=data_cb, weight=weight_cb, kernel_size=(3, 3), channels=16, padding=(1, 1)\n )\n\n bn_o = relay.nn.batch_norm(conv_o, bn_gamma_cb, bn_beta_cb, bn_mean_cb, bn_var_cb)\n\n relu_o = relay.nn.relu(bn_o[0])\n relu_o_ce = compiler_end(relu_o, \"test_target\")\n\n bn_omean = bn_o[1]\n rebn_omean_ce = compiler_end(bn_omean, \"test_target\")\n bn_ovar = bn_o[2]\n bn_ovar_ce = compiler_end(bn_ovar, \"test_target\")\n\n dummy_mean_abs = relay.abs(rebn_omean_ce)\n dummy_ovar_abs = relay.abs(bn_ovar_ce)\n dummy_tuple = relay.Tuple((relu_o_ce, dummy_mean_abs, dummy_ovar_abs))\n\n func = relay.Function([data, weight, bn_gamma, bn_beta, bn_mean, bn_var], dummy_tuple)\n return func\n\n def expected():\n mod = tvm.IRModule()\n\n # function 0\n data = relay.var(\"test_target_0_i0\", relay.TensorType((1, 3, 224, 224), \"float32\"))\n weight = relay.var(\"test_target_0_i1\", relay.TensorType((16, 3, 3, 3), \"float32\"))\n bn_gamma = relay.var(\"test_target_0_i2\", relay.TensorType((16,), \"float32\"))\n bn_beta = relay.var(\"test_target_0_i3\", relay.TensorType((16,), \"float32\"))\n bn_mean = relay.var(\"test_target_0_i4\", relay.TensorType((16,), \"float32\"))\n bn_var = relay.var(\"test_target_0_i5\", relay.TensorType((16,), \"float32\"))\n\n conv_o = relay.nn.conv2d(\n data=data, weight=weight, kernel_size=(3, 3), channels=16, padding=(1, 1)\n )\n\n bn_o = relay.nn.batch_norm(conv_o, bn_gamma, bn_beta, bn_mean, bn_var)\n\n relu_o = relay.nn.relu(bn_o[0])\n tuple_o = relay.Tuple((relu_o, bn_o[1], bn_o[2]))\n\n func0 = relay.Function([data, weight, bn_gamma, bn_beta, bn_mean, bn_var], tuple_o)\n func0 = set_func_attr(func0, \"test_target\", \"tvmgen_default_test_target_main_0\")\n gv0 = relay.GlobalVar(\"tvmgen_default_test_target_main_0\")\n mod[gv0] = func0\n mod = relay.transform.InferType()(mod)\n\n # body\n data = relay.var(\"data\", relay.TensorType((1, 3, 224, 224), \"float32\"))\n weight = relay.var(\"weight\", relay.TensorType((16, 3, 3, 3), \"float32\"))\n bn_gamma = relay.var(\"bn_gamma\", relay.TensorType((16,), \"float32\"))\n bn_beta = relay.var(\"bn_beta\", relay.TensorType((16,), \"float32\"))\n bn_mean = relay.var(\"bn_mean\", relay.TensorType((16,), \"float32\"))\n bn_var = relay.var(\"bn_var\", relay.TensorType((16,), \"float32\"))\n\n f0_o = gv0(data, weight, bn_gamma, bn_beta, bn_mean, bn_var)\n f0_relu_o = relay.TupleGetItem(f0_o, 0)\n f0_mean_o = relay.TupleGetItem(f0_o, 1)\n f0_var_o = relay.TupleGetItem(f0_o, 2)\n\n f0_mean_abs = relay.abs(f0_mean_o)\n f0_var_abs = relay.abs(f0_var_o)\n main_tuple = relay.Tuple((f0_relu_o, f0_mean_abs, f0_var_abs))\n\n func = relay.Function([data, weight, bn_gamma, bn_beta, bn_mean, bn_var], main_tuple)\n mod[\"main\"] = func\n mod = relay.transform.InferType()(mod)\n return mod\n\n mod = tvm.IRModule()\n mod[\"main\"] = create_graph()\n ref_mod = expected()\n partitioned = transform.PartitionGraph()(mod)\n assert tvm.ir.structural_equal(partitioned, ref_mod, map_free_vars=True)\n\n\ndef test_mixed_single_multiple_outputs():\n def create_graph():\n data = relay.var(\"data\", shape=(10, 10))\n\n cb_1 = compiler_begin(data, \"test_target\")\n O_1 = relay.abs(cb_1)\n ce_2 = compiler_end(O_1, \"test_target\")\n O_2 = relay.nn.relu(O_1)\n ce_3 = compiler_end(O_2, \"test_target\")\n\n X = relay.tanh(ce_2)\n\n cb_3 = compiler_begin(ce_3, \"test_target\")\n cb_4 = compiler_begin(X, \"test_target\")\n O_3 = relay.add(cb_3, cb_4)\n ce_4 = 
compiler_end(O_3, \"test_target\")\n\n func = relay.Function([data], ce_4)\n return func\n\n def expected():\n mod = tvm.IRModule()\n\n # function 1\n f1_cb1 = relay.var(\"test_target_0_i0\", shape=(10, 10))\n f1_O_1 = relay.abs(f1_cb1)\n f1_O_2 = relay.nn.relu(f1_O_1)\n f1_out = relay.Tuple((f1_O_2, f1_O_1))\n func1 = relay.Function([f1_cb1], f1_out)\n func1 = set_func_attr(func1, \"test_target\", \"tvmgen_default_test_target_main_0\")\n gv1 = relay.GlobalVar(\"tvmgen_default_test_target_main_0\")\n mod[gv1] = func1\n mod = relay.transform.InferType()(mod)\n\n # function 0\n f2_cb3 = relay.var(\"test_target_1_i0\", shape=(10, 10))\n f2_cb4 = relay.var(\"test_target_1_i1\", shape=(10, 10))\n f2_O_3 = relay.add(f2_cb3, f2_cb4)\n func0 = relay.Function([f2_cb3, f2_cb4], f2_O_3)\n func0 = set_func_attr(func0, \"test_target\", \"tvmgen_default_test_target_main_1\")\n gv0 = relay.GlobalVar(\"tvmgen_default_test_target_main_1\")\n mod[gv0] = func0\n mod = relay.transform.InferType()(mod)\n\n # body\n data = relay.var(\"data\", shape=(10, 10))\n tuple_out = gv1(data)\n ce_2 = relay.TupleGetItem(tuple_out, 1)\n ce_3 = relay.TupleGetItem(tuple_out, 0)\n\n X = relay.tanh(ce_2)\n ce_4 = gv0(ce_3, X)\n func = relay.Function([data], ce_4)\n mod[\"main\"] = func\n mod = relay.transform.InferType()(mod)\n return mod\n\n mod = tvm.IRModule()\n mod[\"main\"] = create_graph()\n mod = transform.InferType()(mod)\n\n ref_mod = expected()\n\n partitioned = transform.PartitionGraph()(mod)\n assert tvm.ir.structural_equal(partitioned, ref_mod, map_free_vars=True)\n\n\ndef test_dnnl_fuse():\n dnnl_patterns = get_pattern_table(\"dnnl\")\n (\n conv2d_bias_relu_pat,\n conv2d_bias_sigmoid_pat,\n conv2d_bias_pat,\n conv2d_relu_pat,\n conv2d_sigmoid_pat,\n ) = (\n dnnl_patterns[1],\n dnnl_patterns[13],\n dnnl_patterns[19],\n dnnl_patterns[25],\n dnnl_patterns[37],\n )\n\n def get_blocks(\n prefix,\n data,\n in_channel,\n out_channel,\n include_bias_add=True,\n include_bn=True,\n include_sigmoid=False,\n ):\n weight = relay.var(prefix + \"weight\")\n bias = relay.var(prefix + \"bias\")\n bn_gamma = relay.var(prefix + \"bn_gamma\")\n bn_beta = relay.var(prefix + \"bn_beta\")\n bn_mmean = relay.var(prefix + \"bn_mean\")\n bn_mvar = relay.var(prefix + \"bn_var\")\n\n layer = relay.nn.conv2d(\n data=data, weight=weight, kernel_size=(3, 3), channels=out_channel, padding=(1, 1)\n )\n if include_bias_add:\n layer = relay.nn.bias_add(layer, bias)\n if include_bn:\n bn_output = relay.nn.batch_norm(layer, bn_gamma, bn_beta, bn_mmean, bn_mvar)\n layer = bn_output[0]\n if include_sigmoid:\n # dummy layer to prevent pattern detection\n layer = relay.sigmoid(layer)\n layer = relay.nn.relu(layer)\n return layer\n\n def get_net(include_bias_add=True, include_bn=True, include_sigmoid=False):\n data = relay.var(\"data\", relay.TensorType((1, 3, 224, 224), \"float32\"))\n block1 = get_blocks(\"block1_\", data, 3, 8, include_bias_add, include_bn, include_sigmoid)\n # The second block is always conv + relu, to make it more interesting\n block2 = get_blocks(\"block2_\", block1, 8, 8, False, False, include_sigmoid)\n return relay.Function(relay.analysis.free_vars(block2), block2)\n\n def get_partitoned_mod(mod, params, pattern_table):\n # This is required for constant folding\n mod[\"main\"] = bind_params_by_name(mod[\"main\"], params)\n\n remove_bn_pass = tvm.transform.Sequential(\n [\n transform.InferType(),\n transform.SimplifyInference(),\n transform.FoldConstant(),\n transform.FoldScaleAxis(),\n ]\n )\n # fold consecutive add ops to 
simplify pattern `conv2d-bias_add-bn-relu`\n remove_linear_pass = tvm.transform.Sequential(\n [\n transform.SimplifyExpr(),\n transform.FoldConstant(),\n ]\n )\n composite_partition = tvm.transform.Sequential(\n [\n transform.CanonicalizeOps(),\n remove_bn_pass,\n remove_linear_pass,\n transform.MergeComposite(pattern_table),\n transform.AnnotateTarget(\"dnnl\"),\n transform.PartitionGraph(),\n ]\n )\n\n with tvm.transform.PassContext(opt_level=3, disabled_pass=[\"AlterOpLayout\"]):\n return composite_partition(mod)\n\n def test_detect_pattern(\n pattern_table, include_bias_add, include_bn, include_sigmoid, num_expected_partition\n ):\n net = get_net(include_bias_add, include_bn, include_sigmoid)\n mod, params = tvm.relay.testing.create_workload(net)\n mod = get_partitoned_mod(mod, params, pattern_table)\n assert len(mod.functions) - 1 == num_expected_partition # -1 for main\n\n def test_partition():\n # conv + bn + relu, conv + relu -> fused conv_bias_relu, conv, and relu\n test_detect_pattern([conv2d_bias_relu_pat], False, True, False, 3)\n # conv + bn + relu, conv + relu -> conv, bias, relu, and fused conv_relu\n test_detect_pattern([conv2d_relu_pat], False, True, False, 4)\n # conv + bn + relu, conv + relu -> fused conv_bias_relu, and fused conv_relu\n test_detect_pattern([conv2d_bias_relu_pat, conv2d_relu_pat], False, True, False, 2)\n # conv + bias_add + bn + relu, conv + relu -> fused conv_bias_relu, and fused conv_relu\n test_detect_pattern([conv2d_bias_relu_pat, conv2d_relu_pat], True, True, False, 2)\n # conv + relu, conv + relu -> two fused conv_relu\n test_detect_pattern([conv2d_relu_pat], False, False, False, 2)\n # conv + relu, conv + relu -> no fusion, 4 partition each with a single op\n test_detect_pattern([conv2d_bias_relu_pat], False, False, False, 4)\n # conv + bn + sigmoid + relu, conv + sigmoid + relu -> no fusion\n test_detect_pattern([conv2d_bias_relu_pat, conv2d_relu_pat], False, True, True, 7)\n # conv + bias_add + bn + sigmoid + relu, conv + sigmoid + relu -> fused conv_bias\n # and single op sigmoid, relu, conv, sigmoid, relu\n test_detect_pattern([conv2d_bias_pat, conv2d_relu_pat], True, True, True, 6)\n # conv + bias_add + bn + sigmoid + relu, conv + sigmoid + relu -> fused conv_bias_sigmoid\n # and single op relu, conv, sigmoid, relu\n test_detect_pattern([conv2d_bias_sigmoid_pat, conv2d_relu_pat], True, True, True, 5)\n # conv + bias_add + bn + sigmoid + relu, conv + sigmoid + relu -> fused conv_bias_sigmoid,\n # fused conv_sigmoid and single op relu, relu\n test_detect_pattern([conv2d_bias_sigmoid_pat, conv2d_sigmoid_pat], True, True, True, 4)\n\n def test_partition_mobilenet():\n mod, params = relay.testing.mobilenet.get_workload()\n mod = get_partitoned_mod(mod, params, dnnl_patterns)\n # 27 fused conv + bn + relu, one dense and one softmax\n assert len(mod.functions) - 1 == 29 # -1 for main\n\n def test_exec(mod, params, ref_mod, ref_params, out_shape):\n ishape = (1, 3, 224, 224)\n i_data = np.random.randn(*ishape).astype(np.float32)\n ref_res = relay.create_executor(\"graph\", mod=ref_mod, device=tvm.cpu(0)).evaluate()(\n i_data, **ref_params\n )\n te_compiler.get().clear()\n\n mod = get_partitoned_mod(mod, params, dnnl_patterns)\n\n check_result(mod, {\"data\": i_data}, out_shape, ref_res.numpy(), tol=1e-5, params=params)\n\n test_partition()\n test_partition_mobilenet()\n\n if not tvm.get_global_func(\"relay.ext.dnnl\", True):\n print(\"skip because DNNL codegen is not available\")\n return\n\n net = get_net()\n mod, params = 
tvm.relay.testing.create_workload(net)\n ref_mod, ref_params = tvm.relay.testing.create_workload(net)\n test_exec(mod, params, ref_mod, ref_params, (1, 8, 224, 224))\n\n mod, params = relay.testing.mobilenet.get_workload()\n ref_mod, ref_params = relay.testing.mobilenet.get_workload()\n test_exec(mod, params, ref_mod, ref_params, (1, 1000))\n\n\ndef test_multiple_use_of_an_output():\n def expected_same_output_region():\n mod = tvm.IRModule()\n x = relay.var(\"x\", shape=(8, 8))\n y = relay.var(\"y\", shape=(8, 8))\n z = relay.var(\"z\", shape=(8, 8))\n x0 = relay.var(\"x0\", shape=(8, 8))\n y0 = relay.var(\"y0\", shape=(8, 8))\n log = relay.log(x0)\n sub = x0 - y0\n mul = log * sub\n # The partitioned graph contains log, subtract, and multiply\n func = relay.Function([x0, y0], mul)\n func = set_func_attr(func, \"ccompiler\", \"tvmgen_default_ccompiler_main_0\")\n glb_0 = relay.GlobalVar(\"tvmgen_default_ccompiler_main_0\")\n mod[glb_0] = func\n mod = transform.InferType()(mod)\n\n add = x + y\n call = relay.Call(glb_0, [add, z])\n main = relay.Function([x, y, z], call)\n mod[\"main\"] = main\n mod = transform.InferType()(mod)\n return mod\n\n def expected_different_output_region():\n mod = tvm.IRModule()\n x = relay.var(\"x\", shape=(8, 8))\n y = relay.var(\"y\", shape=(8, 8))\n z = relay.var(\"z\", shape=(8, 8))\n\n # The partitioned graph contains log\n i0 = relay.var(\"i0\", shape=(8, 8))\n log = relay.log(i0)\n func = relay.Function([i0], log)\n func = set_func_attr(func, \"ccompiler\", \"tvmgen_default_ccompiler_main_0\")\n glb_0 = relay.GlobalVar(\"tvmgen_default_ccompiler_main_0\")\n mod[glb_0] = func\n mod = transform.InferType()(mod)\n\n # The partitioned graph contains subtract\n x0 = relay.var(\"x0\", shape=(8, 8))\n y0 = relay.var(\"y0\", shape=(8, 8))\n sub = x0 - y0\n func = relay.Function([x0, y0], sub)\n func = set_func_attr(func, \"ccompiler\", \"tvmgen_default_ccompiler_main_1\")\n glb_1 = relay.GlobalVar(\"tvmgen_default_ccompiler_main_1\")\n mod[glb_1] = func\n mod = transform.InferType()(mod)\n\n add = x + y\n call_log = relay.Call(glb_0, [add])\n call_sub = relay.Call(glb_1, [add, z])\n main = relay.Function([x, y, z], call_log * call_sub)\n mod[\"main\"] = main\n mod = transform.InferType()(mod)\n return mod\n\n def get_mod():\n x = relay.var(\"x\", shape=(8, 8))\n y = relay.var(\"y\", shape=(8, 8))\n z = relay.var(\"z\", shape=(8, 8))\n add = x + y\n sub = add - z\n log = relay.log(add)\n sub1 = log * sub\n f = relay.Function([x, y, z], sub1)\n mod = tvm.IRModule()\n mod[\"main\"] = f\n return mod\n\n def test_same_output_region():\n mod = get_mod()\n mod = WhiteListAnnotator([\"subtract\", \"log\", \"multiply\"], \"ccompiler\")(mod)\n mod = transform.MergeCompilerRegions()(mod)\n mod = transform.PartitionGraph()(mod)\n\n expected_mod = expected_same_output_region()\n assert tvm.ir.structural_equal(mod, expected_mod, map_free_vars=True)\n\n def test_different_output_region():\n mod = get_mod()\n mod = WhiteListAnnotator([\"subtract\", \"log\"], \"ccompiler\")(mod)\n mod = transform.MergeCompilerRegions()(mod)\n mod = transform.PartitionGraph()(mod)\n\n expected_mod = expected_different_output_region()\n assert tvm.ir.structural_equal(mod, expected_mod, map_free_vars=True)\n\n test_same_output_region()\n test_different_output_region()\n\n\ndef test_duplicate_outputs():\n target = \"test_duplicate_outputs\"\n\n @tvm.ir.register_op_attr(\"abs\", \"target.\" + target)\n def abs(expr): # pylint: disable=unused-variable\n return True\n\n def create_graph():\n data = 
relay.var(\"data\", shape=(10, 10))\n x = relay.abs(data)\n out_1 = relay.nn.relu(x)\n out_2 = relay.tanh(x)\n out_3 = relay.log(x)\n out = relay.Tuple([out_1, out_2, out_3])\n func = relay.Function([data], out)\n return func\n\n def expected():\n mod = tvm.IRModule()\n\n # function 0\n f0_i0 = relay.var(target + \"_0_i0\", shape=(10, 10))\n f0_o0 = relay.abs(f0_i0)\n func0 = relay.Function([f0_i0], f0_o0)\n\n func0 = func0.with_attr(\"Primitive\", tvm.tir.IntImm(\"int32\", 1))\n func0 = func0.with_attr(\"Inline\", tvm.tir.IntImm(\"int32\", 1))\n func0 = func0.with_attr(\"Compiler\", target)\n func0 = func0.with_attr(\"global_symbol\", \"tvmgen_default_\" + target + \"_main_0\")\n gv0 = relay.GlobalVar(\"tvmgen_default_\" + target + \"_main_0\")\n mod[gv0] = func0\n mod = transform.InferType()(mod)\n\n # body\n data = relay.var(\"data\", shape=(10, 10))\n function_out = gv0(data)\n out_1 = relay.nn.relu(function_out)\n out_2 = relay.tanh(function_out)\n out_3 = relay.log(function_out)\n out = relay.Tuple([out_1, out_2, out_3])\n func = relay.Function([data], out)\n mod[\"main\"] = func\n mod = transform.InferType()(mod)\n return mod\n\n mod = tvm.IRModule()\n mod[\"main\"] = create_graph()\n\n seq = tvm.transform.Sequential(\n [\n transform.AnnotateTarget(target),\n transform.MergeCompilerRegions(),\n transform.PartitionGraph(),\n ]\n )\n\n ref_mod = expected()\n partitioned = seq(mod)\n assert tvm.ir.structural_equal(partitioned, ref_mod, map_free_vars=True)\n\n\ndef test_duplicate_merge_and_tuplegetitem():\n target = \"test_duplicate_merge_and_tuplegetitem\"\n\n @tvm.ir.register_op_attr(\"nn.batch_norm\", \"target.\" + target)\n def batch_norm(expr): # pylint: disable=unused-variable\n return True\n\n @tvm.ir.register_op_attr(\"nn.relu\", \"target.\" + target)\n def relu(expr): # pylint: disable=unused-variable\n return True\n\n def create_graph():\n data = relay.var(\"data\", shape=(10, 10))\n bn_gamma = relay.var(\"bn_gamma\")\n bn_beta = relay.var(\"bn_beta\")\n bn_mmean = relay.var(\"bn_mean\")\n bn_mvar = relay.var(\"bn_var\")\n x = relay.nn.batch_norm(data, bn_gamma, bn_beta, bn_mmean, bn_mvar)\n out_1 = relay.nn.relu(x[0])\n bn_out_1 = x[1]\n out_2 = relay.tanh(bn_out_1)\n out_3 = relay.log(bn_out_1)\n out = relay.Tuple([out_1, out_2, out_3])\n func = relay.Function([data, bn_gamma, bn_beta, bn_mmean, bn_mvar], out)\n return func\n\n def expected():\n mod = tvm.IRModule()\n\n # function 0\n f0_i0 = relay.var(target + \"_0_i0\", shape=(10, 10))\n f0_i1 = relay.var(target + \"_0_i1\")\n f0_i2 = relay.var(target + \"_0_i2\")\n f0_i3 = relay.var(target + \"_0_i3\")\n f0_i4 = relay.var(target + \"_0_i4\")\n f0_n0 = relay.nn.batch_norm(f0_i0, f0_i1, f0_i2, f0_i3, f0_i4)\n f0_n1 = f0_n0[1]\n f0_n2 = relay.nn.relu(f0_n0[0])\n f0_o0 = relay.Tuple([f0_n2, f0_n1])\n func0 = relay.Function([f0_i0, f0_i1, f0_i2, f0_i3, f0_i4], f0_o0)\n\n func0 = func0.with_attr(\"Primitive\", tvm.tir.IntImm(\"int32\", 1))\n func0 = func0.with_attr(\"Inline\", tvm.tir.IntImm(\"int32\", 1))\n func0 = func0.with_attr(\"Compiler\", target)\n func0 = func0.with_attr(\"global_symbol\", \"tvmgen_default_\" + target + \"_main_0\")\n gv0 = relay.GlobalVar(\"tvmgen_default_\" + target + \"_main_0\")\n mod[gv0] = func0\n mod = transform.InferType()(mod)\n\n # body\n data = relay.var(\"data\", shape=(10, 10))\n bn_gamma = relay.var(\"bn_gamma\")\n bn_beta = relay.var(\"bn_beta\")\n bn_mmean = relay.var(\"bn_mean\")\n bn_mvar = relay.var(\"bn_var\")\n function_out = gv0(data, bn_gamma, bn_beta, bn_mmean, bn_mvar)\n 
get_out0 = relay.TupleGetItem(function_out, 0)\n get_out1 = relay.TupleGetItem(function_out, 1)\n out_2 = relay.tanh(get_out1)\n out_3 = relay.log(get_out1)\n out = relay.Tuple([get_out0, out_2, out_3])\n func = relay.Function([data, bn_gamma, bn_beta, bn_mmean, bn_mvar], out)\n mod[\"main\"] = func\n mod = transform.InferType()(mod)\n return mod\n\n mod = tvm.IRModule()\n mod[\"main\"] = create_graph()\n mod = transform.InferType()(mod)\n\n seq = tvm.transform.Sequential(\n [\n transform.AnnotateTarget(target),\n transform.MergeCompilerRegions(),\n transform.PartitionGraph(),\n ]\n )\n\n ref_mod = expected()\n partitioned = seq(mod)\n assert tvm.ir.structural_equal(partitioned, ref_mod, map_free_vars=True)\n\n\ndef test_constant_tuples():\n @tvm.ir.register_op_attr(\"qnn.concatenate\", \"target.const_tuples\")\n def add(expr): # pylint: disable=unused-variable\n return True\n\n def create_graph():\n a = relay.var(\"a\", shape=(10, 10), dtype=\"uint8\")\n b = relay.var(\"b\", shape=(10, 10), dtype=\"uint8\")\n a1 = relay.abs(a)\n\n zeroi = relay.const(1, \"int32\")\n zerof = relay.const(0, \"float32\")\n con = relay.qnn.op.concatenate(\n (a1, b),\n input_scales=(zerof, zerof),\n input_zero_points=(zeroi, zeroi),\n output_scale=zerof,\n output_zero_point=zeroi,\n axis=1,\n )\n\n f = relay.Function([a, b], con)\n mod = tvm.IRModule.from_expr(f)\n mod = transform.InferType()(mod)\n return mod\n\n seq = tvm.transform.Sequential(\n [\n transform.AnnotateTarget(\"const_tuples\"),\n transform.InferType(),\n transform.MergeCompilerRegions(),\n transform.PartitionGraph(),\n ]\n )\n\n partitioned = seq(create_graph())\n\n concat = partitioned[\"tvmgen_default_const_tuples_main_0\"].body\n assert type(concat.args[1]) == relay.Tuple\n assert type(concat.args[2]) == relay.Tuple\n assert type(concat.args[3]) == relay.Constant\n assert type(concat.args[4]) == relay.Constant\n\n\ndef test_flatten_tuple_output():\n target = \"test_flatten_tuple_output\"\n\n @tvm.ir.register_op_attr(\"split\", \"target.\" + target)\n def split(expr): # pylint: disable=unused-variable\n return True\n\n @tvm.ir.register_op_attr(\"abs\", \"target.\" + target)\n def abs(expr): # pylint: disable=unused-variable\n return True\n\n def create_graph():\n a = relay.var(\"a\", shape=(10, 10), dtype=\"uint8\")\n\n a_split = relay.split(a, 2)\n a_split_0 = relay.TupleGetItem(a_split.astuple(), 0)\n a_split_0_abs = relay.abs(a_split_0)\n\n a_con = relay.concatenate(a_split, 0)\n a_split_0_relu = relay.nn.relu(a_split_0_abs)\n\n out = relay.Tuple((a_con, a_split_0_relu))\n f = relay.Function([a], out)\n mod = tvm.IRModule.from_expr(f)\n mod = transform.InferType()(mod)\n return mod\n\n def expected():\n mod = tvm.IRModule()\n\n # function 0\n f0_i0 = relay.var(target + \"_0_i0\", shape=(10, 10), dtype=\"uint8\")\n a_split = relay.split(f0_i0, 2)\n a_split_0 = relay.TupleGetItem(a_split.astuple(), 0)\n a_split_1 = relay.TupleGetItem(a_split.astuple(), 1)\n a_split_abs_in = relay.TupleGetItem(a_split.astuple(), 0)\n abs = relay.abs(a_split_abs_in)\n tuple_out = relay.Tuple((a_split_0, a_split_1, abs))\n func0 = relay.Function([f0_i0], tuple_out)\n\n func0 = func0.with_attr(\"Primitive\", tvm.tir.IntImm(\"int32\", 1))\n func0 = func0.with_attr(\"Inline\", tvm.tir.IntImm(\"int32\", 1))\n func0 = func0.with_attr(\"Compiler\", target)\n func0 = func0.with_attr(\"global_symbol\", \"tvmgen_default_\" + target + \"_main_0\")\n gv0 = relay.GlobalVar(\"tvmgen_default_\" + target + \"_main_0\")\n mod[gv0] = func0\n mod = 
transform.InferType()(mod)\n\n # body\n data = relay.var(\"a\", shape=(10, 10), dtype=\"uint8\")\n f_out = gv0(data)\n f_out_0 = relay.TupleGetItem(f_out, 0)\n f_out_1 = relay.TupleGetItem(f_out, 1)\n tuple = relay.Tuple((f_out_0, f_out_1))\n concat = relay.concatenate(tuple, 0)\n f_out_2 = relay.TupleGetItem(f_out, 2)\n relu = relay.nn.relu(f_out_2)\n ret_tuple = relay.Tuple((concat, relu))\n mod[\"main\"] = relay.Function([data], ret_tuple)\n mod = transform.InferType()(mod)\n return mod\n\n seq = tvm.transform.Sequential(\n [\n transform.AnnotateTarget(target),\n transform.MergeCompilerRegions(),\n transform.PartitionGraph(),\n ]\n )\n\n partitioned = seq(create_graph())\n partitioned = transform.InferType()(partitioned)\n expected_mod = transform.InferType()(expected())\n assert tvm.ir.structural_equal(partitioned, expected_mod, map_free_vars=True)\n\n\ndef test_tuple_output_exec():\n \"\"\"Test C codegen and runtime for a subgraph with a tuple output\"\"\"\n a = relay.var(\"a\", shape=(10, 10), dtype=\"float32\")\n b = relay.var(\"b\", shape=(10, 10), dtype=\"float32\")\n ba = relay.annotation.compiler_begin(a, \"ccompiler\")\n bb = relay.annotation.compiler_begin(b, \"ccompiler\")\n add = relay.add(ba, bb)\n sub = relay.subtract(ba, bb)\n out = relay.Tuple((add, sub))\n eout = relay.annotation.compiler_end(out, \"ccompiler\")\n func = relay.Function([a, b], eout)\n\n mod = tvm.IRModule()\n mod[\"main\"] = func\n mod = transform.InferType()(mod)\n mod = transform.PartitionGraph()(mod)\n\n a_data = np.random.rand(10, 10).astype(\"float32\")\n b_data = np.random.rand(10, 10).astype(\"float32\")\n\n check_result(\n mod,\n {\"a\": a_data, \"b\": b_data},\n [(10, 10), (10, 10)],\n [(a_data + b_data), (a_data - b_data)],\n )\n\n\ndef test_extern_opt():\n def Optimize(mod):\n return relay.transform.FoldConstant()(mod)\n\n tvm.register_func(\"relay.ext.test_target.optimize\", Optimize)\n\n x = relay.var(\"x\", shape=(2, 2))\n y0 = relay.var(\"y0\", shape=(2, 2))\n y1 = relay.var(\"y1\", shape=(2, 2))\n yy0 = relay.annotation.compiler_begin(y0, \"test_target\")\n yy1 = relay.annotation.compiler_begin(y1, \"test_target\")\n z = yy0 + yy1\n end = relay.annotation.compiler_end(z, \"test_target\")\n f = relay.Function([x, y0, y1], end * x)\n c = np.ones(shape=(2, 2), dtype=\"float32\")\n f = bind_params_by_name(f, {\"y0\": tvm.nd.array(c), \"y1\": tvm.nd.array(c)})\n mod = tvm.IRModule()\n mod[\"main\"] = f\n mod = transform.InferType()(mod)\n mod = transform.PartitionGraph()(mod)\n\n try:\n t0 = mod[\"tvmgen_default_test_target_main_0\"]\n except:\n raise KeyError(\"test_target_main_0 not found\")\n\n assert isinstance(t0.body, relay.Constant)\n expected = np.empty([2, 2])\n expected.fill(2)\n tvm.testing.assert_allclose(t0.body.data.numpy(), expected, rtol=1e-5, atol=1e-5)\n\n\ndef test_preserve_type_import():\n \"\"\"Test to make sure type definition and imports are preserved during the BYOC pipeline.\"\"\"\n from tvm.relay.prelude import Prelude, StaticTensorArrayOps\n\n def run(dtype, shape):\n mod = tvm.IRModule()\n p = Prelude(mod)\n static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)\n static_tensor_array_ops.register()\n\n tensor_array = p.get_global_var_static(\"tensor_array\", dtype, shape)\n tensor = p.get_tensor_ctor_static(\"tensor_constructor\", dtype, shape)\n write = p.get_global_var_static(\"tensor_array_write\", dtype, shape)\n gather = p.get_global_var_static(\"tensor_array_gather\", dtype, shape)\n v = relay.var(\"v\")\n indice = relay.var(\"indice\")\n 
init_tensor_array = tensor_array(relay.const(3))\n tensor_array1 = write(init_tensor_array, relay.const(0), tensor(v))\n tensor_array2 = write(tensor_array1, relay.const(1), tensor(v))\n tensor_array3 = write(tensor_array2, relay.const(2), tensor(v))\n out = gather(tensor_array3, indice)\n mod[\"main\"] = relay.Function([v, indice], out)\n mod = transform.RemoveUnusedFunctions()(mod)\n mod = transform.PartitionGraph()(mod)\n\n run(\"float32\", [2, 3])\n\n\ndef test_not_bind_constant():\n def get_net(prefix, data, out_channel):\n weight = relay.var(prefix + \"weight\")\n bn_gamma = relay.var(prefix + \"bn_gamma\")\n bn_beta = relay.var(prefix + \"bn_beta\")\n bn_mmean = relay.var(prefix + \"bn_mean\")\n bn_mvar = relay.var(prefix + \"bn_var\")\n\n layer = relay.nn.conv2d(\n data=data, weight=weight, kernel_size=(3, 3), channels=out_channel, padding=(1, 1)\n )\n bn_output = relay.nn.batch_norm(layer, bn_gamma, bn_beta, bn_mmean, bn_mvar)\n out = relay.nn.relu(bn_output[0])\n return relay.Function(relay.analysis.free_vars(out), out)\n\n def get_partitoned_mod(mod, params, pattern_table, bind_constants):\n mod[\"main\"] = bind_params_by_name(mod[\"main\"], params)\n remove_bn_pass = tvm.transform.Sequential(\n [\n transform.InferType(),\n transform.SimplifyInference(),\n transform.FoldConstant(),\n transform.FoldScaleAxis(),\n ]\n )\n composite_partition = tvm.transform.Sequential(\n [\n remove_bn_pass,\n transform.MergeComposite(pattern_table),\n transform.AnnotateTarget(\"dnnl\"),\n transform.PartitionGraph(bind_constants=bind_constants),\n ]\n )\n\n with tvm.transform.PassContext(opt_level=3, disabled_pass=[\"AlterOpLayout\"]):\n return composite_partition(mod)\n\n data = relay.var(\"data\", relay.TensorType((1, 3, 224, 224), \"float32\"))\n net = get_net(\"block_\", data, 8)\n mod, params = tvm.relay.testing.create_workload(net)\n\n mod = get_partitoned_mod(mod, params, get_pattern_table(\"dnnl\"), bind_constants=True)\n len(mod[\"main\"].body.args) == 1\n\n mod = get_partitoned_mod(mod, params, get_pattern_table(\"dnnl\"), bind_constants=False)\n len(mod[\"main\"].body.args) == 3\n\n\nif __name__ == \"__main__\":\n test_multi_node_compiler()\n test_extern_ccompiler_single_op()\n test_extern_ccompiler_default_ops()\n test_extern_ccompiler_multiple_functions()\n test_extern_ccompiler()\n test_extern_dnnl()\n test_extern_dnnl_mobilenet()\n test_function_lifting()\n test_function_lifting_inline()\n test_constant_propagation()\n test_multiple_outputs()\n test_mixed_single_multiple_outputs()\n test_dnnl_fuse()\n test_multiple_use_of_an_output()\n test_duplicate_outputs()\n test_duplicate_merge_and_tuplegetitem()\n test_constant_tuples()\n test_flatten_tuple_output()\n test_tuple_output_exec()\n test_extern_opt()\n test_not_bind_constant()\n",
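For reference, a minimal sketch of the BYOC partitioning flow that the test file above exercises: annotate a single op for an external codegen and run PartitionGraph to lift the annotated region into its own Compiler-tagged function. This snippet is illustrative only and is not part of the embedded test suite; it assumes a TVM installation with the Relay BYOC passes available and reuses the "ccompiler" target name that appears in the tests.

import tvm
from tvm import relay
from tvm.relay import transform

# Build a tiny graph and mark the add for the "ccompiler" external codegen.
x = relay.var("x", shape=(8, 8))
y = relay.var("y", shape=(8, 8))
add = relay.add(
    relay.annotation.compiler_begin(x, "ccompiler"),
    relay.annotation.compiler_begin(y, "ccompiler"),
)
out = relay.annotation.compiler_end(add, "ccompiler")

mod = tvm.IRModule.from_expr(relay.Function([x, y], out))
mod = transform.InferType()(mod)
mod = transform.PartitionGraph()(mod)

# The annotated region now lives in a separate function carrying
# the Compiler="ccompiler" attribute, as the expected() helpers above assert.
print(mod)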
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=ungrouped-imports, import-outside-toplevel\n\"\"\"Arm(R) Ethos(TM)-U NPU supported operators.\"\"\"\nimport functools\nfrom typing import Dict, List, Tuple, Callable, Optional\n\nimport numpy as np # type: ignore\n\nimport tvm # type: ignore\nfrom tvm import relay\nfrom tvm.relay.expr import Constant, Call # type: ignore\nfrom tvm.relay.op.contrib.register import register_pattern_table # type: ignore\nfrom tvm.relay.dataflow_pattern import wildcard, is_op, is_constant, is_tuple # type: ignore\nfrom tvm.relay.build_module import bind_params_by_name # type: ignore\n\ntry:\n # As ethos-u-vela package is an optional TVM dependency, we want to lazy load it\n # and check whether it is installed or not.\n #\n # In order to show the appropriate error messages when we try to invoke code that\n # rely on imports from ethos-u-vela, we protect them with the decorator @requires_vela\n # implemented below.\n from ethosu.vela import api as vapi # type: ignore\nexcept ImportError:\n vapi = None\n\n\ndef requires_vela(func):\n \"\"\"Decorator to check whether we have the required dependency ethos-u-vela\n installed as a python package\"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n if not vapi:\n raise ImportError(\n \"The 'ethos-u-vela' python package is required for the Arm(R) Ethos(TM)-U NPU \"\n \"backend. 
Please install the dependency using your Python package manager.\"\n ) from None\n return func(*args, **kwargs)\n\n return wrapper\n\n\nclass TensorParams:\n \"\"\"\n This class will parse a tvm Expr along with quantization scale\n and zero point to populate parameters that are required\n for the creation of tensors in Vela.\n \"\"\"\n\n @requires_vela\n def __init__(self, tensor, layout=None, scale=None, zero_point=None):\n self.tensor = tensor\n if isinstance(tensor, Constant):\n self.values = tensor.data.asnumpy()\n else:\n self.values = None\n self.dtype = tensor.checked_type.dtype\n self.shape = [int(i) for i in tensor.checked_type.shape]\n self.layout = layout\n\n if scale is not None and zero_point is not None:\n self.q_params = vapi.NpuQuantization(\n scale.data.asnumpy().astype(\"float32\"), zero_point.data.asnumpy().astype(self.dtype)\n )\n else:\n # put default values\n self.q_params = vapi.NpuQuantization(1.0, 0)\n\n\ndef check_strides(strides: List[int], stride_range=None) -> bool:\n \"\"\"This function checks whether strides are within the limits supported by the NPU\"\"\"\n if stride_range is None:\n stride_range = (1, 3)\n smin, smax = stride_range\n if not smax >= strides[0] >= smin:\n return False\n if not smax >= strides[1] >= smin:\n return False\n return True\n\n\ndef check_valid_dtypes(tensor_params: List[TensorParams], supported_dtypes: List[type]) -> bool:\n \"\"\"This function checks whether dtypes are supported by the NPU\"\"\"\n for tep in tensor_params:\n # Check for dtypes\n if np.dtype(tep.dtype) not in supported_dtypes:\n return False\n # Check for shape sizes\n if any(dimlen > 65536 for dimlen in tep.shape):\n return False\n return True\n\n\ndef check_weights(weights: TensorParams, dilation: List[int]):\n \"\"\"This function checks whether weight tensor is compatible with the NPU\"\"\"\n from tvm.relay.backend.contrib.ethosu.util import get_dim_value\n\n dilated_height_range = (1, 64)\n dilated_hxw_range = (1, 64 * 64)\n weights_limit = 127 * 65536\n dilated_width = (weights.shape[get_dim_value(weights.layout, \"W\")] - 1) * dilation[0] + 1\n dilated_height = (weights.shape[get_dim_value(weights.layout, \"H\")] - 1) * dilation[1] + 1\n dh_min, dh_max = dilated_height_range\n if not dh_min <= dilated_height <= dh_max:\n return False\n dilated_hxw = dilated_height * dilated_width\n dhxw_min, dhxw_max = dilated_hxw_range\n if not dhxw_min <= dilated_hxw <= dhxw_max:\n return False\n # A saturation upper bound check for accumulators\n weights.values = weights.values - weights.q_params.zero_point\n axis = (\n get_dim_value(weights.layout, \"H\"),\n get_dim_value(weights.layout, \"W\"),\n get_dim_value(weights.layout, \"I\"),\n )\n sum_weights = np.amax(np.sum(np.absolute(weights.values), axis=axis))\n return sum_weights <= weights_limit\n\n\ndef check_bias(bias: TensorParams):\n \"\"\"This function checks whether the bias values fit in 40 bits\"\"\"\n if bias and bias.dtype == np.dtype(\"int64\"):\n valid = all(len(bin(bias_value)[2:]) <= 40 for bias_value in bias.values)\n return valid\n return True\n\n\ndef check_batch_size(ifm: TensorParams):\n \"\"\"This function checks for the number of batches vela currently supports\"\"\"\n return ifm.shape[0] == 1\n\n\ndef check_dilation(dilation: List[int], dilation_range=None):\n \"\"\"This function checks whether dilation is within the limits supported by the NPU\"\"\"\n if dilation_range is None:\n dilation_range = (1, 2)\n dmin, dmax = dilation_range\n if not dmin <= dilation[0] <= dmax:\n return False\n if not dmin 
<= dilation[1] <= dmax:\n return False\n return True\n\n\ndef check_padding(padding: List[int], bounds: List[int]):\n \"\"\"This function checks whether padding is within the limits supported by the NPU\"\"\"\n if len(padding) != 4 or len(bounds) != 4:\n return False\n top, left, bottom, right = padding\n topb, leftb, bottomb, rightb = bounds\n return not (top > topb or left > leftb or bottom > bottomb or right > rightb)\n\n\ndef check_pool_shape(pool_shape: tvm.ir.container.Array) -> bool:\n if len(pool_shape) != 2:\n return False\n if pool_shape[1] > 256:\n return False\n if pool_shape[0] * pool_shape[1] > 256 * 256:\n return False\n return True\n\n\ndef check_dimensions(tensor: TensorParams):\n \"\"\"This function checks that the tensor has no more than 4 dimensions\"\"\"\n return len(tensor.shape) <= 4\n\n\nclass QnnConv2DParams:\n \"\"\"\n This class will parse a Call to a ethosu.qnn_conv2d composite function\n and extract quantization information of all the associated tensors.\n \"\"\"\n\n composite_name = \"ethos-u.qnn_conv2d\"\n # The NPU only supports padding upto the numbers as follows\n padding_bounds = [31, 31, 32, 32]\n activation_map = {\"clip\": \"CLIP\"}\n\n @requires_vela\n def __init__(self, func_body: tvm.relay.Function):\n from tvm.relay.backend.contrib.ethosu.util import QConv2DArgs # type: ignore\n from tvm.relay.backend.contrib.ethosu.util import BiasAddArgs\n from tvm.relay.backend.contrib.ethosu.util import RequantArgs\n\n activation = None\n if str(func_body.op) in self.activation_map.keys():\n activation = func_body\n requantize_op = activation.args[0]\n else:\n requantize_op = func_body\n bias_add = requantize_op.args[0]\n qnn_conv2d = bias_add.args[0]\n data_layout = qnn_conv2d.attrs.data_layout\n self.kernel_layout = qnn_conv2d.attrs.kernel_layout\n # We consider the weights & biases as params as it should be a Constant\n self.weights = TensorParams(\n qnn_conv2d.args[QConv2DArgs.WEIGHTS.value],\n self.kernel_layout,\n qnn_conv2d.args[QConv2DArgs.WEIGHTS_SCALE.value],\n qnn_conv2d.args[QConv2DArgs.WEIGHTS_ZERO_POINT.value],\n )\n\n self.biases = TensorParams(\n bias_add.args[BiasAddArgs.BIASES.value],\n data_layout,\n requantize_op.args[RequantArgs.IFM_SCALE.value],\n requantize_op.args[RequantArgs.IFM_ZERO_POINT.value],\n )\n self.ifm = TensorParams(\n qnn_conv2d.args[QConv2DArgs.IFM.value],\n data_layout,\n qnn_conv2d.args[QConv2DArgs.IFM_SCALE.value],\n qnn_conv2d.args[QConv2DArgs.IFM_ZERO_POINT.value],\n )\n self.ofm = TensorParams(\n func_body,\n data_layout,\n requantize_op.args[RequantArgs.OFM_SCALE.value],\n requantize_op.args[RequantArgs.OFM_ZERO_POINT.value],\n )\n attrs = qnn_conv2d.attrs\n self.padding = attrs.padding\n self.strides = attrs.strides\n self.dilation = attrs.dilation\n self.activation = activation\n self.channels = attrs.channels\n\n # If groups are equal to channel, its a depthwise_conv2d\n self.groups = attrs.groups\n self.is_depthwise = False\n channels_axis = {\"HWIO\": 3, \"HWOI\": 2}\n if self.groups == self.weights.shape[channels_axis[self.kernel_layout]]:\n self.is_depthwise = True\n\n def is_valid(self) -> bool:\n \"\"\"\n This function checks whether QnnConv2D has compatible attributes with the NPU\n \"\"\"\n tensor_params = [self.weights, self.ifm, self.ofm]\n if not check_valid_dtypes(tensor_params, supported_dtypes=[np.uint8, np.int8]):\n return False\n if not check_weights(self.weights, self.dilation):\n return False\n if not check_bias(self.biases):\n return False\n if not check_strides(self.strides):\n return False\n 
if not check_batch_size(self.ifm):\n return False\n if not check_dilation(self.dilation):\n return False\n if not check_padding(self.padding, self.padding_bounds):\n return False\n legal_groups = [1, self.ofm.shape[3]]\n if self.groups not in legal_groups:\n return False\n # This should be a valid QnnDepthwiseConv2DParams, not QnnConv2DParams\n return not self.is_depthwise\n\n\nclass QnnConv2DTransposeParams:\n \"\"\"\n This class will parse a Call to a ethosu.qnn_conv2d_transpose composite\n function and extract quantization information of all the associated tensors.\n \"\"\"\n\n composite_name = \"ethos-u.qnn_conv2d_transpose\"\n # The NPU only supports padding upto the numbers as follows\n padding_bounds = [31, 31, 32, 32]\n\n @requires_vela\n def __init__(self, func_body: tvm.relay.Function):\n from tvm.relay.backend.contrib.ethosu.util import QConv2DTransposeArgs # type: ignore\n from tvm.relay.backend.contrib.ethosu.util import BiasAddArgs\n from tvm.relay.backend.contrib.ethosu.util import RequantArgs\n\n requantize = func_body\n call = func_body.args[0]\n if str(call.op) == \"nn.bias_add\":\n bias_add = call\n call = call.args[0]\n else:\n bias_add = None\n qnn_conv2d_transpose = call\n\n data_layout = qnn_conv2d_transpose.attrs.data_layout\n self.kernel_layout = qnn_conv2d_transpose.attrs.kernel_layout\n\n self.weights = TensorParams(\n qnn_conv2d_transpose.args[QConv2DTransposeArgs.WEIGHTS.value],\n self.kernel_layout,\n qnn_conv2d_transpose.args[QConv2DTransposeArgs.WEIGHTS_SCALE.value],\n qnn_conv2d_transpose.args[QConv2DTransposeArgs.WEIGHTS_ZERO_POINT.value],\n )\n self.biases = (\n TensorParams(\n bias_add.args[BiasAddArgs.BIASES.value],\n data_layout,\n requantize.args[RequantArgs.IFM_SCALE.value],\n requantize.args[RequantArgs.IFM_ZERO_POINT.value],\n )\n if bias_add\n else None\n )\n self.ifm = TensorParams(\n qnn_conv2d_transpose.args[QConv2DTransposeArgs.IFM.value],\n data_layout,\n qnn_conv2d_transpose.args[QConv2DTransposeArgs.IFM_SCALE.value],\n qnn_conv2d_transpose.args[QConv2DTransposeArgs.IFM_ZERO_POINT.value],\n )\n self.ofm = TensorParams(\n func_body,\n data_layout,\n requantize.args[RequantArgs.OFM_SCALE.value],\n requantize.args[RequantArgs.OFM_ZERO_POINT.value],\n )\n\n attrs = qnn_conv2d_transpose.attrs\n self.strides = attrs.strides\n self.dilation = attrs.dilation\n self.padding = attrs.padding\n self.channels = attrs.channels\n self.groups = attrs.groups\n self.output_padding = attrs.output_padding\n\n kernel_size_map = {\n \"IOHW\": self.weights.shape[2:4],\n }\n self.kernel_shape = kernel_size_map[str(self.weights.layout)]\n\n # Different padding is used in the legalization from conv2d_transpose\n # to conv2d, so we to calculate it here to check that the new size fits\n # within the bounds of the NPU before offloading.\n pad_top = int(self.kernel_shape[0]) - 1 - int(self.padding[0])\n pad_left = int(self.kernel_shape[1]) - 1 - int(self.padding[1])\n pad_bottom = int(self.kernel_shape[0]) - 1 - int(self.padding[2])\n pad_right = int(self.kernel_shape[1]) - 1 - int(self.padding[3])\n if self.strides == [2, 2]:\n pad_bottom -= 1\n pad_right -= 1\n self.legalize_padding = [pad_top, pad_left, pad_bottom, pad_right]\n\n def is_valid(self) -> bool:\n \"\"\"\n This function checks whether QnnConv2D has compatible attributes with the NPU\n \"\"\"\n\n def check_compatible_output_size(ifm_shape, ofm_shape, padding, strides, kernel_shape):\n is_valid_padding = padding == [0, 0, 0, 0]\n if is_valid_padding:\n expected_height = ifm_shape[1] * strides[0] + 
(kernel_shape[0] - strides[0])\n expected_width = ifm_shape[2] * strides[1] + (kernel_shape[1] - strides[1])\n else:\n expected_height = ifm_shape[1] * strides[0]\n expected_width = ifm_shape[2] * strides[1]\n return ofm_shape[1] == expected_height and ofm_shape[2] == expected_width\n\n tensor_params = [self.weights, self.ifm, self.ofm]\n if not check_valid_dtypes(tensor_params, supported_dtypes=[np.int8]):\n return False\n if not check_weights(self.weights, self.dilation):\n return False\n if self.biases and not check_bias(self.biases):\n return False\n if not check_strides(self.strides, stride_range=(2, 2)):\n return False\n if not check_batch_size(self.ifm):\n return False\n if not check_dilation(self.dilation, dilation_range=(1, 1)):\n return False\n if not check_compatible_output_size(\n self.ifm.shape,\n self.ofm.shape,\n [int(x) for x in self.padding],\n self.strides,\n self.kernel_shape,\n ):\n return False\n if not check_padding(self.legalize_padding, self.padding_bounds):\n return False\n if self.kernel_shape[0] - 2 - int(self.padding[2]) < 0:\n return False\n if self.kernel_shape[1] - 2 - int(self.padding[3]) < 0:\n return False\n if self.groups != 1:\n return False\n if list(self.output_padding) != [0, 0]:\n return False\n return True\n\n\nclass QnnDepthwiseConv2DParams(QnnConv2DParams):\n \"\"\"\n This class will parse a call to a ethosu.depthwise_conv2d composite function\n and extract the parameter information.\n \"\"\"\n\n composite_name = \"ethos-u.depthwise_conv2d\"\n # The hardware only supports padding upto the numbers as follows\n padding_bounds = [31, 31, 32, 32]\n\n def __init__(self, func_body: tvm.relay.expr.Call):\n QnnConv2DParams.__init__(self, func_body)\n\n def is_valid(self):\n \"\"\"\n Checks whether QnnDepthwiseConv2D + activation function has compatible attributes with HW\n \"\"\"\n tensor_params = [self.weights, self.ifm, self.ofm]\n if not check_valid_dtypes(tensor_params, supported_dtypes=[np.uint8, np.int8]):\n return False\n if not check_weights(self.weights, self.dilation):\n return False\n if not check_bias(self.biases):\n return False\n if not check_strides(self.strides):\n return False\n if not check_batch_size(self.ifm):\n return False\n if not check_dilation(self.dilation):\n return False\n if not check_padding(self.padding, self.padding_bounds):\n return False\n if self.weights.layout != \"HWOI\":\n return False\n # only depth multiplier of size 1 is supported\n if self.weights.shape[3] != 1:\n return False\n if not self.is_depthwise:\n return False\n return True\n\n\ndef qnn_conv2d_pattern() -> tvm.relay.dataflow_pattern.DFPattern:\n \"\"\"\n This function creates the pattern for qnn.conv2D with optional fused RELU activation.\n \"\"\"\n qnn_conv2d = is_op(\"qnn.conv2d\")(\n wildcard(), is_constant(), is_constant(), is_constant(), is_constant(), is_constant()\n ).has_attr({\"kernel_layout\": \"HWIO\"})\n bias_add = is_op(\"nn.bias_add\")(qnn_conv2d, is_constant())\n req = is_op(\"qnn.requantize\")(\n bias_add, is_constant(), is_constant(), is_constant(), is_constant()\n )\n clip_or_req = req.optional(is_op(\"clip\"))\n return clip_or_req\n\n\ndef qnn_depthwise_conv2d_pattern() -> tvm.relay.dataflow_pattern.DFPattern:\n \"\"\"\n This function creates the pattern for depthwise qnn.conv2D with optional fused RELU activation.\n \"\"\"\n qnn_conv2d = is_op(\"qnn.conv2d\")(\n wildcard(), is_constant(), is_constant(), is_constant(), is_constant(), is_constant()\n ).has_attr({\"kernel_layout\": \"HWOI\"})\n bias_add = 
is_op(\"nn.bias_add\")(qnn_conv2d, is_constant())\n req = is_op(\"qnn.requantize\")(\n bias_add, is_constant(), is_constant(), is_constant(), is_constant()\n )\n clip_or_req = req.optional(is_op(\"clip\"))\n return clip_or_req\n\n\ndef qnn_conv2d_transpose_pattern() -> tvm.relay.dataflow_pattern.DFPattern:\n \"\"\"\n This function creates the pattern for qnn.conv2d_transpose.\n \"\"\"\n qnn_conv2d_transpose = is_op(\"qnn.conv2d_transpose\")(\n wildcard(), is_constant(), is_constant(), is_constant(), is_constant(), is_constant()\n ).has_attr({\"kernel_layout\": \"IOHW\"})\n optional_bias_add = (\n is_op(\"nn.bias_add\")(qnn_conv2d_transpose, is_constant()) | qnn_conv2d_transpose\n )\n req = is_op(\"qnn.requantize\")(\n optional_bias_add, is_constant(), is_constant(), is_constant(), is_constant()\n )\n return req\n\n\nclass MaxPool2DParams:\n \"\"\"\n This class will parse a call to a ethos-u.maxpool2d composite function\n and extract the parameter information.\n \"\"\"\n\n composite_name = \"ethos-u.maxpool2d\"\n # The hardware only supports padding upto the numbers as follows\n padding_bounds = [127, 127, 128, 128]\n\n def __init__(self, func_body: Call):\n clip = None\n if str(func_body.op) == \"clip\":\n clip = func_body\n pool_op = clip.args[0]\n else:\n pool_op = func_body\n\n attrs = pool_op.attrs\n self.ifm = TensorParams(pool_op.args[0], attrs.layout)\n self.ofm = TensorParams(pool_op, attrs.layout)\n self.pool_shape = attrs.pool_size\n self.strides = attrs.strides\n self.padding = attrs.padding\n self.activation = clip\n self.pooling_type = \"MAX\"\n\n def is_valid(self):\n \"\"\"\n This function checks whether MaxPool2D has compatible attributes with the NPU\n \"\"\"\n tensor_params = [self.ifm, self.ofm]\n if not check_valid_dtypes(tensor_params, supported_dtypes=[np.uint8, np.int8]):\n return False\n if self.ifm.dtype != self.ofm.dtype:\n return False\n if not check_strides(self.strides):\n return False\n if not check_batch_size(self.ifm):\n return False\n if not check_padding(self.padding, self.padding_bounds):\n return False\n if not check_pool_shape(self.pool_shape):\n return False\n return True\n\n\ndef qnn_maxpool2d_pattern() -> tvm.relay.dataflow_pattern.DFPattern:\n \"\"\"\n This function creates the pattern for nn.max_pool2d with optional fused RELU activation.\n \"\"\"\n pattern = is_op(\"nn.max_pool2d\")(wildcard())\n pattern = pattern.optional(is_op(\"clip\"))\n return pattern\n\n\nclass AvgPool2DParams:\n \"\"\"\n This class will parse a call to a ethos-u.avgpool2d composite function\n and extract the parameter information.\n \"\"\"\n\n composite_name = \"ethos-u.avgpool2d\"\n # The hardware only supports padding upto the numbers as follows\n padding_bounds = [127, 127, 128, 128]\n\n def __init__(self, func_body: Call):\n clip = None\n if str(func_body.op) == \"clip\":\n clip = func_body\n cast2 = clip.args[0]\n else:\n cast2 = func_body\n\n avgpool = cast2.args[0]\n cast1 = avgpool.args[0]\n\n attrs = avgpool.attrs\n self.ifm = TensorParams(cast1.args[0], attrs.layout)\n self.ofm = TensorParams(cast2, attrs.layout)\n self.pool_shape = attrs.pool_size\n self.strides = attrs.strides\n self.padding = attrs.padding\n self.activation = clip\n self.pooling_type = \"AVG\"\n\n def is_valid(self):\n \"\"\"\n This function checks whether AvgPool2D has compatible attributes with the NPU\n \"\"\"\n tensor_params = [self.ifm, self.ofm]\n if not check_valid_dtypes(tensor_params, supported_dtypes=[np.uint8, np.int8]):\n return False\n if self.ifm.dtype != self.ofm.dtype:\n return 
False\n if not check_strides(self.strides):\n return False\n if not check_batch_size(self.ifm):\n return False\n if not check_padding(self.padding, self.padding_bounds):\n return False\n if not check_pool_shape(self.pool_shape):\n return False\n return True\n\n\ndef qnn_avgpool2d_pattern() -> tvm.relay.dataflow_pattern.DFPattern:\n \"\"\"\n This function creates the pattern for nn.avg_pool2d with optional fused RELU activation.\n \"\"\"\n pattern = is_op(\"cast\")(wildcard())\n pattern = is_op(\"nn.avg_pool2d\")(pattern)\n pattern = is_op(\"cast\")(pattern)\n pattern = pattern.optional(is_op(\"clip\"))\n return pattern\n\n\nclass BinaryElementwiseParams:\n \"\"\"\n This class will parse a call to a ethosu.binary_elementwise composite function\n and extract the parameter information.\n \"\"\"\n\n def __init__(self, func_body: Call, operator_type: str, has_quantization_parameters: bool):\n from tvm.relay.backend.contrib.ethosu.util import BinaryElementwiseArgs\n\n clip = None\n if str(func_body.op) == \"clip\":\n clip = func_body\n binary_op = clip.args[0]\n else:\n binary_op = func_body\n\n layout = \"NHWC\"\n\n if has_quantization_parameters:\n self.ifm = TensorParams(\n binary_op.args[BinaryElementwiseArgs.IFM.value],\n layout,\n binary_op.args[BinaryElementwiseArgs.IFM_SCALE.value],\n binary_op.args[BinaryElementwiseArgs.IFM_ZERO_POINT.value],\n )\n self.ifm2 = TensorParams(\n binary_op.args[BinaryElementwiseArgs.IFM2.value],\n layout,\n binary_op.args[BinaryElementwiseArgs.IFM2_SCALE.value],\n binary_op.args[BinaryElementwiseArgs.IFM2_ZERO_POINT.value],\n )\n self.ofm = TensorParams(\n binary_op,\n layout,\n binary_op.args[BinaryElementwiseArgs.OFM_SCALE.value],\n binary_op.args[BinaryElementwiseArgs.OFM_ZERO_POINT.value],\n )\n else:\n self.ifm = TensorParams(\n binary_op.args[BinaryElementwiseArgs.IFM.value],\n layout,\n )\n self.ifm2 = TensorParams(\n binary_op.args[BinaryElementwiseArgs.IFM2.value],\n layout,\n )\n self.ofm = TensorParams(\n binary_op,\n layout,\n )\n self.activation = clip\n self.operator_type = operator_type\n\n def can_broadcast(ifm, ifm2):\n if len(ifm.shape) < len(ifm2.shape):\n return False\n for m, n in zip(ifm.shape[::-1], ifm2.shape[::-1]):\n if m != n and m == 1:\n return False\n return True\n\n if can_broadcast(self.ifm, self.ifm2):\n self.reversed_operands = False\n self.valid_broadcast = True\n elif can_broadcast(self.ifm2, self.ifm):\n self.reversed_operands = True\n self.ifm, self.ifm2 = self.ifm2, self.ifm\n self.valid_broadcast = True\n else:\n self.valid_broadcast = False\n\n def is_valid(self):\n \"\"\"\n This function checks whether BinaryElementwise has compatible attributes with the NPU\n \"\"\"\n if np.dtype(self.ofm) == np.int32 and self.activation is not None:\n return False\n # Due to identity operator requiring ofm != int32 for now\n if np.dtype(self.ofm) == np.int32 and len(self.ofm.shape) < 4:\n return False\n if len(self.ifm.shape) > 4 or len(self.ifm2.shape) > 4:\n return False\n if len(self.ifm.shape) == 4 and self.ifm.shape[0] != 1:\n return False\n if len(self.ifm2.shape) == 4 and self.ifm2.shape[0] != 1:\n return False\n if not self.valid_broadcast:\n return False\n return True\n\n\nclass AddParams(BinaryElementwiseParams):\n \"\"\"\n This class will parse a call to a ethosu.binary_elementwise Add composite function\n and extract the parameter information.\n \"\"\"\n\n composite_name = \"ethos-u.add\"\n\n def __init__(self, func_body: Call):\n BinaryElementwiseParams.__init__(self, func_body, \"ADD\", True)\n\n def 
is_valid(self):\n \"\"\"\n This function checks whether Add has compatible attributes with the NPU\n \"\"\"\n if not super().is_valid():\n return False\n if not check_valid_dtypes(\n [self.ifm, self.ifm2, self.ofm], supported_dtypes=[np.uint8, np.int8, np.int32]\n ):\n return False\n return True\n\n\ndef qnn_add_pattern() -> tvm.relay.dataflow_pattern.DFPattern:\n \"\"\"\n This function creates the pattern for qnn.add with optional fused RELU activation.\n \"\"\"\n pattern = is_op(\"qnn.add\")(\n wildcard(),\n wildcard(),\n is_constant(),\n is_constant(),\n is_constant(),\n is_constant(),\n is_constant(),\n is_constant(),\n )\n pattern = pattern.optional(is_op(\"clip\"))\n return pattern\n\n\nclass SubParams(BinaryElementwiseParams):\n \"\"\"\n This class will parse a call to a ethosu.binary_elementwise Sub composite function\n and extract the parameter information.\n \"\"\"\n\n composite_name = \"ethos-u.sub\"\n\n def __init__(self, func_body: Call):\n BinaryElementwiseParams.__init__(self, func_body, \"SUB\", True)\n\n def is_valid(self):\n \"\"\"\n This function checks whether Sub has compatible attributes with the NPU\n \"\"\"\n if not super().is_valid():\n return False\n if not check_valid_dtypes(\n [self.ifm, self.ifm2, self.ofm], supported_dtypes=[np.uint8, np.int8, np.int32]\n ):\n return False\n return True\n\n\ndef qnn_subtract_pattern() -> tvm.relay.dataflow_pattern.DFPattern:\n \"\"\"\n This function creates the pattern for qnn.subtract with optional fused RELU activation.\n \"\"\"\n pattern = is_op(\"qnn.subtract\")(\n wildcard(),\n wildcard(),\n is_constant(),\n is_constant(),\n is_constant(),\n is_constant(),\n is_constant(),\n is_constant(),\n )\n pattern = pattern.optional(is_op(\"clip\"))\n return pattern\n\n\nclass MulParams(BinaryElementwiseParams):\n \"\"\"\n This class will parse a call to a ethosu.binary_elementwise Mul composite function\n and extract the parameter information.\n \"\"\"\n\n composite_name = \"ethos-u.mul\"\n\n def __init__(self, func_body: Call):\n BinaryElementwiseParams.__init__(self, func_body, \"MUL\", True)\n\n def is_valid(self):\n \"\"\"\n This function checks whether Mul has compatible attributes with the NPU\n \"\"\"\n if not super().is_valid():\n return False\n if not check_valid_dtypes(\n [self.ifm, self.ifm2, self.ofm], supported_dtypes=[np.uint8, np.int8, np.int32]\n ):\n return False\n return True\n\n\ndef qnn_mul_pattern() -> tvm.relay.dataflow_pattern.DFPattern:\n \"\"\"\n This function creates the pattern for qnn.mul with optional fused RELU activation.\n \"\"\"\n pattern = is_op(\"qnn.mul\")(\n wildcard(),\n wildcard(),\n is_constant(),\n is_constant(),\n is_constant(),\n is_constant(),\n is_constant(),\n is_constant(),\n )\n pattern = pattern.optional(is_op(\"clip\"))\n return pattern\n\n\nclass MinParams(BinaryElementwiseParams):\n \"\"\"\n This class will parse a call to a ethosu.binary_elementwise Min composite function\n and extract the parameter information.\n \"\"\"\n\n composite_name = \"ethos-u.min\"\n\n def __init__(self, func_body: Call):\n BinaryElementwiseParams.__init__(self, func_body, \"MIN\", False)\n\n def is_valid(self):\n \"\"\"\n This function checks whether Min has compatible attributes with the NPU\n \"\"\"\n if not super().is_valid():\n return False\n if self.ifm.dtype != self.ifm2.dtype:\n return False\n if not check_valid_dtypes(\n [self.ifm, self.ifm2, self.ofm], supported_dtypes=[np.uint8, np.int8]\n ):\n return False\n return True\n\n\ndef minimum_pattern() -> tvm.relay.dataflow_pattern.DFPattern:\n 
\"\"\"\n This function creates the pattern for minimum with optional fused RELU activation.\n \"\"\"\n pattern = is_op(\"minimum\")(wildcard(), wildcard())\n pattern = pattern.optional(is_op(\"clip\"))\n return pattern\n\n\nclass MaxParams(BinaryElementwiseParams):\n \"\"\"\n This class will parse a call to a ethosu.binary_elementwise Max composite function\n and extract the parameter information.\n \"\"\"\n\n composite_name = \"ethos-u.max\"\n\n def __init__(self, func_body: Call):\n BinaryElementwiseParams.__init__(self, func_body, \"MAX\", False)\n\n def is_valid(self):\n \"\"\"\n This function checks whether Max has compatible attributes with the NPU\n \"\"\"\n if not super().is_valid():\n return False\n if self.ifm.dtype != self.ifm2.dtype:\n return False\n if not check_valid_dtypes(\n [self.ifm, self.ifm2, self.ofm], supported_dtypes=[np.uint8, np.int8]\n ):\n return False\n return True\n\n\ndef maximum_pattern() -> tvm.relay.dataflow_pattern.DFPattern:\n \"\"\"\n This function creates the pattern for maximum with optional fused RELU activation.\n \"\"\"\n pattern = is_op(\"maximum\")(wildcard(), wildcard())\n pattern = pattern.optional(is_op(\"clip\"))\n return pattern\n\n\nclass ShlParams(BinaryElementwiseParams):\n \"\"\"\n This class will parse a call to a ethosu.binary_elementwise Shl composite function\n and extract the parameter information.\n \"\"\"\n\n composite_name = \"ethos-u.shl\"\n\n def __init__(self, func_body: Call):\n BinaryElementwiseParams.__init__(self, func_body, \"SHL\", False)\n\n def is_valid(self):\n \"\"\"\n This function checks whether Shl has compatible attributes with the NPU\n \"\"\"\n if not super().is_valid():\n return False\n if not check_valid_dtypes([self.ifm, self.ifm2, self.ofm], supported_dtypes=[np.int32]):\n return False\n return True\n\n\ndef shl_pattern() -> tvm.relay.dataflow_pattern.DFPattern:\n \"\"\"\n This function creates the pattern for left_shift with optional fused RELU activation.\n \"\"\"\n pattern = is_op(\"left_shift\")(wildcard(), wildcard())\n pattern = pattern.optional(is_op(\"clip\"))\n return pattern\n\n\nclass ReshapeParams:\n \"\"\"\n This class will parse a call to a ethosu.reshape composite function\n and extract the parameter information.\n \"\"\"\n\n composite_name = \"ethos-u.reshape\"\n\n def __init__(self, func_body: Call):\n self.new_shape = func_body.attrs.newshape\n self.ifm = TensorParams(func_body.args[0])\n self.ofm = TensorParams(func_body)\n\n def is_valid(self):\n \"\"\"\n This function checks whether reshape has compatible attributes with the NPU\n \"\"\"\n if not check_dimensions(self.ifm) or not check_dimensions(self.ofm):\n return False\n if not check_valid_dtypes([self.ifm, self.ofm], supported_dtypes=[np.int8]):\n return False\n return True\n\n\ndef reshape_pattern():\n \"\"\"Create pattern for reshape\"\"\"\n pattern = is_op(\"reshape\")(wildcard())\n return pattern\n\n\nclass StridedSliceParams:\n \"\"\"\n This class will parse a call to a ethosu.strided_slice composite function\n and extract the parameter information.\n \"\"\"\n\n composite_name = \"ethos-u.strided_slice\"\n\n def __init__(self, func_body: Call):\n self.ifm = TensorParams(func_body.args[0])\n self.ofm = TensorParams(func_body)\n\n attrs = func_body.attrs\n # The indices where we begin the slice\n self.begin = attrs.begin\n # The indices where we end the slice\n self.end = attrs.end\n self.strides = attrs.strides\n self.axes = attrs.axes\n self.slice_mode = attrs.slice_mode\n\n def is_valid(self):\n \"\"\"\n This function checks 
whether reshape has compatible attributes with the NPU\n \"\"\"\n if not check_dimensions(self.ifm) or not check_dimensions(self.ofm):\n return False\n if not check_valid_dtypes([self.ifm, self.ofm], supported_dtypes=[np.int8]):\n return False\n if len(self.begin) != len(self.end):\n return False\n\n for begin_idx, end_idx in zip(self.begin, self.end):\n if begin_idx > end_idx:\n return False\n\n # Only strides of 1 are supported\n if self.strides:\n if not all([i == 1 for i in self.strides]):\n return False\n return True\n\n\ndef strided_slice_pattern():\n \"\"\"Create pattern for strided_slice\"\"\"\n pattern = is_op(\"strided_slice\")(wildcard())\n return pattern\n\n\nclass AbsParams:\n \"\"\"\n This class will parse a call to a ethosu.unary_elementwise Abs composite function\n and extract the parameter information.\n \"\"\"\n\n composite_name = \"ethos-u.abs\"\n\n def __init__(self, func_body: Call):\n from tvm.relay.backend.contrib.ethosu.util import QuantizeArgs\n from tvm.relay.backend.contrib.ethosu.util import DequantizeArgs\n\n quantize = func_body\n abs_op = quantize.args[0]\n dequantize = abs_op.args[0]\n\n layout = \"NHWC\"\n\n self.ifm = TensorParams(\n dequantize.args[DequantizeArgs.IFM.value],\n layout,\n dequantize.args[DequantizeArgs.IFM_SCALE.value],\n dequantize.args[DequantizeArgs.IFM_ZERO_POINT.value],\n )\n self.ofm = TensorParams(\n quantize,\n layout,\n quantize.args[QuantizeArgs.OFM_SCALE.value],\n quantize.args[QuantizeArgs.OFM_ZERO_POINT.value],\n )\n\n self.operator_type = \"ABS\"\n self.activation = None\n\n def is_valid(self):\n \"\"\"Checks whether Abs has compatible attributes with HW\"\"\"\n tensor_params = [self.ifm, self.ofm]\n if not check_valid_dtypes(tensor_params, supported_dtypes=[np.int8, np.uint8]):\n return False\n if self.ifm.dtype != self.ofm.dtype:\n return False\n if not check_dimensions(self.ifm):\n return False\n if len(self.ifm.shape) == 4 and self.ifm.shape[0] != 1:\n return False\n if self.ifm.shape != self.ofm.shape:\n return False\n return True\n\n\ndef abs_pattern() -> tvm.relay.dataflow_pattern.DFPattern:\n \"\"\"Create pattern for abs\"\"\"\n pattern = is_op(\"qnn.dequantize\")(wildcard(), is_constant(), is_constant())\n pattern = is_op(\"abs\")(pattern)\n pattern = is_op(\"qnn.quantize\")(pattern, is_constant(), is_constant())\n return pattern\n\n\nclass LutActivationParams:\n \"\"\"\n A parent class for LUT based activation functions that extract the input and\n output tensors and check whether they are valid.\n \"\"\"\n\n def __init__(self, func_body: Call):\n from tvm.relay.backend.contrib.ethosu.util import QuantizeArgs\n from tvm.relay.backend.contrib.ethosu.util import DequantizeArgs\n\n layout = \"NHWC\"\n\n quantize = func_body\n activation = quantize.args[0]\n dequantize = activation.args[0]\n in_var = dequantize.args[0]\n\n self.ifm = TensorParams(\n in_var,\n layout=layout,\n scale=dequantize.args[DequantizeArgs.IFM_SCALE.value],\n zero_point=dequantize.args[DequantizeArgs.IFM_ZERO_POINT.value],\n )\n self.ofm = TensorParams(\n quantize,\n layout=layout,\n scale=quantize.args[QuantizeArgs.OFM_SCALE.value],\n zero_point=quantize.args[QuantizeArgs.OFM_ZERO_POINT.value],\n )\n\n def is_valid(self):\n \"\"\"\n This function checks whether activation has compatible attributes with the NPU\n \"\"\"\n if not check_valid_dtypes([self.ifm, self.ofm], supported_dtypes=[np.int8]):\n return False\n return True\n\n\nclass TanhParams(LutActivationParams):\n\n composite_name = \"ethos-u.tanh\"\n\n\ndef tanh_pattern():\n \"\"\"Create 
pattern for tanh\"\"\"\n dequant = is_op(\"qnn.dequantize\")(wildcard(), is_constant(), is_constant())\n tanh = is_op(\"tanh\")(dequant)\n quant = is_op(\"qnn.quantize\")(tanh, is_constant(), is_constant())\n return quant\n\n\nclass SigmoidParams(LutActivationParams):\n \"\"\"\n This class will parse a call to a ethos-u.sigmoid composite function\n and extract the parameter information.\n \"\"\"\n\n composite_name = \"ethos-u.sigmoid\"\n\n\ndef sigmoid_pattern():\n \"\"\"Create pattern for sigmoid\"\"\"\n dequant = is_op(\"qnn.dequantize\")(wildcard(), is_constant(), is_constant())\n sigmoid = is_op(\"sigmoid\")(dequant)\n quant = is_op(\"qnn.quantize\")(sigmoid, is_constant(), is_constant())\n return quant\n\n\nclass LeakyReLUParams(LutActivationParams):\n \"\"\"\n This class will parse a call to ethos-u.leaky_relu composite function\n and extract the parameter information.\n \"\"\"\n\n composite_name = \"ethos-u.leaky_relu\"\n\n def __init__(self, func_body: Call):\n super().__init__(func_body)\n self.alpha = func_body.args[0].attrs.alpha\n\n\ndef leaky_relu_pattern() -> tvm.relay.dataflow_pattern.DFPattern:\n \"\"\"\n This function creates the pattern for leaky relu.\n \"\"\"\n dequantize = is_op(\"qnn.dequantize\")(wildcard(), is_constant(), is_constant())\n leaky_relu = is_op(\"nn.leaky_relu\")(dequantize)\n return is_op(\"qnn.quantize\")(leaky_relu, is_constant(), is_constant())\n\n\nclass MeanParams:\n \"\"\"\n This class will parse a call to ethosu.mean composite function\n and extract the parameter information.\n \"\"\"\n\n composite_name = \"ethos-u.mean\"\n\n def __init__(self, func_body: Call):\n from tvm.relay.backend.contrib.ethosu.util import RequantArgs\n\n requantize = func_body\n mean_op = requantize.args[0]\n attrs = mean_op.attrs\n cast = mean_op.args[0]\n\n layout = \"NHWC\"\n self.ifm = TensorParams(\n cast.args[0],\n layout,\n requantize.args[RequantArgs.IFM_SCALE.value],\n requantize.args[RequantArgs.IFM_ZERO_POINT.value],\n )\n self.ofm = TensorParams(\n requantize,\n layout,\n requantize.args[RequantArgs.OFM_SCALE.value],\n requantize.args[RequantArgs.OFM_ZERO_POINT.value],\n )\n\n ifm_shape = self.ifm.shape\n self.height = ifm_shape[0] if len(ifm_shape) in (2, 3) else ifm_shape[1]\n self.width = ifm_shape[1] if len(ifm_shape) in (2, 3) else ifm_shape[2]\n self.keepdims = attrs.keepdims\n\n self.axis = list(sorted(attrs.axis))\n if attrs.exclude:\n self.axis = [i for i in range(len(self.ifm.shape)) if i not in self.axis]\n\n def is_valid(self) -> bool:\n \"\"\"\n Checks whether Mean has compatible attributes with HW.\n \"\"\"\n\n def check_axis(num_dims, axis):\n if num_dims in (2, 3):\n return axis in ([0], [1], [0, 1])\n return axis in ([1], [2], [1, 2])\n\n tensor_params = [self.ifm, self.ofm]\n if not check_valid_dtypes(tensor_params, supported_dtypes=[np.int8]):\n return False\n if self.ifm.dtype != self.ofm.dtype:\n return False\n if not len(self.ifm.shape) in [2, 3, 4]:\n return False\n if not check_axis(len(self.ifm.shape), self.axis):\n return False\n\n # MEAN has further restrictions on the input size, depending on legalization method.\n input_size = self.height * self.width\n if input_size > 65536:\n return False\n if (\n self.ifm.q_params.scale_f32 != self.ofm.q_params.scale_f32\n or self.ifm.q_params.zero_point != self.ofm.q_params.zero_point\n ) and input_size > 4096:\n return False\n if self.axis == [1, 2] and self.keepdims and self.ifm.dtype == \"int8\" and input_size > 256:\n return False\n # Large kernel height reshape only when axis is [1, 2]\n 
if self.axis != [1, 2] and self.height > 64:\n return False\n return True\n\n\ndef mean_pattern() -> tvm.relay.dataflow_pattern.DFPattern:\n \"\"\"\n This function creates the pattern for mean.\n \"\"\"\n pattern = is_op(\"cast\")(wildcard())\n pattern = is_op(\"mean\")(pattern)\n pattern = is_op(\"qnn.requantize\")(\n pattern, is_constant(), is_constant(), is_constant(), is_constant()\n )\n return pattern\n\n\nclass ConcatParams:\n \"\"\"\n This class will parse a call to a ethos-u.concat composite function\n and extract the parameter information.\n \"\"\"\n\n composite_name = \"ethos-u.concat\"\n\n def __init__(self, func_body):\n self.concat = func_body\n self.is_qnn_variant = self.concat.op.name == \"qnn.concatenate\"\n self.input_tensors = [TensorParams(tensor) for tensor in list(func_body.args[0])]\n self.axis = func_body.attrs.axis\n\n if self.is_qnn_variant:\n self.input_scales = [s.data.asnumpy() for s in list(func_body.args[1])]\n self.input_zero_points = [zp.data.asnumpy() for zp in list(func_body.args[2])]\n\n def is_valid(self):\n \"\"\"Checks whether Concatenate has compatible attributes with the hardware\"\"\"\n if not check_valid_dtypes(self.input_tensors, supported_dtypes=[np.int8]):\n return False\n # Check that the scales and zero points of input tensors are the same\n if self.is_qnn_variant and not all(self.input_scales == self.input_scales[0]):\n return False\n if self.is_qnn_variant and not all(self.input_zero_points == self.input_zero_points[0]):\n return False\n\n input_dim = len(self.input_tensors[0].shape)\n for tensor in self.input_tensors:\n if len(tensor.shape) != input_dim:\n return False\n\n if self.axis is None:\n return False\n if self.axis < 0:\n return False\n if self.axis >= input_dim:\n return False\n\n output_shape = self.concat.checked_type.shape\n if len(output_shape) != input_dim:\n return False\n if len(output_shape) > 3 and output_shape[0] != 1:\n return False\n return True\n\n\ndef concat_pattern():\n \"\"\"Create pattern for concat\"\"\"\n tensors = is_tuple(None)\n scales = is_tuple(None)\n zero_points = is_tuple(None)\n qnn_concat = is_op(\"qnn.concatenate\")(\n tensors, scales, zero_points, is_constant(), is_constant()\n )\n concat = is_op(\"concatenate\")(tensors)\n return concat | qnn_concat\n\n\nclass SplitParams:\n \"\"\"\n This class will parse a call to a ethos-u.split composite function\n and extract the parameter information.\n \"\"\"\n\n composite_name = \"ethos-u.split\"\n\n def __init__(self, func_body):\n self.split = func_body\n self.input = TensorParams(func_body.args[0])\n self.axis = func_body.attrs.axis\n self.indices_or_sections = self.convert_indices_or_sections(\n func_body.attrs.indices_or_sections\n )\n\n def convert_indices_or_sections(self, indices_or_sections):\n # split_v\n if isinstance(indices_or_sections, tvm.ir.container.Array):\n values = [i.value for i in indices_or_sections]\n # split\n else:\n values = indices_or_sections.value\n return values\n\n def is_valid(self):\n \"\"\"Checks whether split has compatible attributes with the hardware\"\"\"\n if not check_valid_dtypes([self.input], supported_dtypes=[np.int8]):\n return False\n return True\n\n\ndef split_pattern():\n \"Create the pattern for split\"\n split = is_op(\"split\")(wildcard())\n return split\n\n\nclass RequantizeParams:\n \"\"\"\n This class will parse a call to ethos-u.requantize composite function\n and extract the parameter information.\n \"\"\"\n\n composite_name = \"ethos-u.requantize\"\n\n def __init__(self, func_body: Call):\n from 
tvm.relay.backend.contrib.ethosu.util import RequantArgs\n\n layout = \"NHWC\"\n in_var = func_body.args[0]\n requantize = func_body\n\n self.ifm = TensorParams(\n in_var,\n layout=layout,\n scale=requantize.args[RequantArgs.IFM_SCALE.value],\n zero_point=requantize.args[RequantArgs.IFM_ZERO_POINT.value],\n )\n self.ofm = TensorParams(\n requantize,\n layout=layout,\n scale=requantize.args[RequantArgs.OFM_SCALE.value],\n zero_point=requantize.args[RequantArgs.OFM_ZERO_POINT.value],\n )\n\n attrs = requantize.attrs\n self.out_dtype = attrs.out_dtype\n\n def is_valid(self) -> bool:\n \"\"\"\n Checks whether qnn.requantize has compatible attributes with HW.\n \"\"\"\n tensor_params = [self.ifm, self.ofm]\n if not check_valid_dtypes(tensor_params, supported_dtypes=[np.int8]):\n return False\n if not check_dimensions(self.ifm) or not check_dimensions(self.ofm):\n return False\n if self.out_dtype and self.out_dtype != \"int8\":\n return False\n return True\n\n\ndef requantize_pattern() -> tvm.relay.dataflow_pattern.DFPattern:\n \"\"\"\n This function creates the pattern for qnn.requantize.\n \"\"\"\n return is_op(\"qnn.requantize\")(\n wildcard(), is_constant(), is_constant(), is_constant(), is_constant()\n )\n\n\nclass Resize2dParams:\n \"\"\"\n This class will parse a call to ethos-u.resize2d composite function\n and extract the parameter information.\n \"\"\"\n\n composite_name = \"ethos-u.resize2d\"\n\n def __init__(self, func_body: Call):\n layout = \"NHWC\"\n\n resize_2d = func_body\n in_var = func_body.args[0]\n if (\n isinstance(resize_2d, tvm.relay.expr.Call)\n and isinstance(resize_2d.op, tvm.ir.Op)\n and resize_2d.op.name == \"qnn.quantize\"\n ):\n resize_2d = resize_2d.args[0]\n in_var = in_var.args[0].args[0]\n out_var = func_body\n\n self.ifm = TensorParams(in_var, layout=layout)\n self.ofm = TensorParams(out_var, layout=layout)\n\n attrs = resize_2d.attrs\n self.size = attrs.size\n self.method = attrs.method\n self.roi = attrs.roi\n self.coordinate_transformation_mode = attrs.coordinate_transformation_mode\n self.rounding_method = attrs.rounding_method\n self.out_dtype = attrs.out_dtype\n\n def is_valid(self) -> bool:\n \"\"\"\n Checks whether image.resize2d has compatible attributes with HW.\n \"\"\"\n\n def check_compatible_size(mode, method, upscale_size, ifm_size):\n \"\"\"Checking the provided upscale_size is compatible with the NPU. The NPU only\n supports upsampling when the upsampling size is 2 * input_size, or when there is\n no upsampling to be done, so check that this is the case. 
In the special case of\n resize_bilinear with align_corners=True, the NPU only supports an upsampling\n size of 2 * input_size - 1.\"\"\"\n delta = 1 if mode == \"align_corners\" and method == \"linear\" else 0\n upscale_size = np.array(upscale_size)\n ifm_size = np.array(ifm_size)\n ifm_upscaled = ifm_size * 2 - delta\n return (ifm_upscaled == upscale_size).all() or (ifm_size == upscale_size).all()\n\n tensor_params = [self.ifm, self.ofm]\n if not check_valid_dtypes(tensor_params, supported_dtypes=[np.int8]):\n return False\n if len(self.ifm.shape) != 4 or len(self.ofm.shape) != 4:\n return False\n if list(float(x) for x in self.roi) != [0.0] * 4:\n return False\n if self.method not in (\"nearest_neighbor\", \"linear\"):\n return False\n if self.coordinate_transformation_mode not in (\"asymmetric\", \"align_corners\"):\n return False\n if not check_compatible_size(\n self.coordinate_transformation_mode,\n self.method,\n self.size,\n self.ifm.shape[1:3],\n ):\n return False\n if self.rounding_method != \"\":\n return False\n if self.out_dtype and self.out_dtype != \"int8\":\n return False\n return True\n\n\ndef resize2d_pattern() -> tvm.relay.dataflow_pattern.DFPattern:\n \"\"\"\n This function creates the pattern for image.resize2d.\n \"\"\"\n dequant = is_op(\"qnn.dequantize\")(wildcard(), is_constant(), is_constant())\n resize_2d = is_op(\"image.resize2d\")(dequant).has_attr({\"method\": \"linear\"})\n quant = is_op(\"qnn.quantize\")(resize_2d, is_constant(), is_constant())\n return quant | is_op(\"image.resize2d\")(wildcard()).has_attr({\"method\": \"nearest_neighbor\"})\n\n\nclass ExpandDimsParams:\n \"\"\"\n This class will parse a call to a ethos-u.expand_dims composite function\n and extract the parameter information.\n \"\"\"\n\n composite_name = \"ethos-u.expand_dims\"\n\n def __init__(self, func_body):\n self.expand_dims = func_body\n self.input = TensorParams(func_body.args[0])\n self.output = TensorParams(func_body)\n\n def is_valid(self):\n \"\"\"Checks whether expand_dims has compatible attributes with the hardware.\"\"\"\n if not check_dimensions(self.input) or not check_dimensions(self.output):\n return False\n if not check_valid_dtypes([self.input, self.output], supported_dtypes=[np.int8]):\n return False\n return True\n\n\ndef expand_dims_pattern():\n \"\"\"Create the pattern for expand_dims.\"\"\"\n return is_op(\"expand_dims\")(wildcard())\n\n\nclass SqueezeParams:\n \"\"\"\n This class will parse a call to a ethos-u.squeeze composite function\n and extract the parameter information.\n \"\"\"\n\n composite_name = \"ethos-u.squeeze\"\n\n def __init__(self, func_body):\n self.squeeze = func_body\n self.input = TensorParams(func_body.args[0])\n self.output = TensorParams(func_body)\n\n def is_valid(self):\n \"\"\"Checks whether squeeze has compatible attributes with the hardware.\"\"\"\n if not check_dimensions(self.output):\n return False\n if not check_valid_dtypes([self.input, self.output], supported_dtypes=[np.int8]):\n return False\n return True\n\n\ndef squeeze_pattern():\n \"\"\"Create the pattern for squeeze.\"\"\"\n return is_op(\"squeeze\")(wildcard())\n\n\nclass FullyConnectedParams:\n \"\"\"\n This class will parse a call to an ethos-u.fully_connected composite\n function and extract the parameter information.\n \"\"\"\n\n composite_name = \"ethos-u.fully_connected\"\n\n @requires_vela\n def __init__(self, func_body):\n from tvm.relay.backend.contrib.ethosu.util import QDenseArgs # type: ignore\n from tvm.relay.backend.contrib.ethosu.util import BiasAddArgs\n 
from tvm.relay.backend.contrib.ethosu.util import RequantArgs\n\n self.activation = None\n if str(func_body.op) == \"clip\":\n self.activation = func_body\n requantize_op = self.activation.args[0]\n else:\n requantize_op = func_body\n\n call = requantize_op.args[0]\n if str(requantize_op.args[0].op) == \"nn.bias_add\":\n bias_add = call\n qnn_dense = call.args[0]\n else:\n bias_add = None\n qnn_dense = call\n\n # weights & biases are params as they should be constant\n self.weights = TensorParams(\n qnn_dense.args[QDenseArgs.WEIGHTS.value],\n None,\n qnn_dense.args[QDenseArgs.WEIGHTS_SCALE.value],\n qnn_dense.args[QDenseArgs.WEIGHTS_ZERO_POINT.value],\n )\n self.biases = (\n TensorParams(\n bias_add.args[BiasAddArgs.BIASES.value],\n None,\n requantize_op.args[RequantArgs.IFM_SCALE.value],\n requantize_op.args[RequantArgs.IFM_ZERO_POINT.value],\n )\n if bias_add\n else None\n )\n self.ifm = TensorParams(\n qnn_dense.args[QDenseArgs.IFM.value],\n None,\n qnn_dense.args[QDenseArgs.IFM_SCALE.value],\n qnn_dense.args[QDenseArgs.IFM_ZERO_POINT.value],\n )\n self.ofm = TensorParams(\n func_body,\n None,\n requantize_op.args[RequantArgs.OFM_SCALE.value],\n requantize_op.args[RequantArgs.OFM_ZERO_POINT.value],\n )\n\n def is_valid(self) -> bool:\n \"\"\"\n Checks whether Fully Connected has compatible attributes with HW\n \"\"\"\n\n def check_weights_fc(weights):\n \"\"\"Checks whether weight tensor is compatible with HW\"\"\"\n weights_limit = 127 * 65536\n # A saturation upper bound check for accumulators\n weights.values = weights.values - weights.q_params.zero_point\n axis = 1\n sum_weights = np.amax(np.sum(np.absolute(weights.values), axis=axis))\n if not sum_weights <= weights_limit:\n return False\n return True\n\n if not check_valid_dtypes([self.ifm, self.ofm], supported_dtypes=[np.int8]):\n return False\n if not check_weights_fc(self.weights):\n return False\n if not check_bias(self.biases):\n return False\n if not check_batch_size(self.ifm):\n return False\n # Check input shape\n if not len(self.ifm.shape) == 2:\n return False\n # Check output shape\n if not len(self.ofm.shape) == 2:\n return False\n return True\n\n\ndef qnn_fc_pattern():\n dense = is_op(\"qnn.dense\")(\n wildcard(), is_constant(), is_constant(), is_constant(), is_constant(), is_constant()\n )\n optional_bias_add = is_op(\"nn.bias_add\")(dense, is_constant())\n req = is_op(\"qnn.requantize\")(\n dense | optional_bias_add, is_constant(), is_constant(), is_constant(), is_constant()\n )\n optional_clip = req.optional(is_op(\"clip\"))\n return optional_clip\n\n\n@register_pattern_table(\"ethos-u\")\ndef pattern_table() -> List[Tuple[str, tvm.relay.dataflow_pattern.DFPattern, Callable]]:\n return [\n (\n QnnConv2DParams.composite_name,\n qnn_conv2d_pattern(),\n lambda pat: QnnConv2DParams(pat).is_valid(),\n ),\n (\n QnnDepthwiseConv2DParams.composite_name,\n qnn_depthwise_conv2d_pattern(),\n lambda pat: QnnDepthwiseConv2DParams(pat).is_valid(),\n ),\n (\n QnnConv2DTransposeParams.composite_name,\n qnn_conv2d_transpose_pattern(),\n lambda pat: QnnConv2DTransposeParams(pat).is_valid(),\n ),\n (\n FullyConnectedParams.composite_name,\n qnn_fc_pattern(),\n lambda pat: FullyConnectedParams(pat).is_valid(),\n ),\n (\n MaxPool2DParams.composite_name,\n qnn_maxpool2d_pattern(),\n lambda pat: MaxPool2DParams(pat).is_valid(),\n ),\n (\n AvgPool2DParams.composite_name,\n qnn_avgpool2d_pattern(),\n lambda pat: AvgPool2DParams(pat).is_valid(),\n ),\n (\n AddParams.composite_name,\n qnn_add_pattern(),\n lambda pat: 
AddParams(pat).is_valid(),\n ),\n (\n SubParams.composite_name,\n qnn_subtract_pattern(),\n lambda pat: SubParams(pat).is_valid(),\n ),\n (\n MulParams.composite_name,\n qnn_mul_pattern(),\n lambda pat: MulParams(pat).is_valid(),\n ),\n (\n MinParams.composite_name,\n minimum_pattern(),\n lambda pat: MinParams(pat).is_valid(),\n ),\n (\n MaxParams.composite_name,\n maximum_pattern(),\n lambda pat: MaxParams(pat).is_valid(),\n ),\n (\n ShlParams.composite_name,\n shl_pattern(),\n lambda pat: ShlParams(pat).is_valid(),\n ),\n (\n ReshapeParams.composite_name,\n reshape_pattern(),\n lambda pat: ReshapeParams(pat).is_valid(),\n ),\n (\n StridedSliceParams.composite_name,\n strided_slice_pattern(),\n lambda pat: StridedSliceParams(pat).is_valid(),\n ),\n (\n AbsParams.composite_name,\n abs_pattern(),\n lambda pat: AbsParams(pat).is_valid(),\n ),\n (TanhParams.composite_name, tanh_pattern(), lambda pat: TanhParams(pat).is_valid()),\n (\n MeanParams.composite_name,\n mean_pattern(),\n lambda pat: MeanParams(pat).is_valid(),\n ),\n (\n LeakyReLUParams.composite_name,\n leaky_relu_pattern(),\n lambda pat: LeakyReLUParams(pat).is_valid(),\n ),\n (ConcatParams.composite_name, concat_pattern(), lambda pat: ConcatParams(pat).is_valid()),\n (\n SigmoidParams.composite_name,\n sigmoid_pattern(),\n lambda pat: SigmoidParams(pat).is_valid(),\n ),\n (\n SplitParams.composite_name,\n split_pattern(),\n lambda pat: SplitParams(pat).is_valid(),\n ),\n (\n RequantizeParams.composite_name,\n requantize_pattern(),\n lambda pat: RequantizeParams(pat).is_valid(),\n ),\n (\n Resize2dParams.composite_name,\n resize2d_pattern(),\n lambda pat: Resize2dParams(pat).is_valid(),\n ),\n (\n ExpandDimsParams.composite_name,\n expand_dims_pattern(),\n lambda pat: ExpandDimsParams(pat).is_valid(),\n ),\n (\n SqueezeParams.composite_name,\n squeeze_pattern(),\n lambda pat: SqueezeParams(pat).is_valid(),\n ),\n ]\n\n\n# pylint: disable=unused-argument\n@requires_vela\ndef partition_for_ethosu(\n mod: tvm.ir.IRModule, params: Optional[Dict[str, tvm.runtime.NDArray]] = None, **opts\n):\n \"\"\"This helper function partition the relay graph as produced by the\n relay frontend for a given model into external functions\n to be presented to the codegen.\n\n Parameters\n ----------\n mod : tvm.ir.IRModule\n The IRModule that gets generated from a relay frontend\n params : Optional[Dict[str, tvm.runtime.NDArray]]\n Constant input parameters.\n\n Returns\n -------\n mod : IRModule\n The partitioned IRModule with external global functions\n \"\"\"\n from tvm.relay.backend.contrib.ethosu import preprocess\n\n if params:\n mod[\"main\"] = bind_params_by_name(mod[\"main\"], params)\n\n pattern = relay.op.contrib.get_pattern_table(\"ethos-u\")\n mod = relay.transform.InferType()(mod)\n mod = relay.transform.MergeComposite(pattern)(mod)\n mod = relay.transform.AnnotateTarget(\"ethos-u\")(mod)\n mod = relay.transform.MergeCompilerRegions()(mod)\n mod = relay.transform.InferType()(mod)\n mod = relay.transform.PartitionGraph()(mod)\n mod = relay.transform.InferType()(mod)\n mod = preprocess.preprocess_ext_io()(mod)\n return mod\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"\nAuto-scheduling a Neural Network for NVIDIA GPU\n===============================================\n**Author**: `Lianmin Zheng <https://github.com/merrymercy>`_\n\nAuto-tuning for specific devices and workloads is critical for getting the\nbest performance. This is a tutorial on how to tune a whole neural\nnetwork for NVIDIA GPU with the auto-scheduler.\n\nTo auto-tune a neural network, we partition the network into small subgraphs and \ntune them independently. Each subgraph is treated as one search task.\nA task scheduler slices the time and dynamically allocates time resources to\nthese tasks. The task scheduler predicts the impact of each task on the end-to-end\nexecution time and prioritizes the one that can reduce the execution time the most.\n\nFor each subgraph, we use the compute declaration in :code:`tvm/python/topi` to\nget the computational DAG in the tensor expression form.\nWe then use the auto-scheduler to construct a search space of this DAG and search\nfor good schedules (low-level optimizations).\n\nDifferent from the template-based :ref:`autotvm <tutorials-autotvm-sec>` which relies on\nmanual templates to define the search space, the auto-scheduler does not require any\nschedule templates. In other words, the auto-scheduler only uses the compute declarations\nin :code:`tvm/python/topi` and does not use existing schedule templates.\n\nNote that this tutorial will not run on Windows or recent versions of macOS. 
To\nget it to run, you will need to wrap the body of this tutorial in a :code:`if\n__name__ == \"__main__\":` block.\n\"\"\"\n\nimport numpy as np\n\nimport tvm\nfrom tvm import relay, auto_scheduler\nimport tvm.relay.testing\nfrom tvm.contrib import graph_executor\n\n#################################################################\n# Define a Network\n# ----------------\n# First, we need to define the network with relay frontend API.\n# We can load some pre-defined network from :code:`tvm.relay.testing`.\n# We can also load models from MXNet, ONNX, PyTorch, and TensorFlow\n# (see :ref:`front end tutorials<tutorial-frontend>`).\n#\n# For convolutional neural networks, although auto-scheduler can work correctly\n# with any layout, we found the best performance is typically achieved with NHWC layout.\n# We also implemented more optimizations for NHWC layout with the auto-scheduler.\n# So it is recommended to convert your models to NHWC layout to use the auto-scheduler.\n# You can use :ref:`ConvertLayout <convert-layout-usage>` pass to do the layout conversion in TVM.\n\n\ndef get_network(name, batch_size, layout=\"NHWC\", dtype=\"float32\"):\n \"\"\"Get the symbol definition and random weight of a network\"\"\"\n\n # auto-scheduler prefers NHWC layout\n if layout == \"NHWC\":\n image_shape = (224, 224, 3)\n elif layout == \"NCHW\":\n image_shape = (3, 224, 224)\n else:\n raise ValueError(\"Invalid layout: \" + layout)\n\n input_shape = (batch_size,) + image_shape\n output_shape = (batch_size, 1000)\n\n if name.startswith(\"resnet-\"):\n n_layer = int(name.split(\"-\")[1])\n mod, params = relay.testing.resnet.get_workload(\n num_layers=n_layer,\n batch_size=batch_size,\n layout=layout,\n dtype=dtype,\n image_shape=image_shape,\n )\n elif name.startswith(\"resnet3d-\"):\n n_layer = int(name.split(\"-\")[1])\n mod, params = relay.testing.resnet.get_workload(\n num_layers=n_layer,\n batch_size=batch_size,\n layout=layout,\n dtype=dtype,\n image_shape=image_shape,\n )\n elif name == \"mobilenet\":\n mod, params = relay.testing.mobilenet.get_workload(\n batch_size=batch_size, layout=layout, dtype=dtype, image_shape=image_shape\n )\n elif name == \"squeezenet_v1.1\":\n assert layout == \"NCHW\", \"squeezenet_v1.1 only supports NCHW layout\"\n mod, params = relay.testing.squeezenet.get_workload(\n version=\"1.1\",\n batch_size=batch_size,\n dtype=dtype,\n image_shape=image_shape,\n )\n elif name == \"inception_v3\":\n input_shape = (batch_size, 3, 299, 299) if layout == \"NCHW\" else (batch_size, 299, 299, 3)\n mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)\n elif name == \"mxnet\":\n # an example for mxnet model\n from mxnet.gluon.model_zoo.vision import get_model\n\n assert layout == \"NCHW\"\n\n block = get_model(\"resnet18_v1\", pretrained=True)\n mod, params = relay.frontend.from_mxnet(block, shape={\"data\": input_shape}, dtype=dtype)\n net = mod[\"main\"]\n net = relay.Function(\n net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs\n )\n mod = tvm.IRModule.from_expr(net)\n\n return mod, params, input_shape, output_shape\n\n\n# Define the neural network and compilation target\nnetwork = \"resnet-18\"\nbatch_size = 1\nlayout = \"NHWC\"\ntarget = tvm.target.Target(\"cuda\")\ndtype = \"float32\"\nlog_file = \"%s-%s-B%d-%s.json\" % (network, layout, batch_size, target.kind.name)\n\n#################################################################\n# Extract Search Tasks\n# --------------------\n# Next, we extract the search tasks and 
their weights from a network.\n# The weight of a task is the number of appearances of the task's subgraph\n# in the whole network.\n# By using the weight, we can approximate the end-to-end latency of the network\n# as :code:`sum(latency[t] * weight[t])`, where :code:`latency[t]` is the\n# latency of a task and :code:`weight[t]` is the weight of the task.\n# The task scheduler will just optimize this objective.\n\n# Extract tasks from the network\nprint(\"Extract tasks...\")\nmod, params, input_shape, output_shape = get_network(network, batch_size, layout, dtype=dtype)\ntasks, task_weights = auto_scheduler.extract_tasks(mod[\"main\"], params, target)\n\nfor idx, task in enumerate(tasks):\n print(\"========== Task %d (workload key: %s) ==========\" % (idx, task.workload_key))\n print(task.compute_dag)\n\n#################################################################\n# Begin Tuning\n# ------------\n# Now, we set some options for tuning and launch the search tasks\n#\n# * :code:`measure_ctx` launches a different process for measurement to\n# provide isolation. It can protect the master process from GPU crashes\n# during measurement and avoid other runtime conflicts.\n# * :code:`min_repeat_ms` defines the minimum duration of one \"repeat\" in every measurement.\n# This can warmup the GPU, which is necessary to get accurate measurement results.\n# Typically, we recommend a value >= 300 ms.\n# * :code:`num_measure_trials` is the number of measurement trials we can use during the tuning.\n# You can set it to a small number (e.g., 200) for a fast demonstrative run.\n# In practice, we recommend setting it around :code:`900 * len(tasks)`,\n# which is typically enough for the search to converge.\n# For example, there are 24 tasks in resnet-18, so we can set it as 20000.\n# You can adjust this parameter according to your time budget.\n# * In addition, we use :code:`RecordToFile` to dump measurement records into a log file,\n# The measurement records can be used to query the history best, resume the search,\n# and do more analyses later.\n# * see :any:`auto_scheduler.TuningOptions`,\n# :any:`auto_scheduler.LocalRPCMeasureContext` for more parameters.\n#\n\n\ndef run_tuning():\n print(\"Begin tuning...\")\n measure_ctx = auto_scheduler.LocalRPCMeasureContext(repeat=1, min_repeat_ms=300, timeout=10)\n\n tuner = auto_scheduler.TaskScheduler(tasks, task_weights)\n tune_option = auto_scheduler.TuningOptions(\n num_measure_trials=200, # change this to 20000 to achieve the best performance\n runner=measure_ctx.runner,\n measure_callbacks=[auto_scheduler.RecordToFile(log_file)],\n )\n\n tuner.tune(tune_option)\n\n\n# We do not run the tuning in our webpage server since it takes too long.\n# Uncomment the following line to run it by yourself.\n\n# run_tuning()\n\n\n######################################################################\n# .. note:: Explain the printed information during tuning\n#\n# During the tuning, a lot of information will be printed on the console.\n# They are used for debugging purposes. The most important info is the output\n# of the task scheduler. The following table is a sample output.\n#\n# .. 
code-block:: c\n#\n# ----------------------------------------------------------------------\n# ------------------------------ [ Task Scheduler ]\n# ----------------------------------------------------------------------\n# | ID | Latency (ms) | Speed (GFLOPS) | Trials |\n# -------------------------------------------------\n# | 0 | 0.005 | 0.88 | 64 |\n# | 1 | 0.010 | 99.10 | 64 |\n# | 2 | 0.006 | 0.00 | 64 |\n# | 3 | 0.145 | 979.78 | 384 |\n# | 4 | 0.130 | 1097.02 | 384 |\n# | 5 | 0.143 | 992.69 | 384 |\n# | 6 | 0.076 | 1526.86 | 192 |\n# | 7 | 0.115 | 999.44 | 320 |\n# | 8 | 0.079 | 1449.39 | 320 |\n# | 9 | 0.122 | 938.73 | 384 |\n# | 10 | 0.063 | 1832.98 | 192 |\n# | 11 | 0.072 | 1763.62 | 256 |\n# | 12 | 0.062 | 2036.40 | 192 |\n# | 13 | 0.068 | 1874.44 | 192 |\n# | 14 | 0.049 | 2346.50 | 128 |\n# | 15 | 0.076 | 1694.31 | 256 |\n# | 16 | 0.067 | 1933.30 | 448 |\n# | 17 | 0.076 | 1680.90 | 256 |\n# | 18 | 0.022 | 98.43 | 64 |\n# | 19 | 0.076 | 3112.55 | 192 |\n# | 20 | 0.013 | 2026.44 | 64 |\n# | 21 | 0.011 | 1136.69 | 64 |\n# | 22 | 0.013 | 992.47 | 64 |\n# | 23 | 0.020 | 627.56 | 64 |\n# -------------------------------------------------\n# Estimated total latency: 1.587 ms Trials: 4992 Used time : 13296 s Next ID: 3\n#\n# This table lists the latency and (estimated) speed of all tasks.\n# It also lists the allocation of measurement trials for all tasks.\n# The last line prints the total weighted latency of these tasks,\n# which can be a rough estimation of the end-to-end execution time\n# of the network.\n# The last line also prints the total number of measurement trials,\n# total time spent on auto-tuning and the id of the next task to tune.\n#\n# There will also be some \"tvm::Error\"s and CUDA errors, because the\n# auto-scheduler will try some invalid schedules.\n# You can safely ignore them if the tuning can continue, because these\n# errors are isolated from the main process.\n#\n\n######################################################################\n# .. note:: Terminate the tuning earlier\n#\n# You can terminate the tuning earlier by forcibly killing this process.\n# As long as you get at least one valid schedule for each task in the log file,\n# you should be able to do the compilation (the secion below).\n#\n\n\n#################################################################\n# Compile and Evaluate\n# --------------------\n# After auto-tuning, we can compile the network with the best schedules we found.\n# All measurement records are dumped into the log file during auto-tuning,\n# so we can read the log file and load the best schedules.\n\n# Compile with the history best\nprint(\"Compile...\")\nwith auto_scheduler.ApplyHistoryBest(log_file):\n with tvm.transform.PassContext(opt_level=3, config={\"relay.backend.use_auto_scheduler\": True}):\n lib = relay.build(mod, target=target, params=params)\n\n# Create graph executor\ndev = tvm.device(str(target), 0)\nmodule = graph_executor.GraphModule(lib[\"default\"](dev))\ndata_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))\nmodule.set_input(\"data\", data_tvm)\n\n# Evaluate\nprint(\"Evaluate inference time cost...\")\nprint(module.benchmark(dev, repeat=3, min_repeat_ms=500))\n\n\n#################################################################\n# Other Tips\n# ----------\n# 1. During the tuning, the auto-scheduler needs to compile many programs and\n# extract feature from them. This part is CPU-intensive,\n# so a high-performance CPU with many cores is recommended for faster search.\n# 2. 
You can use :code:`python3 -m tvm.auto_scheduler.measure_record --mode distill -i log.json`\n# to distill the large log file and only save the best useful records.\n# 3. You can resume a search from the previous log file. You just need to\n# add a new argument :code:`load_log_file` when creating the task scheduler\n# in function :code:`run_tuning`. Say,\n# :code:`tuner = auto_scheduler.TaskScheduler(tasks, task_weights, load_log_file=log_file)`\n# 4. If you have multiple target GPUs, you can use all of them for measurements to\n# parallelize the measurements. Check this :ref:`section <tutorials-autotvm-scale-up-rpc-tracker>`\n# to learn how to use the RPC Tracker and RPC Server.\n# To use the RPC Tracker in auto-scheduler, replace the runner in :code:`TuningOptions`\n# with :any:`auto_scheduler.RPCRunner`.\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=invalid-name, too-many-nested-blocks\n\"Roi pool in python\"\nimport math\nimport numpy as np\n\n\ndef roi_pool_nchw_python(a_np, rois_np, pooled_size, spatial_scale):\n \"\"\"Roi pool in python\"\"\"\n _, channel, height, width = a_np.shape\n num_roi = rois_np.shape[0]\n b_np = np.zeros((num_roi, channel, pooled_size, pooled_size), dtype=a_np.dtype)\n\n if isinstance(pooled_size, int):\n pooled_size_h = pooled_size_w = pooled_size\n else:\n pooled_size_h, pooled_size_w = pooled_size\n\n for i in range(num_roi):\n roi = rois_np[i]\n batch_index = int(roi[0])\n roi_start_w = int(round(roi[1] * spatial_scale))\n roi_start_h = int(round(roi[2] * spatial_scale))\n roi_end_w = int(round(roi[3] * spatial_scale))\n roi_end_h = int(round(roi[4] * spatial_scale))\n roi_h = max(roi_end_h - roi_start_h + 1, 1)\n roi_w = max(roi_end_w - roi_start_w + 1, 1)\n\n bin_h = float(roi_h) / pooled_size_h\n bin_w = float(roi_w) / pooled_size_w\n\n for ph in range(pooled_size_h):\n for pw in range(pooled_size_w):\n hstart = int(math.floor(ph * bin_h))\n wstart = int(math.floor(pw * bin_w))\n hend = int(math.ceil((ph + 1) * bin_h))\n wend = int(math.ceil((pw + 1) * bin_w))\n hstart = min(max(hstart + roi_start_h, 0), height)\n hend = min(max(hend + roi_start_h, 0), height)\n wstart = min(max(wstart + roi_start_w, 0), width)\n wend = min(max(wend + roi_start_w, 0), width)\n is_empty = (hend <= hstart) or (wend <= wstart)\n\n for c in range(channel):\n if is_empty:\n b_np[i, c, ph, pw] = 0.0\n else:\n b_np[i, c, ph, pw] = np.max(a_np[batch_index, c, hstart:hend, wstart:wend])\n return b_np\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring\nimport sys\nfrom typing import Callable, List\n\nimport pytest\nimport tvm\nfrom numpy.testing import assert_allclose\nfrom tvm import meta_schedule as ms\nfrom tvm import te, tir\nfrom tvm.script import tir as T\n\nN_FEATURES = 164\n\n\[email protected]_func\ndef matmul(\n A: T.Buffer[(512, 512), \"float32\"],\n B: T.Buffer[(512, 512), \"float32\"],\n C: T.Buffer[(512, 512), \"float32\"],\n) -> None:\n # function attr dict\n T.func_attr({\"global_symbol\": \"main\", \"tir.noalias\": True})\n # body\n # with T.block(\"root\")\n for i0, i1, i2 in T.grid(512, 512, 512):\n with T.block(\"C\"):\n i, j, k = T.axis.remap(\"SSR\", [i0, i1, i2])\n T.reads(C[i, j], A[i, k], B[k, j])\n T.writes(C[i, j])\n with T.init():\n C[i, j] = T.float32(0)\n C[i, j] = C[i, j] + A[i, k] * B[k, j]\n\n\n# pylint: disable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument\n# fmt: off\n\n# from tvm.script import tir as T\[email protected]_module\nclass LayoutTransform:\n @T.prim_func\n def main(placeholder: T.Buffer[(1, 16, 7, 7, 32), \"float32\"], placeholder_1: T.Buffer[(25088,), \"float32\"], T_layout_trans: T.Buffer[(1, 1, 7, 7, 512), \"float32\"]) -> None:\n # function attr dict\n T.func_attr({\"tir.noalias\": True, \"global_symbol\": \"main\"})\n # body\n # with T.block(\"root\")\n for i0_i1_i2_i3_i4_fused in T.parallel(25088, annotations={\"pragma_auto_unroll_max_step\":64, \"pragma_unroll_explicit\":1}):\n with T.block(\"T_layout_trans_1\"):\n ax0 = T.axis.spatial(1, 0)\n ax1 = T.axis.spatial(1, 0)\n ax2 = T.axis.spatial(7, i0_i1_i2_i3_i4_fused // 3584)\n ax3 = T.axis.spatial(7, i0_i1_i2_i3_i4_fused % 3584 // 512)\n ax4 = T.axis.spatial(512, i0_i1_i2_i3_i4_fused % 512)\n T.reads(placeholder[0, (ax4 * 49 + ax2 * 7 + ax3) % 25088 // 1568, (ax2 * 7 + ax3) % 49 // 7, ax3 % 7, (ax4 * 49 + ax2 * 7 + ax3) % 1568 // 49], placeholder_1[(ax4 * 49 + ax2 * 7 + ax3) % 25088])\n T.writes(T_layout_trans[ax0, ax1, ax2, ax3, ax4])\n T_layout_trans[ax0, ax1, ax2, ax3, ax4] = T.if_then_else(ax0 < 1 and ax1 * 512 + ax4 < 512 and ax2 < 7 and ax3 < 7, T.Select(T.float32(0) < T.if_then_else(0 < 1 and ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 25088 // 49 < 512 and ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 49 // 7 < 7 and ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 7 < 7, placeholder[0, ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 25088 // 49 // 32, ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 49 // 7, ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 7, ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 25088 // 49 % 32], T.float32(0), dtype=\"float32\"), T.if_then_else(0 < 1 and ((ax1 * 512 
+ ax4) * 49 + ax2 * 7 + ax3) % 25088 % 25088 // 49 < 512 and ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 49 // 7 < 7 and ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 7 < 7, placeholder[0, ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 25088 // 49 // 32, ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 49 // 7, ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 7, ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 25088 // 49 % 32], T.float32(0), dtype=\"float32\"), T.if_then_else(0 < 1 and ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 25088 // 49 < 512 and ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 49 // 7 < 7 and ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 7 < 7, placeholder[0, ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 25088 // 49 // 32, ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 49 // 7, ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 7, ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 25088 // 49 % 32], T.float32(0), dtype=\"float32\") * placeholder_1[((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088]), T.float32(0), dtype=\"float32\")\n\n\n# fmt: on\n# pylint: enable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument\n\n\ndef _make_context(target) -> ms.TuneContext:\n return ms.TuneContext(\n target=target,\n num_threads=1,\n )\n\n\ndef _make_candidate(f_sch: Callable[[], tir.Schedule]) -> ms.MeasureCandidate:\n return ms.MeasureCandidate(sch=f_sch(), args_info=[])\n\n\ndef _feature_names( # pylint: disable=invalid-name\n buffers_per_store: int = 5,\n arith_intensity_curve_num_samples: int = 10,\n) -> List[str]:\n result = [\n \"float_mad\",\n \"float_addsub\",\n \"float_mul\",\n \"float_divmod\",\n \"float_cmp\",\n \"float_mathfunc\",\n \"float_otherfunc\",\n \"int_mad\",\n \"int_addsub\",\n \"int_mul\",\n \"int_divmod\",\n \"int_cmp\",\n \"int_mathfunc\",\n \"int_otherfunc\",\n \"bool_op\",\n \"select_op\",\n \"vec_num\",\n \"vec_prod\",\n \"vec_len\",\n \"vec_type.kPosNone\",\n \"vec_type.kPosInnerSpatial\",\n \"vec_type.kPosMiddleSpatial\",\n \"vec_type.kPosOuterSpatial\",\n \"vec_type.kPosInnerReduce\",\n \"vec_type.kPosMiddleReduce\",\n \"vec_type.kPosOuterReduce\",\n \"vec_type.kPosMixed\",\n \"unroll_num\",\n \"unroll_prod\",\n \"unroll_len\",\n \"unroll_type.kPosNone\",\n \"unroll_type.kPosInnerSpatial\",\n \"unroll_type.kPosMiddleSpatial\",\n \"unroll_type.kPosOuterSpatial\",\n \"unroll_type.kPosInnerReduce\",\n \"unroll_type.kPosMiddleReduce\",\n \"unroll_type.kPosOuterReduce\",\n \"unroll_type.kPosMixed\",\n \"parallel_num\",\n \"parallel_prod\",\n \"parallel_len\",\n \"parallel_type.kPosNone\",\n \"parallel_type.kPosInnerSpatial\",\n \"parallel_type.kPosMiddleSpatial\",\n \"parallel_type.kPosOuterSpatial\",\n \"parallel_type.kPosInnerReduce\",\n \"parallel_type.kPosMiddleReduce\",\n \"parallel_type.kPosOuterReduce\",\n \"parallel_type.kPosMixed\",\n \"is_gpu\",\n \"blockIdx_x_len\",\n \"blockIdx_y_len\",\n \"blockIdx_z_len\",\n \"threadIdx_x_len\",\n \"threadIdx_y_len\",\n \"threadIdx_z_len\",\n \"vthread_len\",\n ]\n for i in range(buffers_per_store):\n result.extend(\n f\"B{i}.{s}\"\n for s in [\n \"acc_type.kRead\",\n \"acc_type.kWrite\",\n \"acc_type.kReadWrite\",\n \"bytes\",\n \"unique_bytes\",\n \"lines\",\n \"unique_lines\",\n \"reuse_type.kLoopMultipleRead\",\n \"reuse_type.kSerialMultipleReadWrite\",\n \"reuse_type.kNoReuse\",\n \"reuse_dis_iter\",\n \"reuse_dis_bytes\",\n \"reuse_ct\",\n \"bytes_d_reuse_ct\",\n \"unique_bytes_d_reuse_ct\",\n \"lines_d_reuse_ct\",\n 
\"unique_lines_d_reuse_ct\",\n \"stride\",\n ]\n )\n result.extend(f\"arith_intensity_curve_{i}\" for i in range(arith_intensity_curve_num_samples))\n result.extend(\n [\n \"alloc_size\",\n \"alloc_prod\",\n \"alloc_outer_prod\",\n \"alloc_inner_prod\",\n \"outer_prod\",\n \"num_loops\",\n \"auto_unroll_max_step\",\n ]\n )\n # 57 + 18 * 5 + 10 + 4 + 3\n assert len(result) == N_FEATURES\n return result\n\n\ndef _zip_feature(feature, names):\n assert feature.ndim == 1\n assert feature.shape[0] == N_FEATURES\n assert len(names) == N_FEATURES\n return list(zip(names, feature))\n\n\ndef _print_feature(feature, st, ed): # pylint: disable=invalid-name\n named_feature = _zip_feature(feature, _feature_names())\n for k, v in named_feature[st:ed]:\n print(\"\\t\", k, v)\n\n\ndef test_cpu_matmul():\n def _create_schedule():\n func = matmul\n sch = tir.Schedule(func, debug_mask=\"all\")\n block = sch.get_block(\"C\")\n i, j, k = sch.get_loops(block)\n i_o, i_i = sch.split(i, factors=[None, 16]) # outer: 32\n j_o, j_i = sch.split(j, factors=[None, 8]) # outer: 64\n sch.reorder(i_o, j_o, k, j_i, i_i)\n sch.vectorize(j_i)\n sch.parallel(i_o)\n sch.parallel(j_o)\n sch.unroll(k)\n return sch\n\n extractor = ms.feature_extractor.PerStoreFeature()\n (feature,) = extractor.extract_from(\n _make_context(tvm.target.Target(\"llvm\")),\n candidates=[_make_candidate(_create_schedule)],\n )\n feature = feature.numpy()\n assert feature.shape == (1, N_FEATURES)\n f = feature[0]\n # Group 1.1: arith\n assert_allclose(\n actual=f[0:16],\n # fmt: off\n desired=[\n # float math ops\n 0, 27, 27, 0, 0, 0, 0,\n # int math ops\n 0, 29, 29, 0, 0, 0, 0,\n # bool/select ops\n 0, 0,\n ],\n # fmt: on\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.2: vectorize\n assert_allclose(\n actual=f[16:27],\n desired=[1.0, 3.169924, 3.169924, 0, 0, 0, 0, 0, 0, 0, 1],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.3: unroll\n assert_allclose(\n actual=f[27:38],\n desired=[1.0, 9.002815, 9.002815, 0, 0, 0, 0, 0, 0, 0, 1],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.4: parallel\n assert_allclose(\n actual=f[38:49],\n desired=[1.58496, 11.0007, 6.022368, 0, 0, 0, 0, 0, 0, 0, 1],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.5: is_gpu, blockIdx.x/y/z, threadIdx.x/y/z, vthread\n assert_allclose(\n actual=f[49:57],\n desired=[0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.1: Buffer A\n assert_allclose(\n actual=f[57:75],\n desired=[\n 1,\n 0,\n 0,\n 29,\n 20,\n 27,\n 14,\n 1,\n 0,\n 0,\n 4.087463,\n 7.0552826,\n 3.169925,\n 26,\n 17,\n 24,\n 11.0007038,\n 9.002815,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.2: Buffer C\n assert_allclose(\n actual=f[75:93],\n desired=[\n 0.0,\n 0.0,\n 1.0,\n 29.0,\n 20.000001907348633,\n 27.0,\n 14.00008773803711,\n 1.0,\n 0.0,\n 0.0,\n 7.011227130889893,\n 9.250298500061035,\n 9.002815246582031,\n 20.000001907348633,\n 11.000703811645508,\n 18.0000057220459,\n 5.044394016265869,\n 9.002815246582031,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.3: Buffer B\n assert_allclose(\n actual=f[93:111],\n desired=[\n 1.0,\n 0.0,\n 0.0,\n 29.0,\n 20.000001907348633,\n 19.000001907348633,\n 14.00008773803711,\n 1.0,\n 0.0,\n 0.0,\n 1.0,\n 3.700439691543579,\n 4.087462902069092,\n 25.0,\n 16.000022888183594,\n 15.000043869018555,\n 10.001408194392809,\n 0.0,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.4: Dummy padding\n assert_allclose(\n actual=f[111:129],\n desired=[0.0] * 18,\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.5: Dummy padding\n assert_allclose(\n actual=f[129:147],\n desired=[0.0] * 18,\n 
rtol=1e-5,\n atol=1e-5,\n )\n # Group 3: Arithmetic intensity\n assert_allclose(\n actual=f[147:157],\n desired=[\n 0.7097842693328857,\n 0.7408391237258911,\n 0.8750449419021606,\n 0.9449487924575806,\n 1.0148526430130005,\n 1.0847564935684204,\n 1.113688349723816,\n 1.1394684314727783,\n 1.2119636535644531,\n 1.2971993684768677,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 4 & 5\n assert_allclose(\n actual=f[157:164],\n desired=[\n 20.000001907348633,\n 18.0000057220459,\n 1.0,\n 27.0,\n 27.0,\n 2.5849626064300537,\n 0.0,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n\n\ndef test_cpu_fusion():\n # pylint: disable=all\n @T.prim_func\n def func(a: T.handle, b: T.handle, c: T.handle) -> None:\n A = T.match_buffer(a, [64, 32], dtype=\"float32\")\n B = T.match_buffer(b, [64, 32], dtype=\"float32\")\n C = T.match_buffer(c, [64, 32], dtype=\"float32\")\n for i, j in T.grid(64, 32): # type: ignore\n with T.block():\n T.reads([A[i, j], B[i, j]]) # type: ignore\n T.writes([B[i, j], C[i, j]]) # type: ignore\n with T.block(\"B\"):\n T.reads([A[i, j]]) # type: ignore\n T.writes([B[i, j]]) # type: ignore\n B[i, j] = A[i, j] # type: ignore\n with T.block(\"C\"):\n T.reads([B[i, j]]) # type: ignore\n T.writes([C[i, j]]) # type: ignore\n C[i, j] = B[i, j] # type: ignore\n\n # pylint: enable=all\n\n def _create_schedule():\n return tir.Schedule(func, debug_mask=\"all\")\n\n extractor = ms.feature_extractor.PerStoreFeature()\n (feature,) = extractor.extract_from(\n _make_context(tvm.target.Target(\"llvm\")),\n candidates=[_make_candidate(_create_schedule)],\n )\n feature = feature.numpy()\n assert feature.shape == (2, N_FEATURES)\n ## Features for BufferStore(B)\n f = feature[0]\n # Group 1.1: arith\n assert_allclose(\n actual=f[0:16],\n # fmt: off\n desired=[0.0] * 16,\n # fmt: on\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.2: vectorize\n assert_allclose(\n actual=f[16:27],\n desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.3: unroll\n assert_allclose(\n actual=f[27:38],\n desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.4: parallel\n assert_allclose(\n actual=f[38:49],\n desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.5: is_gpu, blockIdx.x/y/z, threadIdx.x/y/z, vthread\n assert_allclose(\n actual=f[49:57],\n desired=[0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.1: Buffer A\n assert_allclose(\n actual=f[57:75],\n desired=[\n 1.0,\n 0.0,\n 0.0,\n 13.000176429748535,\n 13.000176429748535,\n 7.011227130889893,\n 7.011227130889893,\n 0.0,\n 0.0,\n 1.0,\n 0.0,\n 0.0,\n 0.0,\n 14.00008773803711,\n 14.00008773803711,\n 8.005624771118164,\n 8.005624771118164,\n 1.0,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.2: Buffer B\n assert_allclose(\n actual=f[75:93],\n desired=[\n 0.0,\n 1.0,\n 0.0,\n 13.000176429748535,\n 13.000176429748535,\n 7.011227130889893,\n 7.011227130889893,\n 0.0,\n 0.0,\n 1.0,\n 0.0,\n 0.0,\n 0.0,\n 14.00008773803711,\n 14.00008773803711,\n 8.005624771118164,\n 8.005624771118164,\n 1.0,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.3: Dummy padding\n assert_allclose(\n actual=f[93:111],\n desired=[0.0] * 18,\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.4: Dummy padding\n assert_allclose(\n actual=f[111:129],\n desired=[0.0] * 18,\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.5: Dummy padding\n assert_allclose(\n actual=f[129:147],\n desired=[0.0] * 18,\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 3: 
Arithmetic intensity\n assert_allclose(\n actual=f[147:157],\n desired=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 4 & 5\n assert_allclose(\n actual=f[157:164],\n desired=[\n 13.000176,\n 11.000703811645508,\n 1.0,\n 11.000703811645508,\n 11.000703811645508,\n 1.5849624872207642,\n 0.0,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n ## Features for BufferStore(C)\n f = feature[1]\n # Group 1.1: arith\n assert_allclose(\n actual=f[0:16],\n # fmt: off\n desired=[0.0] * 16,\n # fmt: on\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.2: vectorize\n assert_allclose(\n actual=f[16:27],\n desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.3: unroll\n assert_allclose(\n actual=f[27:38],\n desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.4: parallel\n assert_allclose(\n actual=f[38:49],\n desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.5: is_gpu, blockIdx.x/y/z, threadIdx.x/y/z, vthread\n assert_allclose(\n actual=f[49:57],\n desired=[0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.1: Buffer B\n assert_allclose(\n actual=f[57:75],\n desired=[\n 1.0,\n 0.0,\n 0.0,\n 13.000176429748535,\n 13.000176429748535,\n 7.011227130889893,\n 7.011227130889893,\n 0.0,\n 1.0,\n 0.0,\n 1.0,\n 4.087462902069092,\n 1.0,\n 13.000176429748535,\n 13.000176429748535,\n 7.011227130889893,\n 7.011227130889893,\n 1.0,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.2: Buffer C\n assert_allclose(\n actual=f[75:93],\n desired=[\n 0.0,\n 1.0,\n 0.0,\n 13.000176429748535,\n 13.000176429748535,\n 7.011227130889893,\n 7.011227130889893,\n 0.0,\n 0.0,\n 1.0,\n 0.0,\n 0.0,\n 0.0,\n 14.00008773803711,\n 14.00008773803711,\n 8.005624771118164,\n 8.005624771118164,\n 1.0,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.3: Dummy padding\n assert_allclose(\n actual=f[93:111],\n desired=[0.0] * 18,\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.4: Dummy padding\n assert_allclose(\n actual=f[111:129],\n desired=[0.0] * 18,\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.5: Dummy padding\n assert_allclose(\n actual=f[129:147],\n desired=[0.0] * 18,\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 3: Arithmetic intensity\n assert_allclose(\n actual=f[147:157],\n desired=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 4 & 5\n assert_allclose(\n actual=f[157:164],\n desired=[\n 13.000176429748535,\n 11.000703811645508,\n 1.0,\n 11.000703811645508,\n 11.000703811645508,\n 1.5849624872207642,\n 0.0,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n\n\ndef test_gpu():\n def _create_schedule():\n func = matmul\n sch = tir.Schedule(func, debug_mask=\"all\")\n c = sch.get_block(\"C\")\n c_local = sch.cache_write(c, 0, \"local\")\n i, j, k = sch.get_loops(c)\n # pylint: disable=invalid-name\n i0, i1, i2, i3, i4 = sch.split(i, factors=[None, 1, 16, 32, 1]) # outer: 1\n j0, j1, j2, j3, j4 = sch.split(j, factors=[None, 4, 1, 1, 16]) # outer: 8\n k0, k1, k2 = sch.split(k, factors=[None, 1, 2]) # outer: 256\n # pylint: enable=invalid-name\n # fmt: off\n sch.reorder(\n i0, j0, # S\n i1, j1, # S\n i2, j2, # S\n k0, # R\n k1, # R\n i3, j3, # S\n k2, # R\n i4, j4, # S\n )\n # fmt: on\n # thread binding\n i0_j0 = sch.fuse(i0, j0)\n i1_j1 = sch.fuse(i1, j1)\n i2_j2 = sch.fuse(i2, j2)\n sch.bind(i0_j0, \"blockIdx.x\")\n sch.bind(i1_j1, \"vthread.x\")\n sch.bind(i2_j2, \"threadIdx.x\")\n # fusion\n 
sch.reverse_compute_at(c_local, i2_j2)\n # cache read 'A'\n a_shared = sch.cache_read(c, 1, \"shared\")\n sch.compute_at(a_shared, k0)\n _, _, _, _, a_i, a_j = sch.get_loops(a_shared)\n a_ij = sch.fuse(a_i, a_j)\n _, a_j = sch.split(a_ij, factors=[None, 16]) # outer: 64\n sch.bind(a_j, \"threadIdx.x\")\n # cache read 'B'\n b_shared = sch.cache_read(c, 2, \"shared\")\n sch.compute_at(b_shared, k0)\n _, _, _, _, b_i, b_j = sch.get_loops(b_shared)\n b_ij = sch.fuse(b_i, b_j)\n _, b_j = sch.split(b_ij, factors=[None, 16]) # outer: 8\n sch.bind(b_j, \"threadIdx.x\")\n # auto unroll\n sch.annotate(i0_j0, \"pragma_auto_unroll_max_step\", tir.IntImm(\"int32\", 1024))\n sch.annotate(i0_j0, \"pragma_unroll_explicit\", tir.IntImm(\"int32\", 1))\n return sch\n\n extractor = ms.feature_extractor.PerStoreFeature()\n (feature,) = extractor.extract_from(\n _make_context(tvm.target.Target(\"cuda\")),\n candidates=[_make_candidate(_create_schedule)],\n )\n feature = feature.numpy()\n assert feature.shape == (4, N_FEATURES)\n ### Check feature[0]: BufferStore(A_shared) <= A[...]\n f = feature[0]\n # Group 1.1: arith\n assert_allclose(\n actual=f[0:16],\n desired=[\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 24.000000085991324,\n 24.000000085991324,\n 24.000000085991324,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.2: vectorize\n assert_allclose(\n actual=f[16:27],\n desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.3: unroll\n assert_allclose(\n actual=f[27:38],\n desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.4: parallel\n assert_allclose(\n actual=f[38:49],\n desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.5: is_gpu, blockIdx.x/y/z, threadIdx.x/y/z, vthread\n assert_allclose(\n actual=f[49:57],\n desired=[1.0, 3.169925001442312, 1.0, 1.0, 4.087462841250339, 1.0, 1.0, 2.321928094887362],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.1: Buffer A\n assert_allclose(\n actual=f[57:75],\n desired=[\n 1.0,\n 0.0,\n 0.0,\n 25.000000042995662,\n 20.000001375860553,\n 23.00000017198264,\n 14.000088052430122,\n 1.0,\n 0.0,\n 0.0,\n 18.00000550343433,\n 20.00562591970089,\n 2.321928094887362,\n 23.00000017198264,\n 18.00000550343433,\n 21.000000687930438,\n 12.0003521774803,\n 12.0003521774803,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.2: Buffer A.shared\n assert_allclose(\n actual=f[75:93],\n desired=[\n 0.0,\n 1.0,\n 0.0,\n 25.000000042995662,\n 12.0003521774803,\n 23.00000017198264,\n 9.002815015607053,\n 1.0,\n 0.0,\n 0.0,\n 6.022367813028454,\n 11.98049663618346,\n 8.005624549193879,\n 17.000011006847668,\n 4.087462841250339,\n 15.000044026886828,\n 1.584962500721156,\n 4.087462841250339,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.3: Dummy padding\n assert_allclose(\n actual=f[93:111],\n desired=[\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.4: Dummy padding\n assert_allclose(\n actual=f[111:129],\n desired=[\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.5: Dummy padding\n assert_allclose(\n actual=f[129:147],\n desired=[\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 
0.0,\n 0.0,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 3: Arithmetic intensity\n assert_allclose(\n actual=f[147:157],\n desired=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 4 & 5\n assert_allclose(\n actual=f[157:164],\n desired=[\n 12.0003521774803,\n 27.000000010748916,\n 17.000011006847668,\n 6.022367813028454,\n 23.00000017198264,\n 2.584962500721156,\n 10.001408,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n ### Check feature[1]: BufferStore(B_shared) <= B[...]\n f = feature[1]\n # Group 1.1: arith\n assert_allclose(\n actual=f[0:16],\n desired=[\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 21.584962959341485,\n 21.584962959341485,\n 21.000000687930438,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.2: vectorize\n assert_allclose(\n actual=f[16:27],\n desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.3: unroll\n assert_allclose(\n actual=f[27:38],\n desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.4: parallel\n assert_allclose(\n actual=f[38:49],\n desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.5: is_gpu, blockIdx.x/y/z, threadIdx.x/y/z, vthread\n assert_allclose(\n actual=f[49:57],\n desired=[1.0, 3.169925001442312, 1.0, 1.0, 4.087462841250339, 1.0, 1.0, 2.321928094887362],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.1: Buffer B\n assert_allclose(\n actual=f[57:75],\n desired=[\n 1.0,\n 0.0,\n 0.0,\n 22.00000034396526,\n 20.000001375860553,\n 20.000001375860553,\n 14.000088052430122,\n 1.0,\n 0.0,\n 0.0,\n 15.000044026886828,\n 20.17555076886471,\n 2.321928094887362,\n 20.000001375860553,\n 18.00000550343433,\n 18.00000550343433,\n 12.0003521774803,\n 4.087462841250339,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.2: Buffer B.shared\n assert_allclose(\n actual=f[75:93],\n desired=[\n 0.0,\n 1.0,\n 0.0,\n 22.00000034396526,\n 9.002815015607053,\n 20.000001375860553,\n 3.169925001442312,\n 1.0,\n 0.0,\n 0.0,\n 3.169925001442312,\n 9.61654884377899,\n 8.005624549193879,\n 14.000088052430122,\n 1.584962500721156,\n 12.0003521774803,\n 0.044394119358453436,\n 4.087462841250339,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.3: Dummy padding\n assert_allclose(\n actual=f[93:111],\n desired=[\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.4: Dummy padding\n assert_allclose(\n actual=f[111:129],\n desired=[\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.5: Dummy padding\n assert_allclose(\n actual=f[129:147],\n desired=[\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 3: Arithmetic intensity\n assert_allclose(\n actual=f[147:157],\n desired=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 4 & 5\n assert_allclose(\n actual=f[157:164],\n desired=[\n 9.002815015607053,\n 24.000000085991324,\n 17.000011006847668,\n 3.169925001442312,\n 20.000001375860553,\n 2.584962500721156,\n 10.001408,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n ### Check feature[2]: BufferStore(C_local) <= C_local[...] + A_shared[...] 
* B_shared[...]\n f = feature[2]\n # Group 1.1: arith\n assert_allclose(\n actual=f[0:16],\n desired=[\n 0.0,\n 27.000000010748916,\n 27.000000010748916,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 28.000000005374456,\n 28.000000005374456,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.2: vectorize\n assert_allclose(\n actual=f[16:27],\n desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.3: unroll\n assert_allclose(\n actual=f[27:38],\n desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.4: parallel\n assert_allclose(\n actual=f[38:49],\n desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.5: is_gpu, blockIdx.x/y/z, threadIdx.x/y/z, vthread\n assert_allclose(\n actual=f[49:57],\n desired=[1.0, 3.169925001442312, 1.0, 1.0, 4.087462841250339, 1.0, 1.0, 2.321928094887362],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.1: Buffer B.shared\n assert_allclose(\n actual=f[57:75],\n desired=[\n 1.0,\n 0.0,\n 0.0,\n 29.00000000268723,\n 9.002815015607053,\n 23.00000017198264,\n 3.169925001442312,\n 1.0,\n 0.0,\n 0.0,\n 5.044394119358453,\n 7.651051691178929,\n 5.044394119358453,\n 24.000000085991324,\n 4.087462841250339,\n 18.00000550343433,\n 0.32192809488736235,\n 1.0,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.2: Buffer C.local\n assert_allclose(\n actual=f[75:93],\n desired=[\n 0.0,\n 0.0,\n 1.0,\n 29.00000000268723,\n 11.000704269011246,\n 23.00000017198264,\n 5.044394119358453,\n 1.0,\n 0.0,\n 0.0,\n 4.087462841250339,\n 7.05528243550119,\n 1.584962500721156,\n 28.000000005374456,\n 10.001408194392809,\n 22.00000034396526,\n 4.087462841250339,\n 1.0,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.3: Buffer A.shared\n assert_allclose(\n actual=f[93:111],\n desired=[\n 1.0,\n 0.0,\n 0.0,\n 29.00000000268723,\n 12.0003521774803,\n 19.00000275171979,\n 9.002815015607053,\n 1.0,\n 0.0,\n 0.0,\n 1.0,\n 3.700439718141092,\n 4.087462841250339,\n 25.000000042995662,\n 8.005624549193879,\n 15.000044026886828,\n 5.044394119358453,\n 0.0,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.4: Dummy padding\n assert_allclose(\n actual=f[111:129],\n desired=[\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.5: Dummy padding\n assert_allclose(\n actual=f[129:147],\n desired=[\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 3: Arithmetic intensity\n assert_allclose(\n actual=f[147:157],\n desired=[\n 0.7097842504665767,\n 0.7548801745187567,\n 0.8775907547541741,\n 0.9957389916154509,\n 1.2446737395193135,\n 1.493608487423176,\n 1.7093103019954263,\n 1.8031580276850985,\n 1.9841832691827785,\n 2.204648076869754,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 4 & 5\n assert_allclose(\n actual=f[157:164],\n desired=[\n 11.000704269011246,\n 18.00000550343433,\n 9.002815015607053,\n 18.00000550343433,\n 27.000000010748916,\n 3.0,\n 10.001408,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n ### Check feature[3]: BufferStore(C) <= C_local[...]\n f = feature[3]\n # Group 1.1: arith\n assert_allclose(\n actual=f[0:16],\n desired=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.2: vectorize\n assert_allclose(\n 
actual=f[16:27],\n desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.3: unroll\n assert_allclose(\n actual=f[27:38],\n desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.4: parallel\n assert_allclose(\n actual=f[38:49],\n desired=[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 1.5: is_gpu, blockIdx.x/y/z, threadIdx.x/y/z, vthread\n assert_allclose(\n actual=f[49:57],\n desired=[1.0, 3.169925001442312, 1.0, 1.0, 4.087462841250339, 1.0, 1.0, 2.321928094887362],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.1: Buffer C\n assert_allclose(\n actual=f[57:75],\n desired=[\n 0.0,\n 1.0,\n 0.0,\n 20.000001375860553,\n 20.000001375860553,\n 14.000088052430122,\n 14.000088052430122,\n 0.0,\n 0.0,\n 1.0,\n 0.0,\n 0.0,\n 0.0,\n 21.000000687930438,\n 21.000000687930438,\n 15.000044026886828,\n 15.000044026886828,\n 1.0,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.2: Buffer C.local\n assert_allclose(\n actual=f[75:93],\n desired=[\n 1.0,\n 0.0,\n 0.0,\n 20.000001375860553,\n 11.000704269011246,\n 14.000088052430122,\n 5.044394119358453,\n 1.0,\n 0.0,\n 0.0,\n 9.002815015607053,\n 12.0003521774803,\n 4.087462841250339,\n 16.00002201361136,\n 7.011227255423254,\n 10.001408194392809,\n 1.584962500721156,\n 1.0,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.3: Dummy padding\n assert_allclose(\n actual=f[93:111],\n desired=[\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.4: Dummy padding\n assert_allclose(\n actual=f[111:129],\n desired=[\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 2.5: Dummy padding\n assert_allclose(\n actual=f[129:147],\n desired=[\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 3: Arithmetic intensity\n assert_allclose(\n actual=f[147:157],\n desired=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n rtol=1e-5,\n atol=1e-5,\n )\n # Group 4 & 5\n assert_allclose(\n actual=f[157:164],\n desired=[\n 20.000001375860553,\n 18.00000550343433,\n 1.0,\n 18.00000550343433,\n 18.00000550343433,\n 2.584962500721156,\n 10.001408,\n ],\n rtol=1e-5,\n atol=1e-5,\n )\n\n\ndef test_cpu_layout_transform():\n extractor = ms.feature_extractor.PerStoreFeature()\n (feature,) = extractor.extract_from(\n _make_context(tvm.target.Target(\"llvm\")),\n candidates=[_make_candidate(lambda: tir.Schedule(LayoutTransform))],\n )\n\n\nif __name__ == \"__main__\":\n sys.exit(pytest.main([__file__] + sys.argv[1:]))\n"
] | [
[
"numpy.array"
],
[
"numpy.concatenate"
],
[
"numpy.expand_dims",
"numpy.asarray",
"numpy.squeeze",
"numpy.median",
"numpy.std",
"numpy.mean",
"numpy.transpose",
"numpy.argsort",
"numpy.array",
"scipy.special.softmax"
],
[
"numpy.random.uniform",
"numpy.random.random",
"numpy.iinfo",
"numpy.random.seed"
],
[
"numpy.zeros"
],
[
"numpy.random.uniform",
"numpy.array",
"numpy.zeros",
"numpy.random.seed"
],
[
"numpy.matmul"
],
[
"numpy.random.uniform",
"numpy.zeros",
"numpy.prod"
],
[
"numpy.dtype"
],
[
"numpy.log",
"numpy.ones",
"numpy.concatenate",
"numpy.random.randn",
"numpy.random.rand",
"numpy.random.uniform",
"numpy.exp",
"numpy.empty"
],
[
"numpy.array",
"numpy.absolute",
"numpy.dtype"
],
[
"numpy.random.uniform"
],
[
"numpy.max",
"numpy.zeros"
],
[
"numpy.testing.assert_allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"1.5",
"1.2",
"1.7",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
astokely/seekr2 | [
"2fd8496dc885339437678a729b1f97a4b0bf9cfd",
"2fd8496dc885339437678a729b1f97a4b0bf9cfd",
"2fd8496dc885339437678a729b1f97a4b0bf9cfd"
] | [
"seekr2/tests/test_analyze.py",
"seekr2/analyze.py",
"seekr2/modules/elber_base.py"
] | [
"\"\"\"\ntest_analyze.py\n\nTesting analyze.py\n\"\"\"\n\nimport os\nfrom collections import defaultdict\n\nimport numpy as np\n\nimport seekr2.modules.common_analyze as common_analyze\nimport seekr2.modules.mmvt_analyze as mmvt_analyze\nimport seekr2.analyze as analyze\nimport seekr2.modules.common_base as base\nimport seekr2.modules.mmvt_base as mmvt_base\nimport seekr2.tests.smoluchowski_system as smoluchowski\n\nthis_dir = os.path.dirname(os.path.realpath(__file__))\n\ntest_output_filename = os.path.join(this_dir, \"test_analyze_outputfile.txt\")\ntest_statistics_filename = os.path.join(this_dir, \"test_analyze_statistics.txt\")\n\ndef test_read_output_file():\n N_i_j_alpha, R_i_alpha_list, R_i_alpha_average, \\\n R_i_alpha_std_dev, R_i_alpha_total, N_alpha_beta, \\\n T_alpha_list, T_alpha_average, T_alpha_std_dev, \\\n T_alpha_total, existing_lines \\\n = mmvt_analyze.openmm_read_output_file_list(\n [test_output_filename])\n \n N_i_j_alpha_dict1 = N_i_j_alpha\n R_i_alpha_dict1 = R_i_alpha_total\n N_alpha_beta_dict1 = N_alpha_beta\n T_alpha1 = T_alpha_total\n #N_i_j_alpha_dict1, R_i_alpha_dict1, N_alpha_beta_dict1, T_alpha1 = \\\n # analyze.openmm_read_output_file_list([test_output_filename])\n \n N_i_j_alpha_dict2 = {(1, 2): 52, (2, 1): 52}\n R_i_alpha_dict2 = {1: 1658.696, 2: 198.912}\n N_alpha_beta_dict2 = {1: 2423, 2: 98}\n T_alpha2 = 1954.760\n \n for key in N_i_j_alpha_dict1:\n assert key in N_i_j_alpha_dict2\n assert np.isclose(N_i_j_alpha_dict1[key], N_i_j_alpha_dict2[key])\n \n for key in R_i_alpha_dict1:\n assert key in R_i_alpha_dict2\n assert np.isclose(R_i_alpha_dict1[key], R_i_alpha_dict2[key])\n \n for key in N_alpha_beta_dict1:\n assert key in N_alpha_beta_dict2\n assert np.isclose(N_alpha_beta_dict1[key], N_alpha_beta_dict2[key])\n \n assert np.isclose(T_alpha1, T_alpha2)\n \n N_i_j_alpha, R_i_alpha_list, R_i_alpha_average, \\\n R_i_alpha_std_dev, R_i_alpha_total, N_alpha_beta, \\\n T_alpha_list, T_alpha_average, T_alpha_std_dev, \\\n T_alpha_total, existing_lines \\\n = mmvt_analyze.openmm_read_output_file_list([test_output_filename, \n test_output_filename], \n skip_restart_check=True)\n \n N_i_j_alpha_dict1 = N_i_j_alpha\n R_i_alpha_dict1 = R_i_alpha_total\n N_alpha_beta_dict1 = N_alpha_beta\n T_alpha1 = T_alpha_total\n #N_i_j_alpha_dict1, R_i_alpha_dict1, N_alpha_beta_dict1, T_alpha = \\\n # analyze.openmm_read_output_file_list([test_output_filename, \n # test_output_filename])\n \n for key in N_i_j_alpha_dict1:\n assert key in N_i_j_alpha_dict2\n assert np.isclose(N_i_j_alpha_dict1[key], 2*N_i_j_alpha_dict2[key], \n rtol=0.01)\n \n for key in N_alpha_beta_dict1:\n assert key in N_alpha_beta_dict2\n assert np.isclose(N_alpha_beta_dict1[key], 2*N_alpha_beta_dict2[key], \n rtol=0.01)\n \n return\n\ndef test_minor2d():\n A = np.array([[1,2,3],[4,5,6],[7,8,9]])\n B = np.array([[1,3],[7,9]])\n C = np.array([[1,2],[4,5]])\n D = np.array([[2,8],[3,9]])\n assert common_analyze.minor2d(A, 1, 1).all() == B.all()\n assert common_analyze.minor2d(A, 2, 2).all() == C.all()\n assert common_analyze.minor2d(A, 1, 0).all() == D.all()\n return\n \ndef test_minor1d():\n A = np.array([1,2,3])\n B = np.array([1,3])\n C = np.array([2,3])\n D = np.array([1,2])\n assert common_analyze.minor1d(A, 1).all() == B.all()\n assert common_analyze.minor1d(A, 0).all() == C.all()\n assert common_analyze.minor1d(A, 2).all() == D.all()\n return\n\ndef test_pretty_string_value_error():\n mystr = common_analyze.pretty_string_value_error(\n 5.6e-2, 2.0e-3, error_digits=1, use_unicode=False)\n expectedstr 
= \"5.6 +/- 0.2 * 10^-02\"\n assert(mystr == expectedstr)\n mystr = common_analyze.pretty_string_value_error(\n 5.6e-2, 2.0e-1, error_digits=1, use_unicode=False)\n expectedstr = \"5.6 +/- 20.0 * 10^-02\"\n assert(mystr == expectedstr)\n mystr = common_analyze.pretty_string_value_error(\n 1.23456789e8, 4.5678e5, error_digits=2, use_unicode=False)\n expectedstr = \"1.2346 +/- 0.0046 * 10^+08\"\n assert(mystr == expectedstr)\n\ndef make_fake_output_file_osc(anchor, tmp_path, timestep=1.0):\n num_steps = 50\n \n mmvt_output_filename = os.path.join(\n tmp_path, anchor.name, \"prod\", \n \"%s%d.%s\" % (mmvt_base.OPENMMVT_BASENAME, 1, \n mmvt_base.OPENMMVT_EXTENSION))\n with open(mmvt_output_filename, \"w\") as f:\n if anchor.index == 0:\n for i in range(num_steps+1):\n line = \"%d,%d,%f\\n\" % (1, i, i*timestep)\n f.write(line)\n \n else:\n for i in range(num_steps+1):\n if (i % 2) == 0:\n line = \"%d,%d,%f\\n\" % (2, i, i*timestep)\n f.write(line)\n else:\n line = \"%d,%d,%f\\n\" % (1, i, i*timestep)\n f.write(line)\n return\n\ndef make_fake_output_file2(anchor, tmp_path, ups=1, downs=9, timestep=1.0):\n num_steps = 50\n total = ups + downs\n \n mmvt_output_filename = os.path.join(\n tmp_path, anchor.name, \"prod\", \n \"%s%d.%s\" % (mmvt_base.OPENMMVT_BASENAME, 1, \n mmvt_base.OPENMMVT_EXTENSION))\n with open(mmvt_output_filename, \"w\") as f:\n if anchor.index == 0:\n for i in range(num_steps+1):\n line = \"%d,%d,%f\\n\" % (1, i, i*timestep)\n f.write(line)\n \n else:\n for i in range(num_steps+1):\n if (i % total) < ups:\n line = \"%d,%d,%f\\n\" % (2, i, i*timestep)\n f.write(line)\n else:\n line = \"%d,%d,%f\\n\" % (1, i, i*timestep)\n f.write(line)\n return\n\ndef test_solve_rate_matrix():\n Q = np.array(\n [[-0.5, 0.5, 0.0, 0.0],\n [0.1, -0.3, 0.2, 0.0],\n [0.0, 0.15, -0.3, 0.15],\n [0.0, 0.0, 0.3, -0.4]])\n \n K = np.zeros(Q.shape, dtype=np.longdouble)\n for i in range(Q.shape[0]):\n for j in range(Q.shape[0]):\n if i == j:\n K[i,j] = 0.0\n else:\n K[i,j] = -Q[i,j] / Q[i,i]\n \n for i in range(K.shape[0]-1):\n my_sum = sum(K[i,:])\n for j in range(K.shape[0]):\n K[i,j] = K[i,j] / my_sum\n \n test_times_1 = common_analyze.solve_rate_matrix(Q)\n \n one_vector = np.ones((Q.shape[0]))\n test_times_2 = np.linalg.solve(Q, -one_vector)\n \n error = np.linalg.norm(test_times_2 - test_times_1)\n assert error < 1e-8\n return\n\n\"\"\"\ndef make_smol_calculation(tmp_path, func=None):\n num_anchors = 10\n D = 0.01\n interval = 1.0\n n = 101\n \n intervals = []\n for i in range(num_anchors):\n intervals.append(interval)\n \n if func is None:\n func = smoluchowski.expW_constant\n \n q_s = np.zeros(num_anchors)\n mymodel = smoluchowski.make_smol_model(tmp_path, num_anchors, intervals)\n my_analysis = analyze.Analysis(mymodel)\n elberN_ij = defaultdict(float)\n elberR_i = defaultdict(float)\n smols = []\n for i, anchor in enumerate(mymodel.anchors[:-1]):\n a = interval*i\n b = interval*(i+1)\n smol = smoluchowski.Smoluchowski(a, b, func, n=n, D=D)\n q_s[i] = smol.expWq\n if i == 0:\n smol.reflect_lower = True\n k_backwards, k_forwards, T_alpha, N_backwards, N_forwards, \\\n R_i_backwards, R_i_forwards, N_ij_backwards, N_ij_forwards \\\n = smol.compute_MMVT_kinetics_quantities()\n \n N_i_j_alpha_dict = defaultdict(int)\n R_i_alpha_dict = defaultdict(float)\n N_alpha_beta_dict = defaultdict(int)\n new_time_factor = (R_i_forwards + R_i_backwards) / T_alpha\n new_T_alpha = new_time_factor * T_alpha\n if i == 0:\n N_alpha_beta_dict[1] = new_time_factor\n R_i_alpha_dict[1] = new_T_alpha\n else:\n 
N_i_j_alpha_dict[(1, 2)] = N_ij_forwards\n N_i_j_alpha_dict[(2, 1)] = N_ij_backwards\n R_i_alpha_dict[1] = R_i_forwards\n R_i_alpha_dict[2] = R_i_backwards\n N_alpha_beta_dict[1] = N_backwards * new_time_factor\n N_alpha_beta_dict[2] = N_forwards * new_time_factor\n \n anchor_stats = mmvt_analyze.MMVT_anchor_statistics(alpha=i)\n anchor_stats.N_i_j_alpha = N_i_j_alpha_dict\n anchor_stats.R_i_alpha_total = R_i_alpha_dict\n anchor_stats.R_i_alpha_std_dev = R_i_alpha_dict\n anchor_stats.R_i_alpha_list = {}\n for key in anchor_stats.R_i_alpha_total:\n anchor_stats.R_i_alpha_list[key] = []\n anchor_stats.N_alpha_beta = N_alpha_beta_dict\n anchor_stats.T_alpha_total = new_T_alpha\n anchor_stats.T_alpha_std_dev = new_T_alpha\n for key in N_alpha_beta_dict:\n anchor_stats.k_alpha_beta[key] = N_alpha_beta_dict[key] \\\n / new_T_alpha\n\n # N_i_j_alpha_dict, R_i_alpha_dict, N_alpha_beta_dict, new_T_alpha, \n # alpha=i)\n # FIll out values here...\n my_analysis.anchor_stats_list.append(anchor_stats)\n smols.append(smol)\n \n for i, anchor in enumerate(mymodel.anchors[:-1]):\n smol1 = smols[i]\n if i == 0:\n smol2 = smols[i+1]\n elberN_ij[(0,1)] = 1.0\n # need to make sure that u and exp(-beta*W) match up\n # on the edge.\n smol1_edge_value = smol1.expWfunc(smol1.b, q=smol1.expWq)\n elberR_i[0] = (smol2.u_q_forward + (1.0/smol1_edge_value)) / (smol2.J_forward)\n elif i == mymodel.num_milestones-1:\n elberN_ij[(mymodel.num_milestones-1,mymodel.num_milestones-2)] = 1.0\n elberR_i[mymodel.num_milestones-1] = (smol1.u_q_backward) / (smol1.J_backward)\n else:\n smol2 = smols[i+1]\n elberN_ij[(i,i+1)] = smol2.J_forward / (smol2.J_forward + smol1.J_backward)\n elberN_ij[(i,i-1)] = smol1.J_backward / (smol2.J_forward + smol1.J_backward)\n elberR_i[i] = (smol2.u_q_forward + smol1.u_q_backward) / (smol2.J_forward + smol1.J_backward)\n \n my_analysis.mmvt_check_anchor_stats()\n \n #my_analyze._calculate_equilibrium_probability()\n #my_analyze._calculate_overall_statistics()\n #my_analysis.extract_data()\n my_analysis.fill_out_data_samples()\n my_analysis.main_data_sample.pi_alpha = np.zeros(mymodel.num_anchors)\n for i, anchor in enumerate(mymodel.anchors[:-1]):\n my_analysis.main_data_sample.pi_alpha[i] = q_s[i] / np.sum(q_s)\n my_analysis.fill_out_data_samples()\n my_analysis.process_data_samples()\n my_analysis.main_data_sample.Q = np.zeros((mymodel.num_milestones, \n mymodel.num_milestones), dtype=np.longdouble)\n elberQ = np.zeros((mymodel.num_milestones, \n mymodel.num_milestones), dtype=np.longdouble)\n for i in range(mymodel.num_milestones):\n for j in range(mymodel.num_milestones):\n if my_analysis.main_data_sample.R_i[i] == 0.0:\n my_analysis.main_data_sample.Q[i,j] = 0.0\n else:\n my_analysis.main_data_sample.Q[i,j] \\\n = my_analysis.main_data_sample.N_ij[i,j] \\\n / my_analysis.main_data_sample.R_i[i]\n if elberR_i[i] > 0.0:\n elberQ[i,j] = elberN_ij[i,j] / elberR_i[i]\n \n for i in range(mymodel.num_milestones):\n my_analysis.main_data_sample.Q[i][i] = \\\n -np.sum(my_analysis.main_data_sample.Q[i])\n elberQ[i][i] = -np.sum(elberQ[i])\n \n #my_analyze._rate_mat_to_prob_mat()\n #print(\"my_analyze.Q:\", my_analyze.Q)\n #print(\"elberQ:\", elberQ)\n #print(\"my_analyze.K:\", my_analyze.K)\n #my_analyze.calculate_kinetics()\n my_analysis.main_data_sample.calculate_kinetics()\n mmvt_time = my_analysis.main_data_sample.MFPTs[(0,\"bulk\")]\n #print(\"mmvt_time:\", mmvt_time)\n my_analysis.main_data_sample.Q = elberQ\n my_analysis.main_data_sample.calculate_kinetics()\n elber_time = 
my_analysis.main_data_sample.MFPTs[(0,\"bulk\")]\n #print(\"elber_time:\", elber_time)\n \n a1 = 0.0\n b1 = interval\n a2 = interval\n b2 = interval*num_anchors\n smol1 = smoluchowski.Smoluchowski(a1, b1, func, n=n, D=D)\n smol2 = smoluchowski.Smoluchowski(a2, b2, func, n=n, D=D)\n q1 = smol1.expWq\n q2 = smol2.expWq\n k_backwards, k_forwards, T_alpha, N_backwards, N_forwards, R_i_backwards, \\\n R_i_forwards, N_ij_backwards, N_ij_forwards \\\n = smol2.compute_MMVT_kinetics_quantities()\n \n J2 = q2 / (R_i_forwards + R_i_backwards)\n correct_time = R_i_forwards + q1/J2\n #print(\"correct_time:\", correct_time)\n print(\"Time predicted by Elber:\", elber_time, \"Time predicted by MMVT:\", \n mmvt_time, \"Exact time:\", correct_time)\n \n \"\"\n x_s = np.arange(0.0, num_anchors, interval)\n func_vals1 = np.zeros(num_anchors)\n func_vals2 = np.zeros(num_anchors)\n print(\"q_s:\", q_s)\n for i, x in enumerate(x_s):\n print(\"i:\", i, \"my_analyze.pi_alpha[i]:\", my_analyze.pi_alpha[i], \"q_s[i]:\", q_s[i] / np.sum(q_s))\n func_vals1[i] = my_analyze.pi_alpha[i]\n func_vals2[i] = q_s[i] / np.sum(q_s)\n \n plt.plot(x_s, func_vals1, \"g\", x_s, func_vals2, \"r\")\n plt.show()\n \"\"\n return mmvt_time, elber_time, correct_time\n \n\ndef test_smoluchowski_solution_flat_1(tmp_path):\n print(\"Constant PMF:\")\n mmvt_time, elber_time, true_time = make_smol_calculation(tmp_path)\n assert np.isclose(mmvt_time, true_time, rtol=0.001)\n assert np.isclose(elber_time, true_time, rtol=0.001)\n \n print(\"linear PMF:\")\n func = smoluchowski.expW_linear\n mmvt_time, elber_time, true_time = make_smol_calculation(tmp_path, func)\n assert np.isclose(mmvt_time, true_time, rtol=0.001)\n assert np.isclose(elber_time, true_time, rtol=0.001)\n \n print(\"quadratic PMF:\")\n func = smoluchowski.expW_quadratic\n mmvt_time, elber_time, true_time = make_smol_calculation(tmp_path, func)\n assert np.isclose(mmvt_time, true_time, rtol=0.001)\n assert np.isclose(elber_time, true_time, rtol=0.001)\n\"\"\"",
"\"\"\"\nanalyze.py\n\nFunctions and objects for analyzing MMVT simulation outputs\n\"\"\"\n\nimport os\nimport argparse\nimport warnings\nimport glob\nfrom collections import defaultdict\nimport math\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport seekr2.modules.common_base as base\nimport seekr2.modules.common_analyze as common_analyze\nimport seekr2.modules.mmvt_analyze as mmvt_analyze\nimport seekr2.modules.elber_analyze as elber_analyze\nimport seekr2.modules.check as check\n\nclass Analysis:\n \"\"\"\n Compute MMVT thermodynamics and kinetics from model data and \n parameters. Utilizes multiple Data_sample objects to get average\n and uncertainties in kinetics/thermo quantities.\n \n Attributes\n ----------\n model : Model()\n The Model() object that will be used to read transition \n statistics and also provides relevant parameters for \n calculations.\n \n pi_alpha : numpy.array\n The pi_alpha quantity in MMVT theory - it represents the\n relative probability of finding the system within anchor alpha.\n It is an array of floats.\n \n pi_alpha_error : numpy.array\n The uncertainty for each value in pi_alpha.\n \n N_ij : defaultdict\n The N_ij quantity in MMVT theory - It represents an estimate of\n the relative number of transitions starting from milestone i \n and ending in milestone j, regardless of the anchor the system\n is in. In this defaultdict object, the keys are tuples of ints\n (i,j), which represent the milestone indices of the source and\n destination milestones, respectively, and the values are floats\n representing the relative 'counts' of transitions from i to j.\n \n R_i : defaultdict\n The R_i quantity in MMVT theory - it represents an estimate of\n the relative time spent after encountering milestone i, \n regardless of the anchor the system is in. In this defaultdict\n object, the keys are ints i representing the milestone index\n and the values are floats representing time.\n \n T : float\n The T quantity in MMVT theory - it represents the \"total\" time\n spent in simulation. It is merely used as a normalization and\n to cancel units in the theory.\n \n Q : numpy.array\n The rate matrix Q in MMVT theory - most of the important\n quantities in MMVT are computed using Q. Each element in row\n i and column j represents the rate of transition between i and\n j.\n \n K : numpy.array\n The transition matrix K, which appears in MMVT theory as well\n as classical milestoning theory. K represents probabilities of\n transitions (whereas Q represents rates of transitions). Each\n element in row i and column j represent the probability of \n transition between i and j.\n \n p_i : numpy.array\n The probability vector p_i, which appears in MMVT theory as \n well as classical milestoning theory. p_i represents \n the probabilites of finding the system within the vicinity of\n milestone i. Therefore, p_i can be used to obtain thermodynamic\n quantities such as the free energy of milestone i.\n \n p_i_error : numpy.array\n The uncertainty for each value in p_i.\n \n free_energy_profile : numpy.array\n The free energy profile as computed from the Boltzmann \n distribution of p_i.\n \n free_energy_profile_err : numpy.array\n The uncertainty of each value in p_i.\n \n MFPTs : dict\n The mean first passage times (MFPTs) between various end states\n as indicated by the model. 
The MFPT dict has keys (i,j) where\n i and j are the indices of system end states and the dict has\n values which represent the expectation values of time spent \n transitioning between states i and j.\n \n MFPT_errors : dict\n The errors in the MFPTs values. THe dictionary keys are \n structured in an identical manner as MFPTs.\n \n k_off : float\n The k-off (off-rate) value. That is, the expected rate of\n unbinding from all states indicated as an end state (weighted\n by their relative probabilities defined by p_i.\n \n k_off_error : float\n The error (uncertainty) in the k-off value.\n \n k_ons : dict\n The k-ons predicted to the various end states within the\n system. The keys are the indeces of the end state milestones\n and the values are the k-ons.\n \n k_on_errors : dict\n The errors in the k-on values. The keys are structured in an\n identical manner as the k_ons dictionary.\n \n force_warning : bool\n Whether to bypass certain errors with a mere warning.\n \n num_error_samples : int\n The number of data samples to create for generating uncertainty\n values.\n \"\"\"\n \n def __init__(self, model, force_warning=False, num_error_samples=0):\n \"\"\"\n Creates the Analyze() object, which applies transition \n statistics and times, as well as MMVT theory, to compute \n kinetics and thermodynamics quantities.\n \"\"\"\n \n self.model = model\n self.anchor_stats_list = []\n self.main_data_sample = None\n self.data_sample_list = []\n self.pi_alpha = None\n self.pi_alpha_error = None\n self.p_i = None\n self.p_i_error = None\n self.free_energy_profile = None\n self.free_energy_profile_err = None\n self.MFPTs = {}\n self.MFPTs_error = {}\n self.k_off = None\n self.k_off_error = None\n self.k_ons = {}\n self.k_ons_error = {}\n self.force_warning = force_warning\n self.num_error_samples = num_error_samples\n return\n \n def elber_check_anchor_stats(self, silent=False):\n \"\"\"\n Check the anchor statistics to make sure that enough bounces\n have been observed to perform the analysis\n \"\"\"\n \n anchors_missing_statistics = []\n for i, anchor in enumerate(self.model.anchors):\n if anchor.bulkstate:\n continue\n anchor_stats = self.anchor_stats_list[i]\n existing_alias_ids = []\n existing_alias_transitions = []\n for key in anchor_stats.N_i_j:\n existing_alias_transitions.append(key)\n \n # Hacky!\n existing_alias_transitions.append(2)\n \n for milestone in anchor.milestones:\n found_problem = False\n if milestone.alias_index not in existing_alias_transitions:\n anchors_missing_statistics.append(anchor.index)\n break\n \n if found_problem:\n break\n \n if len(anchors_missing_statistics) > 0:\n if silent:\n return False\n else:\n error_warning_string = \"Anchor(s) {0} are missing sufficient \"\\\n \"statistics. Consider running simulations of anchor(s) {0} \"\\\n \"for longer time scales or readjust anchor locations to \"\\\n \"make transitions more frequent. 
You may skip this check \"\\\n \"with the --skip_checks (-s) option.\".format(\n anchors_missing_statistics)\n if self.force_warning:\n warnings.warn(error_warning_string)\n else:\n raise common_analyze.MissingStatisticsError(\n error_warning_string)\n \n return True\n \n def mmvt_check_anchor_stats(self, silent=False):\n \"\"\"\n Check the anchor statistics to make sure that enough transitions\n have been observed to perform the analysis\n \"\"\"\n \n anchors_missing_statistics = []\n for i, anchor in enumerate(self.model.anchors):\n if anchor.bulkstate:\n continue\n anchor_stats = self.anchor_stats_list[i]\n existing_alias_ids = []\n existing_alias_transitions = []\n for key in anchor_stats.k_alpha_beta:\n existing_alias_ids.append(key)\n for key in anchor_stats.N_i_j_alpha:\n existing_alias_transitions.append(key)\n \n for milestone in anchor.milestones:\n found_problem = False\n if milestone.alias_index not in existing_alias_ids:\n anchors_missing_statistics.append(anchor.index)\n break\n \n for milestone2 in anchor.milestones:\n if milestone.alias_index == milestone2.alias_index:\n continue\n if (milestone.alias_index, milestone2.alias_index) \\\n not in existing_alias_transitions:\n anchors_missing_statistics.append(anchor.index)\n found_problem = True\n break\n if found_problem:\n break\n \n if len(anchors_missing_statistics) > 0:\n if silent:\n return False\n else:\n error_warning_string = \"Anchor(s) {0} are missing sufficient \"\\\n \"statistics. Consider running simulations of anchor(s) {0} \"\\\n \"for longer time scales or readjust anchor locations to \"\\\n \"make transitions more frequent. You may skip this check \"\\\n \"with the --skip_checks (-s) option.\".format(\n anchors_missing_statistics)\n if self.force_warning:\n warnings.warn(error_warning_string)\n else:\n raise common_analyze.MissingStatisticsError(\n error_warning_string)\n \n return True\n \n def extract_data(self, max_step_list=None, silence_errors=True):\n \"\"\"\n Extract the data from simulations used in this analysis.\n \"\"\"\n \n # If possible, avoid expensive I/O\n files_already_read = False\n if len(self.anchor_stats_list) > 0:\n files_already_read = True\n \n if self.model.openmm_settings is not None:\n timestep = self.model.openmm_settings.langevin_integrator.timestep\n elif self.model.namd_settings is not None:\n timestep = self.model.namd_settings.langevin_integrator.timestep\n else:\n raise Exception(\"No OpenMM or NAMD simulation settings in model.\")\n \n for alpha, anchor in enumerate(self.model.anchors):\n if anchor.bulkstate:\n continue\n if max_step_list is not None:\n max_time = max_step_list[alpha] * timestep\n else:\n max_time = None\n # These contain only alias_id keys, not the true id values\n if not files_already_read:\n if self.model.get_type() == \"mmvt\":\n anchor_stats = mmvt_analyze.MMVT_anchor_statistics(alpha)\n elif self.model.get_type() == \"elber\":\n anchor_stats = elber_analyze.Elber_anchor_statistics(alpha)\n else:\n anchor_stats = self.anchor_stats_list[alpha]\n \n if anchor.md:\n output_file_glob = os.path.join(\n self.model.anchor_rootdir, anchor.directory, \n anchor.production_directory, anchor.md_output_glob)\n \n output_file_list = glob.glob(output_file_glob)\n output_file_list = base.order_files_numerically(\n output_file_list)\n if not silence_errors:\n assert len(output_file_list) > 0, \\\n \"Files not found: %s\" % output_file_glob\n if self.model.openmm_settings is not None:\n anchor_stats.read_output_file_list(\n \"openmm\", output_file_list, max_time, anchor, 
timestep)\n elif self.model.namd_settings is not None:\n anchor_stats.read_output_file_list(\n \"namd\", output_file_list, max_time, anchor, timestep)\n else:\n raise Exception(\"Both OpenMM and NAMD settings missing. \"\\\n \"One of these must be present in the \"\\\n \"model XML.\")\n else:\n pass \n \n if not files_already_read:\n self.anchor_stats_list.append(anchor_stats)\n \n return\n \n def check_extraction(self, silent=False):\n \"\"\"\n Check whether sufficient and correct anchor statistics can \n be used for analysis.\n \"\"\"\n if self.model.get_type() == \"mmvt\":\n result = self.mmvt_check_anchor_stats(silent)\n if self.model.get_type() == \"elber\":\n result = self.elber_check_anchor_stats(silent)\n return result\n \n def fill_out_data_samples_mmvt(self):\n \"\"\"\n Now that the statistics for each anchor have been extracted\n from the output files, construct the global transition\n statistics objects. Applies to systems using MMVT milestoning.\n \"\"\"\n N_alpha_beta = defaultdict(int)\n k_alpha_beta = defaultdict(float)\n N_i_j_alpha = []\n R_i_alpha_total = []\n R_i_alpha_average = []\n R_i_alpha_std_dev = []\n R_i_alpha_count = []\n T_alpha_total = []\n T_alpha_average = []\n T_alpha_std_dev = []\n T_alpha_count = []\n for alpha, anchor1 in enumerate(self.model.anchors):\n if anchor1.bulkstate:\n continue\n anchor_N_alpha_beta = self.anchor_stats_list[alpha].N_alpha_beta\n anchor_k_alpha_beta = self.anchor_stats_list[alpha].k_alpha_beta\n for beta, anchor2 in enumerate(self.model.anchors):\n if anchor2.bulkstate:\n continue\n if alpha == beta:\n continue\n id_alias = anchor1.alias_from_neighbor_id(anchor2.index)\n if id_alias is None:\n continue\n if id_alias in anchor_N_alpha_beta:\n N_alpha_beta[(alpha, beta)] = anchor_N_alpha_beta[id_alias]\n k_alpha_beta[(alpha, beta)] = anchor_k_alpha_beta[id_alias]\n else:\n N_alpha_beta[(alpha, beta)] = 0\n k_alpha_beta[(alpha, beta)] = 0.0\n \n anchor_N_i_j_alpha = self.anchor_stats_list[alpha].N_i_j_alpha\n N_i_j_alpha_element = defaultdict(int)\n for key in anchor_N_i_j_alpha:\n (alias_id_i, alias_id_j) = key\n id_i = anchor1.id_from_alias(alias_id_i)\n id_j = anchor1.id_from_alias(alias_id_j)\n new_key = (id_i, id_j)\n N_i_j_alpha_element[new_key] = anchor_N_i_j_alpha[key]\n N_i_j_alpha.append(N_i_j_alpha_element)\n \n anchor_R_i_alpha = self.anchor_stats_list[alpha].R_i_alpha_total\n anchor_R_i_alpha_std = self.anchor_stats_list[alpha].R_i_alpha_std_dev\n anchor_R_i_alpha_list = self.anchor_stats_list[alpha].R_i_alpha_list\n R_i_alpha_element = defaultdict(float)\n R_i_alpha_count_element = defaultdict(int)\n R_i_alpha_std_element = defaultdict(float)\n for key in anchor_R_i_alpha:\n alias_id_i = key\n id_i = anchor1.id_from_alias(alias_id_i)\n R_i_alpha_element[id_i] = anchor_R_i_alpha[key]\n R_i_alpha_std_element[id_i] = anchor_R_i_alpha_std[key]\n R_i_alpha_count_element[id_i] = len(anchor_R_i_alpha_list[key])\n \n R_i_alpha_total.append(R_i_alpha_element)\n R_i_alpha_std_dev.append(R_i_alpha_std_element)\n R_i_alpha_count.append(R_i_alpha_count_element) \n anchor_T_alpha = self.anchor_stats_list[alpha].T_alpha_total\n anchor_T_alpha_std = self.anchor_stats_list[alpha].T_alpha_std_dev\n anchor_T_alpha_list = self.anchor_stats_list[alpha].T_alpha_list\n T_alpha_total.append(anchor_T_alpha)\n T_alpha_std_dev.append(anchor_T_alpha_std)\n T_alpha_count.append(len(anchor_T_alpha_list))\n \n self.main_data_sample = mmvt_analyze.MMVT_data_sample(\n self.model, N_alpha_beta, k_alpha_beta, N_i_j_alpha, \n R_i_alpha_total, 
T_alpha_total)\n \n for i in range(self.num_error_samples):\n sampled_k_alpha_beta, sampled_N_i_j_alpha, \\\n sampled_R_i_alpha_total, sampled_T_alpha_total \\\n = self.resample_k_N_R_T(\n N_alpha_beta, N_i_j_alpha, R_i_alpha_total,\n R_i_alpha_average, R_i_alpha_std_dev, R_i_alpha_count,\n T_alpha_total, T_alpha_average, T_alpha_std_dev, \n T_alpha_count)\n data_sample = mmvt_analyze.MMVT_data_sample(\n self.model, N_alpha_beta, sampled_k_alpha_beta, \n sampled_N_i_j_alpha, sampled_R_i_alpha_total, \n sampled_T_alpha_total)\n self.data_sample_list.append(data_sample)\n return\n\n def process_data_samples_mmvt(self, pre_equilibrium_approx=False):\n \"\"\"\n Since the global, system-side statistics have been gathered, \n compute the thermodynamic and kinetic quantities and their\n uncertainties. Applies to systems using MMVT milestoning.\n \"\"\"\n self.main_data_sample.calculate_pi_alpha()\n self.main_data_sample.fill_out_data_quantities()\n self.main_data_sample.compute_rate_matrix()\n self.main_data_sample.calculate_thermodynamics()\n self.main_data_sample.calculate_kinetics(pre_equilibrium_approx)\n # do data_sample_list here\n \n k_offs = []\n p_i_list = []\n pi_alpha_list = []\n free_energy_profile_list = []\n MFPTs_list = defaultdict(list)\n k_ons_list = defaultdict(list)\n for i in range(self.num_error_samples):\n data_sample = self.data_sample_list[i]\n data_sample.calculate_pi_alpha()\n data_sample.fill_out_data_quantities()\n data_sample.compute_rate_matrix()\n data_sample.calculate_thermodynamics()\n data_sample.calculate_kinetics(pre_equilibrium_approx, \n bd_sample_from_normal=True)\n k_offs.append(data_sample.k_off)\n p_i_list.append(data_sample.p_i)\n pi_alpha_list.append(data_sample.pi_alpha)\n free_energy_profile_list.append(data_sample.free_energy_profile)\n for key in data_sample.MFPTs:\n MFPTs_list[key].append(data_sample.MFPTs[key])\n for key in data_sample.k_ons:\n k_ons_list[key].append(data_sample.k_ons[key])\n \n pi_alpha_error = np.zeros(self.main_data_sample.pi_alpha.shape[0])\n p_i_error = np.zeros(self.main_data_sample.p_i.shape)\n free_energy_profile_err = np.zeros(\n self.main_data_sample.free_energy_profile.shape)\n k_off_error = None\n MFPTs_error = {}\n k_ons_error = {}\n if len(k_offs) > 0:\n k_off_error = np.std(k_offs)\n for i in range(pi_alpha_error.shape[0]):\n pi_alpha_val_list = []\n for j in range(len(pi_alpha_list)):\n pi_alpha_val_list.append(pi_alpha_list[j][i])\n pi_alpha_error[i] = np.std(pi_alpha_val_list)\n \n for i in range(p_i_error.shape[0]):\n p_i_val_list = []\n for j in range(len(p_i_list)):\n p_i_val_list.append(p_i_list[j][i])\n p_i_error[i] = np.std(p_i_val_list)\n \n for i in range(free_energy_profile_err.shape[0]):\n free_energy_profile_val_list = []\n for j in range(len(free_energy_profile_list)):\n free_energy_profile_val_list.append(free_energy_profile_list[j][i])\n free_energy_profile_err[i] = np.std(free_energy_profile_val_list)\n \n for key in self.main_data_sample.MFPTs:\n MFPTs_error[key] = np.std(MFPTs_list[key])\n \n for key in self.main_data_sample.k_ons:\n k_ons_error[key] = np.std(k_ons_list[key])\n \n self.pi_alpha = self.main_data_sample.pi_alpha\n self.pi_alpha_error = pi_alpha_error\n self.p_i = self.main_data_sample.p_i\n self.p_i_error = p_i_error\n self.free_energy_profile = self.main_data_sample.free_energy_profile\n self.free_energy_profile_err = free_energy_profile_err\n self.MFPTs = self.main_data_sample.MFPTs\n self.MFPTs_error = MFPTs_error\n self.k_off = self.main_data_sample.k_off\n self.k_off_error = 
k_off_error\n self.k_ons = self.main_data_sample.k_ons\n self.k_ons_error = k_ons_error\n return\n \n def fill_out_data_samples_elber(self):\n \"\"\"\n Now that the statistics for each anchor have been extracted\n from the output files, construct the global transition\n statistics objects. Applies to systems using Elber milestoning.\n \"\"\"\n N_i_j_list = []\n R_i_total = []\n R_i_average = []\n R_i_std_dev = []\n R_i_count = []\n bulkstate = None\n for i, anchor1 in enumerate(self.model.anchors):\n if anchor1.bulkstate:\n bulkstate = i\n continue\n \n anchor_N_i_j = self.anchor_stats_list[i].N_i_j\n N_i_j_element = defaultdict(int)\n for key in anchor_N_i_j:\n alias_id_j = key\n id_j = anchor1.id_from_alias(alias_id_j)\n new_key = (i, id_j)\n N_i_j_element[new_key] = anchor_N_i_j[key]\n N_i_j_list.append(N_i_j_element)\n \n anchor_R_i = self.anchor_stats_list[i].R_i_total\n anchor_R_i_std = self.anchor_stats_list[i].R_i_std_dev\n anchor_R_i_list = self.anchor_stats_list[i].R_i_list\n R_i_element = defaultdict(float)\n R_i_count_element = defaultdict(int)\n R_i_std_element = defaultdict(float)\n \n \n R_i_element[i] = anchor_R_i\n R_i_std_element[i] = anchor_R_i_std\n R_i_count_element[i] = len(anchor_R_i_list)\n \n R_i_total.append(R_i_element)\n R_i_std_dev.append(R_i_std_element)\n R_i_count.append(R_i_count_element) \n \n self.main_data_sample = elber_analyze.Elber_data_sample(\n self.model, N_i_j_list, R_i_total)\n self.main_data_sample.fill_out_data_quantities()\n error_sample = elber_analyze.Elber_data_sample(\n self.model, N_i_j_list, R_i_total)\n error_sample.fill_out_data_quantities()\n self.data_sample_list.append(error_sample)\n return\n \n \n def process_data_samples_elber(self, pre_equilibrium_approx=False):\n \"\"\"\n Since the global, system-side statistics have been gathered, \n compute the thermodynamic and kinetic quantities and their\n uncertainties. 
Applies to systems using Elber milestoning.\n \"\"\"\n self.main_data_sample.compute_rate_matrix()\n #self.main_data_sample.Q = common_analyze.minor2d(\n # self.main_data_sample.Q, bulkstate, bulkstate)\n #self.main_data_sample.K = common_analyze.minor2d(\n # self.main_data_sample.K, bulkstate, bulkstate)\n self.main_data_sample.calculate_thermodynamics()\n self.main_data_sample.calculate_kinetics(pre_equilibrium_approx)\n error_sample = self.data_sample_list[0]\n error_sample.compute_rate_matrix()\n #self.main_data_sample.Q = common_analyze.minor2d(\n # self.main_data_sample.Q, bulkstate, bulkstate)\n #self.main_data_sample.K = common_analyze.minor2d(\n # self.main_data_sample.K, bulkstate, bulkstate)\n error_sample.calculate_thermodynamics()\n error_sample.calculate_kinetics(pre_equilibrium_approx, \n bd_sample_from_normal=True)\n p_i_error, free_energy_profile_err, MFPTs_error, k_off_error, \\\n k_ons_error = error_sample.monte_carlo_milestoning_error(\n num=self.num_error_samples,\n pre_equilibrium_approx=pre_equilibrium_approx)\n \n self.p_i = self.main_data_sample.p_i\n self.p_i_error = p_i_error\n self.free_energy_profile = self.main_data_sample.free_energy_profile\n self.free_energy_profile_err = free_energy_profile_err\n self.MFPTs = self.main_data_sample.MFPTs\n self.MFPTs_error = MFPTs_error\n self.k_off = self.main_data_sample.k_off\n self.k_off_error = k_off_error\n self.k_ons = self.main_data_sample.k_ons\n self.k_ons_error = k_ons_error\n return\n \n def fill_out_data_samples(self):\n \"\"\"\n Based on the type of milestoning, construct the data samples\n and fill out their statistics.\n \"\"\"\n if self.model.get_type() == \"mmvt\":\n self.fill_out_data_samples_mmvt()\n elif self.model.get_type() == \"elber\":\n self.fill_out_data_samples_elber()\n return\n \n def process_data_samples(self, pre_equilibrium_approx=False):\n \"\"\"\n Based on the type of milestoning, use the data samples to \n compute thermo and kinetics quantities and their uncertainties.\n \"\"\"\n if self.model.get_type() == \"mmvt\":\n self.process_data_samples_mmvt(pre_equilibrium_approx)\n elif self.model.get_type() == \"elber\":\n self.process_data_samples_elber(pre_equilibrium_approx)\n return\n \n def resample_k_N_R_T(self, N_alpha_beta, N_i_j_alpha, R_i_alpha_total,\n R_i_alpha_average, R_i_alpha_std_dev, R_i_alpha_count,\n T_alpha_total, T_alpha_average, T_alpha_std_dev, \n T_alpha_count):\n \"\"\"\n Create data samples from a distribution for computing the\n uncertainties of the thermo and kinetics.\n \"\"\"\n sampled_k_alpha_beta = {}\n sampled_T_alpha_total = []\n sampled_R_i_alpha_total = []\n for alpha, anchor in enumerate(self.model.anchors):\n if anchor.bulkstate:\n continue\n element_R_i_alpha_total = {}\n for key in R_i_alpha_total[alpha]:\n n_R_i = R_i_alpha_count[alpha][key]\n if n_R_i != 0:\n R_i_total_std_dev = R_i_alpha_std_dev[alpha][key] * np.sqrt(n_R_i)\n R_fluctuation = np.random.normal(scale=R_i_total_std_dev)\n else:\n R_fluctuation = 0.0\n element_R_i_alpha_total[key] = abs(R_i_alpha_total[alpha][key] + R_fluctuation)\n \n n_T = T_alpha_count[alpha]\n if n_T != 0:\n T_total_std_dev = T_alpha_std_dev[alpha] * np.sqrt(n_T)\n T_fluctuation = np.random.normal(scale=T_total_std_dev)\n else:\n T_fluctuation = 0.0\n element_T_alpha_total = abs(T_alpha_total[alpha] + T_fluctuation)\n # This way preserves T = sum of R_i\n sampled_T_alpha_total.append(np.sum(list(element_R_i_alpha_total.values())))\n # In contrast, this way samples T and R_i independently\n 
#sampled_T_alpha_total.append(element_T_alpha_total)\n sampled_R_i_alpha_total.append(element_R_i_alpha_total)\n \n for beta, anchor2 in enumerate(self.model.anchors):\n key = (alpha, beta)\n sampled_k_alpha_beta[key] = N_alpha_beta[key] / sampled_T_alpha_total[alpha]\n \n sampled_N_alpha_beta = N_alpha_beta\n sampled_N_i_j_alpha = N_i_j_alpha\n \n return sampled_k_alpha_beta, sampled_N_i_j_alpha, \\\n sampled_R_i_alpha_total, sampled_T_alpha_total\n \n def print_results(self):\n \"\"\"Print all results of the analysis calculation.\"\"\"\n print(\"Printing results from MMVT SEEKR calculation\")\n print(\"k_off (1/s):\", common_analyze.pretty_string_value_error(\n self.k_off, self.k_off_error))\n print(\"k_ons :\")\n for key in self.k_ons:\n k_on = float(self.k_ons[key])\n diss_constant = self.k_off / k_on\n delta_G = common_analyze.GAS_CONSTANT*self.model.temperature\\\n *math.log(diss_constant)\n if key in self.k_ons_error:\n k_on_err = float(self.k_ons_error[key])\n print(\" k_on (1/s * 1/M) to state\", key, \":\", \n common_analyze.pretty_string_value_error(k_on, k_on_err))\n diss_constant_err = diss_constant * common_analyze.quadriture(\n k_on_err/k_on, self.k_off_error/self.k_off)\n delta_G_err = diss_constant_err*common_analyze.GAS_CONSTANT\\\n *self.model.temperature/diss_constant\n else:\n print(\" k_on (1/s * 1/M) to state\", key, \":\", \n common_analyze.pretty_string_value_error(k_on, None))\n diss_constant_err = None\n \n print(\" Dissociation constant (M) to state\", key, \":\", \n common_analyze.pretty_string_value_error(\n diss_constant, diss_constant_err))\n print(\" \\u0394G (kcal/mol) to state\", key, \":\", \n common_analyze.pretty_string_value_error(\n delta_G, delta_G_err))\n \n print(\"Mean first passage times (s):\")\n for key in self.MFPTs:\n state1 = key[0]\n state2 = key[1]\n if key in self.MFPTs_error:\n print(\" MFPT from state\", state1, \"to state\", state2, \":\",\n common_analyze.pretty_string_value_error(\n float(self.MFPTs[key]*1.0e-12), \n float(self.MFPTs_error[key]*1.0e-12)))\n else:\n print(\" MFPT from state\", state1, \"to state\", state2, \":\",\n common_analyze.pretty_string_value_error(\n float(self.MFPTs[key]*1.0e-12), None))\n return\n \n def save_plots(self, image_directory):\n \"\"\"\n Save a potentially useful series of plots of some quantities\n obtained during the analysis.\n \n TODO: interact with model, because the way these plots are saved\n depends on the structure of the CVs.\n \"\"\"\n \n anchor_indices = np.zeros(len(self.model.anchors), dtype=np.int8)\n for i, anchor in enumerate(self.model.anchors):\n anchor_indices[i] = anchor.index\n milestone_indices = np.zeros(self.p_i.shape[0], dtype=np.int8)\n for i in range(self.p_i.shape[0]):\n milestone_indices[i] = i\n # save pi_alpha\n if self.model.get_type() == \"mmvt\":\n pi_fig, ax = plt.subplots()\n plt.errorbar(anchor_indices, self.pi_alpha, yerr=self.pi_alpha_error, \n ecolor=\"k\", capsize=2)\n #ax.plot(anchor_indices, self.pi_alpha, linestyle='-', \n # marker=\"o\", markersize = 1)\n plt.ylabel(\"\\u03C0_\\u03B1\")\n plt.xlabel(\"anchors\")\n pi_fig.savefig(os.path.join(image_directory, \"pi_alpha.png\"))\n \n # save p_i\n pi_fig, ax = plt.subplots()\n plt.errorbar(milestone_indices, self.p_i, yerr=self.p_i_error, \n ecolor=\"k\", capsize=2)\n plt.ylabel(\"p_i\")\n plt.xlabel(\"milestones\")\n pi_fig.savefig(os.path.join(image_directory, \"p_i.png\"))\n # save free energy profile\n pi_fig, ax = plt.subplots()\n plt.errorbar(milestone_indices, self.free_energy_profile, \n 
yerr=self.free_energy_profile_err, ecolor=\"k\", capsize=2)\n plt.ylabel(\"\\u0394G(milestone) (kcal/mol)\")\n plt.xlabel(\"milestones\")\n pi_fig.savefig(os.path.join(image_directory, \"free_energy_profile.png\"))\n return\n\ndef analyze(model, force_warning=False, num_error_samples=1000, \n pre_equilibrium_approx=False, skip_checks=False):\n \"\"\"Perform all the analysis steps at once.\"\"\"\n analysis = Analysis(model, force_warning, num_error_samples)\n analysis.extract_data()\n if not skip_checks:\n analysis.check_extraction()\n analysis.fill_out_data_samples()\n analysis.process_data_samples(pre_equilibrium_approx)\n return analysis\n\nif __name__ == \"__main__\":\n argparser = argparse.ArgumentParser(description=__doc__)\n argparser.add_argument(\n \"input_file\", metavar=\"INPUT_FILE\", type=str, \n help=\"name of model file for OpenMMVT calculation. This would be the \"\\\n \"XML file generated in the prepare stage.\")\n argparser.add_argument(\n \"-f\", \"--force_warning\", dest=\"force_warning\", default=False, \n help=\"By default, missing statistics for any anchors will generate \"\\\n \"fatal errors. This option will instead raise a warning and attempt \"\\\n \"the calculation anyway.\", \n action=\"store_true\")\n argparser.add_argument(\n \"-n\", \"--num_error_samples\", dest=\"num_error_samples\", \n default=1000, type=int, help=\"Specify the number of error samples\" \\\n \" to generate for estimating error/uncertainty of computed \"\\\n \"values.\")\n argparser.add_argument(\n \"-p\", \"--pre_equilibrium_approx\", dest=\"pre_equilibrium_approx\", \n default=False, help=\"Optionally use the pre-equilibrium approximation \"\\\n \"when computing system kinetics. This setting may be desirable for \"\\\n \"very long-timescale kinetic processes, which would cause the typical \"\\\n \"SEEKR2 analysis approach to fail.\", action=\"store_true\")\n argparser.add_argument(\n \"-d\", \"--image_directory\", dest=\"image_directory\", \n default=None, type=str,\n help=\"Define the directory where all plots and images will be saved. \"\\\n \"By default, graphics will be saved to the \"\\\n \"'%s' directory in the model's anchor root directory.\"\\\n % common_analyze.DEFAULT_IMAGE_DIR)\n argparser.add_argument(\n \"-s\", \"--skip_checks\", dest=\"skip_checks\", default=False, \n help=\"By default, pre-simulation checks will be run after the \"\\\n \"preparation is complete, and if the checks fail, the SEEKR2 \"\\\n \"model will not be saved. 
This argument bypasses those \"\\\n \"checks and allows the model to be generated anyways.\",\n action=\"store_true\")\n \n args = argparser.parse_args() # parse the args into a dictionary\n args = vars(args)\n xmlfile = args[\"input_file\"]\n force_warning = args[\"force_warning\"]\n num_error_samples = args[\"num_error_samples\"]\n pre_equilibrium_approx = args[\"pre_equilibrium_approx\"]\n image_directory = args[\"image_directory\"]\n skip_checks = args[\"skip_checks\"]\n \n model = base.Model()\n model.deserialize(xmlfile)\n if model.anchor_rootdir == \".\":\n model_dir = os.path.dirname(xmlfile)\n model.anchor_rootdir = os.path.abspath(model_dir)\n \n if image_directory is None:\n image_directory = os.path.join(model.anchor_rootdir, \n common_analyze.DEFAULT_IMAGE_DIR)\n if not os.path.exists(image_directory):\n os.mkdir(image_directory)\n \n if not skip_checks:\n check.check_post_simulation_all(model, long_check=True)\n \n analysis = analyze(model, force_warning=force_warning, \n num_error_samples=num_error_samples, \n pre_equilibrium_approx=pre_equilibrium_approx, \n skip_checks=skip_checks)\n \n analysis.print_results()\n \n print(\"All plots being saved to:\", image_directory)\n analysis.save_plots(image_directory)",
"\"\"\"\nelber_base.py\n\nBase classes, objects, and constants used in multiple stages of the\nElber milestoning calculations.\n\"\"\"\n\n# TODO: update this code and documentation once the Elber plugin is\n# fixed.\n\nimport numpy as np\nfrom parmed import unit\nimport mdtraj\n\nfrom abserdes import Serializer\n\nOPENMM_ELBER_BASENAME = \"forward\"\nOPENMM_ELBER_EXTENSION = \"out\"\nOPENMM_ELBER_GLOB = \"%s*.%s\" % (OPENMM_ELBER_BASENAME, OPENMM_ELBER_EXTENSION)\nNAMD_ELBER_BASENAME = \"forward\"\nNAMD_ELBER_EXTENSION = \"out\" # TODO: consolidate OpenMM vs. NAMD output: not necessary\nNAMD_ELBER_GLOB = \"%s*.%s*\" % (NAMD_ELBER_BASENAME, NAMD_ELBER_EXTENSION)\n\nELBER_UMBRELLA_BASENAME = \"umbrella\"\nELBER_FWD_BASENAME = \"reverse\"\nELBER_FWD_EXTENSION = \"out\"\nELBER_FWD_GLOB = \"%s*.%s\" % (ELBER_FWD_BASENAME, ELBER_FWD_EXTENSION)\nELBER_REV_BASENAME = \"reverse\"\nELBER_REV_EXTENSION = \"out\"\nELBER_REV_GLOB = \"%s*.%s\" % (ELBER_REV_BASENAME, ELBER_REV_EXTENSION)\n\nclass Elber_settings(Serializer):\n \"\"\"\n Settings that are specific to an Elber milestoning calculation.\n \n Attributes:\n -----------\n temperature_equil_progression : list\n A list of temperatures (in Kelvin) to warm the simulation to\n during the temperature equilibration stage.\n num_temperature_equil_steps : int\n The number of steps to do per temperature during the\n temperature equilibration stage.\n temperature_equil_trajectory_interval : int or None\n The interval to write trajectory frames during the temperature\n equilibration stage. If None, then the trajectory won't be \n written\n num_umbrella_stage_steps : int\n The number of steps to take within a given MMVT production\n run for a Voronoi cell.\n umbrella_stage_trajectory_interval : int\n The interval to write trajectory frames during the umbrella\n stage.\n \"\"\"\n #num_equilibration_steps : int\n # The number of steps to take during an equilibration run, where\n # no statistics will be reported\n def __init__(self):\n #self.temperature_equil_progression = [\n # 300., 310., 320., 330., 340., 350., 340., 330., 320., 310., 300]\n self.temperature_equil_progression = []\n self.num_temperature_equil_steps = 1000\n self.num_umbrella_stage_steps = 50000\n self.umbrella_force_constant = 9000.0\n self.fwd_rev_interval = 500\n self.num_rev_launches = 1\n self.umbrella_energy_reporter_interval = None\n self.umbrella_trajectory_reporter_interval = None\n self.rev_energy_reporter_interval = None\n self.rev_trajectory_reporter_interval = None\n self.fwd_energy_reporter_interval = None\n self.fwd_trajectory_reporter_interval = None\n\nclass Elber_collective_variable(Serializer):\n \"\"\"\n Collective variables represent the function of system positions\n and velocities so that Elber milestones can be defined\n \n Attributes:\n -----------\n index : int\n Every collective variable needs an index so that it may be\n quickly and easily referenced by one of the many milestones\n in the model.\n \n name : str\n Each type of collective variable has a shorthand 'name' for\n quick reference and identification. Example: 'elber_spherical'.\n \n openmm_umbrella_expression : str\n In order to restrain a system along a milestone, an umbrella \n sampling potential energy expression must be applied.\n \n num_groups : int\n The number of atomic groups that are needed for the function\n describing this collective variable. Example: 2 for spherical\n CVs because a distance requires two points.\n \n groups : list\n A list of lists of integers. 
The length of the outer list is\n equal to self.num_groups. The inner lists contain integer\n values representing the indices of atoms in that group.\n \n per_dof_variables : list\n A list of strings of the names of variables used in \n self.expression that apply to individual degrees of \n freedom.\n \n global_variables : list\n A list of strings of the names of variables used in\n self.expression that apply globally, regardless of the degrees\n of freedom.\n \n \"\"\"\n def __init__(self, index, groups):\n self.index = index\n self.groups = groups\n return\n\n def __name__(self):\n return \"elber_baseCV\"\n \n def make_force_object(self):\n raise Exception(\"This base class cannot be used for creating a \"\\\n \"collective variable boundary definition.\")\n \n def make_namd_colvar_umbrella_string(self):\n raise Exception(\"This base class cannot be used for creating a \"\\\n \"collective variable boundary definition.\")\n \n def add_parameters(self):\n raise Exception(\"This base class cannot be used for creating a \"\\\n \"collective variable boundary definition.\")\n \n def add_groups_and_variables(self):\n raise Exception(\"This base class cannot be used for creating a \"\\\n \"collective variable boundary definition.\")\n \n def get_variable_values_list(self):\n raise Exception(\"This base class cannot be used for creating a \"\\\n \"collective variable boundary definition.\")\n \n def get_namd_evaluation_string(self):\n raise Exception(\"This base class cannot be used for creating a \"\\\n \"collective variable boundary definition.\")\n \n def check_mdtraj_within_boundary(self, parmed_structure, \n milestone_variables):\n raise Exception(\"This base class cannot be used for creating a \"\\\n \"collective variable boundary definition.\")\n \n def get_atom_groups(self):\n raise Exception(\"This base class cannot be used for creating a \"\\\n \"collective variable boundary definition.\")\n\nclass Elber_spherical_CV(Elber_collective_variable):\n \"\"\"\n A spherical collective variable represents the distance between two\n different groups of atoms.\n \n \"\"\"+Elber_collective_variable.__doc__\n \n def __init__(self, index, groups):\n self.index = index\n self.group1 = groups[0]\n self.group2 = groups[1]\n self.name = \"elber_spherical\"\n self.openmm_umbrella_expression = \"0.5*k*(distance(g1,g2)-radius)^2\"\n self.openmm_fwd_rev_expression \\\n = \"step(k*(distance(g1, g2)^2 - radius^2))\"\n self.num_groups = 2\n self.per_dof_variables = [\"k\", \"radius\"]\n self.global_variables = []\n self._mygroup_list = None\n self.variable_name = \"r\"\n return\n\n def __name__(self):\n return \"Elber_spherical_CV\"\n \n def make_umbrella_force_object(self):\n \"\"\"\n Make an umbrella sampling force object, which will constrain\n the system to the milestone.\n \"\"\"\n try:\n import openmm\n except ImportError:\n import simtk.openmm as openmm\n \n assert self.num_groups == 2\n return openmm.CustomCentroidBondForce(\n self.num_groups, self.openmm_umbrella_expression)\n \n def make_fwd_rev_force_object(self):\n \"\"\"\n Make a list of reversal force objects, which will be used to\n monitor milestone crossing during the reversal stage.\n \"\"\"\n try:\n import openmm\n except ImportError:\n import simtk.openmm as openmm\n \n assert self.num_groups == 2\n return openmm.CustomCentroidBondForce(\n self.num_groups, self.openmm_fwd_rev_expression)\n \n def make_namd_colvar_umbrella_string(self):\n \"\"\"\n This string will be put into a NAMD colvar file for applying\n an umbrella sampling force to 
constrain the system to the\n milestone.\n \"\"\"\n serial_group1 = [str(index+1) for index in self.group1]\n serial_group2 = [str(index+1) for index in self.group2]\n serial_group1_str = \" \".join(serial_group1)\n serial_group2_str = \" \".join(serial_group2)\n namd_colvar_string = \"\"\"\ncolvar {{\n name collective_variable_{0}\n outputappliedforce off\n distance {{\n group1 {{ atomNumbers {1} }}\n group2 {{ atomNumbers {2} }}\n }}\n}}\n\"\"\".format(self.index, serial_group1_str, serial_group2_str)\n return namd_colvar_string\n \n def add_fwd_rev_parameters(self, force):\n \"\"\"\n An OpenMM custom force object needs a list of variables\n provided to it that will occur within its expression. Both\n the per-dof and global variables are combined within the\n variable_names_list. The numerical values of these variables\n will be provided at a later step.\n \"\"\"\n self._mygroup_list = []\n mygroup1 = force.addGroup(self.group1)\n self._mygroup_list.append(mygroup1)\n mygroup2 = force.addGroup(self.group2)\n self._mygroup_list.append(mygroup2)\n variable_names_list = []\n if self.per_dof_variables is not None:\n for per_dof_variable in self.per_dof_variables:\n force.addPerBondParameter(per_dof_variable)\n variable_names_list.append(per_dof_variable)\n \n if self.global_variables is not None:\n for global_variable in self.global_variables:\n force.addGlobalParameter(global_variable)\n variable_names_list.append(global_variable)\n \n return variable_names_list, self._mygroup_list\n \n def add_umbrella_parameters(self, force):\n \"\"\"\n \n \"\"\"\n variable_names_list = []\n if self.per_dof_variables is not None:\n for per_dof_variable in self.per_dof_variables:\n force.addPerBondParameter(per_dof_variable)\n variable_names_list.append(per_dof_variable)\n \n if self.global_variables is not None:\n for global_variable in self.global_variables:\n force.addGlobalParameter(global_variable)\n variable_names_list.append(global_variable)\n \n return variable_names_list\n \n def add_groups_and_variables(self, force, group_list, variables):\n \"\"\"\n Provide the custom force with additional information it needs,\n which includes a list of the groups of atoms involved with the\n CV, as well as a list of the variables' *values*.\n \"\"\"\n assert len(group_list) == self.num_groups\n force.addBond(group_list, variables)\n return\n \n def get_variable_values_list(self, milestone):\n \"\"\"\n Create the list of CV variables' values in the proper order\n so they can be provided to the custom force object.\n \"\"\"\n assert milestone.cv_index == self.index\n values_list = []\n k = milestone.variables['k'] * unit.kilojoules_per_mole\n radius = milestone.variables['radius'] * unit.nanometers\n values_list.append(k)\n values_list.append(radius)\n \n return values_list\n \n def get_namd_fwd_rev_evaluation_string(self, milestone, cv_val_var=\"cv_val\"):\n \"\"\"\n For a given milestone, return a string that can be evaluated\n my NAMD to monitor for a crossing event. 
Essentially, if the \n function defined by the string ever returns True, then a\n bounce will occur\n \"\"\"\n assert milestone.cv_index == self.index\n k = milestone.variables['k']\n radius_in_nm = milestone.variables['radius'] * unit.nanometers\n radius_in_A = radius_in_nm.value_in_unit(unit.angstroms)\n eval_string = \"{0} * (${1}_{2} - {3}) > 0\".format(\n k, cv_val_var, self.index, radius_in_A)\n return eval_string\n \n def check_mdtraj_close_to_boundary(self, traj, milestone_variables, \n verbose=False, max_avg=0.03, max_std=0.05):\n \"\"\"\n \n \"\"\"\n traj1 = traj.atom_slice(self.group1)\n traj2 = traj.atom_slice(self.group2)\n com1_array = mdtraj.compute_center_of_mass(traj1)\n com2_array = mdtraj.compute_center_of_mass(traj2)\n distances = []\n for frame_index in range(traj.n_frames):\n com1 = com1_array[frame_index,:]\n com2 = com2_array[frame_index,:]\n radius = np.linalg.norm(com2-com1)\n milestone_radius = milestone_variables[\"radius\"]\n distances.append(radius - milestone_radius)\n \n avg_distance = np.mean(distances)\n std_distance = np.std(distances)\n if abs(avg_distance) > max_avg or std_distance > max_std:\n if verbose:\n warnstr = \"\"\"The distance between the system and central \n milestone were found on average to be {:.4f} nm apart.\n The standard deviation was {:.4f} nm.\"\"\".format(avg_distance, std_distance)\n print(warnstr)\n return False\n \n return True\n \n def get_atom_groups(self):\n \"\"\"\n \n \"\"\"\n return[self.group1, self.group2]\n\nclass Elber_anchor(Serializer):\n \"\"\"\n An anchor object for representing a Voronoi cell in an Elber \n milestoning calculation.\n \n Attributes\n ----------\n index : int\n The index of this anchor (cell) within the model.\n \n directory : str\n The directory (within the model's root directory) that contains\n the information and calculations for this Voronoi cell.\n \n amber_params : Amber_params\n Settings if this anchor starts the simulation using the\n AMBER forcefield and files.\n \n charmm_params : Charmm_params\n Settings if this anchor starts the simulation using the\n CHARMM forcefield and files.\n \n forcefield_params : Forcefield_params\n Settings if this anchor starts the simulation using an XML\n forcefield file and a PDB.\n \n md_directory : str or None\n The directory within the 'directory' argument above which \n contains the MD simulation information. If None, then no MD\n is performed for this anchor.\n \n bd_directory : str or None\n The directory within the 'directory' argument above which\n contains the BD simulation information. 
If None, then no BD\n is performed for this anchor.\n \n production_directory : str\n The directory within the MD or BD directory above in which the\n simulations will be performed.\n \n md_output_glob : str\n A glob to select all the MD output files within the production\n directory above.\n \n name : str\n A unique name for this anchor.\n \n md : bool\n A boolean of whether MD is performed in this Voronoi cell.\n \n bd : bool\n A boolean of whether BD is performed in this Voronoi cell.\n \n endstate : bool\n A boolean of whether this is an end state or not - does it\n act as the bulk or a bound state or another state of interest?\n All end states will have kinetics calculated to all other\n end states.\n \n bulkstate : bool\n A boolean of whether this state acts as the bulk state (That\n is, the state represents a large separation distance between\n ligand and receptor.\n \n milestones : list\n A list of Milestone() objects, which are the boundaries \n bordering this cell.\n \"\"\"\n def __init__(self):\n self.index = 0\n self.directory = \"\"\n self.amber_params = None\n self.charmm_params = None\n self.forcefield_params = None\n self.building_directory = \"building\"\n self.production_directory = \"prod\"\n self.md_output_glob = OPENMM_ELBER_GLOB\n self.name = \"\"\n self.md = False\n self.endstate = False\n self.bulkstate = False\n self.milestones = []\n self.variables = {}\n return\n \n def _make_milestone_collection(self):\n \"\"\"\n Make the dictionaries that allow for easy access of milestone\n indices, aliases, and neighboring indices.\n \"\"\"\n id_key_alias_value_dict = {}\n alias_key_id_value_dict = {}\n neighbor_id_key_alias_value_dict = {}\n \n for milestone in self.milestones:\n index = milestone.index\n neighbor_index = milestone.neighbor_anchor_index\n alias_index = milestone.alias_index\n id_key_alias_value_dict[index] = alias_index\n neighbor_id_key_alias_value_dict[neighbor_index] = alias_index\n alias_key_id_value_dict[alias_index] = index\n \n return id_key_alias_value_dict, alias_key_id_value_dict, \\\n neighbor_id_key_alias_value_dict\n \n def id_from_alias(self, alias_id):\n \"\"\"\n Accept the alias index of a milestone and return the model-wide\n index.\n \"\"\"\n id_key_alias_value_dict, alias_key_id_value_dict, \\\n neighbor_id_key_alias_value_dict = self._make_milestone_collection()\n if alias_id in alias_key_id_value_dict:\n return alias_key_id_value_dict[alias_id]\n else:\n return None\n \n def alias_from_id(self, my_id):\n \"\"\"\n Accept the model-wide index and return the milestone's alias\n index.\n \"\"\"\n id_key_alias_value_dict, alias_key_id_value_dict, \\\n neighbor_id_key_alias_value_dict = self._make_milestone_collection()\n if my_id in id_key_alias_value_dict:\n return id_key_alias_value_dict[my_id]\n else:\n return None\n \n def alias_from_neighbor_id(self, neighbor_id):\n \"\"\"\n Take the index of the neighbor anchor's index and provide the\n milestone's alias index.\n \"\"\"\n id_key_alias_value_dict, alias_key_id_value_dict, \\\n neighbor_id_key_alias_value_dict = self._make_milestone_collection()\n if neighbor_id in neighbor_id_key_alias_value_dict:\n return neighbor_id_key_alias_value_dict[neighbor_id]\n else:\n return None\n \n def get_ids(self):\n \"\"\"\n Return a list of model-wide incides.\n \"\"\"\n id_key_alias_value_dict, alias_key_id_value_dict, \\\n neighbor_id_key_alias_value_dict = self._make_milestone_collection()\n return id_key_alias_value_dict.keys()\n \n \n"
] | [
[
"numpy.linalg.solve",
"numpy.linalg.norm",
"numpy.ones",
"numpy.array",
"numpy.zeros",
"numpy.isclose"
],
[
"numpy.sqrt",
"matplotlib.pyplot.subplots",
"numpy.std",
"numpy.random.normal",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.xlabel",
"numpy.zeros",
"matplotlib.pyplot.ylabel"
],
[
"numpy.std",
"numpy.mean",
"numpy.linalg.norm"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
marlene09/skan | [
"97a217d36ec1393b380d4a797b5b7ceb68e824ec"
] | [
"skan/pipe.py"
] | [
"import os\nfrom . import pre, csr\nimport imageio\nfrom tqdm import tqdm\nimport numpy as np\nfrom skimage import morphology\nimport pandas as pd\nfrom .image_stats import image_summary\nfrom skimage.feature import shape_index\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nimport multiprocessing as mp\n\n\nCPU_COUNT = int(os.environ.get('CPU_COUNT', mp.cpu_count()))\n\ndef _get_scale(image, md_path_or_scale):\n \"\"\"Get a valid scale from an image and a metadata path or scale.\n\n Parameters\n ----------\n image : np.ndarray\n The input image.\n md_path_or_scale : float or image filename\n The path to the file containing the metadata, or the scale.\n\n Returns\n -------\n scale : float\n \"\"\"\n scale = None\n try:\n scale = float(md_path_or_scale)\n except ValueError:\n pass\n if md_path_or_scale is not None and scale is None:\n md_path = md_path_or_scale.split(sep='/')\n meta = image.meta\n for key in md_path:\n meta = meta[key]\n scale = float(meta)\n else:\n if scale is None:\n scale = 1 # measurements will be in pixel units\n return scale\n\n\ndef process_single_image(filename, image_format, scale_metadata_path,\n threshold_radius, smooth_radius,\n brightness_offset, crop_radius, smooth_method):\n image = imageio.imread(filename, format=image_format)\n scale = _get_scale(image, scale_metadata_path)\n if crop_radius > 0:\n c = crop_radius\n image = image[c:-c, c:-c]\n pixel_threshold_radius = int(np.ceil(threshold_radius / scale))\n\n pixel_smoothing_radius = smooth_radius * pixel_threshold_radius\n thresholded = pre.threshold(image, sigma=pixel_smoothing_radius,\n radius=pixel_threshold_radius,\n offset=brightness_offset,\n smooth_method=smooth_method)\n quality = shape_index(image, sigma=pixel_smoothing_radius,\n mode='reflect')\n skeleton = morphology.skeletonize(thresholded) * quality\n framedata = csr.summarise(skeleton, spacing=scale)\n framedata['squiggle'] = np.log2(framedata['branch-distance'] /\n framedata['euclidean-distance'])\n framedata['scale'] = scale\n framedata.rename(columns={'mean pixel value': 'mean shape index'},\n inplace=True)\n framedata['filename'] = filename\n return image, thresholded, skeleton, framedata\n\n\ndef process_images(filenames, image_format, threshold_radius,\n smooth_radius, brightness_offset, scale_metadata_path,\n crop_radius=0, smooth_method='Gaussian',\n num_threads=CPU_COUNT):\n \"\"\"Full pipeline from images to skeleton stats with local median threshold.\n\n Parameters\n ----------\n filenames : list of string\n The list of input filenames.\n image_format : string\n The format of the files. 'auto' is automatically determined by the\n imageio library. See imageio documentation for valid image formats.\n threshold_radius : float\n The radius for median thresholding,\n smooth_radius : float in [0, 1]\n The value of sigma with which to Gaussian-smooth the image,\n **relative to `threshold_radius`**.\n brightness_offset : float\n The standard brightness value with which to threshold is the local\n median, `m(x, y)`. Use this value to offset from there: the threshold\n used will be `m(x, y) + brightness_offset`.\n scale_metadata_path : string\n The path in the image dictionary to find the metadata on pixel scale,\n separated by forward slashes ('/').\n crop_radius : int, optional\n Crop `crop_radius` pixels from each margin of the image before\n processing.\n smooth_method : {'Gaussian', 'TV', 'NL'}, optional\n Which method to use for smoothing.\n num_threads : int, optional\n How many threads to use for computation. 
This should generally be\n set to the number of CPU cores available to you.\n\n Returns\n -------\n results : generator\n The pipeline yields individual image results in the form of a tuple\n of ``(filename, image, thresholded_image, skeleton, data_frame)``.\n Finally, after all the images have been processed, the pipeline yields\n a DataFrame containing all the collated branch-level results.\n \"\"\"\n image_format = None if image_format == 'auto' else image_format\n results = []\n image_results = []\n with ThreadPoolExecutor(max_workers=num_threads) as ex:\n future_data = {ex.submit(process_single_image, filename,\n image_format, scale_metadata_path,\n threshold_radius, smooth_radius,\n brightness_offset, crop_radius,\n smooth_method): filename\n for filename in filenames}\n for completed_data in tqdm(as_completed(future_data)):\n image, thresholded, skeleton, framedata = completed_data.result()\n filename = future_data[completed_data]\n results.append(framedata)\n image_stats = image_summary(skeleton,\n spacing=framedata['scale'][0])\n image_stats['filename'] = filename\n image_stats['branch density'] = (framedata.shape[0] /\n image_stats['area'])\n j2j = framedata[framedata['branch-type'] == 2]\n image_stats['mean J2J branch distance'] = (\n j2j['branch-distance'].mean())\n image_results.append(image_stats)\n yield filename, image, thresholded, skeleton, framedata\n yield pd.concat(results), pd.concat(image_results)\n"
] | [
[
"numpy.ceil",
"pandas.concat",
"numpy.log2"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
kohjingyu/prob-stats | [
"df396686b641079b5db93118b1b2373d79de7d7a"
] | [
"linear_regression.py"
] | [
"import matplotlib\nimport matplotlib.pyplot as plt\n\nx = [20, 23, 29, 27, 30, 34, 35, 37, 40, 43]\ny = [1.32, 1.67, 2.17, 2.70, 2.75, 2.87, 3.65, 2.86, 3.61, 4.25]\nn = len(x)\nassert(n == len(y))\n\n# Means\nbar_x = sum(x) / n\nbar_y = sum(y) / n\n\n# Sum of squares\nsxy = sum([(x[i] - bar_x) * (y[i] - bar_y) for i in range(n)])\nsxx = sum([(x[i] - bar_x)**2 for i in range(n)]) \nsyy = sum([(y[i] - bar_y)**2 for i in range(n)]) \n\nprint(\"S_xy = {0:5f}, S_xx = {1:5f}, S_yy = {2:5f}\".format(sxy ,sxx, syy))\n\n# Point estimates for \\beta_0 and \\beta_1\nb1 = sxy / sxx\nb0 = bar_y - b1 * bar_x\n\nprint(\"n = {0}\".format(n))\nprint(\"\\\\bar{{x}} = {0:5f}\".format(bar_x))\nprint(\"\\\\bar{{y}} = {0:5f}\".format(bar_y))\n\nprint(\"Estimated regression line: y = {0:5f} + {1:5f} x\".format(b0, b1))\n\n# Plot x and y and save it\nfig = plt.figure()\nax = plt.subplot(111)\nax.plot(x, y)\nx_values = range(min(x), max(x))\nax.plot(x_values, [b0 + b1 * xi for xi in x_values])\nfig.savefig(\"plot.png\")\n\n# error sum of squares\nsse = sum([(y[i] - (b0 + b1 * x[i]))**2 for i in range(n)])\n# total sum of squares\nsst = sum([y[i]**2 for i in range(n)]) - sum(y)**2 / n \nsigma_square = sse / (n - 2)\n\nprint(\"SSE: {0:5f}\".format(sse))\nprint(\"SST: {0:5f}\".format(sst))\nprint(\"\\sigma^2 = {0:5f}\".format(sigma_square))\nprint(\"\\sigma = {0:5f}\".format(sigma_square ** 0.5))\nprint(\"r^2 = {0:5f}\".format(1 - sse / sst))\n"
] | [
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JamesSample/icpw | [
"47562f601fc8fe23720267d083dabc540889565e"
] | [
"toc_trends_analysis.py"
] | [
"#------------------------------------------------------------------------------\n# Name: toc_trends_analysis.py\n# Purpose: Analyse RESA2 data for trends.\n#\n# Author: James Sample\n#\n# Created: Fri Jul 15 11:35:12 2016\n# Copyright: (c) James Sample and NIVA\n# Licence: \n#------------------------------------------------------------------------------\n\"\"\" Tore has previously written code to perform trend analyses on the data in\n RESA2. I haven't been able to find the code, but it appears to shift data \n between RESA2, Excel and Access, which seems a bit messy.\n \n In the notebook updated_toc_trends_analysis.ipynb, I tested some code which\n refactors all the analysis into Python, interfacing directly with the \n database and returning results as dataframes. This seems to have worked \n well.\n \n The code below takes the main functions from this notebook and tidies them\n up a bit. This file can then be imported into new notebooks, which should\n make it easy to re-run trend analyses on different datasets in the future.\n\"\"\"\n\ndef mk_test(x, stn_id, par, alpha=0.05):\n \"\"\" Adapted from http://pydoc.net/Python/ambhas/0.4.0/ambhas.stats/\n by Sat Kumar Tomer.\n \n Perform the MK test for monotonic trends. Uses the \"normal\n approximation\" to determine significance and therefore should \n only be used if the number of values is >= 10.\n \n Args:\n x: 1D array of data\n name: Name for data series (string)\n alpha: Significance level\n \n Returns:\n var_s: Variance of test statistic\n s: M-K test statistic\n z: Normalised test statistic \n p: p-value of the significance test\n trend: Whether to reject the null hypothesis (no trend) at\n the specified significance level. One of: \n 'increasing', 'decreasing' or 'no trend'\n \"\"\"\n import numpy as np\n from scipy.stats import norm\n \n n = len(x)\n \n if n < 10:\n print (' Data series for %s at site %s has fewer than 10 non-null values. '\n 'Significance estimates may be unreliable.' % (par, int(stn_id)))\n \n # calculate S \n s = 0\n for k in range(n-1):\n for j in range(k+1,n):\n s += np.sign(x[j] - x[k])\n \n # calculate the unique data\n unique_x = np.unique(x)\n g = len(unique_x)\n \n # calculate the var(s)\n if n == g: # there is no tie\n var_s = (n*(n-1)*(2*n+5))/18. \n else: # there are some ties in data\n tp = np.zeros(unique_x.shape)\n for i in range(len(unique_x)):\n tp[i] = sum(unique_x[i] == x)\n # Sat Kumar's code has \"+ np.sum\", which is incorrect\n var_s = (n*(n-1)*(2*n+5) - np.sum(tp*(tp-1)*(2*tp+5)))/18.\n \n if s>0:\n z = (s - 1)/np.sqrt(var_s)\n elif s == 0:\n z = 0\n elif s<0:\n z = (s + 1)/np.sqrt(var_s)\n else:\n z = np.nan\n \n # calculate the p_value\n p = 2*(1-norm.cdf(abs(z))) # two tail test\n h = abs(z) > norm.ppf(1-alpha/2.) \n\n if (z<0) and h:\n trend = 'decreasing'\n elif (z>0) and h:\n trend = 'increasing'\n elif np.isnan(z):\n trend = np.nan\n else:\n trend = 'no trend'\n \n return var_s, s, z, p, trend\n\ndef wc_stats(raw_df, st_yr=None, end_yr=None, plot=False, fold=None):\n \"\"\" Calculate key statistics for the TOC trends analysis:\n \n 'station_id'\n 'par_id'\n 'non_missing'\n 'median'\n 'mean'\n 'std_dev'\n 'period'\n 'mk_std_dev'\n 'mk_stat'\n 'norm_mk_stat'\n 'mk_p_val'\n 'trend'\n 'sen_slp'\n \n Args:\n raw_df: Dataframe with annual data for a single station. Columns must \n be: [station_id, year, par1, par2, ... parn]\n st_yr: First year to include in analysis. Pass None to start\n at the beginning of the series\n end_year: Last year to include in analysis. 
Pass None to start\n at the beginning of the series\n plot: Whether to generate a PNG plot of the Sen's slope \n regression\n fold: Folder in which to save PNGs if plot=True\n \n Returns:\n df of key statistics.\n \"\"\"\n import numpy as np, pandas as pd\n import seaborn as sn, matplotlib.pyplot as plt, os\n from scipy.stats import theilslopes\n sn.set_context('poster')\n \n # Checking\n df = raw_df.copy()\n assert list(df.columns[:2]) == ['STATION_ID', 'YEAR'], 'Columns must be: [STATION_ID, YEAR, par1, par2, ... parn]'\n assert len(df['STATION_ID'].unique()) == 1, 'You can only process data for one site at a time'\n \n # Get just the period of interest\n if st_yr:\n df = df.query('YEAR >= @st_yr')\n if end_yr:\n df = df.query('YEAR <= @end_yr')\n\n # Only continue if data\n if len(df) > 0:\n # Get stn_id\n stn_id = df['STATION_ID'].iloc[0]\n \n # Tidy up df\n df.index = df['YEAR']\n df.sort_index(inplace=True)\n del df['STATION_ID'], df['YEAR']\n \n # Container for results\n data_dict = {'station_id':[],\n 'par_id':[],\n 'non_missing':[],\n 'n_start':[],\n 'n_end':[],\n 'median':[],\n 'mean':[],\n 'std_dev':[],\n 'period':[],\n 'mk_std_dev':[],\n 'mk_stat':[],\n 'norm_mk_stat':[],\n 'mk_p_val':[],\n 'trend':[],\n 'sen_slp':[]}\n \n # Loop over pars\n for col in df.columns:\n # 1. Station ID\n data_dict['station_id'].append(stn_id)\n \n # 2. Par ID\n data_dict['par_id'].append(col)\n \n # 3. Non-missing\n data_dict['non_missing'].append(pd.notnull(df[col]).sum())\n \n # 4. Number of non nulls at start\n if st_yr:\n # Record the number of non-nulls within 5 years of start year\n data_dict['n_start'].append(pd.notnull(df[df.index<(st_yr+5)][col]).sum())\n else:\n # Record the number of non-nulls in first 5 years of record\n data_dict['n_start'].append(pd.notnull(df[col].head(5)).sum())\n \n # 5. Number of non nulls at end\n if end_yr:\n # Record the number of non-nulls within 5 years of end year\n data_dict['n_end'].append(pd.notnull(df[df.index>(end_yr-5)][col]).sum())\n else:\n # Record the number of non-nulls in last 5 years of record\n data_dict['n_end'].append(pd.notnull(df[col].tail(5)).sum())\n \n # 6. Median\n data_dict['median'].append(df[col].median())\n \n # 7. Mean\n data_dict['mean'].append(df[col].mean())\n \n # 8. Std dev\n data_dict['std_dev'].append(df[col].std())\n \n # 9. Period\n st_yr = df.index.min()\n end_yr = df.index.max()\n per = '%s-%s' % (int(st_yr), int(end_yr))\n data_dict['period'].append(per)\n \n # 10. M-K test\n # Drop missing values\n mk_df = df[[col]].dropna(how='any')\n \n # Only run stats if more than 1 valid value\n if len(mk_df) > 1:\n var_s, s, z, p, trend = mk_test(mk_df[col].values, stn_id, col)\n data_dict['mk_std_dev'].append(np.sqrt(var_s)) \n data_dict['mk_stat'].append(s)\n data_dict['norm_mk_stat'].append(z)\n data_dict['mk_p_val'].append(p)\n data_dict['trend'].append(trend) \n \n # 11. Sen's slope. Returns:\n # Median slope, median intercept, 95% CI lower bound, \n # 95% CI upper bound\n sslp, icpt, lb, ub = theilslopes(mk_df[col].values, \n mk_df.index, 0.95)\n data_dict['sen_slp'].append(sslp)\n \n # 12. 
Plot if desired\n if plot:\n fig = plt.figure()\n plt.plot(mk_df.index, mk_df[col].values, 'bo-')\n plt.plot(mk_df.index, mk_df.index*sslp + icpt, 'k-')\n if col in ('Al', 'TOC'):\n plt.ylabel('%s (mg/l)' % col, fontsize=24)\n else:\n plt.ylabel('%s (ueq/l)' % col, fontsize=24)\n plt.title('%s at station %s' % (col, int(stn_id)),\n fontsize=32)\n plt.tight_layout()\n \n # Save fig\n out_path = os.path.join(fold,\n '%s_%s_%s-%s.png' % (int(stn_id), col, \n st_yr, end_yr))\n plt.savefig(out_path, dpi=150)\n plt.close()\n \n # Otherwise all NaN\n else:\n for par in ['mk_std_dev', 'mk_stat', 'norm_mk_stat', \n 'mk_p_val', 'trend', 'sen_slp']:\n data_dict[par].append(np.nan)\n \n # Build to df\n res_df = pd.DataFrame(data_dict)\n res_df = res_df[['station_id', 'par_id', 'period', 'non_missing', 'n_start',\n 'n_end', 'mean', 'median', 'std_dev', 'mk_stat', \n 'norm_mk_stat', 'mk_p_val', 'mk_std_dev', 'trend', 'sen_slp']] \n \n return res_df\n \ndef read_resa2(proj_list, engine):\n \"\"\" Reads raw data for the specified projects from RESA2. Extracts only\n the parameters required for the trends analysis and calculates \n aggregated annual values by taking medians.\n \n Args:\n proj_list: List of RESA2 project names for which to extract data\n engine: SQLAlchemy 'engine' object already connected to RESA2\n \n Returns: \n [stn_df, wc_df, dup_df]. Dataframe of stations; Dataframe of annual \n water chemistry values; dataframe of duplicates to check\n \"\"\"\n import pandas as pd \n\n # Get par IDs etc. for pars of interest\n par_list = ['SO4', 'Cl', 'Ca', 'Mg', 'NO3-N', 'TOC', \n 'Al', 'K', 'Na', 'NH4-N', 'pH']\n \n sql = ('SELECT * FROM resa2.parameter_definitions '\n 'WHERE name in %s' % str(tuple(par_list)))\n \n par_df = pd.read_sql_query(sql, engine)\n \n # Get stations for a specified list of projects\n if len(proj_list) == 1:\n sql = (\"SELECT station_id, station_code \"\n \"FROM resa2.stations \"\n \"WHERE station_id IN (SELECT UNIQUE(station_id) \"\n \"FROM resa2.projects_stations \"\n \"WHERE project_id IN (SELECT project_id \"\n \"FROM resa2.projects \"\n \"WHERE project_name = '%s'))\"\n % proj_list[0])\n else:\n sql = ('SELECT station_id, station_code '\n 'FROM resa2.stations '\n 'WHERE station_id IN (SELECT UNIQUE(station_id) '\n 'FROM resa2.projects_stations '\n 'WHERE project_id IN (SELECT project_id '\n 'FROM resa2.projects '\n 'WHERE project_name IN %s))'\n % str(tuple(proj_list))) \n stn_df = pd.read_sql(sql, engine)\n\n # Get results for ALL pars for these sites\n if len(stn_df)==1:\n sql = (\"SELECT * FROM resa2.water_chemistry_values2 \"\n \"WHERE sample_id IN (SELECT water_sample_id FROM resa2.water_samples \"\n \"WHERE station_id = %s)\"\n % stn_df['station_id'].iloc[0]) \n else:\n sql = (\"SELECT * FROM resa2.water_chemistry_values2 \"\n \"WHERE sample_id IN (SELECT water_sample_id FROM resa2.water_samples \"\n \"WHERE station_id IN %s)\"\n % str(tuple(stn_df['station_id'].values)))\n \n wc_df = pd.read_sql_query(sql, engine)\n\n # Get all sample dates for sites\n if len(stn_df)==1:\n sql = (\"SELECT water_sample_id, station_id, sample_date, depth1, depth2 \"\n \"FROM resa2.water_samples \"\n \"WHERE station_id = %s \"\n % stn_df['station_id'].iloc[0]) \n else:\n sql = (\"SELECT water_sample_id, station_id, sample_date, depth1, depth2 \"\n \"FROM resa2.water_samples \"\n \"WHERE station_id IN %s \"\n % str(tuple(stn_df['station_id'].values)))\n \n samp_df = pd.read_sql_query(sql, engine)\n \n # Join in par IDs based on method IDs\n sql = ('SELECT * FROM 
resa2.wc_parameters_methods')\n meth_par_df = pd.read_sql_query(sql, engine)\n \n wc_df = pd.merge(wc_df, meth_par_df, how='left',\n left_on='method_id', right_on='wc_method_id')\n \n # Get just the parameters of interest\n wc_df = wc_df.query('wc_parameter_id in %s' \n % str(tuple(par_df['parameter_id'].values)))\n \n # Join in sample dates\n wc_df = pd.merge(wc_df, samp_df, how='left',\n left_on='sample_id', right_on='water_sample_id')\n\n # Get just the near-surface samples\n wc_df = wc_df.query('(depth1 <= 1) and (depth2 <= 1)')\n \n # Join in parameter units\n sql = ('SELECT * FROM resa2.parameter_definitions')\n all_par_df = pd.read_sql_query(sql, engine)\n \n wc_df = pd.merge(wc_df, all_par_df, how='left',\n left_on='wc_parameter_id', right_on='parameter_id')\n \n # Join in station codes\n wc_df = pd.merge(wc_df, stn_df, how='left',\n left_on='station_id', right_on='station_id')\n \n # Convert units\n wc_df['value'] = wc_df['value'] * wc_df['conversion_factor']\n \n # Extract columns of interest\n wc_df = wc_df[['station_id', 'sample_date', 'name', \n 'value', 'entered_date_x']]\n \n # Check for duplicates\n dup_df = wc_df[wc_df.duplicated(subset=['station_id',\n 'sample_date',\n 'name'], \n keep=False)].sort_values(by=['station_id', \n 'sample_date', \n 'name'])\n\n if len(dup_df) > 0:\n print (' The database contains duplicate values for some station-'\n 'date-parameter combinations.\\n Only the most recent values '\n 'will be used, but you should check the repeated values are not '\n 'errors.\\n The duplicated entries are returned in a separate '\n 'dataframe.\\n')\n \n # Choose most recent record for each duplicate\n wc_df.sort_values(by='entered_date_x', inplace=True, ascending=True)\n\n # Drop duplicates\n wc_df.drop_duplicates(subset=['station_id', 'sample_date', 'name'],\n keep='last', inplace=True)\n \n # Sort\n wc_df.sort_values(by=['station_id', 'sample_date', 'name'],\n inplace=True)\n \n # Tidy\n del wc_df['entered_date_x'] \n wc_df.reset_index(inplace=True, drop=True)\n\n # Unstack\n wc_df.set_index(['station_id', 'sample_date', 'name'], inplace=True)\n wc_df = wc_df.unstack(level='name')\n wc_df.columns = wc_df.columns.droplevel()\n wc_df.reset_index(inplace=True)\n wc_df.columns.name = None\n\n # Extract year from date column\n wc_df['year'] = wc_df['sample_date'].map(lambda x: x.year)\n del wc_df['sample_date']\n \n # Groupby station_id and year\n grpd = wc_df.groupby(['station_id', 'year'])\n \n # Calculate median\n wc_df = grpd.agg('median')\n\n return stn_df, wc_df, dup_df\n\ndef conv_units_and_correct(wc_df):\n \"\"\" Take a dataframe of aggregated annual values in the units specified by\n RESA2.PARAMETERS and performs unit conversions to ueq/l. Also applies\n sea-salt correction where necessary.\n \n Args:\n wc_df: Dataframe in original units\n \n Returns:\n Dataframe in converted units\n \"\"\"\n import pandas as pd\n \n # Tabulate chemical properties\n chem_dict = {'molar_mass':[96, 35, 40, 24, 14, 39, 23, 14],\n 'valency':[2, 1, 2, 2, 1, 1, 1, 1],\n 'resa2_ref_ratio':[0.103, 1., 0.037, 0.196, \n 'N/A', 0.018, 0.859, 'N/A']}\n \n chem_df = pd.DataFrame(chem_dict, index=['SO4', 'Cl', 'Ca', 'Mg', \n 'NO3-N', 'K', 'Na', 'NH4-N'])\n chem_df = chem_df[['molar_mass', 'valency', 'resa2_ref_ratio']]\n\n # Fill NoData for ANC calculation. Assume that NH4 can be ignored if not \n # present.\n # If have data for NH4, fill data gaps with 0\n if 'NH4-N' in wc_df.columns:\n wc_df['NH4-N'].fillna(value=0, inplace=True)\n else: # Just assume 0\n wc_df['NH4-N'] = 0 \n \n # 1. 
Convert to ueq/l\n # 1.1. pH to H+\n wc_df['EH'] = 1E6 * 10**(-wc_df['pH'])\n \n # 1.2. Other pars\n for par in ['SO4', 'Cl', 'Mg', 'Ca', 'NO3-N', 'K', 'Na', 'NH4-N']:\n val = chem_df.at[par, 'valency']\n mm = chem_df.at[par, 'molar_mass']\n \n if par == 'NO3-N':\n wc_df['ENO3'] = wc_df[par] * val / mm\n elif par == 'NH4-N':\n wc_df['ENH4'] = wc_df[par] * val / mm\n else:\n wc_df['E%s' % par] = wc_df[par] * val * 1000. / mm\n \n # 2. Apply sea-salt correction\n for par in ['ESO4', 'EMg', 'ECa']:\n ref = chem_df.at[par[1:], 'resa2_ref_ratio']\n wc_df['%sX' % par] = wc_df[par] - (ref*wc_df['ECl'])\n \n # 3. Calculate combinations\n # 3.1. ESO4 + ECl\n wc_df['ESO4_ECl'] = wc_df['ESO4'] + wc_df['ECl']\n \n # 3.2. ECa + EMg\n wc_df['ECa_EMg'] = wc_df['ECa'] + wc_df['EMg']\n \n # 3.3. ECaX + EMgX\n wc_df['ECaX_EMgX'] = wc_df['ECaX'] + wc_df['EMgX']\n \n # 3.4. ANC = (ECa+EMg+EK+ENa+ENH4) - (ECl+ESO4+ENO3)\n wc_df['ANC'] = ((wc_df['ECa'] + wc_df['EMg'] + wc_df['EK'] + \n wc_df['ENa'] + wc_df['ENH4']) - \n (wc_df['ECl'] + wc_df['ESO4'] + wc_df['ENO3']))\n\n # 3.5. ANCX = (ECaX+EMgX+EK+ENa+ENH4) - (ECl+ESO4X+ENO3)\n wc_df['ANCX'] = ((wc_df['ECaX'] + wc_df['EMgX'] + wc_df['EK'] + \n wc_df['ENa'] + wc_df['ENH4']) - \n (wc_df['ECl'] + wc_df['ESO4X'] + wc_df['ENO3']))\n \n # 4. Delete unnecessary columns and tidy\n for col in ['SO4', 'Cl', 'Mg', 'Ca', 'NO3-N', 'K', 'Na', 'NH4-N', 'pH',\n 'EMg', 'ECa', 'EK', 'ENa', 'ENH4', 'EMgX', 'ECaX']:\n del wc_df[col]\n \n wc_df.reset_index(inplace=True)\n \n return wc_df\n\ndef run_trend_analysis(proj_list, engine, st_yr=None, end_yr=None,\n plot=False, fold=None):\n \"\"\" Run the trend analysis for the specified projects and time period.\n \n Args:\n proj_list: List of RESA2 project names for which to extract data\n engine: SQLAlchemy 'engine' object already connected to RESA2\n st_yr: First year to include in analysis. Pass None to start\n at the beginning of the series\n end_year: Last year to include in analysis. Pass None to start\n at the beginning of the series\n plot: Whether to generate a PNG plot of the Sen's slope \n regression\n fold: Folder in which to save PNGs if plot=True\n \n Returns: \n [res_df, dup_df, no_data_df]. Dataframe of statistics; dataframe of \n duplicated water chemistry values for investigation; dataframe of \n stations with no relevant data in the period of interest\n \"\"\"\n import pandas as pd, os\n \n # Check paths valid\n if plot:\n assert os.path.isdir(fold), 'The specified folder does not exist.'\n \n # Get raw data from db\n print ('Extracting data from RESA2...')\n stn_df, wc_df, dup_df = read_resa2(proj_list, engine)\n \n # Identify stations with no relevant records\n stns_no_data = (set(stn_df['station_id'].values) - \n set(wc_df.index.get_level_values('station_id')))\n \n if len(stns_no_data) > 0:\n print (' Some stations have no relevant data in the period '\n 'specified. 
Their IDs are returned in a separate dataframe.\\n')\n no_data_df = pd.DataFrame({'station_id':list(stns_no_data)})\n else:\n no_data_df = None\n \n print (' Done.')\n \n # Convert units and apply sea-salt correction\n print ('\\nConverting units and applying sea-salt correction...')\n wc_df = conv_units_and_correct(wc_df)\n print (' Done.')\n \n # Calculate stats \n # Container for output\n df_list = []\n\n # Loop over sites\n print ('\\nCalculating statistics...')\n for stn_id in wc_df['station_id'].unique():\n # Extract data for this site\n df = wc_df.query('station_id == @stn_id')\n\n # Modify col names\n names = list(df.columns)\n names[:2] = ['STATION_ID', 'YEAR']\n df.columns = names\n\n # Heleen wants the annual time series for each site for further analysis\n # Write df to output\n #out_ann_fold = (r'../../../Thematic_Trends_Report_2019/results/annual_chemistry_series')\n #out_ann_path = os.path.join(out_ann_fold, 'stn_%s.csv' % stn_id)\n #df_trunc = df.query('(YEAR>=1990) & (YEAR<=2016)') # Truncate to 1990 to 2016\n #df_trunc.to_csv(out_ann_path) \n \n # Run analysis\n df_list.append(wc_stats(df, st_yr=st_yr, end_yr=end_yr,\n plot=plot, fold=fold))\n \n res_df = pd.concat(df_list, axis=0)\n\n # Convert station_id cols to ints\n res_df['station_id'] = res_df['station_id'].map(int)\n dup_df['station_id'] = dup_df['station_id'].map(int)\n if no_data_df is not None:\n no_data_df['station_id'] = no_data_df['station_id'].map(int)\n \n print (' Done.') \n print ('\\nFinished.')\n \n return res_df, dup_df, no_data_df"
] | [
[
"scipy.stats.norm.ppf",
"pandas.merge",
"numpy.sqrt",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.tight_layout",
"numpy.unique",
"matplotlib.pyplot.close",
"numpy.zeros",
"pandas.read_sql",
"matplotlib.pyplot.figure",
"pandas.concat",
"pandas.notnull",
"numpy.isnan",
"matplotlib.pyplot.savefig",
"numpy.sum",
"matplotlib.pyplot.ylabel",
"pandas.read_sql_query",
"numpy.sign",
"scipy.stats.theilslopes"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [
"1.6",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
zfisher/trax | [
"c8187944fc036418a5c3b0491fc53c223e73faa6",
"bbabf6cc8a0682218927080bce33a4f90591aa0b",
"bbabf6cc8a0682218927080bce33a4f90591aa0b"
] | [
"trax/layers/normalization_test.py",
"trax/rl/envs/async_trajectory_collector_lib.py",
"trax/tf_numpy/numpy/tests/array_manipulation_test.py"
] | [
"# coding=utf-8\n# Copyright 2020 The Trax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Tests for normalization layers.\"\"\"\n\nfrom absl.testing import absltest\nimport numpy as onp\n\nfrom trax.layers import base\nfrom trax.layers import normalization\nfrom trax.math import numpy as np\nfrom trax.shapes import ShapeDtype\n\n\nclass NormalizationLayerTest(absltest.TestCase):\n\n def test_batch_norm_shape(self):\n input_signature = ShapeDtype((29, 5, 7, 20))\n result_shape = base.check_shape_agreement(normalization.BatchNorm(),\n input_signature)\n self.assertEqual(result_shape, input_signature.shape)\n\n def test_batch_norm(self):\n input_shape = (2, 3, 4)\n input_dtype = np.float32\n input_signature = ShapeDtype(input_shape, input_dtype)\n eps = 1e-5\n inp1 = np.reshape(np.arange(np.prod(input_shape), dtype=input_dtype),\n input_shape)\n m1 = 11.5 # Mean of this random input.\n v1 = 47.9167 # Variance of this random input.\n layer = normalization.BatchNorm(axis=(0, 1, 2))\n _, _ = layer.init(input_signature)\n state = layer.state\n onp.testing.assert_allclose(state[0], 0)\n onp.testing.assert_allclose(state[1], 1)\n self.assertEqual(state[2], 0)\n out = layer(inp1)\n state = layer.state\n onp.testing.assert_allclose(state[0], m1 * 0.001)\n onp.testing.assert_allclose(state[1], 0.999 + v1 * 0.001, rtol=1e-6)\n self.assertEqual(state[2], 1)\n onp.testing.assert_allclose(out, (inp1 - m1) / np.sqrt(v1 + eps),\n rtol=1e-6)\n\n def test_layer_norm_shape(self):\n input_signature = ShapeDtype((29, 5, 7, 20))\n result_shape = base.check_shape_agreement(\n normalization.LayerNorm(), input_signature)\n self.assertEqual(result_shape, input_signature.shape)\n\n def test_frn_shape(self):\n B, H, W, C = 64, 5, 7, 3 # pylint: disable=invalid-name\n input_signature = ShapeDtype((B, H, W, C))\n result_shape = base.check_shape_agreement(\n normalization.FilterResponseNorm(), input_signature)\n self.assertEqual(result_shape, input_signature.shape)\n\n result_shape = base.check_shape_agreement(\n normalization.FilterResponseNorm(learn_epsilon=False),\n input_signature)\n self.assertEqual(result_shape, input_signature.shape)\n\n\nif __name__ == '__main__':\n absltest.main()\n",
"# coding=utf-8\n# Copyright 2020 The Trax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Uitlity functions for the async trajectory collector.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport random\nimport time\n\nfrom absl import logging\nfrom tensor2tensor.envs import trajectory\nfrom tensorflow.compat.v1.io import gfile\nfrom trax.rl import ppo\nfrom trax.rl import trainers as rl_trainers\n\nLARGE_MAX_TRIES_FOR_POLICY_FILE = 100\n\n\n# TODO(afrozm): Is there a better way to poll for a file on CNS?\ndef get_newer_policy_model_file(output_dir,\n min_epoch=-1,\n sleep_time_secs=0.1,\n max_sleep_time_secs=1.0,\n max_tries=1,\n wait_forever=False,):\n \"\"\"Gets a policy model file subject to availability and wait time.\"\"\"\n\n while max_tries or wait_forever:\n max_tries -= 1\n policy_files = ppo.get_policy_model_files(output_dir)\n\n def do_wait(t):\n time.sleep(t)\n t *= 2\n return min(t, max_sleep_time_secs)\n\n # No policy files at all.\n if not policy_files:\n logging.info('There are no policy files in [%s], waiting for %s secs.',\n output_dir, sleep_time_secs)\n sleep_time_secs = do_wait(sleep_time_secs)\n continue\n\n # Check if we have a newer epoch.\n policy_file = policy_files[0]\n epoch = ppo.get_epoch_from_policy_model_file(policy_file)\n\n # We don't - wait.\n if epoch <= min_epoch:\n logging.info('epoch [%s] <= min_epoch [%s], waiting for %s secs.', epoch,\n min_epoch, sleep_time_secs)\n sleep_time_secs = do_wait(sleep_time_secs)\n continue\n\n # We do have a new file, return it.\n policy_file = policy_files[0]\n epoch = ppo.get_epoch_from_policy_model_file(policy_file)\n logging.info('Found epoch [%s] and policy file [%s]', epoch, policy_file)\n return policy_file, epoch\n\n # Exhausted our waiting limit.\n return None\n\n\ndef dump_trajectory(output_dir, epoch, env_id, temperature, random_string,\n trajs):\n \"\"\"Write the trajectory to disk.\"\"\"\n\n assert 1 == len(trajs)\n traj = trajs[0]\n\n trajectory_file_name = trajectory.TRAJECTORY_FILE_FORMAT.format(\n epoch=epoch, env_id=env_id, temperature=temperature, r=random_string)\n\n with gfile.GFile(os.path.join(output_dir, trajectory_file_name), 'w') as f:\n trajectory.get_pickle_module().dump(traj, f)\n\n\ndef continuously_collect_trajectories(output_dir,\n train_env,\n eval_env,\n trajectory_dump_dir=None,\n env_id=None,\n max_trajectories_to_collect=None,\n try_abort=True):\n \"\"\"Instantiates a PPO trainer and collects trajectories.\"\"\"\n\n # Make the PPO trainer.\n ppo_trainer = rl_trainers.PPO(\n output_dir=output_dir,\n train_env=train_env,\n eval_env=eval_env,\n trajectory_dump_dir=trajectory_dump_dir,\n )\n\n # TODO(afrozm): Update base_trainer interface to support SimPLe as well.\n assert isinstance(ppo_trainer, rl_trainers.PPO)\n\n assert env_id is not None\n\n # Get an initial policy and wait a forever to get it if needed.\n policy_and_epoch = get_newer_policy_model_file(output_dir, 
wait_forever=True)\n assert policy_and_epoch\n policy_file, epoch = policy_and_epoch\n logging.info('Read initial policy for epoch [%s] -> [%s]', epoch, policy_file)\n\n # Returns immediately if there is a newer epoch available.\n def is_newer_policy_file_available(epoch_, sleep_time_secs_=0.1):\n return get_newer_policy_model_file(\n output_dir, min_epoch=epoch_, sleep_time_secs=sleep_time_secs_)\n\n # Does a __done__ file exist?\n def done_file_exists():\n return gfile.exists(os.path.join(output_dir, '__done__'))\n\n assert 1 == train_env.batch_size\n assert 1 == eval_env.batch_size\n\n temperature = 1.0\n\n trajectories_collected = 0\n\n train_env_trajectory_dump_dir = os.path.join(output_dir, 'trajectories/train')\n eval_env_trajectory_dump_dir = os.path.join(output_dir, 'trajectories/eval')\n\n gfile.makedirs(train_env_trajectory_dump_dir)\n gfile.makedirs(eval_env_trajectory_dump_dir)\n\n while max_trajectories_to_collect is None or trajectories_collected < int(\n max_trajectories_to_collect):\n logging.info('Collecting a trajectory, trajectories_collected = %s',\n trajectories_collected)\n\n # Abort function -- if something newever is available, then abort the\n # current computation and reload.\n\n # Useful if env.step is long.\n def long_abort_fn():\n # We want this to be as quick as possible.\n return (is_newer_policy_file_available(epoch, 0) is not None) or (\n done_file_exists())\n\n abort_fn = long_abort_fn if try_abort else None\n\n # Collect a training trajectory.\n trajs, n_done, unused_timing_info, unused_model_state = (\n ppo_trainer.collect_trajectories(train=True,\n temperature=temperature,\n abort_fn=abort_fn,\n raw_trajectory=True))\n\n if done_file_exists():\n logging.info('__done__ file found in %s, we are done here.', output_dir)\n break\n\n if trajs and n_done > 0:\n assert 1 == n_done\n trajectories_collected += n_done\n\n # Write the trajectory down.\n logging.info(\n 'Dumping the collected trajectory, trajectories_collected = %s',\n trajectories_collected)\n dump_trajectory(train_env_trajectory_dump_dir, epoch, env_id, temperature,\n str(random.randint(0, 2**31 - 1)), trajs)\n else:\n logging.info('Computation was aborted, a new policy is available.')\n\n # This maybe useless, since `abort_fn` will take care of it. We might want\n # to have this here if abort_fn is False always.\n # Do we have a newer policy?\n policy_file_and_epoch = is_newer_policy_file_available(epoch)\n if policy_file_and_epoch is None:\n # Continue churning out these policies.\n logging.info(\"We don't have a newer policy, continuing with the old one.\")\n continue\n\n # We have a newer policy, read it and update the parameters.\n policy_file, epoch = policy_file_and_epoch\n logging.info(\n 'We have a newer policy epoch [%s], file [%s], updating parameters.',\n epoch, policy_file)\n ppo_trainer.update_optimization_state(output_dir)\n logging.info('Parameters of PPOTrainer updated.')\n\n # Check that the epochs match.\n assert epoch == ppo_trainer.epoch\n",
"# coding=utf-8\n# Copyright 2020 The Trax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tf numpy array manipulation methods.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom trax.tf_numpy.numpy import array_creation\nfrom trax.tf_numpy.numpy import array_manipulation\nfrom trax.tf_numpy.numpy import arrays\n\n\nclass ArrayManipulationTest(tf.test.TestCase):\n\n def setUp(self):\n super(ArrayManipulationTest, self).setUp()\n self.array_transforms = [\n lambda x: x,\n tf.convert_to_tensor,\n np.array,\n array_creation.array,\n ]\n\n def testBroadcastTo(self):\n\n def run_test(arr, shape):\n for fn in self.array_transforms:\n arg1 = fn(arr)\n self.match(\n array_manipulation.broadcast_to(arg1, shape),\n np.broadcast_to(arg1, shape))\n\n run_test(1, 2)\n run_test(1, (2, 2))\n run_test([1, 2], (2, 2))\n run_test([[1], [2]], (2, 2))\n run_test([[1, 2]], (3, 2))\n run_test([[[1, 2]], [[3, 4]], [[5, 6]]], (3, 4, 2))\n\n def match_shape(self, actual, expected, msg=None):\n if msg:\n msg = 'Shape match failed for: {}. Expected: {} Actual: {}'.format(\n msg, expected.shape, actual.shape)\n self.assertEqual(actual.shape, expected.shape, msg=msg)\n if msg:\n msg = 'Shape: {} is not a tuple for {}'.format(actual.shape, msg)\n self.assertIsInstance(actual.shape, tuple, msg=msg)\n\n def match_dtype(self, actual, expected, msg=None):\n if msg:\n msg = 'Dtype match failed for: {}. Expected: {} Actual: {}.'.format(\n msg, expected.dtype, actual.dtype)\n self.assertEqual(actual.dtype, expected.dtype, msg=msg)\n\n def match(self, actual, expected, msg=None):\n msg_ = 'Expected: {} Actual: {}'.format(expected, actual)\n if msg:\n msg = '{} {}'.format(msg_, msg)\n else:\n msg = msg_\n self.assertIsInstance(actual, arrays.ndarray)\n self.match_dtype(actual, expected, msg)\n self.match_shape(actual, expected, msg)\n if not actual.shape:\n self.assertEqual(actual.tolist(), expected.tolist())\n else:\n self.assertSequenceEqual(actual.tolist(), expected.tolist())\n\n\nif __name__ == '__main__':\n tf.compat.v1.enable_eager_execution()\n tf.test.main()\n"
] | [
[
"numpy.testing.assert_allclose"
],
[
"tensorflow.compat.v1.io.gfile.makedirs"
],
[
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.compat.v1.enable_eager_execution",
"numpy.broadcast_to"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
watanka/CRAFTS-implementation | [
"bc514638755fe798a0d5eb583d6d477e8eb55bff"
] | [
"file_utils.py"
] | [
"# -*- coding: utf-8 -*-\nimport os\nimport numpy as np\nimport cv2\nimport imgproc\nfrom PIL import Image, ImageDraw\n\n\n\n# borrowed from https://github.com/lengstrom/fast-style-transfer/blob/master/src/utils.py\ndef get_files(img_dir):\n imgs, masks, xmls = list_files(img_dir)\n return imgs, masks, xmls\n\ndef list_files(in_path):\n img_files = []\n mask_files = []\n gt_files = []\n for (dirpath, dirnames, filenames) in os.walk(in_path):\n for file in filenames:\n filename, ext = os.path.splitext(file)\n ext = str.lower(ext)\n if ext == '.jpg' or ext == '.jpeg' or ext == '.gif' or ext == '.png' or ext == '.pgm':\n img_files.append(os.path.join(dirpath, file))\n elif ext == '.bmp':\n mask_files.append(os.path.join(dirpath, file))\n elif ext == '.xml' or ext == '.gt' or ext == '.txt':\n gt_files.append(os.path.join(dirpath, file))\n elif ext == '.zip':\n continue\n # img_files.sort()\n # mask_files.sort()\n # gt_files.sort()\n return img_files, mask_files, gt_files\n\ndef saveResult(img_file, img, boxes, font,dirname='./result/', verticals=None, texts=None):\n \"\"\" save text detection result one by one\n Args:\n img_file (str): image file name\n img (array): raw image context\n boxes (array): array of result file\n Shape: [num_detections, 4] for BB output / [num_detections, 4] for QUAD output\n Return:\n None\n \"\"\"\n img = np.array(img)\n img_pil = Image.fromarray(img)\n imgdraw = ImageDraw.Draw(img_pil)\n # make result file list\n filename, file_ext = os.path.splitext(os.path.basename(img_file))\n\n # result directory\n res_file = dirname + \"res_\" + filename + '.txt'\n res_img_file = dirname + \"res_\" + filename + '.jpg'\n\n if not os.path.isdir(dirname):\n os.mkdir(dirname)\n\n with open(res_file, 'w') as f:\n \n if texts is not None :\n for i, (box, text) in enumerate(zip(boxes, texts)):\n poly = np.array(box).astype(np.int32).reshape((-1))\n strResult = ','.join([str(p) for p in poly]) +','+text +'\\r\\n'\n # poly = np.array(box).astype(np.int32)\n # min_x = np.min(poly[:,0])\n # max_x = np.max(poly[:,0])\n # min_y = np.min(poly[:,1])\n # max_y = np.max(poly[:,1])\n # strResult = ','.join([str(min_x), str(min_y), str(max_x), str(max_y)]) + '\\r\\n'\n f.write(strResult)\n\n poly = poly.reshape(-1, 2)\n# cv2.polylines(img, [poly.reshape((-1, 1, 2))], True, color=(0, 0, 255), thickness=2)\n# cv2.putText(img, text, tuple(poly[1]), cv2.FONT_HERSHEY_SIMPLEX, fontScale = 0.1, color = (0,0,255), thickness= 1)\n imgdraw.polygon(poly.flatten().tolist(), fill = None, outline = (0,0,255))\n imgdraw.text(tuple(poly[1]), text,font = font, fill = (0,0,255))\n \n ptColor = (0, 255, 255)\n if verticals is not None:\n if verticals[i]:\n ptColor = (255, 0, 0)\n \n else : \n \n for i, box in enumerate(boxes):\n poly = np.array(box).astype(np.int32).reshape((-1))\n strResult = ','.join([str(p) for p in poly]) + '\\r\\n'\n # poly = np.array(box).astype(np.int32)\n # min_x = np.min(poly[:,0])\n # max_x = np.max(poly[:,0])\n # min_y = np.min(poly[:,1])\n # max_y = np.max(poly[:,1])\n # strResult = ','.join([str(min_x), str(min_y), str(max_x), str(max_y)]) + '\\r\\n'\n f.write(strResult)\n\n poly = poly.reshape(-1, 2)\n# cv2.polylines(img, [poly.reshape((-1, 1, 2))], True, color=(0, 0, 255), thickness=2)\n \n imgdraw.polygon([poly.reshape((-1,1,2))], fill = None, outline =(0,0,255))\n\n ptColor = (0, 255, 255)\n if verticals is not None:\n if verticals[i]:\n ptColor = (255, 0, 0)\n #\n # if texts is not None:\n # font = cv2.FONT_HERSHEY_SIMPLEX\n # font_scale = 0.5\n # cv2.putText(img, 
\"{}\".format(texts[i]), (poly[0][0]+1, poly[0][1]+1), font, font_scale, (0, 0, 0), thickness=1)\n # cv2.putText(img, \"{}\".format(texts[i]), tuple(poly[0]), font, font_scale, (0, 255, 255), thickness=1)\n #\n # #Save result image\n cv2.imwrite(res_img_file, np.array(img_pil))\n\ndef load_txt(file, delimiter = ',') :\n ## character bbox는 \\n\\n으로 box별 구분\n coords_ls = []\n with open(file, 'r', encoding = 'utf-8-sig') as f :\n boxes_list = f.read().split('\\n\\n')\n for boxes in boxes_list :\n if boxes.strip() == '' :\n continue\n char_boxes = boxes.split('\\n')\n # char_txt는 라벨이 따로 없다\n charbox_ls = []\n for charbox in char_boxes :\n if len(char_boxes) == 0 :\n continue\n coords = charbox.split(delimiter)\n coords = [float(c) for c in coords if c != '']\n if len(coords) == 0 :\n continue\n coords = np.array(coords).reshape(-1,2)\n \n charbox_ls.append(coords)\n if len(charbox_ls) != 0 :\n coords_ls.append(np.array(charbox_ls))\n \n \n return coords_ls\n "
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SaraR-1/model-patching | [
"97b30bad4bb4575a5f3a4cc23fbd333b10a057a8"
] | [
"augmentation/methods/cyclegan/utils.py"
] | [
"import datetime\n\nimport tensorflow as tf\nimport random\nimport wandb\nfrom tensorflow_examples.models.pix2pix import pix2pix\n\nfrom augmentation.dataflows.utils import create_paired_direct_dataflow, \\\n create_paired_parallel_dataflow_via_numpy\nfrom augmentation.methods.cyclegan.models import mnist_unet_generator, mnist_discriminator, unet_generator\nfrom augmentation.utilities.optim import build_lr_scheduler\nfrom augmentation.utilities.visualize import gallery\n\n\n# Other places to look for training GANs\n# https://github.com/eriklindernoren/Keras-GAN\n\ndef gradient_penalty(f, real, fake, mode, scale=10.0):\n # https://github.com/LynnHo/CycleGAN-Tensorflow-2/blob/master/tf2gan/loss.py\n def _gradient_penalty(f, real, fake=None):\n def _interpolate(a, b=None):\n if b is None: # interpolation in DRAGAN\n beta = tf.random.uniform(shape=tf.shape(a), minval=0., maxval=1.)\n b = a + 0.5 * tf.math.reduce_std(a) * beta\n shape = [tf.shape(a)[0]] + [1] * (a.shape.ndims - 1)\n alpha = tf.random.uniform(shape=shape, minval=0., maxval=1.)\n inter = a + alpha * (b - a)\n inter.set_shape(a.shape)\n return inter\n\n x = _interpolate(real, fake)\n with tf.GradientTape() as t:\n t.watch(x)\n pred = tf.reduce_mean(tf.reshape(f(x), [tf.shape(real)[0], -1]), axis=1)\n grad = t.gradient(pred, x)\n norm = tf.norm(tf.reshape(grad, [tf.shape(grad)[0], -1]), axis=1)\n gp = tf.reduce_mean((norm - 1.) ** 2)\n\n return gp\n\n if mode == 'none':\n gp = tf.constant(0, dtype=real.dtype)\n elif mode == 'dragan':\n gp = _gradient_penalty(f, real)\n elif mode == 'wgan-gp':\n gp = _gradient_penalty(f, real, fake)\n else:\n raise NotImplementedError\n\n return gp * scale\n\n\nclass ReplayBuffer(object):\n \"\"\"\n Adapted from https://github.com/tensorflow/models/blob/master/research/pcl_rl/replay_buffer.py\n \"\"\"\n\n def __init__(self, max_size):\n self.max_size = max_size\n self.cur_size = 0\n self.buffer = {}\n self.oldest_idx = 0\n self.init_length = 0\n\n def __len__(self):\n return self.cur_size\n\n def add(self, images):\n idx = 0\n while self.cur_size < self.max_size and idx < len(images):\n self.buffer[self.cur_size] = images[idx]\n self.cur_size += 1\n idx += 1\n\n if idx < len(images):\n remove_idxs = self.remove_n(len(images) - idx)\n for remove_idx in remove_idxs:\n self.buffer[remove_idx] = images[idx]\n idx += 1\n\n assert len(self.buffer) == self.cur_size\n\n def remove_n(self, n):\n return random.sample(range(self.init_length, self.cur_size), n)\n\n def get_batch(self, n):\n idxs = random.sample(range(self.cur_size), n)\n return [self.buffer[idx] for idx in idxs]\n\n def get_tf_batch(self, n):\n idxs = random.sample(range(self.cur_size), n)\n return tf.convert_to_tensor([self.buffer[idx] for idx in idxs])\n\n\ndef wgan_loss(targets, predictions):\n return tf.reduce_mean((-2 * targets + 1.) 
* predictions)\n\n\ndef build_gan_loss_fn(loss_name):\n if loss_name == 'bce':\n return tf.keras.losses.BinaryCrossentropy(from_logits=True)\n elif loss_name == 'lsgan':\n return tf.keras.losses.MeanSquaredError()\n elif loss_name == 'wgan':\n return wgan_loss\n else:\n raise NotImplementedError\n\n\ndef discriminator_loss(real, generated, loss_fn):\n # Classification loss for the discriminator, maximize log-prob of the real example\n real_loss = loss_fn(tf.ones_like(real), real)\n generated_loss = loss_fn(tf.zeros_like(generated), generated)\n total_disc_loss = real_loss + generated_loss\n return total_disc_loss * 0.5\n\n\ndef generator_loss(generated, loss_fn):\n # The discriminator's probability (generated) for realness is maximized\n return loss_fn(tf.ones_like(generated), generated)\n\n\ndef cycle_loss(real_image, cycled_image, scale):\n # Cycle-consistency using an L! loss\n return scale * tf.reduce_mean(tf.abs(real_image - cycled_image))\n\n\ndef identity_loss(real_image, same_image, scale):\n # Map the image to itself and compute the L1 loss\n return scale * 0.5 * tf.reduce_mean(tf.abs(real_image - same_image))\n\n\ndef build_cyclegan_models(n_channels, norm_type):\n assert norm_type in ['instancenorm', 'batchnorm']\n generator_g = pix2pix.unet_generator(n_channels, norm_type=norm_type)\n generator_f = pix2pix.unet_generator(n_channels, norm_type=norm_type)\n\n discriminator_x = pix2pix.discriminator(norm_type=norm_type, target=False)\n discriminator_y = pix2pix.discriminator(norm_type=norm_type, target=False)\n\n return generator_g, generator_f, discriminator_x, discriminator_y\n\n\ndef build_mnist_cyclegan_models(norm_type):\n assert norm_type in ['instancenorm', 'batchnorm']\n generator_g = mnist_unet_generator(norm_type=norm_type)\n generator_f = mnist_unet_generator(norm_type=norm_type)\n\n discriminator_x = mnist_discriminator(norm_type=norm_type, target=False)\n discriminator_y = mnist_discriminator(norm_type=norm_type, target=False)\n\n return generator_g, generator_f, discriminator_x, discriminator_y\n\n\ndef get_models_from_input_shape(input_shape, norm_type, output_init=0.02, residual_output=False):\n if input_shape == (28, 28, 1):\n # MNIST-like data\n return mnist_unet_generator(norm_type=norm_type), \\\n mnist_discriminator(norm_type=norm_type, target=False)\n elif input_shape == (256, 256, 3):\n # TODO: just use our unet_generator fn\n if residual_output is True or output_init != 0.02:\n raise NotImplementedError\n return pix2pix.unet_generator(output_channels=3, norm_type=norm_type), \\\n pix2pix.discriminator(norm_type=norm_type, target=False)\n else:\n return unet_generator(output_channels=3, input_shape=input_shape, norm_type=norm_type,\n output_init=output_init, residual_output=residual_output), \\\n pix2pix.discriminator(norm_type=norm_type, target=False)\n\n\ndef build_models(source_input_shape, target_input_shape, norm_type, output_init=0.02, residual_output=False):\n assert norm_type in ['instancenorm', 'batchnorm']\n generator_s_to_t, discriminator_s = get_models_from_input_shape(source_input_shape, norm_type, output_init, residual_output)\n generator_t_to_s, discriminator_t = get_models_from_input_shape(target_input_shape, norm_type, output_init, residual_output)\n\n return generator_s_to_t, generator_t_to_s, discriminator_s, discriminator_t\n\n\ndef build_optimizers(lr_gen=2e-4, lr_disc=2e-4,\n beta_1_gen=0.5, beta_1_disc=0.5,\n lr_scheduler='constant', lr_decay_steps=None):\n generator_g_optimizer = 
tf.keras.optimizers.Adam(build_lr_scheduler(lr_scheduler, 0, 0, lr_gen,\n lr_decay_steps=lr_decay_steps),\n beta_1=beta_1_gen)\n generator_f_optimizer = tf.keras.optimizers.Adam(build_lr_scheduler(lr_scheduler, 0, 0, lr_gen,\n lr_decay_steps=lr_decay_steps),\n beta_1=beta_1_gen)\n\n discriminator_x_optimizer = tf.keras.optimizers.Adam(build_lr_scheduler(lr_scheduler, 0, 0, lr_disc,\n lr_decay_steps=lr_decay_steps),\n beta_1=beta_1_disc)\n discriminator_y_optimizer = tf.keras.optimizers.Adam(build_lr_scheduler(lr_scheduler, 0, 0, lr_disc,\n lr_decay_steps=lr_decay_steps),\n beta_1=beta_1_disc)\n\n return generator_g_optimizer, generator_f_optimizer, discriminator_x_optimizer, discriminator_y_optimizer\n\n\ndef create_cyclegan_data_generator(source_dataset, target_dataset, batch_size, augmentations,\n dataflow, cache_dir):\n if dataflow == 'disk_cached':\n cache_dir = cache_dir + datetime.datetime.now().strftime('%d_%m_%y__%H_%M_%S')\n # Shuffle hangs sometimes (e.g. for horse2zebra)\n return create_paired_direct_dataflow(source_dataset, target_dataset, batch_size,\n augmentations, x_only=True,\n cache_dir1=cache_dir + '1',\n cache_dir2=cache_dir + '2',\n shuffle=True)\n elif dataflow == 'in_memory':\n return create_paired_parallel_dataflow_via_numpy(source_dataset, target_dataset,\n batch_size, augmentations, x_only=True)\n else:\n raise NotImplementedError\n\n\ndef generate_and_log_one_image_batch(data_generator,\n generator_g,\n generator_f,\n step):\n # Grab a batch from the dataset\n for real_x, real_y in data_generator:\n # Convert to tensors\n real_x, real_y = tf.convert_to_tensor(real_x), tf.convert_to_tensor(real_y)\n\n # Compute the fake examples\n fake_y = generator_g(real_x, training=True)\n fake_x = generator_f(real_y, training=True)\n\n # Cycle the fake examples\n cycled_x = generator_f(fake_y, training=True)\n cycled_y = generator_g(fake_x, training=True)\n\n # Compute the identity examples\n same_x = generator_f(real_x, training=True)\n same_y = generator_g(real_y, training=True)\n\n # Log everything to Weights and Biases\n wandb.log({'test/real_x': wandb.Image(gallery(real_x.numpy() * 0.5 + 0.5)),\n 'test/fake_x': wandb.Image(gallery(fake_x.numpy() * 0.5 + 0.5)),\n 'test/cycled_x': wandb.Image(gallery(cycled_x.numpy() * 0.5 + 0.5)),\n 'test/same_x': wandb.Image(gallery(same_x.numpy() * 0.5 + 0.5)),\n 'test/real_y': wandb.Image(gallery(real_y.numpy() * 0.5 + 0.5)),\n 'test/fake_y': wandb.Image(gallery(fake_y.numpy() * 0.5 + 0.5)),\n 'test/cycled_y': wandb.Image(gallery(cycled_y.numpy() * 0.5 + 0.5)),\n 'test/same_y': wandb.Image(gallery(same_y.numpy() * 0.5 + 0.5))}, step=step)\n\n # Break after a single batch: note, this will not run if you remove the break due to wandb reasons (ask Karan)\n break\n\n\nif __name__ == '__main__':\n buffer = ReplayBuffer(1)\n buffer.add([1])\n buffer.add([2])\n buffer.add([3])\n print(buffer.get_batch(1))\n print(buffer.get_batch(1))\n print(buffer.get_batch(1))\n buffer.add([4])\n print(buffer.get_batch(1))\n print(buffer.buffer)\n\n buffer = ReplayBuffer(1)\n buffer.add(tf.convert_to_tensor([1]))\n buffer.add(tf.convert_to_tensor([2]))\n buffer.add(tf.convert_to_tensor([3]))\n print(tf.convert_to_tensor(buffer.get_batch(1)))\n print(buffer.get_batch(1))\n print(buffer.get_batch(1))\n buffer.add(tf.convert_to_tensor([4]))\n print(buffer.get_batch(1))\n print(buffer.buffer)\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.constant",
"tensorflow.reduce_mean",
"tensorflow.shape",
"tensorflow.keras.losses.MeanSquaredError",
"tensorflow.random.uniform",
"tensorflow.ones_like",
"tensorflow.keras.losses.BinaryCrossentropy",
"tensorflow.math.reduce_std",
"tensorflow.zeros_like",
"tensorflow.abs",
"tensorflow.GradientTape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13",
"1.10",
"1.12"
]
}
] |
globusgenomics/galaxy | [
"7caf74d9700057587b3e3434c64e82c5b16540f1",
"7caf74d9700057587b3e3434c64e82c5b16540f1",
"7caf74d9700057587b3e3434c64e82c5b16540f1",
"7caf74d9700057587b3e3434c64e82c5b16540f1",
"7caf74d9700057587b3e3434c64e82c5b16540f1",
"7caf74d9700057587b3e3434c64e82c5b16540f1",
"7caf74d9700057587b3e3434c64e82c5b16540f1",
"7caf74d9700057587b3e3434c64e82c5b16540f1",
"7caf74d9700057587b3e3434c64e82c5b16540f1",
"7caf74d9700057587b3e3434c64e82c5b16540f1",
"7caf74d9700057587b3e3434c64e82c5b16540f1",
"7caf74d9700057587b3e3434c64e82c5b16540f1",
"7caf74d9700057587b3e3434c64e82c5b16540f1",
"7caf74d9700057587b3e3434c64e82c5b16540f1"
] | [
"tools/intogen/runtime/pyenv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/lsmr.py",
"tools/intogen/runtime/pyenv/lib/python2.7/site-packages/scipy/spatial/__init__.py",
"tools/intogen/runtime/pyenv/lib/python2.7/site-packages/scipy/signal/tests/test_filter_design.py",
"tools/intogen/runtime/pyenv/lib/python2.7/site-packages/scipy/sparse/lil.py",
"tools/intogen/runtime/pyenv/lib/python2.7/site-packages/scipy/optimize/_basinhopping.py",
"tools/intogen/runtime/pyenv/lib/python2.7/site-packages/scipy/fftpack/tests/test_basic.py",
"tools/intogen/runtime/pyenv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_connected_components.py",
"tools/intogen/runtime/pyenv/lib/python2.7/site-packages/scipy/optimize/nnls.py",
"tools/intogen/runtime/pyenv/lib/python2.7/site-packages/scipy/lib/blas/__init__.py",
"tools/intogen/runtime/pyenv/lib/python2.7/site-packages/scipy/signal/tests/test_array_tools.py",
"tools/intogen/runtime/pyenv/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp.py",
"tools/intogen/runtime/pyenv/lib/python2.7/site-packages/pandas/tools/tile.py",
"tools/intogen/runtime/pyenv/lib/python2.7/site-packages/scipy/interpolate/__init__.py",
"tools/intogen/runtime/pyenv/lib/python2.7/site-packages/pandas/core/panelnd.py"
] | [
"\"\"\"\nCopyright (C) 2010 David Fong and Michael Saunders\n\nLSMR uses an iterative method.\n\n07 Jun 2010: Documentation updated\n03 Jun 2010: First release version in Python\n\nDavid Chin-lung Fong [email protected]\nInstitute for Computational and Mathematical Engineering\nStanford University\n\nMichael Saunders [email protected]\nSystems Optimization Laboratory\nDept of MS&E, Stanford University.\n\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\n__all__ = ['lsmr']\n\nfrom numpy import zeros, infty\nfrom numpy.linalg import norm\nfrom math import sqrt\nfrom scipy.sparse.linalg.interface import aslinearoperator\n\nfrom .lsqr import _sym_ortho\n\ndef lsmr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8,\n maxiter=None, show=False):\n \"\"\"Iterative solver for least-squares problems.\n\n lsmr solves the system of linear equations ``Ax = b``. If the system\n is inconsistent, it solves the least-squares problem ``min ||b - Ax||_2``.\n A is a rectangular matrix of dimension m-by-n, where all cases are\n allowed: m = n, m > n, or m < n. B is a vector of length m.\n The matrix A may be dense or sparse (usually sparse).\n\n .. versionadded:: 0.11.0\n\n Parameters\n ----------\n A : {matrix, sparse matrix, ndarray, LinearOperator}\n Matrix A in the linear system.\n b : (m,) ndarray\n Vector b in the linear system.\n damp : float\n Damping factor for regularized least-squares. `lsmr` solves\n the regularized least-squares problem::\n\n min ||(b) - ( A )x||\n ||(0) (damp*I) ||_2\n\n where damp is a scalar. If damp is None or 0, the system\n is solved without regularization.\n atol, btol : float\n Stopping tolerances. `lsmr` continues iterations until a\n certain backward error estimate is smaller than some quantity\n depending on atol and btol. Let ``r = b - Ax`` be the\n residual vector for the current approximate solution ``x``.\n If ``Ax = b`` seems to be consistent, ``lsmr`` terminates\n when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``.\n Otherwise, lsmr terminates when ``norm(A^{T} r) <=\n atol * norm(A) * norm(r)``. If both tolerances are 1.0e-6 (say),\n the final ``norm(r)`` should be accurate to about 6\n digits. (The final x will usually have fewer correct digits,\n depending on ``cond(A)`` and the size of LAMBDA.) If `atol`\n or `btol` is None, a default value of 1.0e-6 will be used.\n Ideally, they should be estimates of the relative error in the\n entries of A and B respectively. For example, if the entries\n of `A` have 7 correct digits, set atol = 1e-7. This prevents\n the algorithm from doing unnecessary work beyond the\n uncertainty of the input data.\n conlim : float\n `lsmr` terminates if an estimate of ``cond(A)`` exceeds\n `conlim`. For compatible systems ``Ax = b``, conlim could be\n as large as 1.0e+12 (say). For least-squares problems,\n `conlim` should be less than 1.0e+8. If `conlim` is None, the\n default value is 1e+8. Maximum precision can be obtained by\n setting ``atol = btol = conlim = 0``, but the number of\n iterations may then be excessive.\n maxiter : int\n `lsmr` terminates if the number of iterations reaches\n `maxiter`. The default is ``maxiter = min(m, n)``. 
For\n ill-conditioned systems, a larger value of `maxiter` may be\n needed.\n show : bool\n Print iterations logs if ``show=True``.\n\n Returns\n -------\n x : ndarray of float\n Least-square solution returned.\n istop : int\n istop gives the reason for stopping::\n\n istop = 0 means x=0 is a solution.\n = 1 means x is an approximate solution to A*x = B,\n according to atol and btol.\n = 2 means x approximately solves the least-squares problem\n according to atol.\n = 3 means COND(A) seems to be greater than CONLIM.\n = 4 is the same as 1 with atol = btol = eps (machine\n precision)\n = 5 is the same as 2 with atol = eps.\n = 6 is the same as 3 with CONLIM = 1/eps.\n = 7 means ITN reached maxiter before the other stopping\n conditions were satisfied.\n\n itn : int\n Number of iterations used.\n normr : float\n ``norm(b-Ax)``\n normar : float\n ``norm(A^T (b - Ax))``\n norma : float\n ``norm(A)``\n conda : float\n Condition number of A.\n normx : float\n ``norm(x)``\n\n References\n ----------\n .. [1] D. C.-L. Fong and M. A. Saunders,\n \"LSMR: An iterative algorithm for sparse least-squares problems\",\n SIAM J. Sci. Comput., vol. 33, pp. 2950-2971, 2011.\n http://arxiv.org/abs/1006.0758\n .. [2] LSMR Software, http://www.stanford.edu/~clfong/lsmr.html\n\n \"\"\"\n\n A = aslinearoperator(A)\n b = b.squeeze()\n\n msg=('The exact solution is x = 0 ',\n 'Ax - b is small enough, given atol, btol ',\n 'The least-squares solution is good enough, given atol ',\n 'The estimate of cond(Abar) has exceeded conlim ',\n 'Ax - b is small enough for this machine ',\n 'The least-squares solution is good enough for this machine',\n 'Cond(Abar) seems to be too large for this machine ',\n 'The iteration limit has been reached ')\n\n hdg1 = ' itn x(1) norm r norm A''r'\n hdg2 = ' compatible LS norm A cond A'\n pfreq = 20 # print frequency (for repeating the heading)\n pcount = 0 # print counter\n\n m, n = A.shape\n\n # stores the num of singular values\n minDim = min([m, n])\n\n if maxiter is None:\n maxiter = minDim\n\n if show:\n print(' ')\n print('LSMR Least-squares solution of Ax = b\\n')\n print('The matrix A has %8g rows and %8g cols' % (m, n))\n print('damp = %20.14e\\n' % (damp))\n print('atol = %8.2e conlim = %8.2e\\n' % (atol, conlim))\n print('btol = %8.2e maxiter = %8g\\n' % (btol, maxiter))\n\n u = b\n beta = norm(u)\n\n v = zeros(n)\n alpha = 0\n\n if beta > 0:\n u = (1 / beta) * u\n v = A.rmatvec(u)\n alpha = norm(v)\n\n if alpha > 0:\n v = (1 / alpha) * v\n\n\n # Initialize variables for 1st iteration.\n\n itn = 0\n zetabar = alpha * beta\n alphabar = alpha\n rho = 1\n rhobar = 1\n cbar = 1\n sbar = 0\n\n h = v.copy()\n hbar = zeros(n)\n x = zeros(n)\n\n # Initialize variables for estimation of ||r||.\n\n betadd = beta\n betad = 0\n rhodold = 1\n tautildeold = 0\n thetatilde = 0\n zeta = 0\n d = 0\n\n # Initialize variables for estimation of ||A|| and cond(A)\n\n normA2 = alpha * alpha\n maxrbar = 0\n minrbar = 1e+100\n normA = sqrt(normA2)\n condA = 1\n normx = 0\n\n # Items for use in stopping rules.\n normb = beta\n istop = 0\n ctol = 0\n if conlim > 0:\n ctol = 1 / conlim\n normr = beta\n\n # Reverse the order here from the original matlab code because\n # there was an error on return when arnorm==0\n normar = alpha * beta\n if normar == 0:\n if show:\n print(msg[0])\n return x, istop, itn, normr, normar, normA, condA, normx\n\n if show:\n print(' ')\n print(hdg1, hdg2)\n test1 = 1\n test2 = alpha / beta\n str1 = '%6g %12.5e' % (itn, x[0])\n str2 = ' %10.3e %10.3e' % (normr, normar)\n 
str3 = ' %8.1e %8.1e' % (test1, test2)\n print(''.join([str1, str2, str3]))\n\n # Main iteration loop.\n while itn < maxiter:\n itn = itn + 1\n\n # Perform the next step of the bidiagonalization to obtain the\n # next beta, u, alpha, v. These satisfy the relations\n # beta*u = a*v - alpha*u,\n # alpha*v = A'*u - beta*v.\n\n u = A.matvec(v) - alpha * u\n beta = norm(u)\n\n if beta > 0:\n u = (1 / beta) * u\n v = A.rmatvec(u) - beta * v\n alpha = norm(v)\n if alpha > 0:\n v = (1 / alpha) * v\n\n # At this point, beta = beta_{k+1}, alpha = alpha_{k+1}.\n\n # Construct rotation Qhat_{k,2k+1}.\n\n chat, shat, alphahat = _sym_ortho(alphabar, damp)\n\n # Use a plane rotation (Q_i) to turn B_i to R_i\n\n rhoold = rho\n c, s, rho = _sym_ortho(alphahat, beta)\n thetanew = s*alpha\n alphabar = c*alpha\n\n # Use a plane rotation (Qbar_i) to turn R_i^T to R_i^bar\n\n rhobarold = rhobar\n zetaold = zeta\n thetabar = sbar * rho\n rhotemp = cbar * rho\n cbar, sbar, rhobar = _sym_ortho(cbar * rho, thetanew)\n zeta = cbar * zetabar\n zetabar = - sbar * zetabar\n\n # Update h, h_hat, x.\n\n hbar = h - (thetabar * rho / (rhoold * rhobarold)) * hbar\n x = x + (zeta / (rho * rhobar)) * hbar\n h = v - (thetanew / rho) * h\n\n # Estimate of ||r||.\n\n # Apply rotation Qhat_{k,2k+1}.\n betaacute = chat * betadd\n betacheck = -shat * betadd\n\n # Apply rotation Q_{k,k+1}.\n betahat = c * betaacute\n betadd = -s * betaacute\n\n # Apply rotation Qtilde_{k-1}.\n # betad = betad_{k-1} here.\n\n thetatildeold = thetatilde\n ctildeold, stildeold, rhotildeold = _sym_ortho(rhodold, thetabar)\n thetatilde = stildeold* rhobar\n rhodold = ctildeold * rhobar\n betad = - stildeold * betad + ctildeold * betahat\n\n # betad = betad_k here.\n # rhodold = rhod_k here.\n\n tautildeold = (zetaold - thetatildeold * tautildeold) / rhotildeold\n taud = (zeta - thetatilde * tautildeold) / rhodold\n d = d + betacheck * betacheck\n normr = sqrt(d + (betad - taud)**2 + betadd * betadd)\n\n # Estimate ||A||.\n normA2 = normA2 + beta * beta\n normA = sqrt(normA2)\n normA2 = normA2 + alpha * alpha\n\n # Estimate cond(A).\n maxrbar = max(maxrbar, rhobarold)\n if itn > 1:\n minrbar= min(minrbar, rhobarold)\n condA = max(maxrbar, rhotemp) / min(minrbar, rhotemp)\n\n # Test for convergence.\n\n # Compute norms for convergence testing.\n normar = abs(zetabar)\n normx = norm(x)\n\n # Now use these norms to estimate certain other quantities,\n # some of which will be small near a solution.\n\n test1 = normr / normb\n if (normA * normr) != 0:\n test2 = normar / (normA * normr)\n else:\n test2 = infty\n test3 = 1 / condA\n t1 = test1 / (1 + normA * normx / normb)\n rtol = btol + atol * normA * normx / normb\n\n # The following tests guard against extremely small values of\n # atol, btol or ctol. 
(The user may have set any or all of\n # the parameters atol, btol, conlim to 0.)\n # The effect is equivalent to the normAl tests using\n # atol = eps, btol = eps, conlim = 1/eps.\n\n if itn >= maxiter:\n istop = 7\n if 1 + test3 <= 1:\n istop = 6\n if 1 + test2 <= 1:\n istop = 5\n if 1 + t1 <= 1:\n istop = 4\n\n # Allow for tolerances set by the user.\n\n if test3 <= ctol:\n istop = 3\n if test2 <= atol:\n istop = 2\n if test1 <= rtol:\n istop = 1\n\n # See if it is time to print something.\n\n if show:\n if (n <= 40) or (itn <= 10) or (itn >= maxiter - 10) or \\\n (itn % 10 == 0) or (test3 <= 1.1 * ctol) or \\\n (test2 <= 1.1 * atol) or (test1 <= 1.1 * rtol) or \\\n (istop != 0):\n\n if pcount >= pfreq:\n pcount = 0\n print(' ')\n print(hdg1, hdg2)\n pcount = pcount + 1\n str1 = '%6g %12.5e' % (itn, x[0])\n str2 = ' %10.3e %10.3e' % (normr, normar)\n str3 = ' %8.1e %8.1e' % (test1, test2)\n str4 = ' %8.1e %8.1e' % (normA, condA)\n print(''.join([str1, str2, str3, str4]))\n\n if istop > 0:\n break\n\n # Print the stopping condition.\n\n if show:\n print(' ')\n print('LSMR finished')\n print(msg[istop])\n print('istop =%8g normr =%8.1e' % (istop, normr))\n print(' normA =%8.1e normAr =%8.1e' % (normA, normar))\n print('itn =%8g condA =%8.1e' % (itn, condA))\n print(' normx =%8.1e' % (normx))\n print(str1, str2)\n print(str3, str4)\n\n return x, istop, itn, normr, normar, normA, condA, normx\n",
"\"\"\"\n=============================================================\nSpatial algorithms and data structures (:mod:`scipy.spatial`)\n=============================================================\n\nNearest-neighbor Queries\n========================\n.. autosummary::\n :toctree: generated/\n\n KDTree - *Class* for efficient nearest-neighbor queries\n cKDTree -- class for efficient nearest-neighbor queries (faster impl.)\n distance - *Module* containing many different distance measures\n\nDelaunay Triangulation, Convex Hulls and Voronoi Diagrams\n=========================================================\n\n.. autosummary::\n :toctree: generated/\n\n Delaunay -- compute Delaunay triangulation of input points\n ConvexHull -- compute a convex hull for input points\n Voronoi -- compute a Voronoi diagram hull from input points\n\nPlotting Helpers\n================\n\n.. autosummary::\n :toctree: generated/\n\n delaunay_plot_2d -- plot 2-D triangulation\n convex_hull_plot_2d -- plot 2-D convex hull\n voronoi_plot_2d -- plot 2-D voronoi diagram\n\n.. seealso:: :ref:`Tutorial <qhulltutorial>`\n\n\nSimplex representation\n======================\nThe simplices (triangles, tetrahedra, ...) appearing in the Delaunay\ntesselation (N-dim simplices), convex hull facets, and Voronoi ridges\n(N-1 dim simplices) are represented in the following scheme::\n\n tess = Delaunay(points)\n hull = ConvexHull(points)\n voro = Voronoi(points)\n\n # coordinates of the j-th vertex of the i-th simplex\n tess.points[tess.simplices[i, j], :] # tesselation element\n hull.points[hull.simplices[i, j], :] # convex hull facet\n voro.vertices[voro.ridge_vertices[i, j], :] # ridge between Voronoi cells\n\nFor Delaunay triangulations and convex hulls, the neighborhood\nstructure of the simplices satisfies the condition:\n\n ``tess.neighbors[i,j]`` is the neighboring simplex of the i-th\n simplex, opposite to the j-vertex. It is -1 in case of no\n neighbor.\n\nConvex hull facets also define a hyperplane equation:\n\n (hull.equations[i,:-1] * coord).sum() + hull.equations[i,-1] == 0\n\nSimilar hyperplane equations for the Delaunay triangulation correspond\nto the convex hull facets on the corresponding N+1 dimensional\nparaboloid.\n\nThe Delaunay triangulation objects offer a method for locating the\nsimplex containing a given point, and barycentric coordinate\ncomputations.\n\nFunctions\n---------\n\n.. autosummary::\n :toctree: generated/\n\n tsearch\n distance_matrix\n minkowski_distance\n minkowski_distance_p\n\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\nfrom .kdtree import *\nfrom .ckdtree import *\nfrom .qhull import *\nfrom ._plotutils import *\n\n__all__ = [s for s in dir() if not s.startswith('_')]\n__all__ += ['distance']\n\nfrom . import distance\nfrom numpy.testing import Tester\ntest = Tester().test\nbench = Tester().bench\n",
"from __future__ import division, print_function, absolute_import\n\nimport warnings\n\nimport numpy as np\nfrom numpy.testing import TestCase, assert_array_almost_equal, \\\n assert_array_equal, assert_raises, assert_equal, assert_, \\\n run_module_suite\n\nfrom scipy.signal import tf2zpk, zpk2tf, BadCoefficients, freqz, normalize\n\n\nclass TestTf2zpk(TestCase):\n\n def test_simple(self):\n z_r = np.array([0.5, -0.5])\n p_r = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)])\n # Sort the zeros/poles so that we don't fail the test if the order\n # changes\n z_r.sort()\n p_r.sort()\n b = np.poly(z_r)\n a = np.poly(p_r)\n\n z, p, k = tf2zpk(b, a)\n z.sort()\n p.sort()\n assert_array_almost_equal(z, z_r)\n assert_array_almost_equal(p, p_r)\n\n def test_bad_filter(self):\n \"\"\"Regression test for #651: better handling of badly conditioned\n filter coefficients.\"\"\"\n warnings.simplefilter(\"error\", BadCoefficients)\n try:\n assert_raises(BadCoefficients, tf2zpk, [1e-15], [1.0, 1.0])\n finally:\n warnings.simplefilter(\"always\", BadCoefficients)\n\n\nclass TestZpk2Tf(TestCase):\n\n def test_identity(self):\n \"\"\"Test the identity transfer function.\"\"\"\n z = []\n p = []\n k = 1.\n b, a = zpk2tf(z, p, k)\n b_r = np.array([1.]) # desired result\n a_r = np.array([1.]) # desired result\n # The test for the *type* of the return values is a regression\n # test for ticket #1095. In the case p=[], zpk2tf used to\n # return the scalar 1.0 instead of array([1.0]).\n assert_array_equal(b, b_r)\n assert_(isinstance(b, np.ndarray))\n assert_array_equal(a, a_r)\n assert_(isinstance(a, np.ndarray))\n\n\nclass TestFreqz(TestCase):\n\n def test_ticket1441(self):\n \"\"\"Regression test for ticket 1441.\"\"\"\n # Because freqz previously used arange instead of linspace,\n # when N was large, it would return one more point than\n # requested.\n N = 100000\n w, h = freqz([1.0], worN=N)\n assert_equal(w.shape, (N,))\n\n def test_basic(self):\n w, h = freqz([1.0], worN=8)\n assert_array_almost_equal(w, np.pi * np.arange(8.0) / 8)\n assert_array_almost_equal(h, np.ones(8))\n\n def test_basic_whole(self):\n w, h = freqz([1.0], worN=8, whole=True)\n assert_array_almost_equal(w, 2 * np.pi * np.arange(8.0) / 8)\n assert_array_almost_equal(h, np.ones(8))\n\n def test_plot(self):\n\n def plot(w, h):\n assert_array_almost_equal(w, np.pi * np.arange(8.0) / 8)\n assert_array_almost_equal(h, np.ones(8))\n\n assert_raises(ZeroDivisionError,\n freqz, [1.0], worN=8, plot=lambda w, h: 1 / 0)\n freqz([1.0], worN=8, plot=plot)\n\nclass TestNormalize(TestCase):\n\n def test_allclose(self):\n \"\"\"Test for false positive on allclose in normalize() in\n filter_design.py\"\"\"\n # Test to make sure the allclose call within signal.normalize does not\n # choose false positives. Then check against a known output from MATLAB\n # to make sure the fix doesn't break anything.\n \n # These are the coefficients returned from\n # `[b,a] = cheby1(8, 0.5, 0.048)'\n # in MATLAB. 
There are at least 15 significant figures in each\n # coefficient, so it makes sense to test for errors on the order of\n # 1e-13 (this can always be relaxed if different platforms have\n # different rounding errors)\n b_matlab = np.array([2.150733144728282e-11, 1.720586515782626e-10,\n 6.022052805239190e-10, 1.204410561047838e-09,\n 1.505513201309798e-09, 1.204410561047838e-09,\n 6.022052805239190e-10, 1.720586515782626e-10,\n 2.150733144728282e-11])\n a_matlab = np.array([1.000000000000000e+00, -7.782402035027959e+00,\n 2.654354569747454e+01, -5.182182531666387e+01,\n 6.334127355102684e+01, -4.963358186631157e+01,\n 2.434862182949389e+01, -6.836925348604676e+00,\n 8.412934944449140e-01])\n \n # This is the input to signal.normalize after passing through the\n # equivalent steps in signal.iirfilter as was done for MATLAB\n b_norm_in = np.array([1.5543135865293012e-06, 1.2434508692234413e-05,\n 4.3520780422820447e-05, 8.7041560845640893e-05,\n 1.0880195105705122e-04, 8.7041560845640975e-05,\n 4.3520780422820447e-05, 1.2434508692234413e-05,\n 1.5543135865293012e-06])\n a_norm_in = np.array([7.2269025909127173e+04, -5.6242661430467968e+05,\n 1.9182761917308895e+06, -3.7451128364682454e+06,\n 4.5776121393762771e+06, -3.5869706138592605e+06,\n 1.7596511818472347e+06, -4.9409793515707983e+05,\n 6.0799461347219651e+04])\n \n b_output, a_output = normalize(b_norm_in, a_norm_in)\n \n # The test on b works for decimal=14 but the one for a does not. For\n # the sake of consistency, both of these are decimal=13. If something\n # breaks on another platform, it is probably fine to relax this lower.\n assert_array_almost_equal(b_matlab, b_output, decimal=13)\n assert_array_almost_equal(a_matlab, a_output, decimal=13)\n\nif __name__ == \"__main__\":\n run_module_suite()\n",
"\"\"\"LInked List sparse matrix class\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\n__docformat__ = \"restructuredtext en\"\n\n__all__ = ['lil_matrix','isspmatrix_lil']\n\nfrom bisect import bisect_left\n\nimport numpy as np\nfrom scipy.lib.six.moves import xrange\n\nfrom .base import spmatrix, isspmatrix\nfrom .sputils import getdtype, isshape, issequence, isscalarlike\n\nfrom warnings import warn\nfrom .base import SparseEfficiencyWarning\n\n\nclass lil_matrix(spmatrix):\n \"\"\"Row-based linked list sparse matrix\n\n This is an efficient structure for constructing sparse\n matrices incrementally.\n\n This can be instantiated in several ways:\n lil_matrix(D)\n with a dense matrix or rank-2 ndarray D\n\n lil_matrix(S)\n with another sparse matrix S (equivalent to S.tolil())\n\n lil_matrix((M, N), [dtype])\n to construct an empty matrix with shape (M, N)\n dtype is optional, defaulting to dtype='d'.\n\n Attributes\n ----------\n dtype : dtype\n Data type of the matrix\n shape : 2-tuple\n Shape of the matrix\n ndim : int\n Number of dimensions (this is always 2)\n nnz\n Number of nonzero elements\n data\n LIL format data array of the matrix\n rows\n LIL format row index array of the matrix\n\n Notes\n -----\n\n Sparse matrices can be used in arithmetic operations: they support\n addition, subtraction, multiplication, division, and matrix power.\n\n Advantages of the LIL format\n - supports flexible slicing\n - changes to the matrix sparsity structure are efficient\n\n Disadvantages of the LIL format\n - arithmetic operations LIL + LIL are slow (consider CSR or CSC)\n - slow column slicing (consider CSC)\n - slow matrix vector products (consider CSR or CSC)\n\n Intended Usage\n - LIL is a convenient format for constructing sparse matrices\n - once a matrix has been constructed, convert to CSR or\n CSC format for fast arithmetic and matrix vector operations\n - consider using the COO format when constructing large matrices\n\n Data Structure\n - An array (``self.rows``) of rows, each of which is a sorted\n list of column indices of non-zero elements.\n - The corresponding nonzero values are stored in similar\n fashion in ``self.data``.\n\n\n \"\"\"\n\n def __init__(self, arg1, shape=None, dtype=None, copy=False):\n spmatrix.__init__(self)\n self.dtype = getdtype(dtype, arg1, default=float)\n\n # First get the shape\n if isspmatrix(arg1):\n if isspmatrix_lil(arg1) and copy:\n A = arg1.copy()\n else:\n A = arg1.tolil()\n\n if dtype is not None:\n A = A.astype(dtype)\n\n self.shape = A.shape\n self.dtype = A.dtype\n self.rows = A.rows\n self.data = A.data\n elif isinstance(arg1,tuple):\n if isshape(arg1):\n if shape is not None:\n raise ValueError('invalid use of shape parameter')\n M, N = arg1\n self.shape = (M,N)\n self.rows = np.empty((M,), dtype=object)\n self.data = np.empty((M,), dtype=object)\n for i in range(M):\n self.rows[i] = []\n self.data[i] = []\n else:\n raise TypeError('unrecognized lil_matrix constructor usage')\n else:\n #assume A is dense\n try:\n A = np.asmatrix(arg1)\n except TypeError:\n raise TypeError('unsupported matrix type')\n else:\n from .csr import csr_matrix\n A = csr_matrix(A, dtype=dtype).tolil()\n\n self.shape = A.shape\n self.dtype = A.dtype\n self.rows = A.rows\n self.data = A.data\n\n def __iadd__(self,other):\n self[:,:] = self + other\n return self\n\n def __isub__(self,other):\n self[:,:] = self - other\n return self\n\n def __imul__(self,other):\n if isscalarlike(other):\n self[:,:] = self * other\n return self\n else:\n 
raise NotImplementedError\n\n def __itruediv__(self,other):\n if isscalarlike(other):\n self[:,:] = self / other\n return self\n else:\n raise NotImplementedError\n\n # Whenever the dimensions change, empty lists should be created for each\n # row\n\n def getnnz(self):\n return sum([len(rowvals) for rowvals in self.data])\n nnz = property(fget=getnnz)\n\n def __str__(self):\n val = ''\n for i, row in enumerate(self.rows):\n for pos, j in enumerate(row):\n val += \" %s\\t%s\\n\" % (str((i, j)), str(self.data[i][pos]))\n return val[:-1]\n\n def getrowview(self, i):\n \"\"\"Returns a view of the 'i'th row (without copying).\n \"\"\"\n new = lil_matrix((1, self.shape[1]), dtype=self.dtype)\n new.rows[0] = self.rows[i]\n new.data[0] = self.data[i]\n return new\n\n def getrow(self, i):\n \"\"\"Returns a copy of the 'i'th row.\n \"\"\"\n new = lil_matrix((1, self.shape[1]), dtype=self.dtype)\n new.rows[0] = self.rows[i][:]\n new.data[0] = self.data[i][:]\n return new\n\n def _get1(self, i, j):\n\n if i < 0:\n i += self.shape[0]\n if i < 0 or i >= self.shape[0]:\n raise IndexError('row index out of bounds')\n\n if j < 0:\n j += self.shape[1]\n if j < 0 or j >= self.shape[1]:\n raise IndexError('column index out of bounds')\n\n row = self.rows[i]\n data = self.data[i]\n\n pos = bisect_left(row, j)\n if pos != len(data) and row[pos] == j:\n return self.dtype.type(data[pos])\n else:\n return self.dtype.type(0)\n\n def _slicetoseq(self, j, shape):\n if j.start is not None and j.start < 0:\n start = shape + j.start\n elif j.start is None:\n start = 0\n else:\n start = j.start\n if j.stop is not None and j.stop < 0:\n stop = shape + j.stop\n elif j.stop is None:\n stop = shape\n else:\n stop = j.stop\n j = list(range(start, stop, j.step or 1))\n return j\n\n\n def __getitem__(self, index):\n \"\"\"Return the element(s) index=(i, j), where j may be a slice.\n This always returns a copy for consistency, since slices into\n Python lists return copies.\n \"\"\"\n try:\n i, j = index\n except (AssertionError, TypeError):\n raise IndexError('invalid index')\n\n if not np.isscalar(i) and np.isscalar(j):\n warn('Indexing into a lil_matrix with multiple indices is slow. '\n 'Pre-converting to CSC or CSR beforehand is more efficient.',\n SparseEfficiencyWarning)\n\n if np.isscalar(i):\n if np.isscalar(j):\n return self._get1(i, j)\n if isinstance(j, slice):\n j = self._slicetoseq(j, self.shape[1])\n if issequence(j):\n return self.__class__([[self._get1(i, jj) for jj in j]])\n elif issequence(i) and issequence(j):\n return self.__class__([[self._get1(ii, jj) for (ii, jj) in zip(i, j)]])\n elif issequence(i) or isinstance(i, slice):\n if isinstance(i, slice):\n i = self._slicetoseq(i, self.shape[0])\n if np.isscalar(j):\n return self.__class__([[self._get1(ii, j)] for ii in i])\n if isinstance(j, slice):\n j = self._slicetoseq(j, self.shape[1])\n if issequence(j):\n return self.__class__([[self._get1(ii, jj) for jj in j] for ii in i])\n else:\n raise IndexError\n\n def _insertat2(self, row, data, j, x):\n \"\"\" helper for __setitem__: insert a value in the given row/data at\n column j. 
\"\"\"\n\n if j < 0: #handle negative column indices\n j += self.shape[1]\n\n if j < 0 or j >= self.shape[1]:\n raise IndexError('column index out of bounds')\n\n if not np.isscalar(x):\n raise ValueError('setting an array element with a sequence')\n\n try:\n x = self.dtype.type(x)\n except:\n raise TypeError('Unable to convert value (%s) to dtype [%s]' % (x,self.dtype.name))\n\n pos = bisect_left(row, j)\n if x != 0:\n if pos == len(row):\n row.append(j)\n data.append(x)\n elif row[pos] != j:\n row.insert(pos, j)\n data.insert(pos, x)\n else:\n data[pos] = x\n else:\n if pos < len(row) and row[pos] == j:\n del row[pos]\n del data[pos]\n\n def _setitem_setrow(self, row, data, j, xrow, xdata, xcols):\n if isinstance(j, slice):\n j = self._slicetoseq(j, self.shape[1])\n if issequence(j):\n if xcols == len(j):\n for jj, xi in zip(j, xrange(xcols)):\n pos = bisect_left(xrow, xi)\n if pos != len(xdata) and xrow[pos] == xi:\n self._insertat2(row, data, jj, xdata[pos])\n else:\n self._insertat2(row, data, jj, 0)\n elif xcols == 1: # OK, broadcast across row\n if len(xdata) > 0 and xrow[0] == 0:\n val = xdata[0]\n else:\n val = 0\n for jj in j:\n self._insertat2(row, data, jj,val)\n else:\n raise IndexError('invalid index')\n elif np.isscalar(j):\n if not xcols == 1:\n raise ValueError('array dimensions are not compatible for copy')\n if len(xdata) > 0 and xrow[0] == 0:\n self._insertat2(row, data, j, xdata[0])\n else:\n self._insertat2(row, data, j, 0)\n else:\n raise ValueError('invalid column value: %s' % str(j))\n\n def __setitem__(self, index, x):\n try:\n i, j = index\n except (ValueError, TypeError):\n raise IndexError('invalid index')\n\n # shortcut for common case of single entry assign:\n if np.isscalar(x) and np.isscalar(i) and np.isscalar(j):\n self._insertat2(self.rows[i], self.data[i], j, x)\n return\n\n # shortcut for common case of full matrix assign:\n if isspmatrix(x):\n if isinstance(i, slice) and i == slice(None) and \\\n isinstance(j, slice) and j == slice(None):\n x = lil_matrix(x, dtype=self.dtype)\n self.rows = x.rows\n self.data = x.data\n return\n\n if isinstance(i, tuple): # can't index lists with tuple\n i = list(i)\n\n if np.isscalar(i):\n rows = [self.rows[i]]\n datas = [self.data[i]]\n else:\n rows = self.rows[i]\n datas = self.data[i]\n\n x = lil_matrix(x, copy=False)\n xrows, xcols = x.shape\n if xrows == len(rows): # normal rectangular copy\n for row, data, xrow, xdata in zip(rows, datas, x.rows, x.data):\n self._setitem_setrow(row, data, j, xrow, xdata, xcols)\n elif xrows == 1: # OK, broadcast down column\n for row, data in zip(rows, datas):\n self._setitem_setrow(row, data, j, x.rows[0], x.data[0], xcols)\n\n # needed to pass 'test_lil_sequence_assignement' unit test:\n # -- set row from column of entries --\n elif xcols == len(rows):\n x = x.T\n for row, data, xrow, xdata in zip(rows, datas, x.rows, x.data):\n self._setitem_setrow(row, data, j, xrow, xdata, xrows)\n else:\n raise IndexError('invalid index')\n\n def _mul_scalar(self, other):\n if other == 0:\n # Multiply by zero: return the zero matrix\n new = lil_matrix(self.shape, dtype=self.dtype)\n else:\n new = self.copy()\n # Multiply this scalar by every element.\n new.data[:] = [[val*other for val in rowvals] for\n rowvals in new.data]\n return new\n\n def __truediv__(self, other): # self / other\n if isscalarlike(other):\n new = self.copy()\n # Divide every element by this scalar\n new.data = [[val/other for val in rowvals] for\n rowvals in new.data]\n return new\n else:\n return self.tocsr() / 
other\n\n## This code doesn't work with complex matrices\n# def multiply(self, other):\n# \"\"\"Point-wise multiplication by another lil_matrix.\n#\n# \"\"\"\n# if np.isscalar(other):\n# return self.__mul__(other)\n#\n# if isspmatrix_lil(other):\n# reference,target = self,other\n#\n# if reference.shape != target.shape:\n# raise ValueError(\"Dimensions do not match.\")\n#\n# if len(reference.data) > len(target.data):\n# reference,target = target,reference\n#\n# new = lil_matrix(reference.shape)\n# for r,row in enumerate(reference.rows):\n# tr = target.rows[r]\n# td = target.data[r]\n# rd = reference.data[r]\n# L = len(tr)\n# for c,column in enumerate(row):\n# ix = bisect_left(tr,column)\n# if ix < L and tr[ix] == column:\n# new.rows[r].append(column)\n# new.data[r].append(rd[c] * td[ix])\n# return new\n# else:\n# raise ValueError(\"Point-wise multiplication only allowed \"\n# \"with another lil_matrix.\")\n\n def copy(self):\n from copy import deepcopy\n new = lil_matrix(self.shape, dtype=self.dtype)\n new.data = deepcopy(self.data)\n new.rows = deepcopy(self.rows)\n return new\n\n def reshape(self,shape):\n new = lil_matrix(shape, dtype=self.dtype)\n j_max = self.shape[1]\n for i,row in enumerate(self.rows):\n for col,j in enumerate(row):\n new_r,new_c = np.unravel_index(i*j_max + j,shape)\n new[new_r,new_c] = self[i,j]\n return new\n\n def toarray(self, order=None, out=None):\n \"\"\"See the docstring for `spmatrix.toarray`.\"\"\"\n d = self._process_toarray_args(order, out)\n for i, row in enumerate(self.rows):\n for pos, j in enumerate(row):\n d[i, j] = self.data[i][pos]\n return d\n\n def transpose(self):\n return self.tocsr().transpose().tolil()\n\n def tolil(self, copy=False):\n if copy:\n return self.copy()\n else:\n return self\n\n def tocsr(self):\n \"\"\" Return Compressed Sparse Row format arrays for this matrix.\n \"\"\"\n\n indptr = np.asarray([len(x) for x in self.rows], dtype=np.intc)\n indptr = np.concatenate( (np.array([0], dtype=np.intc), np.cumsum(indptr)) )\n\n nnz = indptr[-1]\n\n indices = []\n for x in self.rows:\n indices.extend(x)\n indices = np.asarray(indices, dtype=np.intc)\n\n data = []\n for x in self.data:\n data.extend(x)\n data = np.asarray(data, dtype=self.dtype)\n\n from .csr import csr_matrix\n return csr_matrix((data, indices, indptr), shape=self.shape)\n\n def tocsc(self):\n \"\"\" Return Compressed Sparse Column format arrays for this matrix.\n \"\"\"\n return self.tocsr().tocsc()\n\n\ndef isspmatrix_lil( x ):\n return isinstance(x, lil_matrix)\n",
"\"\"\"\nbasinhopping: The basinhopping global optimization algorithm\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\nfrom numpy import cos, sin\nimport scipy.optimize\nimport collections\n\n__all__ = ['basinhopping']\n\n\nclass Storage(object):\n def __init__(self, x, f):\n \"\"\"\n Class used to store the lowest energy structure\n \"\"\"\n self._add(x, f)\n\n def _add(self, x, f):\n self.x = np.copy(x)\n self.f = f\n\n def update(self, x, f):\n if f < self.f:\n self._add(x, f)\n return True\n else:\n return False\n\n def get_lowest(self):\n return self.x, self.f\n\n\nclass BasinHoppingRunner(object):\n def __init__(self, x0, minimizer, step_taking, accept_tests, disp=False):\n self.x = np.copy(x0)\n self.minimizer = minimizer\n self.step_taking = step_taking\n self.accept_tests = accept_tests\n self.disp = disp\n\n self.nstep = 0\n\n #do initial minimization\n minres = minimizer(self.x)\n self.x = np.copy(minres.x)\n self.energy = minres.fun\n if self.disp:\n print(\"basinhopping step %d: f %g\" % (self.nstep, self.energy))\n\n #initialize storage class\n self.storage = Storage(self.x, self.energy)\n\n #initialize return object\n self.res = scipy.optimize.Result()\n if hasattr(minres, \"nfev\"):\n self.res.nfev = minres.nfev\n if hasattr(minres, \"njev\"):\n self.res.njev = minres.njev\n if hasattr(minres, \"nhev\"):\n self.res.nhev = minres.nhev\n\n def _monte_carlo_step(self):\n #Take a random step. Make a copy of x because the step_taking\n #algorithm might change x in place\n x_after_step = np.copy(self.x)\n x_after_step = self.step_taking(x_after_step)\n\n #do a local minimization\n minres = self.minimizer(x_after_step)\n x_after_quench = minres.x\n energy_after_quench = minres.fun\n if hasattr(minres, \"success\"):\n if not minres.success and self.disp:\n print(\"warning: basinhoppping: local minimization failure\")\n if hasattr(minres, \"nfev\"):\n self.res.nfev += minres.nfev\n if hasattr(minres, \"njev\"):\n self.res.njev += minres.njev\n if hasattr(minres, \"nhev\"):\n self.res.nhev += minres.nhev\n\n #accept the move based on self.accept_tests. If any test is false, than\n #reject the step. If any test returns the special value, the\n #string 'force accept', accept the step regardless.\n #This can be used to forcefully escape from a local minima if normal\n #basin hopping steps are not sufficient.\n accept = True\n for test in self.accept_tests:\n testres = test(f_new=energy_after_quench, x_new=x_after_quench,\n f_old=self.energy, x_old=self.x)\n if isinstance(testres, bool):\n if not testres:\n accept = False\n elif isinstance(testres, str):\n if testres == \"force accept\":\n accept = True\n break\n else:\n raise ValueError(\"accept test must return bool or string \"\n \"'force accept'. Type is\", type(testres))\n else:\n raise ValueError(\"accept test must return bool or string \"\n \"'force accept'. Type is\", type(testres))\n\n #Report the result of the acceptance test to the take step class. 
This\n #is for adaptive step taking\n if hasattr(self.step_taking, \"report\"):\n self.step_taking.report(accept, f_new=energy_after_quench,\n x_new=x_after_quench, f_old=self.energy,\n x_old=self.x)\n\n return x_after_quench, energy_after_quench, accept\n\n def one_cycle(self):\n self.nstep += 1\n new_global_min = False\n\n xtrial, energy_trial, accept = self._monte_carlo_step()\n\n if accept:\n self.energy = energy_trial\n self.x = np.copy(xtrial)\n new_global_min = self.storage.update(self.x, self.energy)\n\n #print some information\n if self.disp:\n self.print_report(energy_trial, accept)\n if new_global_min:\n print(\"found new global minimum on step %d with function\"\n \" value %g\" % (self.nstep, self.energy))\n\n #save some variables as BasinHoppingRunner attributes\n self.xtrial = xtrial\n self.energy_trial = energy_trial\n self.accept = accept\n\n return new_global_min\n\n def print_report(self, energy_trial, accept):\n xlowest, energy_lowest = self.storage.get_lowest()\n print(\"basinhopping step %d: f %g trial_f %g accepted %d \"\n \" lowest_f %g\" % (self.nstep, self.energy, energy_trial,\n accept, energy_lowest))\n\n\nclass AdaptiveStepsize(object):\n \"\"\"\n Class to implement adaptive stepsize.\n\n This class wraps the step taking class and modifies the stepsize to\n ensure the true acceptance rate is as close as possible to the target.\n\n Parameters\n ----------\n takestep : callable\n The step taking routine. Must contain modifiable attribute\n takestep.stepsize\n accept_rate : float, optional\n The target step acceptance rate\n interval : int, optional\n Interval for how often to update the stepsize\n factor : float, optional\n The step size is multiplied or divided by this factor upon each\n update.\n verbose : bool, optional\n Print information about each update\n\n \"\"\"\n def __init__(self, takestep, accept_rate=0.5, interval=50, factor=0.9,\n verbose=True):\n self.takestep = takestep\n self.target_accept_rate = accept_rate\n self.interval = interval\n self.factor = factor\n self.verbose = verbose\n\n self.nstep = 0\n self.nstep_tot = 0\n self.naccept = 0\n\n def __call__(self, x):\n return self.take_step(x)\n\n def _adjust_step_size(self):\n old_stepsize = self.takestep.stepsize\n accept_rate = float(self.naccept) / self.nstep\n if accept_rate > self.target_accept_rate:\n #We're accepting too many steps. This generally means we're\n #trapped in a basin. Take bigger steps\n self.takestep.stepsize /= self.factor\n else:\n #We're not accepting enough steps. 
Take smaller steps\n self.takestep.stepsize *= self.factor\n if self.verbose:\n print(\"adaptive stepsize: acceptance rate %f target %f new \"\n \"stepsize %g old stepsize %g\" % (accept_rate,\n self.target_accept_rate, self.takestep.stepsize,\n old_stepsize))\n\n def take_step(self, x):\n self.nstep += 1\n self.nstep_tot += 1\n if self.nstep % self.interval == 0:\n self._adjust_step_size()\n return self.takestep(x)\n\n def report(self, accept, **kwargs):\n \"called by basinhopping to report the result of the step\"\n if accept:\n self.naccept += 1\n\n\nclass RandomDisplacement(object):\n \"\"\"\n Add a random displacement of maximum size, stepsize, to the coordinates\n\n update x inplace\n \"\"\"\n def __init__(self, stepsize=0.5):\n self.stepsize = stepsize\n\n def __call__(self, x):\n x += np.random.uniform(-self.stepsize, self.stepsize, np.shape(x))\n return x\n\n\nclass MinimizerWrapper(object):\n \"\"\"\n wrap a minimizer function as a minimizer class\n \"\"\"\n def __init__(self, minimizer, func=None, **kwargs):\n self.minimizer = minimizer\n self.func = func\n self.kwargs = kwargs\n\n def __call__(self, x0):\n if self.func is None:\n return self.minimizer(x0, **self.kwargs)\n else:\n return self.minimizer(self.func, x0, **self.kwargs)\n\n\nclass Metropolis(object):\n \"\"\"\n Metropolis acceptance criterion\n \"\"\"\n def __init__(self, T):\n self.beta = 1.0 / T\n\n def accept_reject(self, energy_new, energy_old):\n w = min(1.0, np.exp(-(energy_new - energy_old) * self.beta))\n rand = np.random.rand()\n return w >= rand\n\n def __call__(self, **kwargs):\n \"\"\"\n f_new and f_old are mandatory in kwargs\n \"\"\"\n return bool(self.accept_reject(kwargs[\"f_new\"],\n kwargs[\"f_old\"]))\n\n\ndef basinhopping(func, x0, niter=100, T=1.0, stepsize=0.5,\n minimizer_kwargs=None, take_step=None, accept_test=None,\n callback=None, interval=50, disp=False, niter_success=None):\n \"\"\"\n Find the global minimum of a function using the basin-hopping algorithm\n\n .. versionadded:: 0.12.0\n\n Parameters\n ----------\n func : callable ``f(x, *args)``\n Function to be optimized. ``args`` can be passed as an optional item\n in the dict ``minimizer_kwargs``\n x0 : ndarray\n Initial guess.\n niter : integer, optional\n The number of basin hopping iterations\n T : float, optional\n The \"temperature\" parameter for the accept or reject criterion. Higher\n \"temperatures\" mean that larger jumps in function value will be\n accepted. For best results ``T`` should be comparable to the\n separation\n (in function value) between local minima.\n stepsize : float, optional\n initial step size for use in the random displacement.\n minimizer_kwargs : dict, optional\n Extra keyword arguments to be passed to the minimizer\n ``scipy.optimize.minimize()`` Some important options could be:\n method : str\n The minimization method (e.g. ``\"L-BFGS-B\"``)\n args : tuple\n Extra arguments passed to the objective function (``func``) and\n its derivatives (Jacobian, Hessian).\n\n take_step : callable ``take_step(x)``, optional\n Replace the default step taking routine with this routine. 
The default\n step taking routine is a random displacement of the coordinates, but\n other step taking algorithms may be better for some systems.\n ``take_step`` can optionally have the attribute ``take_step.stepsize``.\n If this attribute exists, then ``basinhopping`` will adjust\n ``take_step.stepsize`` in order to try to optimize the global minimum\n search.\n accept_test : callable, ``accept_test(f_new=f_new, x_new=x_new, f_old=fold, x_old=x_old)``, optional\n Define a test which will be used to judge whether or not to accept the\n step. This will be used in addition to the Metropolis test based on\n \"temperature\" ``T``. The acceptable return values are ``True``,\n ``False``, or ``\"force accept\"``. If the latter, then this will\n override any other tests in order to accept the step. This can be\n used, for example, to forcefully escape from a local minimum that\n ``basinhopping`` is trapped in.\n callback : callable, ``callback(x, f, accept)``, optional\n A callback function which will be called for all minimum found. ``x``\n and ``f`` are the coordinates and function value of the trial minima,\n and ``accept`` is whether or not that minima was accepted. This can be\n used, for example, to save the lowest N minima found. Also,\n ``callback`` can be used to specify a user defined stop criterion by\n optionally returning ``True`` to stop the ``basinhopping`` routine.\n interval : integer, optional\n interval for how often to update the ``stepsize``\n disp : bool, optional\n Set to ``True`` to print status messages\n niter_success : integer, optional\n Stop the run if the global minimum candidate remains the same for this\n number of iterations.\n\n\n Returns\n -------\n res : Result\n The optimization result represented as a ``Result`` object. Important\n attributes are: ``x`` the solution array, ``fun`` the value of the\n function at the solution, and ``message`` which describes the cause of\n the termination. See `Result` for a description of other attributes.\n\n See Also\n --------\n minimize :\n The local minimization function called once for each basinhopping step.\n ``minimizer_kwargs`` is passed to this routine.\n\n Notes\n -----\n Basin-hopping is a stochastic algorithm which attempts to find the global\n minimum of a smooth scalar function of one or more variables [1]_ [2]_ [3]_\n [4]_. The algorithm in its current form was described by David Wales and\n Jonathan Doye [2]_ http://www-wales.ch.cam.ac.uk/.\n\n The algorithm is iterative with each cycle composed of the following\n features\n\n 1) random perturbation of the coordinates\n\n 2) local minimization\n\n 3) accept or reject the new coordinates based on the minimized function\n value\n\n The acceptance test used here is the Metropolis criterion of standard Monte\n Carlo algorithms, although there are many other possibilities [3]_.\n\n This global minimization method has been shown to be extremely efficient\n for a wide variety of problems in physics and chemistry. It is\n particularly useful when the function has many minima separated by large\n barriers. See the Cambridge Cluster Database\n http://www-wales.ch.cam.ac.uk/CCD.html for databases of molecular systems\n that have been optimized primarily using basin-hopping. This database\n includes minimization problems exceeding 300 degrees of freedom.\n\n See the free software program GMIN (http://www-wales.ch.cam.ac.uk/GMIN) for\n a Fortran implementation of basin-hopping. 
This implementation has many\n different variations of the procedure described above, including more\n advanced step taking algorithms and alternate acceptance criterion.\n\n For stochastic global optimization there is no way to determine if the true\n global minimum has actually been found. Instead, as a consistency check,\n the algorithm can be run from a number of different random starting points\n to ensure the lowest minimum found in each example has converged to the\n global minimum. For this reason ``basinhopping`` will by default simply\n run for the number of iterations ``niter`` and return the lowest minimum\n found. It is left to the user to ensure that this is in fact the global\n minimum.\n\n Choosing ``stepsize``: This is a crucial parameter in ``basinhopping`` and\n depends on the problem being solved. Ideally it should be comparable to\n the typical separation between local minima of the function being\n optimized. ``basinhopping`` will, by default, adjust ``stepsize`` to find\n an optimal value, but this may take many iterations. You will get quicker\n results if you set a sensible value for ``stepsize``.\n\n Choosing ``T``: The parameter ``T`` is the temperature used in the\n metropolis criterion. Basinhopping steps are accepted with probability\n ``1`` if ``func(xnew) < func(xold)``, or otherwise with probability::\n\n exp( -(func(xnew) - func(xold)) / T )\n\n So, for best results, ``T`` should to be comparable to the typical\n difference in function value between between local minima\n\n References\n ----------\n .. [1] Wales, David J. 2003, Energy Landscapes, Cambridge University Press,\n Cambridge, UK.\n .. [2] Wales, D J, and Doye J P K, Global Optimization by Basin-Hopping and\n the Lowest Energy Structures of Lennard-Jones Clusters Containing up to\n 110 Atoms. Journal of Physical Chemistry A, 1997, 101, 5111.\n .. [3] Li, Z. and Scheraga, H. A., Monte Carlo-minimization approach to the\n multiple-minima problem in protein folding, Proc. Natl. Acad. Sci. USA,\n 1987, 84, 6611.\n .. [4] Wales, D. J. and Scheraga, H. A., Global optimization of clusters,\n crystals, and biomolecules, Science, 1999, 285, 1368.\n\n Examples\n --------\n The following example is a one-dimensional minimization problem, with many\n local minima superimposed on a parabola.\n\n >>> func = lambda x: cos(14.5 * x - 0.3) + (x + 0.2) * x\n >>> x0=[1.]\n\n Basinhopping, internally, uses a local minimization algorithm. We will use\n the parameter ``minimizer_kwargs`` to tell basinhopping which algorithm to\n use and how to set up that minimizer. This parameter will be passed to\n ``scipy.optimize.minimize()``.\n\n >>> minimizer_kwargs = {\"method\": \"BFGS\"}\n >>> ret = basinhopping(func, x0, minimizer_kwargs=minimizer_kwargs,\n ... niter=200)\n >>> print(\"global minimum: x = %.4f, f(x0) = %.4f\" % (ret.x, ret.fun))\n global minimum: x = -0.1951, f(x0) = -1.0009\n\n Next consider a two-dimensional minimization problem. Also, this time we\n will use gradient information to significantly speed up the search.\n\n >>> def func2d(x):\n ... f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] +\n ... 0.2) * x[0]\n ... df = np.zeros(2)\n ... df[0] = -14.5 * sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2\n ... df[1] = 2. * x[1] + 0.2\n ... return f, df\n\n We'll also use a different local minimization algorithm. 
Also we must tell\n the minimizer that our function returns both energy and gradient (jacobian)\n\n >>> minimizer_kwargs = {\"method\":\"L-BFGS-B\", \"jac\":True}\n >>> x0 = [1.0, 1.0]\n >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,\n ... niter=200)\n >>> print(\"global minimum: x = [%.4f, %.4f], f(x0) = %.4f\" % (ret.x[0],\n ... ret.x[1],\n ... ret.fun))\n global minimum: x = [-0.1951, -0.1000], f(x0) = -1.0109\n\n\n Here is an example using a custom step taking routine. Imagine you want\n the first coordinate to take larger steps then the rest of the coordinates.\n This can be implemented like so:\n\n >>> class MyTakeStep(object):\n ... def __init__(self, stepsize=0.5):\n ... self.stepsize = stepsize\n ... def __call__(self, x):\n ... s = self.stepsize\n ... x[0] += np.random.uniform(-2.*s, 2.*s)\n ... x[1:] += np.random.uniform(-s, s, x[1:].shape)\n ... return x\n\n Since ``MyTakeStep.stepsize`` exists basinhopping will adjust the magnitude\n of ``stepsize`` to optimize the search. We'll use the same 2-D function as\n before\n\n >>> mytakestep = MyTakeStep()\n >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,\n ... niter=200, take_step=mytakestep)\n >>> print(\"global minimum: x = [%.4f, %.4f], f(x0) = %.4f\" % (ret.x[0],\n ... ret.x[1],\n ... ret.fun))\n global minimum: x = [-0.1951, -0.1000], f(x0) = -1.0109\n\n\n Now let's do an example using a custom callback function which prints the\n value of every minimum found\n\n >>> def print_fun(x, f, accepted):\n ... print(\"at minima %.4f accepted %d\" % (f, int(accepted)))\n\n We'll run it for only 10 basinhopping steps this time.\n\n >>> np.random.seed(1)\n >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,\n ... niter=10, callback=print_fun)\n at minima 0.4159 accepted 1\n at minima -0.9073 accepted 1\n at minima -0.1021 accepted 1\n at minima -0.1021 accepted 1\n at minima 0.9102 accepted 1\n at minima 0.9102 accepted 1\n at minima 2.2945 accepted 0\n at minima -0.1021 accepted 1\n at minima -1.0109 accepted 1\n at minima -1.0109 accepted 1\n\n\n The minima at -1.0109 is actually the global minimum, found already on the\n 8th iteration.\n\n Now let's implement bounds on the problem using a custom ``accept_test``:\n\n >>> class MyBounds(object):\n ... def __init__(self, xmax=[1.1,1.1], xmin=[-1.1,-1.1] ):\n ... self.xmax = np.array(xmax)\n ... self.xmin = np.array(xmin)\n ... def __call__(self, **kwargs):\n ... x = kwargs[\"x_new\"]\n ... tmax = bool(np.all(x <= self.xmax))\n ... tmin = bool(np.all(x >= self.xmin))\n ... return tmax and tmin\n\n >>> mybounds = MyBounds()\n >>> ret = basinhopping(func2d, x0, minimizer_kwargs=minimizer_kwargs,\n ... 
niter=10, accept_test=mybounds)\n\n \"\"\"\n x0 = np.array(x0)\n\n #set up minimizer\n if minimizer_kwargs is None:\n minimizer_kwargs = dict()\n wrapped_minimizer = MinimizerWrapper(scipy.optimize.minimize, func,\n **minimizer_kwargs)\n\n #set up step taking algorithm\n if take_step is not None:\n if not isinstance(take_step, collections.Callable):\n raise TypeError(\"take_step must be callable\")\n # if take_step.stepsize exists then use AdaptiveStepsize to control\n # take_step.stepsize\n if hasattr(take_step, \"stepsize\"):\n take_step_wrapped = AdaptiveStepsize(take_step, interval=interval,\n verbose=disp)\n else:\n take_step_wrapped = take_step\n else:\n #use default\n displace = RandomDisplacement(stepsize=stepsize)\n take_step_wrapped = AdaptiveStepsize(displace, interval=interval,\n verbose=disp)\n\n #set up accept tests\n if accept_test is not None:\n if not isinstance(accept_test, collections.Callable):\n raise TypeError(\"accept_test must be callable\")\n accept_tests = [accept_test]\n else:\n accept_tests = []\n ##use default\n metropolis = Metropolis(T)\n accept_tests.append(metropolis)\n\n if niter_success is None:\n niter_success = niter + 2\n\n bh = BasinHoppingRunner(x0, wrapped_minimizer, take_step_wrapped,\n accept_tests, disp=disp)\n\n #start main iteration loop\n count = 0\n message = [\"requested number of basinhopping iterations completed\"\n \" successfully\"]\n for i in range(niter):\n new_global_min = bh.one_cycle()\n\n if isinstance(callback, collections.Callable):\n #should we pass acopy of x?\n val = callback(bh.xtrial, bh.energy_trial, bh.accept)\n if val is not None:\n if val:\n message = [\"callback function requested stop early by\"\n \"returning True\"]\n break\n\n count += 1\n if new_global_min:\n count = 0\n elif count > niter_success:\n message = [\"success condition satisfied\"]\n break\n\n #prepare return object\n lowest = bh.storage.get_lowest()\n res = bh.res\n res.x = np.copy(lowest[0])\n res.fun = lowest[1]\n res.message = message\n res.nit = i + 1\n return res\n\n\ndef _test_func2d_nograd(x):\n f = (cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0]\n + 1.010876184442655)\n return f\n\n\ndef _test_func2d(x):\n f = (cos(14.5 * x[0] - 0.3) + (x[0] + 0.2) * x[0] + cos(14.5 * x[1] -\n 0.3) + (x[1] + 0.2) * x[1] + x[0] * x[1] + 1.963879482144252)\n df = np.zeros(2)\n df[0] = -14.5 * sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2 + x[1]\n df[1] = -14.5 * sin(14.5 * x[1] - 0.3) + 2. * x[1] + 0.2 + x[0]\n return f, df\n\nif __name__ == \"__main__\":\n print(\"\\n\\nminimize a 2d function without gradient\")\n # minimum expected at ~[-0.195, -0.1]\n kwargs = {\"method\": \"L-BFGS-B\"}\n x0 = np.array([1.0, 1.])\n scipy.optimize.minimize(_test_func2d_nograd, x0, **kwargs)\n ret = basinhopping(_test_func2d_nograd, x0, minimizer_kwargs=kwargs,\n niter=200, disp=False)\n print(\"minimum expected at func([-0.195, -0.1]) = 0.0\")\n print(ret)\n\n print(\"\\n\\ntry a harder 2d problem\")\n kwargs = {\"method\": \"L-BFGS-B\", \"jac\": True}\n x0 = np.array([1.0, 1.0])\n ret = basinhopping(_test_func2d, x0, minimizer_kwargs=kwargs, niter=200,\n disp=False)\n print(\"minimum expected at ~, func([-0.19415263, -0.19415263]) = 0\")\n print(ret)\n",
"#!/usr/bin/env python\n# Created by Pearu Peterson, September 2002\n\"\"\" Test functions for fftpack.basic module\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\n__usage__ = \"\"\"\nBuild fftpack:\n python setup_fftpack.py build\nRun tests if scipy is installed:\n python -c 'import scipy;scipy.fftpack.test()'\nRun tests if fftpack is not installed:\n python tests/test_basic.py\n\"\"\"\n\nfrom numpy.testing import assert_, assert_equal, assert_array_almost_equal, \\\n assert_array_almost_equal_nulp, assert_raises, run_module_suite, \\\n TestCase, dec\nfrom scipy.fftpack import ifft,fft,fftn,ifftn,rfft,irfft, fft2\nfrom scipy.fftpack import _fftpack as fftpack\n\nfrom numpy import arange, add, array, asarray, zeros, dot, exp, pi,\\\n swapaxes, double, cdouble\nimport numpy as np\nimport numpy.fft\n\n# \"large\" composite numbers supported by FFTPACK\nLARGE_COMPOSITE_SIZES = [\n 2**13,\n 2**5 * 3**5,\n 2**3 * 3**3 * 5**2,\n]\nSMALL_COMPOSITE_SIZES = [\n 2,\n 2*3*5,\n 2*2*3*3,\n]\n# prime\nLARGE_PRIME_SIZES = [\n 2011\n]\nSMALL_PRIME_SIZES = [\n 29\n]\n\nfrom numpy.random import rand\ndef random(size):\n return rand(*size)\n\ndef get_mat(n):\n data = arange(n)\n data = add.outer(data,data)\n return data\n\ndef direct_dft(x):\n x = asarray(x)\n n = len(x)\n y = zeros(n,dtype=cdouble)\n w = -arange(n)*(2j*pi/n)\n for i in range(n):\n y[i] = dot(exp(i*w),x)\n return y\n\ndef direct_idft(x):\n x = asarray(x)\n n = len(x)\n y = zeros(n,dtype=cdouble)\n w = arange(n)*(2j*pi/n)\n for i in range(n):\n y[i] = dot(exp(i*w),x)/n\n return y\n\ndef direct_dftn(x):\n x = asarray(x)\n for axis in range(len(x.shape)):\n x = fft(x,axis=axis)\n return x\n\ndef direct_idftn(x):\n x = asarray(x)\n for axis in range(len(x.shape)):\n x = ifft(x,axis=axis)\n return x\n\ndef direct_rdft(x):\n x = asarray(x)\n n = len(x)\n w = -arange(n)*(2j*pi/n)\n r = zeros(n,dtype=double)\n for i in range(n//2+1):\n y = dot(exp(i*w),x)\n if i:\n r[2*i-1] = y.real\n if 2*i<n:\n r[2*i] = y.imag\n else:\n r[0] = y.real\n return r\n\ndef direct_irdft(x):\n x = asarray(x)\n n = len(x)\n x1 = zeros(n,dtype=cdouble)\n for i in range(n//2+1):\n if i:\n if 2*i<n:\n x1[i] = x[2*i-1] + 1j* x[2*i]\n x1[n-i] = x[2*i-1] - 1j* x[2*i]\n else:\n x1[i] = x[2*i-1]\n else:\n x1[0] = x[0]\n return direct_idft(x1).real\n\nclass _TestFFTBase(TestCase):\n def setUp(self):\n self.cdt = None\n self.rdt = None\n np.random.seed(1234)\n\n def test_definition(self):\n x = np.array([1,2,3,4+1j,1,2,3,4+2j], dtype = self.cdt)\n y = fft(x)\n self.assertTrue(y.dtype == self.cdt,\n \"Output dtype is %s, expected %s\" % (y.dtype, self.cdt))\n y1 = direct_dft(x)\n assert_array_almost_equal(y,y1)\n x = np.array([1,2,3,4+0j,5], dtype = self.cdt)\n assert_array_almost_equal(fft(x),direct_dft(x))\n\n def test_n_argument_real(self):\n x1 = np.array([1,2,3,4], dtype=self.rdt)\n x2 = np.array([1,2,3,4], dtype=self.rdt)\n y = fft([x1,x2],n=4)\n self.assertTrue(y.dtype == self.cdt,\n \"Output dtype is %s, expected %s\" % (y.dtype, self.cdt))\n assert_equal(y.shape,(2,4))\n assert_array_almost_equal(y[0],direct_dft(x1))\n assert_array_almost_equal(y[1],direct_dft(x2))\n\n def _test_n_argument_complex(self):\n x1 = np.array([1,2,3,4+1j], dtype=self.cdt)\n x2 = np.array([1,2,3,4+1j], dtype=self.cdt)\n y = fft([x1,x2],n=4)\n self.assertTrue(y.dtype == self.cdt,\n \"Output dtype is %s, expected %s\" % (y.dtype, self.cdt))\n assert_equal(y.shape,(2,4))\n assert_array_almost_equal(y[0],direct_dft(x1))\n assert_array_almost_equal(y[1],direct_dft(x2))\n\n 
def test_djbfft(self):\n for i in range(2,14):\n n = 2**i\n x = list(range(n))\n y = fftpack.zfft(x)\n y2 = numpy.fft.fft(x)\n assert_array_almost_equal(y,y2)\n y = fftpack.zrfft(x)\n assert_array_almost_equal(y,y2)\n\nclass TestDoubleFFT(_TestFFTBase):\n def setUp(self):\n self.cdt = np.cdouble\n self.rdt = np.double\n\nclass TestSingleFFT(_TestFFTBase):\n def setUp(self):\n self.cdt = np.complex64\n self.rdt = np.float32\n\n @dec.knownfailureif(True, \"single-precision FFT implementation is partially disabled, until accuracy issues with large prime powers are resolved\")\n def test_notice(self):\n pass\n\nclass _TestIFFTBase(TestCase):\n def setUp(self):\n np.random.seed(1234)\n\n def test_definition(self):\n x = np.array([1,2,3,4+1j,1,2,3,4+2j], self.cdt)\n y = ifft(x)\n y1 = direct_idft(x)\n self.assertTrue(y.dtype == self.cdt,\n \"Output dtype is %s, expected %s\" % (y.dtype, self.cdt))\n assert_array_almost_equal(y,y1)\n\n x = np.array([1,2,3,4+0j,5], self.cdt)\n assert_array_almost_equal(ifft(x),direct_idft(x))\n\n def test_definition_real(self):\n x = np.array([1,2,3,4,1,2,3,4], self.rdt)\n y = ifft(x)\n self.assertTrue(y.dtype == self.cdt,\n \"Output dtype is %s, expected %s\" % (y.dtype, self.cdt))\n y1 = direct_idft(x)\n assert_array_almost_equal(y,y1)\n\n x = np.array([1,2,3,4,5], dtype=self.rdt)\n self.assertTrue(y.dtype == self.cdt,\n \"Output dtype is %s, expected %s\" % (y.dtype, self.cdt))\n assert_array_almost_equal(ifft(x),direct_idft(x))\n\n def test_djbfft(self):\n for i in range(2,14):\n n = 2**i\n x = list(range(n))\n y = fftpack.zfft(x,direction=-1)\n y2 = numpy.fft.ifft(x)\n assert_array_almost_equal(y,y2)\n y = fftpack.zrfft(x,direction=-1)\n assert_array_almost_equal(y,y2)\n\n def test_random_complex(self):\n for size in [1,51,111,100,200,64,128,256,1024]:\n x = random([size]).astype(self.cdt)\n x = random([size]).astype(self.cdt) +1j*x\n y1 = ifft(fft(x))\n y2 = fft(ifft(x))\n self.assertTrue(y1.dtype == self.cdt,\n \"Output dtype is %s, expected %s\" % (y1.dtype, self.cdt))\n self.assertTrue(y2.dtype == self.cdt,\n \"Output dtype is %s, expected %s\" % (y2.dtype, self.cdt))\n assert_array_almost_equal (y1, x)\n assert_array_almost_equal (y2, x)\n\n def test_random_real(self):\n for size in [1,51,111,100,200,64,128,256,1024]:\n x = random([size]).astype(self.rdt)\n y1 = ifft(fft(x))\n y2 = fft(ifft(x))\n self.assertTrue(y1.dtype == self.cdt,\n \"Output dtype is %s, expected %s\" % (y1.dtype, self.cdt))\n self.assertTrue(y2.dtype == self.cdt,\n \"Output dtype is %s, expected %s\" % (y2.dtype, self.cdt))\n assert_array_almost_equal (y1, x)\n assert_array_almost_equal (y2, x)\n\n def test_size_accuracy(self):\n # Sanity check for the accuracy for prime and non-prime sized inputs\n if self.rdt == np.float32:\n rtol = 1e-5\n elif self.rdt == np.float64:\n rtol = 1e-10\n\n for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:\n np.random.seed(1234)\n x = np.random.rand(size).astype(self.rdt)\n y = ifft(fft(x))\n self.assertTrue(np.linalg.norm(x - y) < rtol*np.linalg.norm(x),\n (size, self.rdt))\n y = fft(ifft(x))\n self.assertTrue(np.linalg.norm(x - y) < rtol*np.linalg.norm(x),\n (size, self.rdt))\n\n x = (x + 1j*np.random.rand(size)).astype(self.cdt)\n y = ifft(fft(x))\n self.assertTrue(np.linalg.norm(x - y) < rtol*np.linalg.norm(x),\n (size, self.rdt))\n y = fft(ifft(x))\n self.assertTrue(np.linalg.norm(x - y) < rtol*np.linalg.norm(x),\n (size, self.rdt))\n\nclass TestDoubleIFFT(_TestIFFTBase):\n def setUp(self):\n self.cdt = np.cdouble\n self.rdt = 
np.double\n\nclass TestSingleIFFT(_TestIFFTBase):\n def setUp(self):\n self.cdt = np.complex64\n self.rdt = np.float32\n\nclass _TestRFFTBase(TestCase):\n def setUp(self):\n np.random.seed(1234)\n\n def test_definition(self):\n for t in [[1, 2, 3, 4, 1, 2, 3, 4], [1, 2, 3, 4, 1, 2, 3, 4, 5]]:\n x = np.array(t, dtype=self.rdt)\n y = rfft(x)\n y1 = direct_rdft(x)\n assert_array_almost_equal(y,y1)\n self.assertTrue(y.dtype == self.rdt,\n \"Output dtype is %s, expected %s\" % (y.dtype, self.rdt))\n\n def test_djbfft(self):\n from numpy.fft import fft as numpy_fft\n for i in range(2,14):\n n = 2**i\n x = list(range(n))\n y2 = numpy_fft(x)\n y1 = zeros((n,),dtype=double)\n y1[0] = y2[0].real\n y1[-1] = y2[n//2].real\n for k in range(1, n//2):\n y1[2*k-1] = y2[k].real\n y1[2*k] = y2[k].imag\n y = fftpack.drfft(x)\n assert_array_almost_equal(y,y1)\n\nclass TestRFFTDouble(_TestRFFTBase):\n def setUp(self):\n self.cdt = np.cdouble\n self.rdt = np.double\n\nclass TestRFFTSingle(_TestRFFTBase):\n def setUp(self):\n self.cdt = np.complex64\n self.rdt = np.float32\n\nclass _TestIRFFTBase(TestCase):\n def setUp(self):\n np.random.seed(1234)\n\n def test_definition(self):\n x1 = [1,2,3,4,1,2,3,4]\n x1_1 = [1,2+3j,4+1j,2+3j,4,2-3j,4-1j,2-3j]\n x2= [1,2,3,4,1,2,3,4,5]\n x2_1 = [1,2+3j,4+1j,2+3j,4+5j,4-5j,2-3j,4-1j,2-3j]\n\n def _test(x, xr):\n y = irfft(np.array(x, dtype=self.rdt))\n y1 = direct_irdft(x)\n self.assertTrue(y.dtype == self.rdt,\n \"Output dtype is %s, expected %s\" % (y.dtype, self.rdt))\n assert_array_almost_equal(y,y1, decimal=self.ndec)\n assert_array_almost_equal(y,ifft(xr), decimal=self.ndec)\n\n _test(x1, x1_1)\n _test(x2, x2_1)\n\n def test_djbfft(self):\n from numpy.fft import ifft as numpy_ifft\n for i in range(2,14):\n n = 2**i\n x = list(range(n))\n x1 = zeros((n,),dtype=cdouble)\n x1[0] = x[0]\n for k in range(1, n//2):\n x1[k] = x[2*k-1]+1j*x[2*k]\n x1[n-k] = x[2*k-1]-1j*x[2*k]\n x1[n//2] = x[-1]\n y1 = numpy_ifft(x1)\n y = fftpack.drfft(x,direction=-1)\n assert_array_almost_equal(y,y1)\n\n def test_random_real(self):\n for size in [1,51,111,100,200,64,128,256,1024]:\n x = random([size]).astype(self.rdt)\n y1 = irfft(rfft(x))\n y2 = rfft(irfft(x))\n self.assertTrue(y1.dtype == self.rdt,\n \"Output dtype is %s, expected %s\" % (y1.dtype, self.rdt))\n self.assertTrue(y2.dtype == self.rdt,\n \"Output dtype is %s, expected %s\" % (y2.dtype, self.rdt))\n assert_array_almost_equal (y1, x, decimal=self.ndec,\n err_msg=\"size=%d\" % size)\n assert_array_almost_equal (y2, x, decimal=self.ndec,\n err_msg=\"size=%d\" % size)\n\n def test_size_accuracy(self):\n # Sanity check for the accuracy for prime and non-prime sized inputs\n if self.rdt == np.float32:\n rtol = 1e-5\n elif self.rdt == np.float64:\n rtol = 1e-10\n\n for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:\n np.random.seed(1234)\n x = np.random.rand(size).astype(self.rdt)\n y = irfft(rfft(x))\n self.assertTrue(np.linalg.norm(x - y) < rtol*np.linalg.norm(x),\n (size, self.rdt))\n y = rfft(irfft(x))\n self.assertTrue(np.linalg.norm(x - y) < rtol*np.linalg.norm(x),\n (size, self.rdt))\n\n# self.ndec is bogus; we should have a assert_array_approx_equal for number of\n# significant digits\nclass TestIRFFTDouble(_TestIRFFTBase):\n def setUp(self):\n self.cdt = np.cdouble\n self.rdt = np.double\n self.ndec = 14\n\nclass TestIRFFTSingle(_TestIRFFTBase):\n def setUp(self):\n self.cdt = np.complex64\n self.rdt = np.float32\n self.ndec = 5\n\nclass Testfft2(TestCase):\n def setUp(self):\n np.random.seed(1234)\n\n def 
test_regression_244(self):\n \"\"\"fft returns wrong result with axes parameter.\"\"\"\n # fftn (and hence fft2) used to break when both axes and shape were\n # used\n x = numpy.ones((4,4,2))\n y = fft2(x, shape=(8,8), axes=(-3,-2))\n y_r = numpy.fft.fftn(x, s=(8, 8), axes=(-3, -2))\n assert_array_almost_equal(y, y_r)\n\nclass TestFftnSingle(TestCase):\n def setUp(self):\n np.random.seed(1234)\n\n def test_definition(self):\n x = [[1,2,3],[4,5,6],[7,8,9]]\n y = fftn(np.array(x, np.float32))\n if not y.dtype == np.complex64:\n raise ValueError(\"double precision output with single precision\")\n\n y_r = np.array(fftn(x), np.complex64)\n assert_array_almost_equal_nulp(y, y_r)\n\n def test_size_accuracy(self):\n for size in SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES:\n np.random.seed(1234)\n x = np.random.rand(size, size) + 1j*np.random.rand(size, size)\n y1 = fftn(x.real.astype(np.float32))\n y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)\n\n self.assertTrue(y1.dtype == np.complex64)\n assert_array_almost_equal_nulp(y1, y2, 2000)\n\n for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:\n np.random.seed(1234)\n x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)\n y1 = fftn(x.real.astype(np.float32))\n y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)\n\n self.assertTrue(y1.dtype == np.complex64)\n assert_array_almost_equal_nulp(y1, y2, 2000)\n\nclass TestFftn(TestCase):\n def setUp(self):\n np.random.seed(1234)\n\n def test_definition(self):\n x = [[1,2,3],[4,5,6],[7,8,9]]\n y = fftn(x)\n assert_array_almost_equal(y,direct_dftn(x))\n x = random((20,26))\n assert_array_almost_equal(fftn(x),direct_dftn(x))\n x = random((5,4,3,20))\n assert_array_almost_equal(fftn(x),direct_dftn(x))\n\n def test_axes_argument(self):\n #plane == ji_plane, x== kji_space\n plane1 = [[1,2,3],[4,5,6],[7,8,9]]\n plane2 = [[10,11,12],[13,14,15],[16,17,18]]\n plane3 = [[19,20,21],[22,23,24],[25,26,27]]\n ki_plane1 = [[1,2,3],[10,11,12],[19,20,21]]\n ki_plane2 = [[4,5,6],[13,14,15],[22,23,24]]\n ki_plane3 = [[7,8,9],[16,17,18],[25,26,27]]\n jk_plane1 = [[1,10,19],[4,13,22],[7,16,25]]\n jk_plane2 = [[2,11,20],[5,14,23],[8,17,26]]\n jk_plane3 = [[3,12,21],[6,15,24],[9,18,27]]\n kj_plane1 = [[1,4,7],[10,13,16],[19,22,25]]\n kj_plane2 = [[2,5,8],[11,14,17],[20,23,26]]\n kj_plane3 = [[3,6,9],[12,15,18],[21,24,27]]\n ij_plane1 = [[1,4,7],[2,5,8],[3,6,9]]\n ij_plane2 = [[10,13,16],[11,14,17],[12,15,18]]\n ij_plane3 = [[19,22,25],[20,23,26],[21,24,27]]\n ik_plane1 = [[1,10,19],[2,11,20],[3,12,21]]\n ik_plane2 = [[4,13,22],[5,14,23],[6,15,24]]\n ik_plane3 = [[7,16,25],[8,17,26],[9,18,27]]\n ijk_space = [jk_plane1,jk_plane2,jk_plane3]\n ikj_space = [kj_plane1,kj_plane2,kj_plane3]\n jik_space = [ik_plane1,ik_plane2,ik_plane3]\n jki_space = [ki_plane1,ki_plane2,ki_plane3]\n kij_space = [ij_plane1,ij_plane2,ij_plane3]\n x = array([plane1,plane2,plane3])\n\n assert_array_almost_equal(fftn(x),fftn(x,axes=(-3,-2,-1))) # kji_space\n assert_array_almost_equal(fftn(x),fftn(x,axes=(0,1,2)))\n y = fftn(x,axes=(2,1,0)) # ijk_space\n assert_array_almost_equal(swapaxes(y,-1,-3),fftn(ijk_space))\n y = fftn(x,axes=(2,0,1)) # ikj_space\n assert_array_almost_equal(swapaxes(swapaxes(y,-1,-3),\n -1,-2)\n ,fftn(ikj_space))\n y = fftn(x,axes=(1,2,0)) # jik_space\n assert_array_almost_equal(swapaxes(swapaxes(y,-1,-3),\n -3,-2)\n ,fftn(jik_space))\n y = fftn(x,axes=(1,0,2)) # jki_space\n assert_array_almost_equal(swapaxes(y,-2,-3),fftn(jki_space))\n y = fftn(x,axes=(0,2,1)) # kij_space\n 
assert_array_almost_equal(swapaxes(y,-2,-1),\n fftn(kij_space))\n\n y = fftn(x,axes=(-2,-1)) # ji_plane\n assert_array_almost_equal(fftn(plane1),y[0])\n assert_array_almost_equal(fftn(plane2),y[1])\n assert_array_almost_equal(fftn(plane3),y[2])\n y = fftn(x,axes=(1,2)) # ji_plane\n assert_array_almost_equal(fftn(plane1),y[0])\n assert_array_almost_equal(fftn(plane2),y[1])\n assert_array_almost_equal(fftn(plane3),y[2])\n y = fftn(x,axes=(-3,-2)) # kj_plane\n assert_array_almost_equal(fftn(x[:,:,0]),y[:,:,0])\n assert_array_almost_equal(fftn(x[:,:,1]),y[:,:,1])\n assert_array_almost_equal(fftn(x[:,:,2]),y[:,:,2])\n y = fftn(x,axes=(-3,-1)) # ki_plane\n assert_array_almost_equal(fftn(x[:,0,:]),y[:,0,:])\n assert_array_almost_equal(fftn(x[:,1,:]),y[:,1,:])\n assert_array_almost_equal(fftn(x[:,2,:]),y[:,2,:])\n y = fftn(x,axes=(-1,-2)) # ij_plane\n assert_array_almost_equal(fftn(ij_plane1),swapaxes(y[0],-2,-1))\n assert_array_almost_equal(fftn(ij_plane2),swapaxes(y[1],-2,-1))\n assert_array_almost_equal(fftn(ij_plane3),swapaxes(y[2],-2,-1))\n y = fftn(x,axes=(-1,-3)) # ik_plane\n assert_array_almost_equal(fftn(ik_plane1),swapaxes(y[:,0,:],-1,-2))\n assert_array_almost_equal(fftn(ik_plane2),swapaxes(y[:,1,:],-1,-2))\n assert_array_almost_equal(fftn(ik_plane3),swapaxes(y[:,2,:],-1,-2))\n y = fftn(x,axes=(-2,-3)) # jk_plane\n assert_array_almost_equal(fftn(jk_plane1),swapaxes(y[:,:,0],-1,-2))\n assert_array_almost_equal(fftn(jk_plane2),swapaxes(y[:,:,1],-1,-2))\n assert_array_almost_equal(fftn(jk_plane3),swapaxes(y[:,:,2],-1,-2))\n\n y = fftn(x,axes=(-1,)) # i_line\n for i in range(3):\n for j in range(3):\n assert_array_almost_equal(fft(x[i,j,:]),y[i,j,:])\n y = fftn(x,axes=(-2,)) # j_line\n for i in range(3):\n for j in range(3):\n assert_array_almost_equal(fft(x[i,:,j]),y[i,:,j])\n y = fftn(x,axes=(0,)) # k_line\n for i in range(3):\n for j in range(3):\n assert_array_almost_equal(fft(x[:,i,j]),y[:,i,j])\n\n y = fftn(x,axes=()) # point\n assert_array_almost_equal(y,x)\n\n def test_shape_argument(self):\n small_x = [[1,2,3],[4,5,6]]\n large_x1 = [[1,2,3,0],[4,5,6,0],[0,0,0,0],[0,0,0,0]]\n y = fftn(small_x,shape=(4,4))\n assert_array_almost_equal (y,fftn(large_x1))\n y = fftn(small_x,shape=(3,4))\n assert_array_almost_equal (y,fftn(large_x1[:-1]))\n\n def test_shape_axes_argument(self):\n small_x = [[1,2,3],[4,5,6],[7,8,9]]\n large_x1 = array([[1,2,3,0],\n [4,5,6,0],\n [7,8,9,0],\n [0,0,0,0]])\n # Disable tests with shape and axes of different lengths\n #y = fftn(small_x,shape=(4,4),axes=(-1,))\n #for i in range(4):\n # assert_array_almost_equal (y[i],fft(large_x1[i]))\n #y = fftn(small_x,shape=(4,4),axes=(-2,))\n #for i in range(4):\n # assert_array_almost_equal (y[:,i],fft(large_x1[:,i]))\n y = fftn(small_x,shape=(4,4),axes=(-2,-1))\n assert_array_almost_equal (y,fftn(large_x1))\n y = fftn(small_x,shape=(4,4),axes=(-1,-2))\n assert_array_almost_equal (y,swapaxes(\\\n fftn(swapaxes(large_x1,-1,-2)),-1,-2))\n\n def test_shape_axes_argument2(self):\n # Change shape of the last axis\n x = numpy.random.random((10, 5, 3, 7))\n y = fftn(x, axes=(-1,), shape=(8,))\n assert_array_almost_equal(y, fft(x, axis=-1, n=8))\n\n # Change shape of an arbitrary axis which is not the last one\n x = numpy.random.random((10, 5, 3, 7))\n y = fftn(x, axes=(-2,), shape=(8,))\n assert_array_almost_equal(y, fft(x, axis=-2, n=8))\n\n # Change shape of axes: cf #244, where shape and axes were mixed up\n x = numpy.random.random((4,4,2))\n y = fftn(x, axes=(-3,-2), shape=(8,8))\n assert_array_almost_equal(y, 
numpy.fft.fftn(x, axes=(-3, -2), s=(8, 8)))\n\n def test_shape_argument_more(self):\n \"\"\"Test that fftn raises ValueError when s.shape is longer than x.shape\"\"\"\n x = zeros((4, 4, 2))\n assert_raises(ValueError, fftn, x, shape=(8, 8, 2, 1))\n\n\nclass _TestIfftn(TestCase):\n dtype = None\n cdtype = None\n\n def setUp(self):\n np.random.seed(1234)\n\n def test_definition(self):\n x = np.array([[1,2,3],[4,5,6],[7,8,9]], dtype=self.dtype)\n y = ifftn(x)\n assert_(y.dtype == self.cdtype)\n assert_array_almost_equal_nulp(y,direct_idftn(x),self.maxnlp)\n x = random((20,26))\n assert_array_almost_equal_nulp(ifftn(x),direct_idftn(x),self.maxnlp)\n x = random((5,4,3,20))\n assert_array_almost_equal_nulp(ifftn(x),direct_idftn(x),self.maxnlp)\n\n def test_random_complex(self):\n for size in [1,2,51,32,64,92]:\n x = random([size,size]) + 1j*random([size,size])\n assert_array_almost_equal_nulp(ifftn(fftn(x)),x,self.maxnlp)\n assert_array_almost_equal_nulp(fftn(ifftn(x)),x,self.maxnlp)\n\nclass TestIfftnDouble(_TestIfftn):\n dtype = np.float64\n cdtype = np.complex128\n maxnlp = 2000\n\nclass TestIfftnSingle(_TestIfftn):\n dtype = np.float32\n cdtype = np.complex64\n maxnlp = 3500\n\nclass TestLongDoubleFailure(TestCase):\n def setUp(self):\n np.random.seed(1234)\n\n def test_complex(self):\n if np.dtype(np.longcomplex).itemsize == np.dtype(np.complex).itemsize:\n # longdouble == double; so fft is supported\n return\n\n x = np.random.randn(10).astype(np.longdouble) + \\\n 1j * np.random.randn(10).astype(np.longdouble)\n\n for f in [fft, ifft]:\n try:\n f(x)\n raise AssertionError(\"Type %r not supported but does not fail\" % \\\n np.longcomplex)\n except ValueError:\n pass\n\n def test_real(self):\n if np.dtype(np.longdouble).itemsize == np.dtype(np.double).itemsize:\n # longdouble == double; so fft is supported\n return\n\n x = np.random.randn(10).astype(np.longcomplex)\n\n for f in [fft, ifft]:\n try:\n f(x)\n raise AssertionError(\"Type %r not supported but does not fail\" % \\\n np.longcomplex)\n except ValueError:\n pass\n\n\n\nclass FakeArray(object):\n def __init__(self, data):\n self._data = data\n self.__array_interface__ = data.__array_interface__\n\nclass FakeArray2(object):\n def __init__(self, data):\n self._data = data\n def __array__(self):\n return self._data\n\nclass TestOverwrite(object):\n \"\"\"\n Check input overwrite behavior of the FFT functions\n \"\"\"\n\n real_dtypes = [np.float32, np.float64]\n dtypes = real_dtypes + [np.complex64, np.complex128]\n\n def _check(self, x, routine, fftsize, axis, overwrite_x, should_overwrite):\n x2 = x.copy()\n for fake in [lambda x: x, FakeArray, FakeArray2]:\n y = routine(fake(x2), fftsize, axis, overwrite_x=overwrite_x)\n\n sig = \"%s(%s%r, %r, axis=%r, overwrite_x=%r)\" % (\n routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)\n if not should_overwrite:\n assert_equal(x2, x, err_msg=\"spurious overwrite in %s\" % sig)\n else:\n if (x2 == x).all():\n raise AssertionError(\"no overwrite in %s\" % sig)\n\n def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes):\n np.random.seed(1234)\n if np.issubdtype(dtype, np.complexfloating):\n data = np.random.randn(*shape) + 1j*np.random.randn(*shape)\n else:\n data = np.random.randn(*shape)\n data = data.astype(dtype)\n\n for fftsize in [8, 16, 32]:\n for overwrite_x in [True, False]:\n should_overwrite = (overwrite_x\n and dtype in overwritable_dtypes\n and fftsize <= shape[axis]\n and (len(shape) == 1 or\n (axis % len(shape) == len(shape)-1\n and fftsize == 
shape[axis])))\n self._check(data, routine, fftsize, axis,\n overwrite_x=overwrite_x,\n should_overwrite=should_overwrite)\n\n def test_fft(self):\n overwritable = (np.complex128, np.complex64)\n for dtype in self.dtypes:\n self._check_1d(fft, dtype, (16,), -1, overwritable)\n self._check_1d(fft, dtype, (16, 2), 0, overwritable)\n self._check_1d(fft, dtype, (2, 16), 1, overwritable)\n\n def test_ifft(self):\n overwritable = (np.complex128, np.complex64)\n for dtype in self.dtypes:\n self._check_1d(ifft, dtype, (16,), -1, overwritable)\n self._check_1d(ifft, dtype, (16, 2), 0, overwritable)\n self._check_1d(ifft, dtype, (2, 16), 1, overwritable)\n\n def test_rfft(self):\n overwritable = self.real_dtypes\n for dtype in self.real_dtypes:\n self._check_1d(rfft, dtype, (16,), -1, overwritable)\n self._check_1d(rfft, dtype, (16, 2), 0, overwritable)\n self._check_1d(rfft, dtype, (2, 16), 1, overwritable)\n\n def test_irfft(self):\n overwritable = self.real_dtypes\n for dtype in self.real_dtypes:\n self._check_1d(irfft, dtype, (16,), -1, overwritable)\n self._check_1d(irfft, dtype, (16, 2), 0, overwritable)\n self._check_1d(irfft, dtype, (2, 16), 1, overwritable)\n\n def _check_nd_one(self, routine, dtype, shape, axes, overwritable_dtypes):\n np.random.seed(1234)\n if np.issubdtype(dtype, np.complexfloating):\n data = np.random.randn(*shape) + 1j*np.random.randn(*shape)\n else:\n data = np.random.randn(*shape)\n data = data.astype(dtype)\n\n def fftshape_iter(shp):\n if len(shp) <= 0:\n yield ()\n else:\n for j in (shp[0]//2, shp[0], shp[0]*2):\n for rest in fftshape_iter(shp[1:]):\n yield (j,) + rest\n\n if axes is None:\n part_shape = shape\n else:\n part_shape = tuple(np.take(shape, axes))\n\n for overwrite_x in [True, False]:\n for fftshape in fftshape_iter(part_shape):\n should_overwrite = (overwrite_x\n and data.ndim == 1\n and np.all([x < y for x, y in zip(fftshape, part_shape)])\n and dtype in overwritable_dtypes)\n self._check(data, routine, fftshape, axes,\n overwrite_x=overwrite_x,\n should_overwrite=should_overwrite)\n if data.ndim > 1:\n # check fortran order: it never overwrites\n self._check(data.T, routine, fftshape, axes,\n overwrite_x=overwrite_x,\n should_overwrite=False)\n\n def _check_nd(self, routine, dtype, overwritable):\n self._check_nd_one(routine, dtype, (16,), None, overwritable)\n self._check_nd_one(routine, dtype, (16,), (0,), overwritable)\n self._check_nd_one(routine, dtype, (16, 2), (0,), overwritable)\n self._check_nd_one(routine, dtype, (2, 16), (1,), overwritable)\n self._check_nd_one(routine, dtype, (8, 16), None, overwritable)\n self._check_nd_one(routine, dtype, (8, 16), (0, 1), overwritable)\n self._check_nd_one(routine, dtype, (8, 16, 2), (0, 1), overwritable)\n self._check_nd_one(routine, dtype, (8, 16, 2), (1, 2), overwritable)\n self._check_nd_one(routine, dtype, (8, 16, 2), (0,), overwritable)\n self._check_nd_one(routine, dtype, (8, 16, 2), (1,), overwritable)\n self._check_nd_one(routine, dtype, (8, 16, 2), (2,), overwritable)\n self._check_nd_one(routine, dtype, (8, 16, 2), None, overwritable)\n self._check_nd_one(routine, dtype, (8, 16, 2), (0,1,2), overwritable)\n\n def test_fftn(self):\n overwritable = (np.complex128, np.complex64)\n for dtype in self.dtypes:\n self._check_nd(fftn, dtype, overwritable)\n\n def test_ifftn(self):\n overwritable = (np.complex128, np.complex64)\n for dtype in self.dtypes:\n self._check_nd(ifftn, dtype, overwritable)\n\n\nif __name__ == \"__main__\":\n run_module_suite()\n",
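As a hedged aside (not part of the test file above), the direct_dft helper in that file is the O(n^2) reference definition the fftpack tests compare against; the same identity can be checked in a few lines, assuming scipy and numpy are installed:

    # Illustrative check of fftpack.fft against a direct O(n^2) DFT.
    import numpy as np
    from scipy.fftpack import fft

    x = np.random.rand(8)
    n = len(x)
    k = np.arange(n)
    # same definition as direct_dft above: y[i] = sum_j exp(-2j*pi*i*j/n) * x[j]
    y_direct = np.array([np.dot(np.exp(-2j * np.pi * i * k / n), x) for i in range(n)])
    assert np.allclose(fft(x), y_direct)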
"from __future__ import division, print_function, absolute_import\n\nimport numpy as np\nfrom numpy.testing import assert_equal, assert_array_almost_equal\nfrom scipy.sparse import csgraph\n\ndef test_weak_connections():\n Xde = np.array([[0, 1, 0],\n [0, 0, 0],\n [0, 0, 0]])\n\n Xsp = csgraph.csgraph_from_dense(Xde, null_value=0)\n\n for X in Xsp, Xde:\n n_components, labels =\\\n csgraph.connected_components(X, directed=True,\n connection='weak')\n \n assert_equal(n_components, 2)\n assert_array_almost_equal(labels, [0, 0, 1])\n\ndef test_strong_connections():\n X1de = np.array([[0, 1, 0],\n [0, 0, 0],\n [0, 0, 0]])\n X2de = X1de + X1de.T\n\n X1sp = csgraph.csgraph_from_dense(X1de, null_value=0)\n X2sp = csgraph.csgraph_from_dense(X2de, null_value=0)\n\n for X in X1sp, X1de:\n n_components, labels =\\\n csgraph.connected_components(X, directed=True,\n connection='strong')\n \n assert_equal(n_components, 3)\n labels.sort()\n assert_array_almost_equal(labels, [0, 1, 2])\n\n for X in X2sp, X2de:\n n_components, labels =\\\n csgraph.connected_components(X, directed=True,\n connection='strong')\n \n assert_equal(n_components, 2)\n labels.sort()\n assert_array_almost_equal(labels, [0, 0, 1])\n \ndef test_strong_connections2():\n X = np.array([[0, 0, 0, 0, 0, 0],\n [1, 0, 1, 0, 0, 0],\n [0, 0, 0, 1, 0, 0],\n [0, 0, 1, 0, 1, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0]])\n n_components, labels =\\\n csgraph.connected_components(X, directed=True,\n connection='strong')\n assert_equal(n_components, 5)\n labels.sort()\n assert_array_almost_equal(labels, [0, 1, 2, 2, 3, 4])\n\ndef test_weak_connections2():\n X = np.array([[0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0],\n [0, 0, 1, 0, 1, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0]])\n n_components, labels =\\\n csgraph.connected_components(X, directed=True,\n connection='weak')\n assert_equal(n_components, 2)\n labels.sort()\n assert_array_almost_equal(labels, [0, 0, 1, 1, 1, 1])\n\ndef test_ticket1876():\n # Regression test: this failed in the original implementation\n # There should be two strongly-connected components; previously gave one\n g = np.array([[0, 1, 1, 0],\n [1, 0, 0, 1],\n [0, 0, 0, 1],\n [0, 0, 1, 0]])\n n_components, labels = csgraph.connected_components(g, connection='strong')\n\n assert_equal(n_components, 2)\n assert_equal(labels[0], labels[1])\n assert_equal(labels[2], labels[3])\n",
"from __future__ import division, print_function, absolute_import\n\nfrom . import _nnls\nfrom numpy import asarray_chkfinite, zeros, double\n\n__all__ = ['nnls']\n\n\ndef nnls(A,b):\n \"\"\"\n Solve ``argmin_x || Ax - b ||_2`` for ``x>=0``. This is a wrapper\n for a FORTAN non-negative least squares solver.\n\n Parameters\n ----------\n A : ndarray\n Matrix ``A`` as shown above.\n b : ndarray\n Right-hand side vector.\n\n Returns\n -------\n x : ndarray\n Solution vector.\n rnorm : float\n The residual, ``|| Ax-b ||_2``.\n\n Notes\n -----\n The FORTRAN code was published in the book below. The algorithm\n is an active set method. It solves the KKT (Karush-Kuhn-Tucker)\n conditions for the non-negative least squares problem.\n\n References\n ----------\n Lawson C., Hanson R.J., (1987) Solving Least Squares Problems, SIAM\n\n \"\"\"\n\n A,b = map(asarray_chkfinite, (A,b))\n\n if len(A.shape)!=2:\n raise ValueError(\"expected matrix\")\n if len(b.shape)!=1:\n raise ValueError(\"expected vector\")\n\n m,n = A.shape\n\n if m != b.shape[0]:\n raise ValueError(\"incompatible dimensions\")\n\n w = zeros((n,), dtype=double)\n zz = zeros((m,), dtype=double)\n index=zeros((n,), dtype=int)\n\n x,rnorm,mode = _nnls.nnls(A,m,n,b,w,zz,index)\n if mode != 1:\n raise RuntimeError(\"too many iterations\")\n\n return x, rnorm\n",
"\"\"\"\nWrappers to BLAS library\n========================\n\nNOTE: this module is deprecated -- use scipy.linalg.blas instead!\n\nfblas -- wrappers for Fortran [*] BLAS routines\ncblas -- wrappers for ATLAS BLAS routines\nget_blas_funcs -- query for wrapper functions.\n\n[*] If ATLAS libraries are available then Fortran routines\n actually use ATLAS routines and should perform equally\n well to ATLAS routines.\n\nModule fblas\n++++++++++++\n\nIn the following all function names are shown without type prefixes.\n\nLevel 1 routines\n----------------\n\n c,s = rotg(a,b)\n param = rotmg(d1,d2,x1,y1)\n x,y = rot(x,y,c,s,n=(len(x)-offx)/abs(incx),offx=0,incx=1,offy=0,incy=1,overwrite_x=0,overwrite_y=0)\n x,y = rotm(x,y,param,n=(len(x)-offx)/abs(incx),offx=0,incx=1,offy=0,incy=1,overwrite_x=0,overwrite_y=0)\n x,y = swap(x,y,n=(len(x)-offx)/abs(incx),offx=0,incx=1,offy=0,incy=1)\n x = scal(a,x,n=(len(x)-offx)/abs(incx),offx=0,incx=1)\n y = copy(x,y,n=(len(x)-offx)/abs(incx),offx=0,incx=1,offy=0,incy=1)\n y = axpy(x,y,n=(len(x)-offx)/abs(incx),a=1.0,offx=0,incx=1,offy=0,incy=1)\n xy = dot(x,y,n=(len(x)-offx)/abs(incx),offx=0,incx=1,offy=0,incy=1)\n xy = dotu(x,y,n=(len(x)-offx)/abs(incx),offx=0,incx=1,offy=0,incy=1)\n xy = dotc(x,y,n=(len(x)-offx)/abs(incx),offx=0,incx=1,offy=0,incy=1)\n n2 = nrm2(x,n=(len(x)-offx)/abs(incx),offx=0,incx=1)\n s = asum(x,n=(len(x)-offx)/abs(incx),offx=0,incx=1)\n k = amax(x,n=(len(x)-offx)/abs(incx),offx=0,incx=1)\n\n Prefixes:\n rotg,swap,copy,axpy: s,d,c,z\n amax: is,id,ic,iz\n asum,nrm2: s,d,sc,dz\n scal: s,d,c,z,sc,dz\n rotm,rotmg,dot: s,d\n dotu,dotc: c,z\n rot: s,d,cs,zd\n\nLevel 2 routines\n----------------\n\n y = gemv(alpha,a,x,beta=0.0,y=,offx=0,incx=1,offy=0,incy=1,trans=0,overwrite_y=0)\n y = symv(alpha,a,x,beta=0.0,y=,offx=0,incx=1,offy=0,incy=1,lower=0,overwrite_y=0)\n y = hemv(alpha,a,x,beta=(0.0, 0.0),y=,offx=0,incx=1,offy=0,incy=1,lower=0,overwrite_y=0)\n x = trmv(a,x,offx=0,incx=1,lower=0,trans=0,unitdiag=0,overwrite_x=0)\n a = ger(alpha,x,y,incx=1,incy=1,a=0.0,overwrite_x=1,overwrite_y=1,overwrite_a=0)\n a = ger{u|c}(alpha,x,y,incx=1,incy=1,a=(0.0,0.0),overwrite_x=1,overwrite_y=1,overwrite_a=0)\n\n Prefixes:\n gemv, trmv: s,d,c,z\n symv,ger: s,d\n hemv,geru,gerc: c,z\n\nLevel 3 routines\n----------------\n\n c = gemm(alpha,a,b,beta=0.0,c=,trans_a=0,trans_b=0,overwrite_c=0)\n\n Prefixes:\n gemm: s,d,c,z\n\nModule cblas\n++++++++++++\n\nIn the following all function names are shown without type prefixes.\n\nLevel 1 routines\n----------------\n\n z = axpy(x,y,n=len(x)/abs(incx),a=1.0,incx=1,incy=incx,overwrite_y=0)\n\n Prefixes:\n axpy: s,d,c,z\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nfrom warnings import warn\n\n__all__ = ['fblas','cblas','get_blas_funcs']\n\nfrom . import fblas\nfrom . 
import cblas\n\nfrom numpy import deprecate\n\n@deprecate(old_name=\"scipy.lib.blas\", new_name=\"scipy.linalg.blas\")\ndef _deprecated():\n pass\ntry:\n _deprecated()\nexcept DeprecationWarning as e:\n # don't fail import if DeprecationWarnings raise error -- works around\n # the situation with Numpy's test framework\n pass\n\n_use_force_cblas = 1\nif hasattr(cblas,'empty_module'):\n cblas = fblas\n _use_force_cblas = 0\nelif hasattr(fblas,'empty_module'):\n fblas = cblas\n\n\n_type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z'} # 'd' will be default for 'i',..\n_inv_type_conv = {'s':'f','d':'d','c':'F','z':'D'}\n\n\n@deprecate\ndef get_blas_funcs(names,arrays=(),debug=0):\n \"\"\"\n This function is deprecated, use scipy.linalg.get_blas_funcs instead.\n\n Return available BLAS function objects with names.\n arrays are used to determine the optimal prefix of\n BLAS routines.\n \"\"\"\n\n ordering = []\n for i in range(len(arrays)):\n t = arrays[i].dtype.char\n if t not in _type_conv:\n t = 'd'\n ordering.append((t,i))\n if ordering:\n ordering.sort()\n required_prefix = _type_conv[ordering[0][0]]\n else:\n required_prefix = 'd'\n dtypechar = _inv_type_conv[required_prefix]\n # Default lookup:\n if ordering and arrays[ordering[0][1]].flags['FORTRAN']:\n # prefer Fortran code for leading array with column major order\n m1,m2 = fblas,cblas\n else:\n # in all other cases, C code is preferred\n m1,m2 = cblas,fblas\n funcs = []\n for name in names:\n if name=='ger' and dtypechar in 'FD':\n name = 'gerc'\n elif name in ('dotc', 'dotu') and dtypechar in 'fd':\n name = 'dot'\n func_name = required_prefix + name\n if name == 'nrm2' and dtypechar == 'D':\n func_name = 'dznrm2'\n elif name == 'nrm2' and dtypechar == 'F':\n func_name = 'scnrm2'\n func = getattr(m1,func_name,None)\n if func is None:\n func = getattr(m2,func_name)\n func.module_name = m2.__name__.split('.')[-1]\n else:\n func.module_name = m1.__name__.split('.')[-1]\n func.prefix = required_prefix\n func.dtypechar = dtypechar\n funcs.append(func)\n return tuple(funcs)\n\nfrom numpy.testing import Tester\ntest = Tester().test\n",
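Since the module above is a deprecated shim that points users at scipy.linalg.blas, a hedged sketch of the replacement path might look like this (the arrays are illustrative):

    # Preferred modern path: fetch a type-matched BLAS routine via scipy.linalg.
    import numpy as np
    from scipy.linalg import get_blas_funcs

    a = np.random.rand(3, 4)
    b = np.random.rand(4, 2)
    gemm, = get_blas_funcs(('gemm',), (a, b))   # selects dgemm for float64 inputs
    c = gemm(1.0, a, b)                         # c = 1.0 * a @ b
    assert np.allclose(c, a.dot(b))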
"from __future__ import division, print_function, absolute_import\n\nimport numpy as np\n\nfrom numpy.testing import TestCase, run_module_suite, \\\n assert_array_equal, assert_raises\n\nfrom scipy.signal._arraytools import axis_slice, axis_reverse, \\\n odd_ext, even_ext, const_ext\n\n\nclass TestArrayTools(TestCase):\n\n def test_axis_slice(self):\n a = np.arange(12).reshape(3, 4)\n\n s = axis_slice(a, start=0, stop=1, axis=0)\n assert_array_equal(s, a[0:1, :])\n\n s = axis_slice(a, start=-1, axis=0)\n assert_array_equal(s, a[-1:, :])\n\n s = axis_slice(a, start=0, stop=1, axis=1)\n assert_array_equal(s, a[:, 0:1])\n\n s = axis_slice(a, start=-1, axis=1)\n assert_array_equal(s, a[:, -1:])\n\n s = axis_slice(a, start=0, step=2, axis=0)\n assert_array_equal(s, a[::2, :])\n\n s = axis_slice(a, start=0, step=2, axis=1)\n assert_array_equal(s, a[:, ::2])\n\n def test_axis_reverse(self):\n a = np.arange(12).reshape(3, 4)\n\n r = axis_reverse(a, axis=0)\n assert_array_equal(r, a[::-1, :])\n\n r = axis_reverse(a, axis=1)\n assert_array_equal(r, a[:, ::-1])\n\n def test_odd_ext(self):\n a = np.array([[1, 2, 3, 4, 5],\n [9, 8, 7, 6, 5]])\n\n odd = odd_ext(a, 2, axis=1)\n expected = np.array([[-1, 0, 1, 2, 3, 4, 5, 6, 7],\n [11, 10, 9, 8, 7, 6, 5, 4, 3]])\n assert_array_equal(odd, expected)\n\n odd = odd_ext(a, 1, axis=0)\n expected = np.array([[-7, -4, -1, 2, 5],\n [ 1, 2, 3, 4, 5],\n [ 9, 8, 7, 6, 5],\n [17, 14, 11, 8, 5]])\n assert_array_equal(odd, expected)\n\n assert_raises(ValueError, odd_ext, a, 2, axis=0)\n assert_raises(ValueError, odd_ext, a, 5, axis=1)\n\n def test_even_ext(self):\n a = np.array([[1, 2, 3, 4, 5],\n [9, 8, 7, 6, 5]])\n\n even = even_ext(a, 2, axis=1)\n expected = np.array([[3, 2, 1, 2, 3, 4, 5, 4, 3],\n [7, 8, 9, 8, 7, 6, 5, 6, 7]])\n assert_array_equal(even, expected)\n\n even = even_ext(a, 1, axis=0)\n expected = np.array([[ 9, 8, 7, 6, 5],\n [ 1, 2, 3, 4, 5],\n [ 9, 8, 7, 6, 5],\n [ 1, 2, 3, 4, 5]])\n assert_array_equal(even, expected)\n\n assert_raises(ValueError, even_ext, a, 2, axis=0)\n assert_raises(ValueError, even_ext, a, 5, axis=1)\n\n def test_const_ext(self):\n a = np.array([[1, 2, 3, 4, 5],\n [9, 8, 7, 6, 5]])\n\n const = const_ext(a, 2, axis=1)\n expected = np.array([[1, 1, 1, 2, 3, 4, 5, 5, 5],\n [9, 9, 9, 8, 7, 6, 5, 5, 5]])\n assert_array_equal(const, expected)\n\n const = const_ext(a, 1, axis=0)\n expected = np.array([[ 1, 2, 3, 4, 5],\n [ 1, 2, 3, 4, 5],\n [ 9, 8, 7, 6, 5],\n [ 9, 8, 7, 6, 5]])\n assert_array_equal(const, expected)\n\n\nif __name__ == \"__main__\":\n run_module_suite()\n",
"\"\"\" Test functions for linalg.decomp module\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\n__usage__ = \"\"\"\nBuild linalg:\n python setup_linalg.py build\nRun tests if scipy is installed:\n python -c 'import scipy;scipy.linalg.test()'\nRun tests if linalg is not installed:\n python tests/test_decomp.py\n\"\"\"\n\nimport numpy as np\nfrom numpy.testing import TestCase, assert_equal, assert_array_almost_equal, \\\n assert_array_equal, assert_raises, assert_, run_module_suite, dec\n\nfrom scipy.lib.six.moves import xrange\n\nfrom scipy.linalg import eig, eigvals, lu, svd, svdvals, cholesky, qr, \\\n schur, rsf2csf, lu_solve, lu_factor, solve, diagsvd, hessenberg, rq, \\\n eig_banded, eigvals_banded, eigh, eigvalsh, qr_multiply, LinAlgError, \\\n qz\nfrom scipy.linalg.lapack import dgbtrf, dgbtrs, zgbtrf, zgbtrs, \\\n dsbev, dsbevd, dsbevx, zhbevd, zhbevx\n\nfrom numpy import array, transpose, sometrue, diag, ones, linalg, \\\n argsort, zeros, arange, float32, complex64, dot, conj, identity, \\\n ravel, sqrt, iscomplex, shape, sort, conjugate, bmat, sign, \\\n asarray, matrix, isfinite, all, ndarray, outer, eye, dtype, empty,\\\n triu, tril\n\nfrom numpy.random import rand, normal, seed\n\nfrom scipy.linalg._testutils import assert_no_overwrite\n\n# digit precision to use in asserts for different types\nDIGITS = {'d':11, 'D':11, 'f':4, 'F':4}\n\n# XXX: This function should be available through numpy.testing\ndef assert_dtype_equal(act, des):\n if isinstance(act, ndarray):\n act = act.dtype\n else:\n act = dtype(act)\n\n if isinstance(des, ndarray):\n des = des.dtype\n else:\n des = dtype(des)\n\n assert_(act == des, 'dtype mismatch: \"%s\" (should be \"%s\") ' % (act, des))\n\n# XXX: This function should not be defined here, but somewhere in\n# scipy.linalg namespace\ndef symrand(dim_or_eigv):\n \"\"\"Return a random symmetric (Hermitian) matrix.\n\n If 'dim_or_eigv' is an integer N, return a NxN matrix, with eigenvalues\n uniformly distributed on (-1,1).\n\n If 'dim_or_eigv' is 1-D real array 'a', return a matrix whose\n eigenvalues are 'a'.\n \"\"\"\n if isinstance(dim_or_eigv, int):\n dim = dim_or_eigv\n d = (rand(dim)*2)-1\n elif (isinstance(dim_or_eigv, ndarray) and\n len(dim_or_eigv.shape) == 1):\n dim = dim_or_eigv.shape[0]\n d = dim_or_eigv\n else:\n raise TypeError(\"input type not supported.\")\n\n v = random_rot(dim)\n h = dot(dot(v.T.conj(), diag(d)), v)\n # to avoid roundoff errors, symmetrize the matrix (again)\n h = 0.5*(h.T+h)\n return h\n\n# XXX: This function should not be defined here, but somewhere in\n# scipy.linalg namespace\ndef random_rot(dim):\n \"\"\"Return a random rotation matrix, drawn from the Haar distribution\n (the only uniform distribution on SO(n)).\n The algorithm is described in the paper\n Stewart, G.W., 'The efficient generation of random orthogonal\n matrices with an application to condition estimators', SIAM Journal\n on Numerical Analysis, 17(3), pp. 
403-409, 1980.\n For more information see\n http://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization\"\"\"\n H = eye(dim)\n D = ones((dim, ))\n for n in range(1, dim):\n x = normal(size=(dim-n+1, ))\n D[n-1] = sign(x[0])\n x[0] -= D[n-1]*sqrt((x*x).sum())\n # Householder transformation\n\n Hx = eye(dim-n+1) - 2.*outer(x, x)/(x*x).sum()\n mat = eye(dim)\n mat[n-1:,n-1:] = Hx\n H = dot(H, mat)\n # Fix the last sign such that the determinant is 1\n D[-1] = -D.prod()\n H = (D*H.T).T\n return H\n\ndef random(size):\n return rand(*size)\n\nclass TestEigVals(TestCase):\n\n def test_simple(self):\n a = [[1,2,3],[1,2,3],[2,5,6]]\n w = eigvals(a)\n exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]\n assert_array_almost_equal(w,exact_w)\n\n def test_simple_tr(self):\n a = array([[1,2,3],[1,2,3],[2,5,6]],'d')\n a = transpose(a).copy()\n a = transpose(a)\n w = eigvals(a)\n exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]\n assert_array_almost_equal(w,exact_w)\n\n def test_simple_complex(self):\n a = [[1,2,3],[1,2,3],[2,5,6+1j]]\n w = eigvals(a)\n exact_w = [(9+1j+sqrt(92+6j))/2,\n 0,\n (9+1j-sqrt(92+6j))/2]\n assert_array_almost_equal(w,exact_w)\n\n def test_check_finite(self):\n a = [[1,2,3],[1,2,3],[2,5,6]]\n w = eigvals(a, check_finite=False)\n exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]\n assert_array_almost_equal(w,exact_w)\n\nclass TestEig(object):\n\n def test_simple(self):\n a = [[1,2,3],[1,2,3],[2,5,6]]\n w,v = eig(a)\n exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]\n v0 = array([1,1,(1+sqrt(93)/3)/2])\n v1 = array([3.,0,-1])\n v2 = array([1,1,(1-sqrt(93)/3)/2])\n v0 = v0 / sqrt(dot(v0,transpose(v0)))\n v1 = v1 / sqrt(dot(v1,transpose(v1)))\n v2 = v2 / sqrt(dot(v2,transpose(v2)))\n assert_array_almost_equal(w,exact_w)\n assert_array_almost_equal(v0,v[:,0]*sign(v[0,0]))\n assert_array_almost_equal(v1,v[:,1]*sign(v[0,1]))\n assert_array_almost_equal(v2,v[:,2]*sign(v[0,2]))\n for i in range(3):\n assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i])\n w,v = eig(a,left=1,right=0)\n for i in range(3):\n assert_array_almost_equal(dot(transpose(a),v[:,i]),w[i]*v[:,i])\n\n def test_simple_complex_eig(self):\n a = [[1,2],[-2,1]]\n w,vl,vr = eig(a,left=1,right=1)\n assert_array_almost_equal(w, array([1+2j, 1-2j]))\n for i in range(2):\n assert_array_almost_equal(dot(a,vr[:,i]),w[i]*vr[:,i])\n for i in range(2):\n assert_array_almost_equal(dot(conjugate(transpose(a)),vl[:,i]),\n conjugate(w[i])*vl[:,i])\n\n def test_simple_complex(self):\n a = [[1,2,3],[1,2,3],[2,5,6+1j]]\n w,vl,vr = eig(a,left=1,right=1)\n for i in range(3):\n assert_array_almost_equal(dot(a,vr[:,i]),w[i]*vr[:,i])\n for i in range(3):\n assert_array_almost_equal(dot(conjugate(transpose(a)),vl[:,i]),\n conjugate(w[i])*vl[:,i])\n\n def _check_gen_eig(self, A, B):\n A, B = asarray(A), asarray(B)\n msg = \"\\n%r\\n%r\" % (A, B)\n w, vr = eig(A,B)\n wt = eigvals(A,B)\n val1 = dot(A, vr)\n val2 = dot(B, vr) * w\n res = val1 - val2\n for i in range(res.shape[1]):\n if all(isfinite(res[:, i])):\n assert_array_almost_equal(res[:, i], 0, err_msg=msg)\n\n assert_array_almost_equal(sort(w[isfinite(w)]), sort(wt[isfinite(wt)]),\n err_msg=msg)\n\n @dec.knownfailureif(True, \"See ticket #1735\")\n def test_singular(self):\n # Example taken from\n # http://www.cs.umu.se/research/nla/singular_pairs/guptri/matlab.html\n A = array(( [22,34,31,31,17], [45,45,42,19,29], [39,47,49,26,34],\n [27,31,26,21,15], [38,44,44,24,30]))\n B = array(( [13,26,25,17,24], [31,46,40,26,37], [26,40,19,25,25],\n [16,25,27,14,23], [24,35,18,21,22]))\n\n olderr = 
np.seterr(all='ignore')\n try:\n self._check_gen_eig(A, B)\n finally:\n np.seterr(**olderr)\n\n def test_falker(self):\n \"\"\"Test matrices giving some Nan generalized eigen values.\"\"\"\n M = diag(array(([1,0,3])))\n K = array(([2,-1,-1],[-1,2,-1],[-1,-1,2]))\n D = array(([1,-1,0],[-1,1,0],[0,0,0]))\n Z = zeros((3,3))\n I = identity(3)\n A = bmat([[I,Z],[Z,-K]])\n B = bmat([[Z,I],[M,D]])\n\n olderr = np.seterr(all='ignore')\n try:\n self._check_gen_eig(A, B)\n finally:\n np.seterr(**olderr)\n\n def test_bad_geneig(self):\n # Ticket #709 (strange return values from DGGEV)\n\n def matrices(omega):\n c1 = -9 + omega**2\n c2 = 2*omega\n A = [[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, c1, 0],\n [0, 0, 0, c1]]\n B = [[0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, -c2],\n [0, 1, c2, 0]]\n return A, B\n\n # With a buggy LAPACK, this can fail for different omega on different\n # machines -- so we need to test several values\n olderr = np.seterr(all='ignore')\n try:\n for k in xrange(100):\n A, B = matrices(omega=k*5./100)\n self._check_gen_eig(A, B)\n finally:\n np.seterr(**olderr)\n\n def test_check_finite(self):\n a = [[1,2,3],[1,2,3],[2,5,6]]\n w,v = eig(a, check_finite=False)\n exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]\n v0 = array([1,1,(1+sqrt(93)/3)/2])\n v1 = array([3.,0,-1])\n v2 = array([1,1,(1-sqrt(93)/3)/2])\n v0 = v0 / sqrt(dot(v0,transpose(v0)))\n v1 = v1 / sqrt(dot(v1,transpose(v1)))\n v2 = v2 / sqrt(dot(v2,transpose(v2)))\n assert_array_almost_equal(w,exact_w)\n assert_array_almost_equal(v0,v[:,0]*sign(v[0,0]))\n assert_array_almost_equal(v1,v[:,1]*sign(v[0,1]))\n assert_array_almost_equal(v2,v[:,2]*sign(v[0,2]))\n for i in range(3):\n assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i])\n\n def test_not_square_error(self):\n \"\"\"Check that passing a non-square array raises a ValueError.\"\"\"\n A = np.arange(6).reshape(3,2)\n assert_raises(ValueError, eig, A)\n\n def test_shape_mismatch(self):\n \"\"\"Check that passing arrays of with different shapes raises a ValueError.\"\"\"\n A = identity(2)\n B = np.arange(9.0).reshape(3,3)\n assert_raises(ValueError, eig, A, B)\n assert_raises(ValueError, eig, B, A)\n\nclass TestEigBanded(TestCase):\n\n def __init__(self, *args):\n TestCase.__init__(self, *args)\n\n self.create_bandmat()\n\n def create_bandmat(self):\n \"\"\"Create the full matrix `self.fullmat` and\n the corresponding band matrix `self.bandmat`.\"\"\"\n N = 10\n self.KL = 2 # number of subdiagonals (below the diagonal)\n self.KU = 2 # number of superdiagonals (above the diagonal)\n\n # symmetric band matrix\n self.sym_mat = ( diag(1.0*ones(N))\n + diag(-1.0*ones(N-1), -1) + diag(-1.0*ones(N-1), 1)\n + diag(-2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2) )\n\n # hermitian band matrix\n self.herm_mat = ( diag(-1.0*ones(N))\n + 1j*diag(1.0*ones(N-1), -1) - 1j*diag(1.0*ones(N-1), 1)\n + diag(-2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2) )\n\n # general real band matrix\n self.real_mat = ( diag(1.0*ones(N))\n + diag(-1.0*ones(N-1), -1) + diag(-3.0*ones(N-1), 1)\n + diag(2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2) )\n\n # general complex band matrix\n self.comp_mat = ( 1j*diag(1.0*ones(N))\n + diag(-1.0*ones(N-1), -1) + 1j*diag(-3.0*ones(N-1), 1)\n + diag(2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2) )\n\n\n # Eigenvalues and -vectors from linalg.eig\n ew, ev = linalg.eig(self.sym_mat)\n ew = ew.real\n args = argsort(ew)\n self.w_sym_lin = ew[args]\n self.evec_sym_lin = ev[:,args]\n\n ew, ev = linalg.eig(self.herm_mat)\n ew = ew.real\n args = argsort(ew)\n self.w_herm_lin = ew[args]\n 
self.evec_herm_lin = ev[:,args]\n\n\n # Extract upper bands from symmetric and hermitian band matrices\n # (for use in dsbevd, dsbevx, zhbevd, zhbevx\n # and their single precision versions)\n LDAB = self.KU + 1\n self.bandmat_sym = zeros((LDAB, N), dtype=float)\n self.bandmat_herm = zeros((LDAB, N), dtype=complex)\n for i in xrange(LDAB):\n self.bandmat_sym[LDAB-i-1,i:N] = diag(self.sym_mat, i)\n self.bandmat_herm[LDAB-i-1,i:N] = diag(self.herm_mat, i)\n\n\n # Extract bands from general real and complex band matrix\n # (for use in dgbtrf, dgbtrs and their single precision versions)\n LDAB = 2*self.KL + self.KU + 1\n self.bandmat_real = zeros((LDAB, N), dtype=float)\n self.bandmat_real[2*self.KL,:] = diag(self.real_mat) # diagonal\n for i in xrange(self.KL):\n # superdiagonals\n self.bandmat_real[2*self.KL-1-i,i+1:N] = diag(self.real_mat, i+1)\n # subdiagonals\n self.bandmat_real[2*self.KL+1+i,0:N-1-i] = diag(self.real_mat,-i-1)\n\n self.bandmat_comp = zeros((LDAB, N), dtype=complex)\n self.bandmat_comp[2*self.KL,:] = diag(self.comp_mat) # diagonal\n for i in xrange(self.KL):\n # superdiagonals\n self.bandmat_comp[2*self.KL-1-i,i+1:N] = diag(self.comp_mat, i+1)\n # subdiagonals\n self.bandmat_comp[2*self.KL+1+i,0:N-1-i] = diag(self.comp_mat,-i-1)\n\n # absolute value for linear equation system A*x = b\n self.b = 1.0*arange(N)\n self.bc = self.b *(1 + 1j)\n\n\n #####################################################################\n\n\n def test_dsbev(self):\n \"\"\"Compare dsbev eigenvalues and eigenvectors with\n the result of linalg.eig.\"\"\"\n w, evec, info = dsbev(self.bandmat_sym, compute_v=1)\n evec_ = evec[:,argsort(w)]\n assert_array_almost_equal(sort(w), self.w_sym_lin)\n assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))\n\n\n\n def test_dsbevd(self):\n \"\"\"Compare dsbevd eigenvalues and eigenvectors with\n the result of linalg.eig.\"\"\"\n w, evec, info = dsbevd(self.bandmat_sym, compute_v=1)\n evec_ = evec[:,argsort(w)]\n assert_array_almost_equal(sort(w), self.w_sym_lin)\n assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))\n\n\n\n def test_dsbevx(self):\n \"\"\"Compare dsbevx eigenvalues and eigenvectors\n with the result of linalg.eig.\"\"\"\n N,N = shape(self.sym_mat)\n ## Achtung: Argumente 0.0,0.0,range?\n w, evec, num, ifail, info = dsbevx(self.bandmat_sym, 0.0, 0.0, 1, N,\n compute_v=1, range=2)\n evec_ = evec[:,argsort(w)]\n assert_array_almost_equal(sort(w), self.w_sym_lin)\n assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))\n\n\n def test_zhbevd(self):\n \"\"\"Compare zhbevd eigenvalues and eigenvectors\n with the result of linalg.eig.\"\"\"\n w, evec, info = zhbevd(self.bandmat_herm, compute_v=1)\n evec_ = evec[:,argsort(w)]\n assert_array_almost_equal(sort(w), self.w_herm_lin)\n assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin))\n\n\n\n def test_zhbevx(self):\n \"\"\"Compare zhbevx eigenvalues and eigenvectors\n with the result of linalg.eig.\"\"\"\n N,N = shape(self.herm_mat)\n ## Achtung: Argumente 0.0,0.0,range?\n w, evec, num, ifail, info = zhbevx(self.bandmat_herm, 0.0, 0.0, 1, N,\n compute_v=1, range=2)\n evec_ = evec[:,argsort(w)]\n assert_array_almost_equal(sort(w), self.w_herm_lin)\n assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin))\n\n\n\n def test_eigvals_banded(self):\n \"\"\"Compare eigenvalues of eigvals_banded with those of linalg.eig.\"\"\"\n w_sym = eigvals_banded(self.bandmat_sym)\n w_sym = w_sym.real\n assert_array_almost_equal(sort(w_sym), self.w_sym_lin)\n\n w_herm = 
eigvals_banded(self.bandmat_herm)\n w_herm = w_herm.real\n assert_array_almost_equal(sort(w_herm), self.w_herm_lin)\n\n # extracting eigenvalues with respect to an index range\n ind1 = 2\n ind2 = 6\n w_sym_ind = eigvals_banded(self.bandmat_sym,\n select='i', select_range=(ind1, ind2) )\n assert_array_almost_equal(sort(w_sym_ind),\n self.w_sym_lin[ind1:ind2+1])\n w_herm_ind = eigvals_banded(self.bandmat_herm,\n select='i', select_range=(ind1, ind2) )\n assert_array_almost_equal(sort(w_herm_ind),\n self.w_herm_lin[ind1:ind2+1])\n\n # extracting eigenvalues with respect to a value range\n v_lower = self.w_sym_lin[ind1] - 1.0e-5\n v_upper = self.w_sym_lin[ind2] + 1.0e-5\n w_sym_val = eigvals_banded(self.bandmat_sym,\n select='v', select_range=(v_lower, v_upper) )\n assert_array_almost_equal(sort(w_sym_val),\n self.w_sym_lin[ind1:ind2+1])\n\n v_lower = self.w_herm_lin[ind1] - 1.0e-5\n v_upper = self.w_herm_lin[ind2] + 1.0e-5\n w_herm_val = eigvals_banded(self.bandmat_herm,\n select='v', select_range=(v_lower, v_upper) )\n assert_array_almost_equal(sort(w_herm_val),\n self.w_herm_lin[ind1:ind2+1])\n\n w_sym = eigvals_banded(self.bandmat_sym, check_finite=False)\n w_sym = w_sym.real\n assert_array_almost_equal(sort(w_sym), self.w_sym_lin)\n\n\n def test_eig_banded(self):\n \"\"\"Compare eigenvalues and eigenvectors of eig_banded\n with those of linalg.eig. \"\"\"\n w_sym, evec_sym = eig_banded(self.bandmat_sym)\n evec_sym_ = evec_sym[:,argsort(w_sym.real)]\n assert_array_almost_equal(sort(w_sym), self.w_sym_lin)\n assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin))\n\n w_herm, evec_herm = eig_banded(self.bandmat_herm)\n evec_herm_ = evec_herm[:,argsort(w_herm.real)]\n assert_array_almost_equal(sort(w_herm), self.w_herm_lin)\n assert_array_almost_equal(abs(evec_herm_), abs(self.evec_herm_lin))\n\n # extracting eigenvalues with respect to an index range\n ind1 = 2\n ind2 = 6\n w_sym_ind, evec_sym_ind = eig_banded(self.bandmat_sym,\n select='i', select_range=(ind1, ind2) )\n assert_array_almost_equal(sort(w_sym_ind),\n self.w_sym_lin[ind1:ind2+1])\n assert_array_almost_equal(abs(evec_sym_ind),\n abs(self.evec_sym_lin[:,ind1:ind2+1]) )\n\n w_herm_ind, evec_herm_ind = eig_banded(self.bandmat_herm,\n select='i', select_range=(ind1, ind2) )\n assert_array_almost_equal(sort(w_herm_ind),\n self.w_herm_lin[ind1:ind2+1])\n assert_array_almost_equal(abs(evec_herm_ind),\n abs(self.evec_herm_lin[:,ind1:ind2+1]) )\n\n # extracting eigenvalues with respect to a value range\n v_lower = self.w_sym_lin[ind1] - 1.0e-5\n v_upper = self.w_sym_lin[ind2] + 1.0e-5\n w_sym_val, evec_sym_val = eig_banded(self.bandmat_sym,\n select='v', select_range=(v_lower, v_upper) )\n assert_array_almost_equal(sort(w_sym_val),\n self.w_sym_lin[ind1:ind2+1])\n assert_array_almost_equal(abs(evec_sym_val),\n abs(self.evec_sym_lin[:,ind1:ind2+1]) )\n\n v_lower = self.w_herm_lin[ind1] - 1.0e-5\n v_upper = self.w_herm_lin[ind2] + 1.0e-5\n w_herm_val, evec_herm_val = eig_banded(self.bandmat_herm,\n select='v', select_range=(v_lower, v_upper) )\n assert_array_almost_equal(sort(w_herm_val),\n self.w_herm_lin[ind1:ind2+1])\n assert_array_almost_equal(abs(evec_herm_val),\n abs(self.evec_herm_lin[:,ind1:ind2+1]) )\n\n w_sym, evec_sym = eig_banded(self.bandmat_sym, check_finite=False)\n evec_sym_ = evec_sym[:,argsort(w_sym.real)]\n assert_array_almost_equal(sort(w_sym), self.w_sym_lin)\n assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin))\n\n\n def test_dgbtrf(self):\n \"\"\"Compare dgbtrf LU factorisation with the LU 
factorisation result\n of linalg.lu.\"\"\"\n M,N = shape(self.real_mat)\n lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU)\n\n # extract matrix u from lu_symm_band\n u = diag(lu_symm_band[2*self.KL,:])\n for i in xrange(self.KL + self.KU):\n u += diag(lu_symm_band[2*self.KL-1-i,i+1:N], i+1)\n\n p_lin, l_lin, u_lin = lu(self.real_mat, permute_l=0)\n assert_array_almost_equal(u, u_lin)\n\n\n def test_zgbtrf(self):\n \"\"\"Compare zgbtrf LU factorisation with the LU factorisation result\n of linalg.lu.\"\"\"\n M,N = shape(self.comp_mat)\n lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU)\n\n # extract matrix u from lu_symm_band\n u = diag(lu_symm_band[2*self.KL,:])\n for i in xrange(self.KL + self.KU):\n u += diag(lu_symm_band[2*self.KL-1-i,i+1:N], i+1)\n\n p_lin, l_lin, u_lin =lu(self.comp_mat, permute_l=0)\n assert_array_almost_equal(u, u_lin)\n\n\n\n def test_dgbtrs(self):\n \"\"\"Compare dgbtrs solutions for linear equation system A*x = b\n with solutions of linalg.solve.\"\"\"\n\n lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU)\n y, info = dgbtrs(lu_symm_band, self.KL, self.KU, self.b, ipiv)\n\n y_lin = linalg.solve(self.real_mat, self.b)\n assert_array_almost_equal(y, y_lin)\n\n def test_zgbtrs(self):\n \"\"\"Compare zgbtrs solutions for linear equation system A*x = b\n with solutions of linalg.solve.\"\"\"\n\n lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU)\n y, info = zgbtrs(lu_symm_band, self.KL, self.KU, self.bc, ipiv)\n\n y_lin = linalg.solve(self.comp_mat, self.bc)\n assert_array_almost_equal(y, y_lin)\n\ndef test_eigh():\n DIM = 6\n v = {'dim': (DIM, ),\n 'dtype': ('f','d','F','D'),\n 'overwrite': (True, False),\n 'lower': (True, False),\n 'turbo': (True, False),\n 'eigvals': (None, (2, DIM-2))}\n\n for dim in v['dim']:\n for typ in v['dtype']:\n for overwrite in v['overwrite']:\n for turbo in v['turbo']:\n for eigvals in v['eigvals']:\n for lower in v['lower']:\n yield (eigenhproblem_standard,\n 'ordinary',\n dim, typ, overwrite, lower,\n turbo, eigvals)\n yield (eigenhproblem_general,\n 'general ',\n dim, typ, overwrite, lower,\n turbo, eigvals)\n\ndef _complex_symrand(dim, dtype):\n a1, a2 = symrand(dim), symrand(dim)\n # add antisymmetric matrix as imag part\n a = a1 +1j*(triu(a2)-tril(a2))\n return a.astype(dtype)\n\ndef eigenhproblem_standard(desc, dim, dtype,\n overwrite, lower, turbo,\n eigvals):\n \"\"\"Solve a standard eigenvalue problem.\"\"\"\n if iscomplex(empty(1, dtype=dtype)):\n a = _complex_symrand(dim, dtype)\n else:\n a = symrand(dim).astype(dtype)\n\n if overwrite:\n a_c = a.copy()\n else:\n a_c = a\n w, z = eigh(a, overwrite_a=overwrite, lower=lower, eigvals=eigvals)\n assert_dtype_equal(z.dtype, dtype)\n w = w.astype(dtype)\n diag_ = diag(dot(z.T.conj(), dot(a_c, z))).real\n assert_array_almost_equal(diag_, w, DIGITS[dtype])\n\ndef eigenhproblem_general(desc, dim, dtype,\n overwrite, lower, turbo,\n eigvals):\n \"\"\"Solve a generalized eigenvalue problem.\"\"\"\n if iscomplex(empty(1, dtype=dtype)):\n a = _complex_symrand(dim, dtype)\n b = _complex_symrand(dim, dtype)+diag([2.1]*dim).astype(dtype)\n else:\n a = symrand(dim).astype(dtype)\n b = symrand(dim).astype(dtype)+diag([2.1]*dim).astype(dtype)\n\n if overwrite:\n a_c, b_c = a.copy(), b.copy()\n else:\n a_c, b_c = a, b\n\n w, z = eigh(a, b, overwrite_a=overwrite, lower=lower,\n overwrite_b=overwrite, turbo=turbo, eigvals=eigvals)\n assert_dtype_equal(z.dtype, dtype)\n w = w.astype(dtype)\n diag1_ = 
diag(dot(z.T.conj(), dot(a_c, z))).real\n assert_array_almost_equal(diag1_, w, DIGITS[dtype])\n diag2_ = diag(dot(z.T.conj(), dot(b_c, z))).real\n assert_array_almost_equal(diag2_, ones(diag2_.shape[0]), DIGITS[dtype])\n\ndef test_eigh_integer():\n a = array([[1,2],[2,7]])\n b = array([[3,1],[1,5]])\n w,z = eigh(a)\n w,z = eigh(a,b)\n\nclass TestLU(TestCase):\n\n def __init__(self, *args, **kw):\n TestCase.__init__(self, *args, **kw)\n\n self.a = array([[1,2,3],[1,2,3],[2,5,6]])\n self.ca = array([[1,2,3],[1,2,3],[2,5j,6]])\n # Those matrices are more robust to detect problems in permutation\n # matrices than the ones above\n self.b = array([[1,2,3],[4,5,6],[7,8,9]])\n self.cb = array([[1j,2j,3j],[4j,5j,6j],[7j,8j,9j]])\n\n # Reectangular matrices\n self.hrect = array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]])\n self.chrect = 1.j * array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]])\n\n self.vrect = array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]])\n self.cvrect = 1.j * array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]])\n\n # Medium sizes matrices\n self.med = rand(30, 40)\n self.cmed = rand(30, 40) + 1.j * rand(30, 40)\n\n def _test_common(self, data):\n p,l,u = lu(data)\n assert_array_almost_equal(dot(dot(p,l),u),data)\n pl,u = lu(data,permute_l=1)\n assert_array_almost_equal(dot(pl,u),data)\n\n # Simple tests\n def test_simple(self):\n self._test_common(self.a)\n\n def test_simple_complex(self):\n self._test_common(self.ca)\n\n def test_simple2(self):\n self._test_common(self.b)\n\n def test_simple2_complex(self):\n self._test_common(self.cb)\n\n # rectangular matrices tests\n def test_hrectangular(self):\n self._test_common(self.hrect)\n\n def test_vrectangular(self):\n self._test_common(self.vrect)\n\n def test_hrectangular_complex(self):\n self._test_common(self.chrect)\n\n def test_vrectangular_complex(self):\n self._test_common(self.cvrect)\n\n # Bigger matrices\n def test_medium1(self):\n \"\"\"Check lu decomposition on medium size, rectangular matrix.\"\"\"\n self._test_common(self.med)\n\n def test_medium1_complex(self):\n \"\"\"Check lu decomposition on medium size, rectangular matrix.\"\"\"\n self._test_common(self.cmed)\n\n def test_check_finite(self):\n p, l, u = lu(self.a, check_finite=False)\n assert_array_almost_equal(dot(dot(p,l),u), self.a)\n\n def test_simple_known(self):\n # Ticket #1458\n for order in ['C', 'F']:\n A = np.array([[2, 1],[0, 1.]], order=order)\n LU, P = lu_factor(A)\n assert_array_almost_equal(LU, np.array([[2, 1], [0, 1]]))\n assert_array_equal(P, np.array([0, 1]))\n\n\nclass TestLUSingle(TestLU):\n \"\"\"LU testers for single precision, real and double\"\"\"\n def __init__(self, *args, **kw):\n TestLU.__init__(self, *args, **kw)\n\n self.a = self.a.astype(float32)\n self.ca = self.ca.astype(complex64)\n self.b = self.b.astype(float32)\n self.cb = self.cb.astype(complex64)\n\n self.hrect = self.hrect.astype(float32)\n self.chrect = self.hrect.astype(complex64)\n\n self.vrect = self.vrect.astype(float32)\n self.cvrect = self.vrect.astype(complex64)\n\n self.med = self.vrect.astype(float32)\n self.cmed = self.vrect.astype(complex64)\n\nclass TestLUSolve(TestCase):\n def setUp(self):\n seed(1234)\n\n def test_lu(self):\n a0 = random((10,10))\n b = random((10,))\n\n for order in ['C', 'F']:\n a = np.array(a0, order=order)\n\n x1 = solve(a,b)\n\n lu_a = lu_factor(a)\n x2 = lu_solve(lu_a,b)\n\n assert_array_almost_equal(x1,x2)\n\n def test_check_finite(self):\n a = random((10,10))\n b = random((10,))\n x1 = solve(a,b)\n\n lu_a = lu_factor(a, 
check_finite=False)\n x2 = lu_solve(lu_a,b, check_finite=False)\n\n assert_array_almost_equal(x1,x2)\n\nclass TestSVD(TestCase):\n def setUp(self):\n seed(1234)\n\n def test_simple(self):\n a = [[1,2,3],[1,20,3],[2,5,6]]\n for full_matrices in (True, False):\n u,s,vh = svd(a, full_matrices=full_matrices)\n assert_array_almost_equal(dot(transpose(u),u),identity(3))\n assert_array_almost_equal(dot(transpose(vh),vh),identity(3))\n sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)\n for i in range(len(s)): sigma[i,i] = s[i]\n assert_array_almost_equal(dot(dot(u,sigma),vh),a)\n\n def test_simple_singular(self):\n a = [[1,2,3],[1,2,3],[2,5,6]]\n for full_matrices in (True, False):\n u,s,vh = svd(a, full_matrices=full_matrices)\n assert_array_almost_equal(dot(transpose(u),u),identity(3))\n assert_array_almost_equal(dot(transpose(vh),vh),identity(3))\n sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)\n for i in range(len(s)): sigma[i,i] = s[i]\n assert_array_almost_equal(dot(dot(u,sigma),vh),a)\n\n def test_simple_underdet(self):\n a = [[1,2,3],[4,5,6]]\n for full_matrices in (True, False):\n u,s,vh = svd(a, full_matrices=full_matrices)\n assert_array_almost_equal(dot(transpose(u),u),identity(u.shape[0]))\n sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)\n for i in range(len(s)): sigma[i,i] = s[i]\n assert_array_almost_equal(dot(dot(u,sigma),vh),a)\n\n def test_simple_overdet(self):\n a = [[1,2],[4,5],[3,4]]\n for full_matrices in (True, False):\n u,s,vh = svd(a, full_matrices=full_matrices)\n assert_array_almost_equal(dot(transpose(u),u), identity(u.shape[1]))\n assert_array_almost_equal(dot(transpose(vh),vh),identity(2))\n sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char)\n for i in range(len(s)): sigma[i,i] = s[i]\n assert_array_almost_equal(dot(dot(u,sigma),vh),a)\n\n def test_random(self):\n n = 20\n m = 15\n for i in range(3):\n for a in [random([n,m]),random([m,n])]:\n for full_matrices in (True, False):\n u,s,vh = svd(a, full_matrices=full_matrices)\n assert_array_almost_equal(dot(transpose(u),u),identity(u.shape[1]))\n assert_array_almost_equal(dot(vh, transpose(vh)),identity(vh.shape[0]))\n sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char)\n for i in range(len(s)): sigma[i,i] = s[i]\n assert_array_almost_equal(dot(dot(u,sigma),vh),a)\n\n def test_simple_complex(self):\n a = [[1,2,3],[1,2j,3],[2,5,6]]\n for full_matrices in (True, False):\n u,s,vh = svd(a, full_matrices=full_matrices)\n assert_array_almost_equal(dot(conj(transpose(u)),u),identity(u.shape[1]))\n assert_array_almost_equal(dot(conj(transpose(vh)),vh),identity(vh.shape[0]))\n sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)\n for i in range(len(s)): sigma[i,i] = s[i]\n assert_array_almost_equal(dot(dot(u,sigma),vh),a)\n\n def test_random_complex(self):\n n = 20\n m = 15\n for i in range(3):\n for full_matrices in (True, False):\n for a in [random([n,m]),random([m,n])]:\n a = a + 1j*random(list(a.shape))\n u,s,vh = svd(a, full_matrices=full_matrices)\n assert_array_almost_equal(dot(conj(transpose(u)),u),identity(u.shape[1]))\n # This fails when [m,n]\n #assert_array_almost_equal(dot(conj(transpose(vh)),vh),identity(len(vh),dtype=vh.dtype.char))\n sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char)\n for i in range(len(s)): sigma[i,i] = s[i]\n assert_array_almost_equal(dot(dot(u,sigma),vh),a)\n\n def test_crash_1580(self):\n sizes = [(13, 23), (30, 50), (60, 100)]\n np.random.seed(1234)\n for sz in sizes:\n for dt in [np.float32, np.float64, np.complex64, np.complex128]:\n a = 
np.random.rand(*sz).astype(dt)\n # should not crash\n svd(a)\n\n def test_check_finite(self):\n a = [[1,2,3],[1,20,3],[2,5,6]]\n u,s,vh = svd(a, check_finite=False)\n assert_array_almost_equal(dot(transpose(u),u),identity(3))\n assert_array_almost_equal(dot(transpose(vh),vh),identity(3))\n sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)\n for i in range(len(s)): sigma[i,i] = s[i]\n assert_array_almost_equal(dot(dot(u,sigma),vh),a)\n\n\nclass TestSVDVals(TestCase):\n\n def test_simple(self):\n a = [[1,2,3],[1,2,3],[2,5,6]]\n s = svdvals(a)\n assert_(len(s) == 3)\n assert_(s[0] >= s[1] >= s[2])\n\n def test_simple_underdet(self):\n a = [[1,2,3],[4,5,6]]\n s = svdvals(a)\n assert_(len(s) == 2)\n assert_(s[0] >= s[1])\n\n def test_simple_overdet(self):\n a = [[1,2],[4,5],[3,4]]\n s = svdvals(a)\n assert_(len(s) == 2)\n assert_(s[0] >= s[1])\n\n def test_simple_complex(self):\n a = [[1,2,3],[1,20,3j],[2,5,6]]\n s = svdvals(a)\n assert_(len(s) == 3)\n assert_(s[0] >= s[1] >= s[2])\n\n def test_simple_underdet_complex(self):\n a = [[1,2,3],[4,5j,6]]\n s = svdvals(a)\n assert_(len(s) == 2)\n assert_(s[0] >= s[1])\n\n def test_simple_overdet_complex(self):\n a = [[1,2],[4,5],[3j,4]]\n s = svdvals(a)\n assert_(len(s) == 2)\n assert_(s[0] >= s[1])\n\n def test_check_finite(self):\n a = [[1,2,3],[1,2,3],[2,5,6]]\n s = svdvals(a, check_finite=False)\n assert_(len(s) == 3)\n assert_(s[0] >= s[1] >= s[2])\n\nclass TestDiagSVD(TestCase):\n\n def test_simple(self):\n assert_array_almost_equal(diagsvd([1,0,0],3,3),[[1,0,0],[0,0,0],[0,0,0]])\n\n\nclass TestQR(TestCase):\n\n def setUp(self):\n seed(1234)\n\n def test_simple(self):\n a = [[8,2,3],[2,9,3],[5,3,6]]\n q,r = qr(a)\n assert_array_almost_equal(dot(transpose(q),q),identity(3))\n assert_array_almost_equal(dot(q,r),a)\n\n def test_simple_left(self):\n a = [[8,2,3],[2,9,3],[5,3,6]]\n q,r = qr(a)\n c = [1, 2, 3]\n qc,r = qr_multiply(a, mode=\"left\", c=c)\n assert_array_almost_equal(dot(q, c), qc[:, 0])\n qc,r = qr_multiply(a, mode=\"left\", c=identity(3))\n assert_array_almost_equal(q, qc)\n\n def test_simple_right(self):\n a = [[8,2,3],[2,9,3],[5,3,6]]\n q,r = qr(a)\n c = [1, 2, 3]\n qc,r = qr_multiply(a, mode=\"right\", c=c)\n assert_array_almost_equal(dot(c, q), qc[0, :])\n qc,r = qr_multiply(a, mode=\"right\", c=identity(3))\n assert_array_almost_equal(q, qc)\n\n def test_simple_left(self):\n a = [[8,2,3],[2,9,3],[5,3,6]]\n q,r = qr(a)\n c = [1, 2, 3]\n qc,r2 = qr_multiply(a, c, \"left\")\n assert_array_almost_equal(dot(q, c), qc)\n assert_array_almost_equal(r, r2)\n qc,r2 = qr_multiply(a, identity(3), \"left\")\n assert_array_almost_equal(q, qc)\n\n def test_simple_right(self):\n a = [[8,2,3],[2,9,3],[5,3,6]]\n q,r = qr(a)\n c = [1, 2, 3]\n qc,r2 = qr_multiply(a, c)\n assert_array_almost_equal(dot(c, q), qc)\n assert_array_almost_equal(r, r2)\n qc,r = qr_multiply(a, identity(3))\n assert_array_almost_equal(q, qc)\n\n def test_simple_pivoting(self):\n a = np.asarray([[8,2,3],[2,9,3],[5,3,6]])\n q,r,p = qr(a, pivoting=True)\n d = abs(diag(r))\n assert_(all(d[1:] <= d[:-1]))\n assert_array_almost_equal(dot(transpose(q),q),identity(3))\n assert_array_almost_equal(dot(q,r),a[:,p])\n q2,r2 = qr(a[:,p])\n assert_array_almost_equal(q,q2)\n assert_array_almost_equal(r,r2)\n\n def test_simple_left_pivoting(self):\n a = [[8,2,3],[2,9,3],[5,3,6]]\n q,r,jpvt = qr(a, pivoting=True)\n c = [1, 2, 3]\n qc,r,jpvt = qr_multiply(a, c, \"left\", True)\n assert_array_almost_equal(dot(q, c), qc)\n\n def test_simple_right_pivoting(self):\n a = 
[[8,2,3],[2,9,3],[5,3,6]]\n q,r,jpvt = qr(a, pivoting=True)\n c = [1, 2, 3]\n qc,r,jpvt = qr_multiply(a, c, pivoting=True)\n assert_array_almost_equal(dot(c, q), qc)\n\n def test_simple_trap(self):\n a = [[8,2,3],[2,9,3]]\n q,r = qr(a)\n assert_array_almost_equal(dot(transpose(q),q),identity(2))\n assert_array_almost_equal(dot(q,r),a)\n\n def test_simple_trap_pivoting(self):\n a = np.asarray([[8,2,3],[2,9,3]])\n q,r,p = qr(a, pivoting=True)\n d = abs(diag(r))\n assert_(all(d[1:] <= d[:-1]))\n assert_array_almost_equal(dot(transpose(q),q),identity(2))\n assert_array_almost_equal(dot(q,r),a[:,p])\n q2,r2 = qr(a[:,p])\n assert_array_almost_equal(q,q2)\n assert_array_almost_equal(r,r2)\n\n def test_simple_tall(self):\n # full version\n a = [[8,2],[2,9],[5,3]]\n q,r = qr(a)\n assert_array_almost_equal(dot(transpose(q),q),identity(3))\n assert_array_almost_equal(dot(q,r),a)\n\n def test_simple_tall_pivoting(self):\n # full version pivoting\n a = np.asarray([[8,2],[2,9],[5,3]])\n q,r,p = qr(a, pivoting=True)\n d = abs(diag(r))\n assert_(all(d[1:] <= d[:-1]))\n assert_array_almost_equal(dot(transpose(q),q),identity(3))\n assert_array_almost_equal(dot(q,r),a[:,p])\n q2,r2 = qr(a[:,p])\n assert_array_almost_equal(q,q2)\n assert_array_almost_equal(r,r2)\n\n def test_simple_tall_e(self):\n # economy version\n a = [[8,2],[2,9],[5,3]]\n q,r = qr(a, mode='economic')\n assert_array_almost_equal(dot(transpose(q),q),identity(2))\n assert_array_almost_equal(dot(q,r),a)\n assert_equal(q.shape, (3,2))\n assert_equal(r.shape, (2,2))\n\n def test_simple_tall_e_pivoting(self):\n # economy version pivoting\n a = np.asarray([[8,2],[2,9],[5,3]])\n q,r,p = qr(a, pivoting=True, mode='economic')\n d = abs(diag(r))\n assert_(all(d[1:] <= d[:-1]))\n assert_array_almost_equal(dot(transpose(q),q),identity(2))\n assert_array_almost_equal(dot(q,r),a[:,p])\n q2,r2 = qr(a[:,p], mode='economic')\n assert_array_almost_equal(q,q2)\n assert_array_almost_equal(r,r2)\n\n def test_simple_tall_left(self):\n a = [[8,2],[2,9],[5,3]]\n q,r = qr(a, mode=\"economic\")\n c = [1, 2]\n qc,r2 = qr_multiply(a, c, \"left\")\n assert_array_almost_equal(dot(q, c), qc)\n assert_array_almost_equal(r, r2)\n c = array([1,2,0])\n qc,r2 = qr_multiply(a, c, \"left\", overwrite_c=True)\n assert_array_almost_equal(dot(q, c[:2]), qc)\n qc,r = qr_multiply(a, identity(2), \"left\")\n assert_array_almost_equal(qc, q)\n\n def test_simple_tall_left_pivoting(self):\n a = [[8,2],[2,9],[5,3]]\n q,r,jpvt = qr(a, mode=\"economic\", pivoting=True)\n c = [1, 2]\n qc,r,kpvt = qr_multiply(a, c, \"left\", True)\n assert_array_equal(jpvt, kpvt)\n assert_array_almost_equal(dot(q, c), qc)\n qc,r,jpvt = qr_multiply(a, identity(2), \"left\", True)\n assert_array_almost_equal(qc, q)\n\n def test_simple_tall_right(self):\n a = [[8,2],[2,9],[5,3]]\n q,r = qr(a, mode=\"economic\")\n c = [1, 2, 3]\n cq,r2 = qr_multiply(a, c)\n assert_array_almost_equal(dot(c, q), cq)\n assert_array_almost_equal(r, r2)\n cq,r = qr_multiply(a, identity(3))\n assert_array_almost_equal(cq, q)\n\n def test_simple_tall_right_pivoting(self):\n a = [[8,2],[2,9],[5,3]]\n q,r,jpvt = qr(a, pivoting=True, mode=\"economic\")\n c = [1, 2, 3]\n cq,r,jpvt = qr_multiply(a, c, pivoting=True)\n assert_array_almost_equal(dot(c, q), cq)\n cq,r,jpvt = qr_multiply(a, identity(3), pivoting=True)\n assert_array_almost_equal(cq, q)\n\n def test_simple_fat(self):\n # full version\n a = [[8,2,5],[2,9,3]]\n q,r = qr(a)\n assert_array_almost_equal(dot(transpose(q),q),identity(2))\n assert_array_almost_equal(dot(q,r),a)\n 
assert_equal(q.shape, (2,2))\n assert_equal(r.shape, (2,3))\n\n def test_simple_fat_pivoting(self):\n # full version pivoting\n a = np.asarray([[8,2,5],[2,9,3]])\n q,r,p = qr(a, pivoting=True)\n d = abs(diag(r))\n assert_(all(d[1:] <= d[:-1]))\n assert_array_almost_equal(dot(transpose(q),q),identity(2))\n assert_array_almost_equal(dot(q,r),a[:,p])\n assert_equal(q.shape, (2,2))\n assert_equal(r.shape, (2,3))\n q2,r2 = qr(a[:,p])\n assert_array_almost_equal(q,q2)\n assert_array_almost_equal(r,r2)\n\n def test_simple_fat_e(self):\n # economy version\n a = [[8,2,3],[2,9,5]]\n q,r = qr(a, mode='economic')\n assert_array_almost_equal(dot(transpose(q),q),identity(2))\n assert_array_almost_equal(dot(q,r),a)\n assert_equal(q.shape, (2,2))\n assert_equal(r.shape, (2,3))\n\n def test_simple_fat_e_pivoting(self):\n # economy version pivoting\n a = np.asarray([[8,2,3],[2,9,5]])\n q,r,p = qr(a, pivoting=True, mode='economic')\n d = abs(diag(r))\n assert_(all(d[1:] <= d[:-1]))\n assert_array_almost_equal(dot(transpose(q),q),identity(2))\n assert_array_almost_equal(dot(q,r),a[:,p])\n assert_equal(q.shape, (2,2))\n assert_equal(r.shape, (2,3))\n q2,r2 = qr(a[:,p], mode='economic')\n assert_array_almost_equal(q,q2)\n assert_array_almost_equal(r,r2)\n\n def test_simple_fat_left(self):\n a = [[8,2,3],[2,9,5]]\n q,r = qr(a, mode=\"economic\")\n c = [1, 2]\n qc,r2 = qr_multiply(a, c, \"left\")\n assert_array_almost_equal(dot(q, c), qc)\n assert_array_almost_equal(r, r2)\n qc,r = qr_multiply(a, identity(2), \"left\")\n assert_array_almost_equal(qc, q)\n\n def test_simple_fat_left_pivoting(self):\n a = [[8,2,3],[2,9,5]]\n q,r,jpvt = qr(a, mode=\"economic\", pivoting=True)\n c = [1, 2]\n qc,r,jpvt = qr_multiply(a, c, \"left\", True)\n assert_array_almost_equal(dot(q, c), qc)\n qc,r,jpvt = qr_multiply(a, identity(2), \"left\", True)\n assert_array_almost_equal(qc, q)\n\n def test_simple_fat_right(self):\n a = [[8,2,3],[2,9,5]]\n q,r = qr(a, mode=\"economic\")\n c = [1, 2]\n cq,r2 = qr_multiply(a, c)\n assert_array_almost_equal(dot(c, q), cq)\n assert_array_almost_equal(r, r2)\n cq,r = qr_multiply(a, identity(2))\n assert_array_almost_equal(cq, q)\n\n def test_simple_fat_right_pivoting(self):\n a = [[8,2,3],[2,9,5]]\n q,r,jpvt = qr(a, pivoting=True, mode=\"economic\")\n c = [1, 2]\n cq,r,jpvt = qr_multiply(a, c, pivoting=True)\n assert_array_almost_equal(dot(c, q), cq)\n cq,r,jpvt = qr_multiply(a, identity(2), pivoting=True)\n assert_array_almost_equal(cq, q)\n\n def test_simple_complex(self):\n a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]\n q,r = qr(a)\n assert_array_almost_equal(dot(conj(transpose(q)),q),identity(3))\n assert_array_almost_equal(dot(q,r),a)\n\n def test_simple_complex_left(self):\n a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]\n q,r = qr(a)\n c = [1, 2, 3+4j]\n qc,r = qr_multiply(a, c, \"left\")\n assert_array_almost_equal(dot(q, c), qc)\n qc,r = qr_multiply(a, identity(3), \"left\")\n assert_array_almost_equal(q, qc)\n\n def test_simple_complex_right(self):\n a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]\n q,r = qr(a)\n c = [1, 2, 3+4j]\n qc,r = qr_multiply(a, c)\n assert_array_almost_equal(dot(c, q), qc)\n qc,r = qr_multiply(a, identity(3))\n assert_array_almost_equal(q, qc)\n\n def test_simple_tall_complex_left(self):\n a = [[8,2+3j],[2,9],[5+7j,3]]\n q,r = qr(a, mode=\"economic\")\n c = [1, 2+2j]\n qc,r2 = qr_multiply(a, c, \"left\")\n assert_array_almost_equal(dot(q, c), qc)\n assert_array_almost_equal(r, r2)\n c = array([1,2,0])\n qc,r2 = qr_multiply(a, c, \"left\", overwrite_c=True)\n assert_array_almost_equal(dot(q, 
c[:2]), qc)\n qc,r = qr_multiply(a, identity(2), \"left\")\n assert_array_almost_equal(qc, q)\n\n def test_simple_complex_left_conjugate(self):\n a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]\n q,r = qr(a)\n c = [1, 2, 3+4j]\n qc,r = qr_multiply(a, c, \"left\", conjugate=True)\n assert_array_almost_equal(dot(q.conjugate(), c), qc)\n\n def test_simple_complex_tall_left_conjugate(self):\n a = [[3,3+4j],[5,2+2j],[3,2]]\n q,r = qr(a, mode='economic')\n c = [1, 3+4j]\n qc,r = qr_multiply(a, c, \"left\", conjugate=True)\n assert_array_almost_equal(dot(q.conjugate(), c), qc)\n\n def test_simple_complex_right_conjugate(self):\n a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]\n q,r = qr(a)\n c = [1, 2, 3+4j]\n qc,r = qr_multiply(a, c, conjugate=True)\n assert_array_almost_equal(dot(c, q.conjugate()), qc)\n\n def test_simple_complex_pivoting(self):\n a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]])\n q,r,p = qr(a, pivoting=True)\n d = abs(diag(r))\n assert_(all(d[1:] <= d[:-1]))\n assert_array_almost_equal(dot(conj(transpose(q)),q),identity(3))\n assert_array_almost_equal(dot(q,r),a[:,p])\n q2,r2 = qr(a[:,p])\n assert_array_almost_equal(q,q2)\n assert_array_almost_equal(r,r2)\n\n def test_simple_complex_left_pivoting(self):\n a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]])\n q,r,jpvt = qr(a, pivoting=True)\n c = [1, 2, 3+4j]\n qc,r,jpvt = qr_multiply(a, c, \"left\", True)\n assert_array_almost_equal(dot(q, c), qc)\n\n def test_simple_complex_right_pivoting(self):\n a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]])\n q,r,jpvt = qr(a, pivoting=True)\n c = [1, 2, 3+4j]\n qc,r,jpvt = qr_multiply(a, c, pivoting=True)\n assert_array_almost_equal(dot(c, q), qc)\n\n def test_random(self):\n n = 20\n for k in range(2):\n a = random([n,n])\n q,r = qr(a)\n assert_array_almost_equal(dot(transpose(q),q),identity(n))\n assert_array_almost_equal(dot(q,r),a)\n\n def test_random_left(self):\n n = 20\n for k in range(2):\n a = random([n,n])\n q,r = qr(a)\n c = random([n])\n qc,r = qr_multiply(a, c, \"left\")\n assert_array_almost_equal(dot(q, c), qc)\n qc,r = qr_multiply(a, identity(n), \"left\")\n assert_array_almost_equal(q, qc)\n\n def test_random_right(self):\n n = 20\n for k in range(2):\n a = random([n,n])\n q,r = qr(a)\n c = random([n])\n cq,r = qr_multiply(a, c)\n assert_array_almost_equal(dot(c, q), cq)\n cq,r = qr_multiply(a, identity(n))\n assert_array_almost_equal(q, cq)\n\n def test_random_pivoting(self):\n n = 20\n for k in range(2):\n a = random([n,n])\n q,r,p = qr(a, pivoting=True)\n d = abs(diag(r))\n assert_(all(d[1:] <= d[:-1]))\n assert_array_almost_equal(dot(transpose(q),q),identity(n))\n assert_array_almost_equal(dot(q,r),a[:,p])\n q2,r2 = qr(a[:,p])\n assert_array_almost_equal(q,q2)\n assert_array_almost_equal(r,r2)\n\n def test_random_tall(self):\n # full version\n m = 200\n n = 100\n for k in range(2):\n a = random([m,n])\n q,r = qr(a)\n assert_array_almost_equal(dot(transpose(q),q),identity(m))\n assert_array_almost_equal(dot(q,r),a)\n\n def test_random_tall_left(self):\n # full version\n m = 200\n n = 100\n for k in range(2):\n a = random([m,n])\n q,r = qr(a, mode=\"economic\")\n c = random([n])\n qc,r = qr_multiply(a, c, \"left\")\n assert_array_almost_equal(dot(q, c), qc)\n qc,r = qr_multiply(a, identity(n), \"left\")\n assert_array_almost_equal(qc, q)\n\n def test_random_tall_right(self):\n # full version\n m = 200\n n = 100\n for k in range(2):\n a = random([m,n])\n q,r = qr(a, mode=\"economic\")\n c = random([m])\n cq,r = qr_multiply(a, c)\n assert_array_almost_equal(dot(c, q), cq)\n cq,r = qr_multiply(a, 
identity(m))\n assert_array_almost_equal(cq, q)\n\n def test_random_tall_pivoting(self):\n # full version pivoting\n m = 200\n n = 100\n for k in range(2):\n a = random([m,n])\n q,r,p = qr(a, pivoting=True)\n d = abs(diag(r))\n assert_(all(d[1:] <= d[:-1]))\n assert_array_almost_equal(dot(transpose(q),q),identity(m))\n assert_array_almost_equal(dot(q,r),a[:,p])\n q2,r2 = qr(a[:,p])\n assert_array_almost_equal(q,q2)\n assert_array_almost_equal(r,r2)\n\n def test_random_tall_e(self):\n # economy version\n m = 200\n n = 100\n for k in range(2):\n a = random([m,n])\n q,r = qr(a, mode='economic')\n assert_array_almost_equal(dot(transpose(q),q),identity(n))\n assert_array_almost_equal(dot(q,r),a)\n assert_equal(q.shape, (m,n))\n assert_equal(r.shape, (n,n))\n\n def test_random_tall_e_pivoting(self):\n # economy version pivoting\n m = 200\n n = 100\n for k in range(2):\n a = random([m,n])\n q,r,p = qr(a, pivoting=True, mode='economic')\n d = abs(diag(r))\n assert_(all(d[1:] <= d[:-1]))\n assert_array_almost_equal(dot(transpose(q),q),identity(n))\n assert_array_almost_equal(dot(q,r),a[:,p])\n assert_equal(q.shape, (m,n))\n assert_equal(r.shape, (n,n))\n q2,r2 = qr(a[:,p], mode='economic')\n assert_array_almost_equal(q,q2)\n assert_array_almost_equal(r,r2)\n\n def test_random_trap(self):\n m = 100\n n = 200\n for k in range(2):\n a = random([m,n])\n q,r = qr(a)\n assert_array_almost_equal(dot(transpose(q),q),identity(m))\n assert_array_almost_equal(dot(q,r),a)\n\n def test_random_trap_pivoting(self):\n m = 100\n n = 200\n for k in range(2):\n a = random([m,n])\n q,r,p = qr(a, pivoting=True)\n d = abs(diag(r))\n assert_(all(d[1:] <= d[:-1]))\n assert_array_almost_equal(dot(transpose(q),q),identity(m))\n assert_array_almost_equal(dot(q,r),a[:,p])\n q2,r2 = qr(a[:,p])\n assert_array_almost_equal(q,q2)\n assert_array_almost_equal(r,r2)\n\n def test_random_complex(self):\n n = 20\n for k in range(2):\n a = random([n,n])+1j*random([n,n])\n q,r = qr(a)\n assert_array_almost_equal(dot(conj(transpose(q)),q),identity(n))\n assert_array_almost_equal(dot(q,r),a)\n\n def test_random_complex_left(self):\n n = 20\n for k in range(2):\n a = random([n,n])+1j*random([n,n])\n q,r = qr(a)\n c = random([n])+1j*random([n])\n qc,r = qr_multiply(a, c, \"left\")\n assert_array_almost_equal(dot(q, c), qc)\n qc,r = qr_multiply(a, identity(n), \"left\")\n assert_array_almost_equal(q, qc)\n\n def test_random_complex_right(self):\n n = 20\n for k in range(2):\n a = random([n,n])+1j*random([n,n])\n q,r = qr(a)\n c = random([n])+1j*random([n])\n cq,r = qr_multiply(a, c)\n assert_array_almost_equal(dot(c, q), cq)\n cq,r = qr_multiply(a, identity(n))\n assert_array_almost_equal(q, cq)\n\n def test_random_complex_pivoting(self):\n n = 20\n for k in range(2):\n a = random([n,n])+1j*random([n,n])\n q,r,p = qr(a, pivoting=True)\n d = abs(diag(r))\n assert_(all(d[1:] <= d[:-1]))\n assert_array_almost_equal(dot(conj(transpose(q)),q),identity(n))\n assert_array_almost_equal(dot(q,r),a[:,p])\n q2,r2 = qr(a[:,p])\n assert_array_almost_equal(q,q2)\n assert_array_almost_equal(r,r2)\n\n def test_check_finite(self):\n a = [[8,2,3],[2,9,3],[5,3,6]]\n q,r = qr(a, check_finite=False)\n assert_array_almost_equal(dot(transpose(q),q),identity(3))\n assert_array_almost_equal(dot(q,r),a)\n\nclass TestRQ(TestCase):\n\n def setUp(self):\n seed(1234)\n\n def test_simple(self):\n a = [[8,2,3],[2,9,3],[5,3,6]]\n r,q = rq(a)\n assert_array_almost_equal(dot(q, transpose(q)),identity(3))\n assert_array_almost_equal(dot(r,q),a)\n\n def test_r(self):\n a = 
[[8,2,3],[2,9,3],[5,3,6]]\n r,q = rq(a)\n r2 = rq(a, mode='r')\n assert_array_almost_equal(r, r2)\n\n def test_random(self):\n n = 20\n for k in range(2):\n a = random([n,n])\n r,q = rq(a)\n assert_array_almost_equal(dot(q, transpose(q)),identity(n))\n assert_array_almost_equal(dot(r,q),a)\n\n def test_simple_trap(self):\n a = [[8,2,3],[2,9,3]]\n r,q = rq(a)\n assert_array_almost_equal(dot(transpose(q),q),identity(3))\n assert_array_almost_equal(dot(r,q),a)\n\n def test_simple_tall(self):\n a = [[8,2],[2,9],[5,3]]\n r,q = rq(a)\n assert_array_almost_equal(dot(transpose(q),q),identity(2))\n assert_array_almost_equal(dot(r,q),a)\n\n def test_simple_fat(self):\n a = [[8,2,5],[2,9,3]]\n r,q = rq(a)\n assert_array_almost_equal(dot(transpose(q),q),identity(3))\n assert_array_almost_equal(dot(r,q),a)\n\n def test_simple_complex(self):\n a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]\n r,q = rq(a)\n assert_array_almost_equal(dot(q, conj(transpose(q))),identity(3))\n assert_array_almost_equal(dot(r,q),a)\n\n def test_random_tall(self):\n m = 200\n n = 100\n for k in range(2):\n a = random([m,n])\n r,q = rq(a)\n assert_array_almost_equal(dot(q, transpose(q)),identity(n))\n assert_array_almost_equal(dot(r,q),a)\n\n def test_random_trap(self):\n m = 100\n n = 200\n for k in range(2):\n a = random([m,n])\n r,q = rq(a)\n assert_array_almost_equal(dot(q, transpose(q)),identity(n))\n assert_array_almost_equal(dot(r,q),a)\n\n def test_random_trap_economic(self):\n m = 100\n n = 200\n for k in range(2):\n a = random([m,n])\n r,q = rq(a, mode='economic')\n assert_array_almost_equal(dot(q,transpose(q)),identity(m))\n assert_array_almost_equal(dot(r,q),a)\n assert_equal(q.shape, (m, n))\n assert_equal(r.shape, (m, m))\n\n def test_random_complex(self):\n n = 20\n for k in range(2):\n a = random([n,n])+1j*random([n,n])\n r,q = rq(a)\n assert_array_almost_equal(dot(q, conj(transpose(q))),identity(n))\n assert_array_almost_equal(dot(r,q),a)\n\n def test_random_complex_economic(self):\n m = 100\n n = 200\n for k in range(2):\n a = random([m,n])+1j*random([m,n])\n r,q = rq(a, mode='economic')\n assert_array_almost_equal(dot(q,conj(transpose(q))),identity(m))\n assert_array_almost_equal(dot(r,q),a)\n assert_equal(q.shape, (m, n))\n assert_equal(r.shape, (m, m))\n\n def test_check_finite(self):\n a = [[8,2,3],[2,9,3],[5,3,6]]\n r,q = rq(a, check_finite=False)\n assert_array_almost_equal(dot(q, transpose(q)),identity(3))\n assert_array_almost_equal(dot(r,q),a)\n\n\ntransp = transpose\nany = sometrue\n\nclass TestSchur(TestCase):\n\n def test_simple(self):\n a = [[8,12,3],[2,9,3],[10,3,6]]\n t,z = schur(a)\n assert_array_almost_equal(dot(dot(z,t),transp(conj(z))),a)\n tc,zc = schur(a,'complex')\n assert_(any(ravel(iscomplex(zc))) and any(ravel(iscomplex(tc))))\n assert_array_almost_equal(dot(dot(zc,tc),transp(conj(zc))),a)\n tc2,zc2 = rsf2csf(tc,zc)\n assert_array_almost_equal(dot(dot(zc2,tc2),transp(conj(zc2))),a)\n\n def test_sort(self):\n a = [[4.,3.,1.,-1.],[-4.5,-3.5,-1.,1.],[9.,6.,-4.,4.5],[6.,4.,-3.,3.5]]\n s,u,sdim = schur(a,sort='lhp')\n assert_array_almost_equal([[0.1134,0.5436,0.8316,0.],\n [-0.1134,-0.8245,0.5544,0.],\n [-0.8213,0.1308,0.0265,-0.5547],\n [-0.5475,0.0872,0.0177,0.8321]],\n u,3)\n assert_array_almost_equal([[-1.4142,0.1456,-11.5816,-7.7174],\n [0.,-0.5000,9.4472,-0.7184],\n [0.,0.,1.4142,-0.1456],\n [0.,0.,0.,0.5]],\n s,3)\n assert_equal(2,sdim)\n\n s,u,sdim = schur(a,sort='rhp')\n assert_array_almost_equal([[0.4862,-0.4930,0.1434,-0.7071],\n [-0.4862,0.4930,-0.1434,-0.7071],\n 
[0.6042,0.3944,-0.6924,0.],\n [0.4028,0.5986,0.6924,0.]],\n u,3)\n assert_array_almost_equal([[1.4142,-0.9270,4.5368,-14.4130],\n [0.,0.5,6.5809,-3.1870],\n [0.,0.,-1.4142,0.9270],\n [0.,0.,0.,-0.5]],\n s,3)\n assert_equal(2,sdim)\n\n s,u,sdim = schur(a,sort='iuc')\n assert_array_almost_equal([[0.5547,0.,-0.5721,-0.6042],\n [-0.8321,0.,-0.3814,-0.4028],\n [0.,0.7071,-0.5134,0.4862],\n [0.,0.7071,0.5134,-0.4862]],\n u,3)\n assert_array_almost_equal([[-0.5000,0.0000,-6.5809,-4.0974],\n [0.,0.5000,-3.3191,-14.4130],\n [0.,0.,1.4142,2.1573],\n [0.,0.,0.,-1.4142]],\n s,3)\n assert_equal(2,sdim)\n\n s,u,sdim = schur(a,sort='ouc')\n assert_array_almost_equal([[0.4862,-0.5134,0.7071,0.],\n [-0.4862,0.5134,0.7071,0.],\n [0.6042,0.5721,0.,-0.5547],\n [0.4028,0.3814,0.,0.8321]],\n u,3)\n assert_array_almost_equal([[1.4142,-2.1573,14.4130,4.0974],\n [0.,-1.4142,3.3191,6.5809],\n [0.,0.,-0.5000,0.],\n [0.,0.,0.,0.5000]],\n s,3)\n assert_equal(2,sdim)\n\n rhp_function = lambda x: x >= 0.0\n s,u,sdim = schur(a,sort=rhp_function)\n assert_array_almost_equal([[0.4862,-0.4930,0.1434,-0.7071],\n [-0.4862,0.4930,-0.1434,-0.7071],\n [0.6042,0.3944,-0.6924,0.],\n [0.4028,0.5986,0.6924,0.]],\n u,3)\n assert_array_almost_equal([[1.4142,-0.9270,4.5368,-14.4130],\n [0.,0.5,6.5809,-3.1870],\n [0.,0.,-1.4142,0.9270],\n [0.,0.,0.,-0.5]],\n s,3)\n assert_equal(2,sdim)\n\n def test_sort_errors(self):\n a = [[4.,3.,1.,-1.],[-4.5,-3.5,-1.,1.],[9.,6.,-4.,4.5],[6.,4.,-3.,3.5]]\n assert_raises(ValueError, schur, a, sort='unsupported')\n assert_raises(ValueError, schur, a, sort=1)\n\n def test_check_finite(self):\n a = [[8,12,3],[2,9,3],[10,3,6]]\n t,z = schur(a, check_finite=False)\n assert_array_almost_equal(dot(dot(z,t),transp(conj(z))),a)\n\n\nclass TestHessenberg(TestCase):\n\n def test_simple(self):\n a = [[-149, -50,-154],\n [ 537, 180, 546],\n [ -27, -9, -25]]\n h1 = [[-149.0000,42.2037,-156.3165],\n [-537.6783,152.5511,-554.9272],\n [0,0.0728, 2.4489]]\n h,q = hessenberg(a,calc_q=1)\n assert_array_almost_equal(dot(transp(q),dot(a,q)),h)\n assert_array_almost_equal(h,h1,decimal=4)\n\n def test_simple_complex(self):\n a = [[-149, -50,-154],\n [ 537, 180j, 546],\n [ -27j, -9, -25]]\n h,q = hessenberg(a,calc_q=1)\n h1 = dot(transp(conj(q)),dot(a,q))\n assert_array_almost_equal(h1,h)\n\n def test_simple2(self):\n a = [[1,2,3,4,5,6,7],\n [0,2,3,4,6,7,2],\n [0,2,2,3,0,3,2],\n [0,0,2,8,0,0,2],\n [0,3,1,2,0,1,2],\n [0,1,2,3,0,1,0],\n [0,0,0,0,0,1,2]]\n h,q = hessenberg(a,calc_q=1)\n assert_array_almost_equal(dot(transp(q),dot(a,q)),h)\n\n def test_random(self):\n n = 20\n for k in range(2):\n a = random([n,n])\n h,q = hessenberg(a,calc_q=1)\n assert_array_almost_equal(dot(transp(q),dot(a,q)),h)\n\n def test_random_complex(self):\n n = 20\n for k in range(2):\n a = random([n,n])+1j*random([n,n])\n h,q = hessenberg(a,calc_q=1)\n h1 = dot(transp(conj(q)),dot(a,q))\n assert_array_almost_equal(h1,h)\n\n def test_check_finite(self):\n a = [[-149, -50,-154],\n [ 537, 180, 546],\n [ -27, -9, -25]]\n h1 = [[-149.0000,42.2037,-156.3165],\n [-537.6783,152.5511,-554.9272],\n [0,0.0728, 2.4489]]\n h,q = hessenberg(a,calc_q=1, check_finite=False)\n assert_array_almost_equal(dot(transp(q),dot(a,q)),h)\n assert_array_almost_equal(h,h1,decimal=4)\n\n\nclass TestQZ(TestCase):\n def setUp(self):\n seed(12345)\n\n def test_qz_single(self):\n n = 5\n A = random([n,n]).astype(float32)\n B = random([n,n]).astype(float32)\n AA,BB,Q,Z = qz(A,B)\n assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)\n assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)\n 
assert_array_almost_equal(dot(Q,Q.T), eye(n))\n assert_array_almost_equal(dot(Z,Z.T), eye(n))\n assert_(all(diag(BB) >= 0))\n\n def test_qz_double(self):\n n = 5\n A = random([n,n])\n B = random([n,n])\n AA,BB,Q,Z = qz(A,B)\n assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)\n assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)\n assert_array_almost_equal(dot(Q,Q.T), eye(n))\n assert_array_almost_equal(dot(Z,Z.T), eye(n))\n assert_(all(diag(BB) >= 0))\n\n def test_qz_complex(self):\n n = 5\n A = random([n,n]) + 1j*random([n,n])\n B = random([n,n]) + 1j*random([n,n])\n AA,BB,Q,Z = qz(A,B)\n assert_array_almost_equal(dot(dot(Q,AA),Z.conjugate().T), A)\n assert_array_almost_equal(dot(dot(Q,BB),Z.conjugate().T), B)\n assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n))\n assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n))\n assert_(all(diag(BB) >= 0))\n assert_(all(diag(BB).imag == 0))\n\n\n def test_qz_complex64(self):\n n = 5\n A = (random([n,n]) + 1j*random([n,n])).astype(complex64)\n B = (random([n,n]) + 1j*random([n,n])).astype(complex64)\n AA,BB,Q,Z = qz(A,B)\n assert_array_almost_equal(dot(dot(Q,AA),Z.conjugate().T), A)\n assert_array_almost_equal(dot(dot(Q,BB),Z.conjugate().T), B)\n assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n))\n assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n))\n assert_(all(diag(BB) >= 0))\n assert_(all(diag(BB).imag == 0))\n\n def test_qz_double_complex(self):\n n = 5\n A = random([n,n])\n B = random([n,n])\n AA,BB,Q,Z = qz(A,B, output='complex')\n aa = dot(dot(Q,AA),Z.conjugate().T)\n assert_array_almost_equal(aa.real, A)\n assert_array_almost_equal(aa.imag, 0)\n bb = dot(dot(Q,BB),Z.conjugate().T)\n assert_array_almost_equal(bb.real, B)\n assert_array_almost_equal(bb.imag, 0)\n assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n))\n assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n))\n assert_(all(diag(BB) >= 0))\n\n def test_qz_double_sort(self):\n #from http://www.nag.com/lapack-ex/node119.html\n #NOTE: These matrices may be ill-conditioned and lead to a\n # seg fault on certain python versions when compiled with\n # sse2 or sse3 older ATLAS/LAPACK binaries for windows\n #A = np.array([[3.9, 12.5, -34.5, -0.5],\n # [ 4.3, 21.5, -47.5, 7.5],\n # [ 4.3, 21.5, -43.5, 3.5],\n # [ 4.4, 26.0, -46.0, 6.0 ]])\n\n #B = np.array([[ 1.0, 2.0, -3.0, 1.0],\n # [1.0, 3.0, -5.0, 4.0],\n # [1.0, 3.0, -4.0, 3.0],\n # [1.0, 3.0, -4.0, 4.0]])\n A = np.array([[3.9, 12.5, -34.5, 2.5],\n [ 4.3, 21.5, -47.5, 7.5],\n [ 4.3, 1.5, -43.5, 3.5],\n [ 4.4, 6.0, -46.0, 6.0 ]])\n\n B = np.array([[ 1.0, 1.0, -3.0, 1.0],\n [1.0, 3.0, -5.0, 4.4],\n [1.0, 2.0, -4.0, 1.0],\n [1.2, 3.0, -4.0, 4.0]])\n\n sort = lambda ar,ai,beta : ai == 0\n\n assert_raises(ValueError, qz, A, B, sort=sort)\n if False:\n AA,BB,Q,Z,sdim = qz(A,B,sort=sort)\n #assert_(sdim == 2)\n assert_(sdim == 4)\n assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)\n assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)\n\n # test absolute values bc the sign is ambiguous and might be platform\n # dependent\n assert_array_almost_equal(np.abs(AA), np.abs(np.array(\n [[ 35.7864, -80.9061, -12.0629, -9.498 ],\n [ 0. , 2.7638, -2.3505, 7.3256],\n [ 0. , 0. , 0.6258, -0.0398],\n [ 0. , 0. , 0. , -12.8217]])), 4)\n assert_array_almost_equal(np.abs(BB), np.abs(np.array(\n [[ 4.5324, -8.7878, 3.2357, -3.5526],\n [ 0. , 1.4314, -2.1894, 0.9709],\n [ 0. , 0. , 1.3126, -0.3468],\n [ 0. , 0. , 0. 
, 0.559 ]])), 4)\n assert_array_almost_equal(np.abs(Q), np.abs(np.array(\n [[-0.4193, -0.605 , -0.1894, -0.6498],\n [-0.5495, 0.6987, 0.2654, -0.3734],\n [-0.4973, -0.3682, 0.6194, 0.4832],\n [-0.5243, 0.1008, -0.7142, 0.4526]])), 4)\n assert_array_almost_equal(np.abs(Z), np.abs(np.array(\n [[-0.9471, -0.2971, -0.1217, 0.0055],\n [-0.0367, 0.1209, 0.0358, 0.9913],\n [ 0.3171, -0.9041, -0.2547, 0.1312],\n [ 0.0346, 0.2824, -0.9587, 0.0014]])), 4)\n\n # test absolute values bc the sign is ambiguous and might be platform\n # dependent\n #assert_array_almost_equal(abs(AA), abs(np.array([\n # [3.8009, -69.4505, 50.3135, -43.2884],\n # [0.0000, 9.2033, -0.2001, 5.9881],\n # [0.0000, 0.0000, 1.4279, 4.4453],\n # [0.0000, 0.0000, 0.9019, -1.1962]])), 4)\n #assert_array_almost_equal(abs(BB), abs(np.array([\n # [1.9005, -10.2285, 0.8658, -5.2134],\n # [0.0000, 2.3008, 0.7915, 0.4262],\n # [0.0000, 0.0000, 0.8101, 0.0000],\n # [0.0000, 0.0000, 0.0000, -0.2823]])), 4)\n #assert_array_almost_equal(abs(Q), abs(np.array([\n # [0.4642, 0.7886, 0.2915, -0.2786],\n # [0.5002, -0.5986, 0.5638, -0.2713],\n # [0.5002, 0.0154, -0.0107, 0.8657],\n # [0.5331, -0.1395, -0.7727, -0.3151]])), 4)\n #assert_array_almost_equal(dot(Q,Q.T), eye(4))\n #assert_array_almost_equal(abs(Z), abs(np.array([\n # [0.9961, -0.0014, 0.0887, -0.0026],\n # [0.0057, -0.0404, -0.0938, -0.9948],\n # [0.0626, 0.7194, -0.6908, 0.0363],\n # [0.0626, -0.6934, -0.7114, 0.0956]])), 4)\n #assert_array_almost_equal(dot(Z,Z.T), eye(4))\n\n #def test_qz_complex_sort(self):\n # cA = np.array([\n # [-21.10+22.50*1j, 53.50+-50.50*1j, -34.50+127.50*1j, 7.50+ 0.50*1j],\n # [-0.46+ -7.78*1j, -3.50+-37.50*1j, -15.50+ 58.50*1j,-10.50+ -1.50*1j],\n # [ 4.30+ -5.50*1j, 39.70+-17.10*1j, -68.50+ 12.50*1j, -7.50+ -3.50*1j],\n # [ 5.50+ 4.40*1j, 14.40+ 43.30*1j, -32.50+-46.00*1j,-19.00+-32.50*1j]])\n\n # cB = np.array([\n # [1.00+ -5.00*1j, 1.60+ 1.20*1j,-3.00+ 0.00*1j, 0.00+ -1.00*1j],\n # [0.80+ -0.60*1j, 3.00+ -5.00*1j,-4.00+ 3.00*1j,-2.40+ -3.20*1j],\n # [1.00+ 0.00*1j, 2.40+ 1.80*1j,-4.00+ -5.00*1j, 0.00+ -3.00*1j],\n # [0.00+ 1.00*1j,-1.80+ 2.40*1j, 0.00+ -4.00*1j, 4.00+ -5.00*1j]])\n\n # AAS,BBS,QS,ZS,sdim = qz(cA,cB,sort='lhp')\n\n # eigenvalues = diag(AAS)/diag(BBS)\n # assert_(all(np.real(eigenvalues[:sdim] < 0)))\n # assert_(all(np.real(eigenvalues[sdim:] > 0)))\n\n def test_check_finite(self):\n n = 5\n A = random([n,n])\n B = random([n,n])\n AA,BB,Q,Z = qz(A,B,check_finite=False)\n assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)\n assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)\n assert_array_almost_equal(dot(Q,Q.T), eye(n))\n assert_array_almost_equal(dot(Z,Z.T), eye(n))\n assert_(all(diag(BB) >= 0))\n\n\nclass TestDatacopied(TestCase):\n\n def test_datacopied(self):\n from scipy.linalg.decomp import _datacopied\n\n M = matrix([[0,1],[2,3]])\n A = asarray(M)\n L = M.tolist()\n M2 = M.copy()\n\n class Fake1:\n def __array__(self):\n return A\n\n class Fake2:\n __array_interface__ = A.__array_interface__\n\n F1 = Fake1()\n F2 = Fake2()\n\n AF1 = asarray(F1)\n AF2 = asarray(F2)\n\n for item, status in [(M, False), (A, False), (L, True),\n (M2, False), (F1, False), (F2, False)]:\n arr = asarray(item)\n assert_equal(_datacopied(arr, item), status,\n err_msg=repr(item))\n\n\ndef test_aligned_mem_float():\n \"\"\"Check linalg works with non-aligned memory\"\"\"\n # Allocate 402 bytes of memory (allocated on boundary)\n a = arange(402, dtype=np.uint8)\n\n # Create an array with boundary offset 4\n z = np.frombuffer(a.data, offset=2, count=100, 
dtype=float32)\n z.shape = 10, 10\n\n eig(z, overwrite_a=True)\n eig(z.T, overwrite_a=True)\n\n\ndef test_aligned_mem():\n \"\"\"Check linalg works with non-aligned memory\"\"\"\n # Allocate 804 bytes of memory (allocated on boundary)\n a = arange(804, dtype=np.uint8)\n\n # Create an array with boundary offset 4\n z = np.frombuffer(a.data, offset=4, count=100, dtype=float)\n z.shape = 10, 10\n\n eig(z, overwrite_a=True)\n eig(z.T, overwrite_a=True)\n\ndef test_aligned_mem_complex():\n \"\"\"Check that complex objects don't need to be completely aligned\"\"\"\n # Allocate 1608 bytes of memory (allocated on boundary)\n a = zeros(1608, dtype=np.uint8)\n\n # Create an array with boundary offset 8\n z = np.frombuffer(a.data, offset=8, count=100, dtype=complex)\n z.shape = 10, 10\n\n eig(z, overwrite_a=True)\n # This does not need special handling\n eig(z.T, overwrite_a=True)\n\ndef check_lapack_misaligned(func, args, kwargs):\n args = list(args)\n for i in range(len(args)):\n a = args[:]\n if isinstance(a[i],np.ndarray):\n # Try misaligning a[i]\n aa = np.zeros(a[i].size*a[i].dtype.itemsize+8, dtype=np.uint8)\n aa = np.frombuffer(aa.data, offset=4, count=a[i].size, dtype=a[i].dtype)\n aa.shape = a[i].shape\n aa[...] = a[i]\n a[i] = aa\n func(*a,**kwargs)\n if len(a[i].shape)>1:\n a[i] = a[i].T\n func(*a,**kwargs)\n\n\[email protected](True, \"Ticket #1152, triggers a segfault in rare cases.\")\ndef test_lapack_misaligned():\n M = np.eye(10,dtype=float)\n R = np.arange(100)\n R.shape = 10,10\n S = np.arange(20000,dtype=np.uint8)\n S = np.frombuffer(S.data, offset=4, count=100, dtype=np.float)\n S.shape = 10, 10\n b = np.ones(10)\n v = np.ones(3,dtype=float)\n LU, piv = lu_factor(S)\n for (func, args, kwargs) in [\n (eig,(S,),dict(overwrite_a=True)), # crash\n (eigvals,(S,),dict(overwrite_a=True)), # no crash\n (lu,(S,),dict(overwrite_a=True)), # no crash\n (lu_factor,(S,),dict(overwrite_a=True)), # no crash\n (lu_solve,((LU,piv),b),dict(overwrite_b=True)),\n (solve,(S,b),dict(overwrite_a=True,overwrite_b=True)),\n (svd,(M,),dict(overwrite_a=True)), # no crash\n (svd,(R,),dict(overwrite_a=True)), # no crash\n (svd,(S,),dict(overwrite_a=True)), # crash\n (svdvals,(S,),dict()), # no crash\n (svdvals,(S,),dict(overwrite_a=True)), #crash\n (cholesky,(M,),dict(overwrite_a=True)), # no crash\n (qr,(S,),dict(overwrite_a=True)), # crash\n (rq,(S,),dict(overwrite_a=True)), # crash\n (hessenberg,(S,),dict(overwrite_a=True)), # crash\n (schur,(S,),dict(overwrite_a=True)), # crash\n ]:\n yield check_lapack_misaligned, func, args, kwargs\n# not properly tested\n# cholesky, rsf2csf, lu_solve, solve, eig_banded, eigvals_banded, eigh, diagsvd\n\n\nclass TestOverwrite(object):\n def test_eig(self):\n assert_no_overwrite(eig, [(3,3)])\n assert_no_overwrite(eig, [(3,3), (3,3)])\n def test_eigh(self):\n assert_no_overwrite(eigh, [(3,3)])\n assert_no_overwrite(eigh, [(3,3), (3,3)])\n def test_eig_banded(self):\n assert_no_overwrite(eig_banded, [(3,2)])\n def test_eigvals(self):\n assert_no_overwrite(eigvals, [(3,3)])\n def test_eigvalsh(self):\n assert_no_overwrite(eigvalsh, [(3,3)])\n def test_eigvals_banded(self):\n assert_no_overwrite(eigvals_banded, [(3,2)])\n def test_hessenberg(self):\n assert_no_overwrite(hessenberg, [(3,3)])\n def test_lu_factor(self):\n assert_no_overwrite(lu_factor, [(3,3)])\n def test_lu_solve(self):\n x = np.array([[1,2,3], [4,5,6], [7,8,8]])\n xlu = lu_factor(x)\n assert_no_overwrite(lambda b: lu_solve(xlu, b), [(3,)])\n def test_lu(self):\n assert_no_overwrite(lu, [(3,3)])\n def 
test_qr(self):\n assert_no_overwrite(qr, [(3,3)])\n def test_rq(self):\n assert_no_overwrite(rq, [(3,3)])\n def test_schur(self):\n assert_no_overwrite(schur, [(3,3)])\n def test_schur_complex(self):\n assert_no_overwrite(lambda a: schur(a, 'complex'), [(3,3)],\n dtypes=[np.float32, np.float64])\n def test_svd(self):\n assert_no_overwrite(svd, [(3,3)])\n def test_svdvals(self):\n assert_no_overwrite(svdvals, [(3,3)])\n\nif __name__ == \"__main__\":\n run_module_suite()\n",
"\"\"\"\nQuantilization functions and related stuff\n\"\"\"\n\nfrom pandas.core.api import DataFrame, Series\nfrom pandas.core.categorical import Categorical\nfrom pandas.core.index import _ensure_index\nimport pandas.core.algorithms as algos\nimport pandas.core.common as com\nimport pandas.core.nanops as nanops\n\nimport numpy as np\n\n\ndef cut(x, bins, right=True, labels=None, retbins=False, precision=3,\n include_lowest=False):\n \"\"\"\n Return indices of half-open bins to which each value of `x` belongs.\n\n Parameters\n ----------\n x : array-like\n Input array to be binned. It has to be 1-dimensional.\n bins : int or sequence of scalars\n If `bins` is an int, it defines the number of equal-width bins in the\n range of `x`. However, in this case, the range of `x` is extended\n by .1% on each side to include the min or max values of `x`. If\n `bins` is a sequence it defines the bin edges allowing for\n non-uniform bin width. No extension of the range of `x` is done in\n this case.\n right : bool, optional\n Indicates whether the bins include the rightmost edge or not. If\n right == True (the default), then the bins [1,2,3,4] indicate\n (1,2], (2,3], (3,4].\n labels : array or boolean, default None\n Labels to use for bin edges, or False to return integer bin labels\n retbins : bool, optional\n Whether to return the bins or not. Can be useful if bins is given\n as a scalar.\n\n Returns\n -------\n out : Categorical or array of integers if labels is False\n bins : ndarray of floats\n Returned only if `retbins` is True.\n\n Notes\n -----\n The `cut` function can be useful for going from a continuous variable to\n a categorical variable. For example, `cut` could convert ages to groups\n of age ranges.\n\n Any NA values will be NA in the result. Out of bounds values will be NA in\n the resulting Categorical object\n\n\n Examples\n --------\n >>> cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]), 3, retbins=True)\n (array([(0.191, 3.367], (0.191, 3.367], (0.191, 3.367], (3.367, 6.533],\n (6.533, 9.7], (0.191, 3.367]], dtype=object),\n array([ 0.1905 , 3.36666667, 6.53333333, 9.7 ]))\n >>> cut(np.ones(5), 4, labels=False)\n array([2, 2, 2, 2, 2])\n \"\"\"\n # NOTE: this binning code is changed a bit from histogram for var(x) == 0\n if not np.iterable(bins):\n if np.isscalar(bins) and bins < 1:\n raise ValueError(\"`bins` should be a positive integer.\")\n try: # for array-like\n sz = x.size\n except AttributeError:\n x = np.asarray(x)\n sz = x.size\n if sz == 0:\n raise ValueError('Cannot cut empty array')\n # handle empty arrays. Can't determine range, so use 0-1.\n # rng = (0, 1)\n else:\n rng = (nanops.nanmin(x), nanops.nanmax(x))\n mn, mx = [mi + 0.0 for mi in rng]\n\n if mn == mx: # adjust end points before binning\n mn -= .001 * mn\n mx += .001 * mx\n bins = np.linspace(mn, mx, bins + 1, endpoint=True)\n else: # adjust end points after binning\n bins = np.linspace(mn, mx, bins + 1, endpoint=True)\n adj = (mx - mn) * 0.001 # 0.1% of the range\n if right:\n bins[0] -= adj\n else:\n bins[-1] += adj\n\n else:\n bins = np.asarray(bins)\n if (np.diff(bins) < 0).any():\n raise ValueError('bins must increase monotonically.')\n\n return _bins_to_cuts(x, bins, right=right, labels=labels,\n retbins=retbins, precision=precision,\n include_lowest=include_lowest)\n\n\ndef qcut(x, q, labels=None, retbins=False, precision=3):\n \"\"\"\n Quantile-based discretization function. Discretize variable into\n equal-sized buckets based on rank or based on sample quantiles. 
For example\n 1000 values for 10 quantiles would produce a Categorical object indicating\n quantile membership for each data point.\n\n Parameters\n ----------\n x : ndarray or Series\n q : integer or array of quantiles\n Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately\n array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles\n labels : array or boolean, default None\n Labels to use for bin edges, or False to return integer bin labels\n retbins : bool, optional\n Whether to return the bins or not. Can be useful if bins is given\n as a scalar.\n\n Returns\n -------\n cat : Categorical\n\n Notes\n -----\n Out of bounds values will be NA in the resulting Categorical object\n\n Examples\n --------\n \"\"\"\n if com.is_integer(q):\n quantiles = np.linspace(0, 1, q + 1)\n else:\n quantiles = q\n bins = algos.quantile(x, quantiles)\n return _bins_to_cuts(x, bins, labels=labels, retbins=retbins,\n precision=precision, include_lowest=True)\n\n\ndef _bins_to_cuts(x, bins, right=True, labels=None, retbins=False,\n precision=3, name=None, include_lowest=False):\n if name is None and isinstance(x, Series):\n name = x.name\n x = np.asarray(x)\n\n side = 'left' if right else 'right'\n ids = bins.searchsorted(x, side=side)\n\n if len(algos.unique(bins)) < len(bins):\n raise ValueError('Bin edges must be unique: %s' % repr(bins))\n\n if include_lowest:\n ids[x == bins[0]] = 1\n\n na_mask = com.isnull(x) | (ids == len(bins)) | (ids == 0)\n has_nas = na_mask.any()\n\n if labels is not False:\n if labels is None:\n increases = 0\n while True:\n try:\n levels = _format_levels(bins, precision, right=right,\n include_lowest=include_lowest)\n except ValueError:\n increases += 1\n precision += 1\n if increases >= 20:\n raise\n else:\n break\n\n else:\n if len(labels) != len(bins) - 1:\n raise ValueError('Bin labels must be one fewer than '\n 'the number of bin edges')\n levels = labels\n\n levels = np.asarray(levels, dtype=object)\n np.putmask(ids, na_mask, 0)\n fac = Categorical(ids - 1, levels, name=name)\n else:\n fac = ids - 1\n if has_nas:\n fac = fac.astype(np.float64)\n np.putmask(fac, na_mask, np.nan)\n\n if not retbins:\n return fac\n\n return fac, bins\n\n\ndef _format_levels(bins, prec, right=True,\n include_lowest=False):\n fmt = lambda v: _format_label(v, precision=prec)\n if right:\n levels = []\n for a, b in zip(bins, bins[1:]):\n fa, fb = fmt(a), fmt(b)\n\n if a != b and fa == fb:\n raise ValueError('precision too low')\n\n formatted = '(%s, %s]' % (fa, fb)\n\n levels.append(formatted)\n\n if include_lowest:\n levels[0] = '[' + levels[0][1:]\n else:\n levels = ['[%s, %s)' % (fmt(a), fmt(b))\n for a, b in zip(bins, bins[1:])]\n\n return levels\n\n\ndef _format_label(x, precision=3):\n fmt_str = '%%.%dg' % precision\n if com.is_float(x):\n frac, whole = np.modf(x)\n sgn = '-' if x < 0 else ''\n whole = abs(whole)\n if frac != 0.0:\n val = fmt_str % frac\n\n # rounded up or down\n if '.' not in val:\n if x < 0:\n return '%d' % (-whole - 1)\n else:\n return '%d' % (whole + 1)\n\n if 'e' in val:\n return _trim_zeros(fmt_str % x)\n else:\n val = _trim_zeros(val)\n if '.' in val:\n return sgn + '.'.join(('%d' % whole, val.split('.')[1]))\n else: # pragma: no cover\n return sgn + '.'.join(('%d' % whole, val))\n else:\n return sgn + '%d' % whole\n else:\n return str(x)\n\n\ndef _trim_zeros(x):\n while len(x) > 1 and x[-1] == '0':\n x = x[:-1]\n if len(x) > 1 and x[-1] == '.':\n x = x[:-1]\n return x\n",
"\"\"\"\n========================================\nInterpolation (:mod:`scipy.interpolate`)\n========================================\n\n.. currentmodule:: scipy.interpolate\n\nSub-package for objects used in interpolation.\n\nAs listed below, this sub-package contains spline functions and classes,\none-dimensional and multi-dimensional (univariate and multivariate)\ninterpolation classes, Lagrange and Taylor polynomial interpolators, and\nwrappers for `FITPACK <http://www.cisl.ucar.edu/softlib/FITPACK.html>`_\nand DFITPACK functions.\n\nUnivariate interpolation\n========================\n\n.. autosummary::\n :toctree: generated/\n\n interp1d\n BarycentricInterpolator\n KroghInterpolator\n PiecewisePolynomial\n PchipInterpolator\n barycentric_interpolate\n krogh_interpolate\n piecewise_polynomial_interpolate\n pchip_interpolate\n\n\nMultivariate interpolation\n==========================\n\nUnstructured data:\n\n.. autosummary::\n :toctree: generated/\n\n griddata\n LinearNDInterpolator\n NearestNDInterpolator\n CloughTocher2DInterpolator\n Rbf\n interp2d\n\nFor data on a grid:\n\n.. autosummary::\n\n RectBivariateSpline\n\n.. seealso:: `scipy.ndimage.map_coordinates`\n\n\n1-D Splines\n===========\n\n.. autosummary::\n :toctree: generated/\n\n UnivariateSpline\n InterpolatedUnivariateSpline\n LSQUnivariateSpline\n\nThe above univariate spline classes have the following methods:\n\n.. autosummary::\n\n UnivariateSpline.__call__\n UnivariateSpline.derivatives\n UnivariateSpline.integral\n UnivariateSpline.roots\n UnivariateSpline.get_coeffs\n UnivariateSpline.get_knots\n UnivariateSpline.get_residual\n UnivariateSpline.set_smoothing_factor\n\n\nLow-level interface to FITPACK functions:\n\n.. autosummary::\n :toctree: generated/\n\n splrep\n splprep\n splev\n splint\n sproot\n spalde\n bisplrep\n bisplev\n\n\n2-D Splines\n===========\n\nFor data on a grid:\n\n.. autosummary::\n :toctree: generated/\n\n RectBivariateSpline\n RectSphereBivariateSpline\n\nFor unstructured data:\n\n.. autosummary::\n :toctree: generated/\n\n BivariateSpline\n SmoothBivariateSpline\n LSQBivariateSpline\n\nLow-level interface to FITPACK functions:\n\n.. autosummary::\n :toctree: generated/\n\n bisplrep\n bisplev\n\nAdditional tools\n================\n\n.. autosummary::\n :toctree: generated/\n\n lagrange\n approximate_taylor_polynomial\n\n.. seealso::\n\n `scipy.ndimage.map_coordinates`,\n `scipy.ndimage.spline_filter`,\n `scipy.signal.resample`,\n `scipy.signal.bspline`,\n `scipy.signal.gauss_spline`,\n `scipy.signal.qspline1d`,\n `scipy.signal.cspline1d`,\n `scipy.signal.qspline1d_eval`,\n `scipy.signal.cspline1d_eval`,\n `scipy.signal.qspline2d`,\n `scipy.signal.cspline2d`.\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nfrom .interpolate import *\nfrom .fitpack import *\n\n# New interface to fitpack library:\nfrom .fitpack2 import *\n\nfrom .rbf import Rbf\n\nfrom .polyint import *\n\nfrom .ndgriddata import *\n\n__all__ = [s for s in dir() if not s.startswith('_')]\nfrom numpy.testing import Tester\ntest = Tester().test\n",
"\"\"\" Factory methods to create N-D panels \"\"\"\n\nimport pandas.lib as lib\n\n\ndef create_nd_panel_factory(klass_name, axis_orders, axis_slices, slicer, axis_aliases=None, stat_axis=2,ns=None):\n \"\"\" manufacture a n-d class:\n\n parameters\n ----------\n klass_name : the klass name\n axis_orders : the names of the axes in order (highest to lowest)\n axis_slices : a dictionary that defines how the axes map to the sliced axis\n slicer : the class representing a slice of this panel\n axis_aliases: a dictionary defining aliases for various axes\n default = { major : major_axis, minor : minor_axis }\n stat_axis : the default statistic axis\n default = 2\n het_axis : the info axis\n\n\n returns\n -------\n a class object reprsenting this panel\n\n\n \"\"\"\n\n # if slicer is a name, get the object\n if isinstance(slicer, basestring):\n import pandas\n try:\n slicer = getattr(pandas, slicer)\n except:\n raise Exception(\"cannot create this slicer [%s]\" % slicer)\n\n # build the klass\n ns = {} if not ns else ns\n klass = type(klass_name, (slicer,), ns)\n\n # add the class variables\n klass._AXIS_ORDERS = axis_orders\n klass._AXIS_NUMBERS = dict([(a, i) for i, a in enumerate(axis_orders)])\n klass._AXIS_ALIASES = axis_aliases or dict()\n klass._AXIS_NAMES = dict([(i, a) for i, a in enumerate(axis_orders)])\n klass._AXIS_SLICEMAP = axis_slices\n klass._AXIS_LEN = len(axis_orders)\n klass._default_stat_axis = stat_axis\n klass._het_axis = 0\n klass._info_axis = axis_orders[klass._het_axis]\n\n klass._constructor_sliced = slicer\n\n # add the axes\n for i, a in enumerate(axis_orders):\n setattr(klass, a, lib.AxisProperty(i))\n\n #### define the methods ####\n def __init__(self, *args, **kwargs):\n if not (kwargs.get('data') or len(args)):\n raise Exception(\n \"must supply at least a data argument to [%s]\" % klass_name)\n if 'copy' not in kwargs:\n kwargs['copy'] = False\n if 'dtype' not in kwargs:\n kwargs['dtype'] = None\n self._init_data(*args, **kwargs)\n klass.__init__ = __init__\n\n def _get_plane_axes(self, axis):\n\n axis = self._get_axis_name(axis)\n index = self._AXIS_ORDERS.index(axis)\n\n planes = []\n if index:\n planes.extend(self._AXIS_ORDERS[0:index])\n if index != self._AXIS_LEN:\n planes.extend(self._AXIS_ORDERS[index + 1:])\n\n return [getattr(self, p) for p in planes]\n klass._get_plane_axes = _get_plane_axes\n\n def _combine(self, other, func, axis=0):\n if isinstance(other, klass):\n return self._combine_with_constructor(other, func)\n return super(klass, self)._combine(other, func, axis=axis)\n klass._combine = _combine\n\n def _combine_with_constructor(self, other, func):\n\n # combine labels to form new axes\n new_axes = []\n for a in self._AXIS_ORDERS:\n new_axes.append(getattr(self, a) + getattr(other, a))\n\n # reindex: could check that everything's the same size, but forget it\n d = dict([(a, ax) for a, ax in zip(self._AXIS_ORDERS, new_axes)])\n d['copy'] = False\n this = self.reindex(**d)\n other = other.reindex(**d)\n\n result_values = func(this.values, other.values)\n\n return self._constructor(result_values, **d)\n klass._combine_with_constructor = _combine_with_constructor\n\n # set as NonImplemented operations which we don't support\n for f in ['to_frame', 'to_excel', 'to_sparse', 'groupby', 'join', 'filter', 'dropna', 'shift']:\n def func(self, *args, **kwargs):\n raise NotImplementedError\n setattr(klass, f, func)\n\n # add the aggregate operations\n klass._add_aggregate_operations()\n\n return klass\n"
] | [
[
"scipy.sparse.linalg.interface.aslinearoperator",
"numpy.linalg.norm",
"numpy.zeros"
],
[
"numpy.testing.Tester"
],
[
"numpy.testing.assert_equal",
"numpy.testing.run_module_suite",
"numpy.sqrt",
"scipy.signal.freqz",
"numpy.arange",
"scipy.signal.tf2zpk",
"numpy.ones",
"numpy.testing.assert_array_equal",
"scipy.signal.normalize",
"numpy.poly",
"numpy.testing.assert_raises",
"scipy.signal.zpk2tf",
"numpy.array",
"numpy.testing.assert_array_almost_equal"
],
[
"numpy.asarray",
"numpy.cumsum",
"numpy.asmatrix",
"numpy.isscalar",
"numpy.array",
"numpy.unravel_index",
"scipy.lib.six.moves.xrange",
"numpy.empty"
],
[
"numpy.cos",
"numpy.sin",
"numpy.copy",
"numpy.shape",
"numpy.random.rand",
"numpy.exp",
"numpy.array",
"numpy.zeros"
],
[
"scipy.fftpack.irfft",
"numpy.take",
"numpy.asarray",
"scipy.fftpack._fftpack.zfft",
"numpy.issubdtype",
"numpy.dtype",
"scipy.fftpack.rfft",
"scipy.fftpack.fft",
"numpy.random.randn",
"numpy.exp",
"numpy.testing.assert_equal",
"numpy.swapaxes",
"numpy.arange",
"scipy.fftpack._fftpack.drfft",
"scipy.fftpack.fft2",
"numpy.zeros",
"numpy.testing.assert_array_almost_equal",
"scipy.fftpack.ifft",
"scipy.fftpack._fftpack.zrfft",
"numpy.testing.assert_array_almost_equal_nulp",
"scipy.fftpack.fftn",
"numpy.fft.ifft",
"numpy.testing.assert_raises",
"numpy.random.rand",
"numpy.testing.assert_",
"numpy.array",
"numpy.testing.run_module_suite",
"scipy.fftpack.ifftn",
"numpy.random.seed",
"numpy.testing.dec.knownfailureif",
"numpy.add.outer",
"numpy.fft.fft",
"numpy.linalg.norm"
],
[
"numpy.testing.assert_equal",
"scipy.sparse.csgraph.csgraph_from_dense",
"numpy.array",
"scipy.sparse.csgraph.connected_components",
"numpy.testing.assert_array_almost_equal"
],
[
"numpy.zeros"
],
[
"numpy.deprecate",
"numpy.testing.Tester"
],
[
"scipy.signal._arraytools.odd_ext",
"numpy.testing.run_module_suite",
"scipy.signal._arraytools.axis_reverse",
"numpy.arange",
"scipy.signal._arraytools.even_ext",
"numpy.testing.assert_array_equal",
"scipy.signal._arraytools.axis_slice",
"numpy.testing.assert_raises",
"scipy.signal._arraytools.const_ext",
"numpy.array"
],
[
"numpy.diag",
"numpy.dot",
"scipy.linalg.lapack.dgbtrf",
"scipy.linalg.svd",
"numpy.matrix",
"scipy.linalg.lapack.dsbevd",
"scipy.linalg.eigvals_banded",
"scipy.linalg.schur",
"numpy.asarray",
"numpy.testing.TestCase.__init__",
"numpy.sqrt",
"numpy.dtype",
"numpy.all",
"numpy.seterr",
"scipy.linalg._testutils.assert_no_overwrite",
"scipy.linalg.decomp._datacopied",
"scipy.linalg.qr_multiply",
"scipy.lib.six.moves.xrange",
"numpy.tril",
"scipy.linalg.lapack.dsbevx",
"numpy.conjugate",
"numpy.testing.assert_equal",
"scipy.linalg.lapack.dsbev",
"scipy.linalg.lapack.zgbtrs",
"numpy.arange",
"numpy.eye",
"numpy.linalg.eig",
"scipy.linalg.lapack.zhbevx",
"scipy.linalg.lapack.zgbtrf",
"scipy.linalg.eigh",
"numpy.frombuffer",
"scipy.linalg.diagsvd",
"numpy.triu",
"numpy.outer",
"numpy.zeros",
"scipy.linalg.solve",
"numpy.testing.assert_array_almost_equal",
"scipy.linalg.qz",
"scipy.linalg.hessenberg",
"scipy.linalg.eigvals",
"numpy.random.rand",
"numpy.testing.assert_",
"numpy.transpose",
"numpy.identity",
"numpy.testing.assert_raises",
"scipy.linalg.lu_factor",
"numpy.array",
"numpy.argsort",
"scipy.linalg.svdvals",
"scipy.linalg.eig",
"scipy.linalg.lapack.dgbtrs",
"numpy.iscomplex",
"numpy.testing.run_module_suite",
"numpy.linalg.solve",
"scipy.linalg.qr",
"numpy.random.seed",
"numpy.testing.dec.knownfailureif",
"numpy.isfinite",
"scipy.linalg.eig_banded",
"numpy.conj",
"numpy.abs",
"numpy.sort",
"numpy.ones",
"numpy.sign",
"numpy.bmat",
"numpy.random.normal",
"scipy.linalg.lapack.zhbevd",
"numpy.shape",
"scipy.linalg.lu",
"numpy.testing.assert_array_equal",
"scipy.linalg.rsf2csf",
"scipy.linalg.rq",
"scipy.linalg.lu_solve",
"numpy.empty"
],
[
"numpy.putmask",
"pandas.core.categorical.Categorical",
"pandas.core.algorithms.quantile",
"numpy.linspace",
"numpy.asarray",
"pandas.core.algorithms.unique",
"numpy.modf",
"pandas.core.common.is_integer",
"pandas.core.nanops.nanmax",
"numpy.diff",
"numpy.isscalar",
"pandas.core.common.isnull",
"numpy.iterable",
"pandas.core.nanops.nanmin",
"pandas.core.common.is_float"
],
[
"numpy.testing.Tester"
],
[
"pandas.lib.AxisProperty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.16",
"1.0",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.10",
"0.17",
"1.3"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.11",
"1.10",
"1.12",
"1.19",
"1.13",
"1.16",
"1.9",
"1.18",
"1.21",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sisl/mechamodlearn | [
"ed514b5d1193ce546b0221ba9222b0228d6c319a"
] | [
"mechamodlearn/rigidbody.py"
] | [
"# File: rigidbody.py\n\nimport abc\nimport torch\n\nfrom mechamodlearn import nn, utils\nfrom mechamodlearn.models import CholeskyMMNet, PotentialNet, GeneralizedForceNet\n\n\nclass AbstractRigidBody:\n\n @property\n @abc.abstractmethod\n def thetamask(self):\n \"\"\"Returns theta mask for configuration q.\n These should use utils.diffangles to compute differences\n \"\"\"\n\n @abc.abstractmethod\n def mass_matrix(self, q):\n \"\"\"Return mass matrix for configuration q\"\"\"\n\n @abc.abstractmethod\n def potential(self, q):\n \"\"\"Return potential for configuration q\"\"\"\n\n @abc.abstractmethod\n def generalized_force(self, q, v, u):\n \"\"\"Return generalized force for configuration q, velocity v, external torque u\"\"\"\n\n def kinetic_energy(self, q, v):\n mass_matrix = self.mass_matrix(q)\n # TODO(jkg): Check if this works correctly for batched\n kenergy = 0.5 * (v.unsqueeze(1) @ (mass_matrix @ v.unsqueeze(2))).squeeze(2)\n return kenergy\n\n def lagrangian(self, q, v):\n \"\"\" Returns the Lagrangian of a mechanical system\n \"\"\"\n kenergy = self.kinetic_energy(q, v)\n pot = self.potential(q)\n lag = kenergy - pot\n return lag\n\n def hamiltonian(self, q, v):\n \"\"\" Returns the Hamiltonian of a mechanical system\n \"\"\"\n kenergy = self.kinetic_energy(q, v)\n pot = self.potential(q)\n ham = kenergy + pot\n return ham\n\n def corriolisforce(self, q, v, mass_matrix=None):\n \"\"\" Computes the corriolis matrix times v\n \"\"\"\n with torch.enable_grad():\n if mass_matrix is None:\n mass_matrix = self.mass_matrix(q)\n\n Mv = mass_matrix @ v.unsqueeze(2)\n\n KE = 0.5 * v.unsqueeze(1) @ Mv\n\n Cv_KE = torch.autograd.grad(KE.sum(), q, retain_graph=True, create_graph=True)[0]\n\n gMv = torch.stack([\n torch.autograd.grad(Mv[:, i].sum(), q, retain_graph=True, create_graph=True)[0]\n for i in range(q.size(1))\n ], dim=1)\n\n Cv = gMv @ v.unsqueeze(2) - Cv_KE.unsqueeze(2)\n\n return Cv\n\n def corriolis(self, q, v, mass_matrix=None):\n \"\"\" Computes the corriolis matrix\n \"\"\"\n with torch.enable_grad():\n if mass_matrix is None:\n mass_matrix = self.mass_matrix(q)\n\n qdim = q.size(1)\n B = mass_matrix.size(0)\n\n mass_matrix = mass_matrix.reshape(-1, qdim, qdim)\n\n # TODO vectorize\n rows = []\n\n for i in range(qdim):\n cols = []\n for j in range(qdim):\n qgrad = torch.autograd.grad(\n torch.sum(mass_matrix[:, i, j]), q, retain_graph=True, create_graph=True)[0]\n cols.append(qgrad)\n\n rows.append(torch.stack(cols, dim=1))\n\n dMijk = torch.stack(rows, dim=1)\n\n corriolis = 0.5 * ((dMijk + dMijk.transpose(2, 3) - dMijk.transpose(1, 3)\n ) @ v.reshape(B, 1, qdim, 1)).squeeze(3)\n return corriolis\n\n def gradpotential(self, q):\n \"\"\" Returns the conservative forces acting on the system\n \"\"\"\n with torch.enable_grad():\n pot = self.potential(q)\n gvec = torch.autograd.grad(torch.sum(pot), q, retain_graph=True, create_graph=True)[0]\n return gvec\n\n def solve_euler_lagrange(self, q, v, u=None):\n \"\"\" Computes `qddot` (generalized acceleration) by solving\n the Euler-Lagrange equation (Eq 7 in the paper)\n \\qddot = M^-1 (F - Cv - G)\n \"\"\"\n with torch.enable_grad():\n with utils.temp_require_grad((q, v)):\n M = self.mass_matrix(q)\n Cv = self.corriolisforce(q, v, M)\n G = self.gradpotential(q)\n\n F = torch.zeros_like(Cv)\n\n if u is not None:\n F = self.generalized_force(q, v, u)\n\n # Solve M \\qddot = F - Cv - G\n qddot = torch.gesv(F - Cv - G.unsqueeze(2), M)[0].squeeze(2)\n return qddot\n\n\nclass LearnedRigidBody(AbstractRigidBody, torch.nn.Module):\n\n def 
__init__(self, qdim: int, udim: int, thetamask: torch.tensor, mass_matrix=None,\n potential=None, generalized_force=None, hidden_sizes=None):\n \"\"\"\n\n Arguments:\n - `qdim`:\n - `udim`: [int]\n - `thetamask`: [torch.Tensor (1, qdim)] 1 if angle, 0 otherwise\n - `mass_matrix`: [torch.nn.Module]\n - `potential`: [torch.nn.Module]\n - `generalized_force`: [torch.nn.Module]\n - hidden_sizes: [list]\n \"\"\"\n self._qdim = qdim\n self._udim = udim\n\n self._thetamask = thetamask\n\n super().__init__()\n\n if mass_matrix is None:\n mass_matrix = CholeskyMMNet(qdim, hidden_sizes=hidden_sizes)\n\n self._mass_matrix = mass_matrix\n\n if potential is None:\n potential = PotentialNet(qdim, hidden_sizes=hidden_sizes)\n\n self._potential = potential\n\n if generalized_force is None:\n generalized_force = GeneralizedForceNet(qdim, udim, hidden_sizes)\n\n self._generalized_force = generalized_force\n\n def mass_matrix(self, q):\n return self._mass_matrix(q)\n\n def potential(self, q):\n return self._potential(q)\n\n def generalized_force(self, q, v, u):\n return self._generalized_force(q, v, u)\n\n @property\n def thetamask(self):\n return self._thetamask\n\n def forward(self, q, v, u=None):\n return self.solve_euler_lagrange(q, v, u)\n"
] | [
[
"torch.stack",
"torch.sum",
"torch.zeros_like",
"torch.enable_grad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ucx-code/ucXception | [
"6b1f4fe4aa53a28e87584d07f540095c20ee50e9"
] | [
"framework/parsers/ucXception_fi_parser.py"
] | [
"import numpy as np\n\ndef map_reg_to_text(reg_code):\n\treg_dict = (\"rip\", \"rsp\", \"rax\", \"rbx\", \"rcx\", \"rdx\", \"cs\", \"ss\", \"eflags\", \"rbp\", \"r8\", \"r9\",\n\t\t\t\t\"r10\", \"r11\", \"r12\", \"r13\", \"r14\", \"r15\", \"rsi\", \"rdi\", \"orig_rax\", \"fs_base\", \"gs_base\",\n\t\t\t\t\"ds\", \"es\", \"fs\", \"gs\")\n\n\treturn reg_dict[reg_code]\n\n\nclass ucXception_fi_parser:\n\t\n\tdef parse(self, inj_time, reg, bit, chosen_thread, stdout, stderr):\n\t\trow = {}\n\n\t\trow[\"inj_time\"] = inj_time\n\t\trow[\"reg\"] = map_reg_to_text(reg)\n\t\trow[\"reg_d\"] = np.int32(reg)\n\t\trow[\"bit\"] = np.int32(bit)\n\t\trow[\"pid\"] = np.int32(chosen_thread)\n\n\t\t# Get the values of old and new registers\n\t\tprefix = \"none\"\n\t\tfor line in stdout.split(\"\\n\")[:-1]:\n\t\t\tif \"Old register values\" in line:\n\t\t\t\tprefix=\"old_\"\n\t\t\telif \"New register values\" in line:\n\t\t\t\tprefix=\"new_\"\n\t\t\telse:\t\n\t\t\t\t(reg_name, reg_val) = line.split(\": \")\n\t\t\t\treg_name = reg_name.rstrip().lower()\n\t\t\t\treg_val = \"0x%s\" % reg_val.rstrip()\n\t\t\t\t#print reg_name, reg_val, type(reg_val)\n\t\t\t\trow[prefix + reg_name] = reg_val\n\t\t\t\t# We also add the register value in decimal\n\t\t\t\trow[prefix + reg_name + \"_d\"] = np.int(reg_val, 16) # np.int64 gives a strange exception--- (numpy bug?)\n\t\treturn row"
] | [
[
"numpy.int",
"numpy.int32"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lyuyangh/Cross-Attention-VizWiz-VQA | [
"853bfe480dac5bd1363f60c6b17e25134acdc2fa"
] | [
"demo/predict.py"
] | [
"import datetime\nimport json\nimport os\nimport sys\nimport time\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom utils.flags import FLAGS\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom model.vqa_model import ModelParams, VQAModel\nimport demo.demo_dataset as dataset\nimport demo.visualize as visualize\n\n\nclass Inference:\n def __init__(self):\n self.model = self._load_model()\n self.demo_data = dataset.VQAFeatureDataset()\n\n def _get_answer(self, p, dataloader):\n _m, idx = p.max(1)\n return dataloader.dataset.label2ans[idx.item()]\n\n def _load_model(self):\n data_params = json.load(open(FLAGS.data_params_path))\n model_params = ModelParams(\n add_self_attention=FLAGS.add_self_attention,\n fusion_method=FLAGS.fusion_method,\n question_sequence_length=dataset.MAX_QUES_SEQ_LEN,\n number_of_objects=dataset.NO_OBJECTS,\n word_embedding_dimension=data_params[\"word_feat_dimension\"],\n object_embedding_dimension=data_params[\"image_feat_dimension\"],\n vocabulary_size=data_params[\"vocabulary_size\"],\n num_ans_candidates=data_params[\"number_of_answer_candidiates\"],\n )\n model = VQAModel(\n glove_path=FLAGS.glove_path,\n model_params=model_params,\n hidden_dimension=FLAGS.hidden_dimension,\n ).cuda()\n FLAGS.snapshot_path = (\n \"/home/rachana/Documents/vizwiz/save_folder/self_cross_3/final\"\n )\n model_path = FLAGS.snapshot_path\n print(\"loading %s\" % model_path)\n model_data = torch.load(model_path)\n\n model = nn.DataParallel(model).cuda()\n model.load_state_dict(model_data.get(\"model_state\", model_data))\n model.train(False)\n return model\n\n def get_prediction(self, image_id, question, batch_size=1):\n self.demo_data.set_input(image_id, question)\n demo_data_loader = DataLoader(\n self.demo_data,\n batch_size,\n shuffle=False,\n num_workers=1,\n )\n visual_feature, bboxes, question = iter(demo_data_loader).next()\n visual_feature = Variable(visual_feature).cuda()\n bboxes = Variable(bboxes).cuda()\n question = Variable(question).cuda()\n pred, i_att, q_att = self.model(visual_feature, question)\n answer = self._get_answer(pred.data, demo_data_loader)\n\n return (\n answer,\n i_att,\n q_att,\n bboxes,\n )\n"
] | [
[
"torch.autograd.Variable",
"torch.nn.DataParallel",
"torch.utils.data.DataLoader",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Janetteeeeeeee/nnUNet | [
"db654c445aa5ced436dbf842d432dbbcdc01f4b5",
"db654c445aa5ced436dbf842d432dbbcdc01f4b5"
] | [
"nnunet/experiment_planning/experiment_planner_baseline_3DUNet.py",
"nnunet/training/network_training/nnUNetTrainer.py"
] | [
"# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport shutil\nfrom collections import OrderedDict\nfrom copy import deepcopy\n\nimport nnunet\nimport numpy as np\nfrom batchgenerators.utilities.file_and_folder_operations import *\nfrom nnunet.configuration import default_num_threads\nfrom nnunet.experiment_planning.DatasetAnalyzer import DatasetAnalyzer\nfrom nnunet.experiment_planning.common_utils import get_pool_and_conv_props_poolLateV2\nfrom nnunet.experiment_planning.utils import create_lists_from_splitted_dataset\nfrom nnunet.network_architecture.generic_UNet import Generic_UNet\nfrom nnunet.paths import *\nfrom nnunet.preprocessing.cropping import get_case_identifier_from_npz\nfrom nnunet.training.model_restore import recursive_find_python_class\n\n\nclass ExperimentPlanner(object):\n def __init__(self, folder_with_cropped_data, preprocessed_output_folder):\n self.folder_with_cropped_data = folder_with_cropped_data\n self.preprocessed_output_folder = preprocessed_output_folder\n self.list_of_cropped_npz_files = subfiles(self.folder_with_cropped_data, True, None, \".npz\", True)\n\n self.preprocessor_name = \"GenericPreprocessor\"\n\n assert isfile(join(self.folder_with_cropped_data, \"dataset_properties.pkl\")), \\\n \"folder_with_cropped_data must contain dataset_properties.pkl\"\n self.dataset_properties = load_pickle(join(self.folder_with_cropped_data, \"dataset_properties.pkl\"))\n\n self.plans_per_stage = OrderedDict()\n self.plans = OrderedDict()\n self.plans_fname = join(self.preprocessed_output_folder, \"nnUNetPlans\" + \"fixed_plans_3D.pkl\")\n self.data_identifier = default_data_identifier\n\n self.transpose_forward = [0, 1, 2]\n self.transpose_backward = [0, 1, 2]\n\n self.unet_base_num_features = Generic_UNet.BASE_NUM_FEATURES_3D\n self.unet_max_num_filters = 320\n self.unet_max_numpool = 999\n self.unet_min_batch_size = 2\n self.unet_featuremap_min_edge_length = 4\n\n self.target_spacing_percentile = 50\n self.anisotropy_threshold = 3\n self.how_much_of_a_patient_must_the_network_see_at_stage0 = 4 # 1/4 of a patient\n self.batch_size_covers_max_percent_of_dataset = 0.05 # all samples in the batch together cannot cover more\n # than 5% of the entire dataset\n\n self.conv_per_stage = 2\n\n def get_target_spacing(self):\n spacings = self.dataset_properties['all_spacings']\n\n # target = np.median(np.vstack(spacings), 0)\n # if target spacing is very anisotropic we may want to not downsample the axis with the worst spacing\n # uncomment after mystery task submission\n \"\"\"worst_spacing_axis = np.argmax(target)\n if max(target) > (2.5 * min(target)):\n spacings_of_that_axis = np.vstack(spacings)[:, worst_spacing_axis]\n target_spacing_of_that_axis = np.percentile(spacings_of_that_axis, 5)\n target[worst_spacing_axis] = target_spacing_of_that_axis\"\"\"\n\n target = np.percentile(np.vstack(spacings), self.target_spacing_percentile, 0)\n return target\n\n def 
save_my_plans(self):\n with open(self.plans_fname, 'wb') as f:\n pickle.dump(self.plans, f)\n\n def load_my_plans(self):\n self.plans = load_pickle(self.plans_fname)\n\n self.plans_per_stage = self.plans['plans_per_stage']\n self.dataset_properties = self.plans['dataset_properties']\n\n self.transpose_forward = self.plans['transpose_forward']\n self.transpose_backward = self.plans['transpose_backward']\n\n def determine_postprocessing(self):\n pass\n \"\"\"\n Spoiler: This is unused, postprocessing was removed. Ignore it.\n :return:\n print(\"determining postprocessing...\")\n\n props_per_patient = self.dataset_properties['segmentation_props_per_patient']\n\n all_region_keys = [i for k in props_per_patient.keys() for i in props_per_patient[k]['only_one_region'].keys()]\n all_region_keys = list(set(all_region_keys))\n\n only_keep_largest_connected_component = OrderedDict()\n\n for r in all_region_keys:\n all_results = [props_per_patient[k]['only_one_region'][r] for k in props_per_patient.keys()]\n only_keep_largest_connected_component[tuple(r)] = all(all_results)\n\n print(\"Postprocessing: only_keep_largest_connected_component\", only_keep_largest_connected_component)\n\n all_classes = self.dataset_properties['all_classes']\n classes = [i for i in all_classes if i > 0]\n\n props_per_patient = self.dataset_properties['segmentation_props_per_patient']\n\n min_size_per_class = OrderedDict()\n for c in classes:\n all_num_voxels = []\n for k in props_per_patient.keys():\n all_num_voxels.append(props_per_patient[k]['volume_per_class'][c])\n if len(all_num_voxels) > 0:\n min_size_per_class[c] = np.percentile(all_num_voxels, 1) * MIN_SIZE_PER_CLASS_FACTOR\n else:\n min_size_per_class[c] = np.inf\n\n min_region_size_per_class = OrderedDict()\n for c in classes:\n region_sizes = [l for k in props_per_patient for l in props_per_patient[k]['region_volume_per_class'][c]]\n if len(region_sizes) > 0:\n min_region_size_per_class[c] = min(region_sizes)\n # we don't need that line but better safe than sorry, right?\n min_region_size_per_class[c] = min(min_region_size_per_class[c], min_size_per_class[c])\n else:\n min_region_size_per_class[c] = 0\n\n print(\"Postprocessing: min_size_per_class\", min_size_per_class)\n print(\"Postprocessing: min_region_size_per_class\", min_region_size_per_class)\n return only_keep_largest_connected_component, min_size_per_class, min_region_size_per_class\n \"\"\"\n\n def get_properties_for_stage(self, current_spacing, original_spacing, original_shape, num_cases,\n num_modalities, num_classes):\n \"\"\"\n Computation of input patch size starts out with the new median shape (in voxels) of a dataset. This is\n opposed to prior experiments where I based it on the median size in mm. The rationale behind this is that\n for some organ of interest the acquisition method will most likely be chosen such that the field of view and\n voxel resolution go hand in hand to show the doctor what they need to see. This assumption may be violated\n for some modalities with anisotropy (cine MRI) but we will have t live with that. 
In future experiments I\n will try to 1) base input patch size match aspect ratio of input size in mm (instead of voxels) and 2) to\n try to enforce that we see the same 'distance' in all directions (try to maintain equal size in mm of patch)\n\n The patches created here attempt keep the aspect ratio of the new_median_shape\n\n :param current_spacing:\n :param original_spacing:\n :param original_shape:\n :param num_cases:\n :return:\n \"\"\"\n new_median_shape = np.round(original_spacing / current_spacing * original_shape).astype(int)\n dataset_num_voxels = np.prod(new_median_shape) * num_cases\n\n # the next line is what we had before as a default. The patch size had the same aspect ratio as the median shape of a patient. We swapped t\n # input_patch_size = new_median_shape\n\n # compute how many voxels are one mm\n input_patch_size = 1 / np.array(current_spacing)\n\n # normalize voxels per mm\n input_patch_size /= input_patch_size.mean()\n\n # create an isotropic patch of size 512x512x512mm\n input_patch_size *= 1 / min(input_patch_size) * 512 # to get a starting value\n input_patch_size = np.round(input_patch_size).astype(int)\n\n # clip it to the median shape of the dataset because patches larger then that make not much sense\n input_patch_size = [min(i, j) for i, j in zip(input_patch_size, new_median_shape)]\n\n network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \\\n shape_must_be_divisible_by = get_pool_and_conv_props_poolLateV2(input_patch_size,\n self.unet_featuremap_min_edge_length,\n self.unet_max_numpool,\n current_spacing)\n\n ref = Generic_UNet.use_this_for_batch_size_computation_3D\n here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,\n self.unet_base_num_features,\n self.unet_max_num_filters, num_modalities,\n num_classes,\n pool_op_kernel_sizes, conv_per_stage=self.conv_per_stage)\n while here > ref:\n axis_to_be_reduced = np.argsort(new_shp / new_median_shape)[-1]\n\n tmp = deepcopy(new_shp)\n tmp[axis_to_be_reduced] -= shape_must_be_divisible_by[axis_to_be_reduced]\n _, _, _, _, shape_must_be_divisible_by_new = \\\n get_pool_and_conv_props_poolLateV2(tmp,\n self.unet_featuremap_min_edge_length,\n self.unet_max_numpool,\n current_spacing)\n new_shp[axis_to_be_reduced] -= shape_must_be_divisible_by_new[axis_to_be_reduced]\n\n # we have to recompute numpool now:\n network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \\\n shape_must_be_divisible_by = get_pool_and_conv_props_poolLateV2(new_shp,\n self.unet_featuremap_min_edge_length,\n self.unet_max_numpool,\n current_spacing)\n\n here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,\n self.unet_base_num_features,\n self.unet_max_num_filters, num_modalities,\n num_classes, pool_op_kernel_sizes,\n conv_per_stage=self.conv_per_stage)\n # print(new_shp)\n\n input_patch_size = new_shp\n\n batch_size = Generic_UNet.DEFAULT_BATCH_SIZE_3D # This is what works with 128**3\n batch_size = int(np.floor(max(ref / here, 1) * batch_size))\n\n # check if batch size is too large\n max_batch_size = np.round(self.batch_size_covers_max_percent_of_dataset * dataset_num_voxels /\n np.prod(input_patch_size, dtype=np.int64)).astype(int)\n max_batch_size = max(max_batch_size, self.unet_min_batch_size)\n batch_size = max(1, min(batch_size, max_batch_size))\n\n do_dummy_2D_data_aug = (max(input_patch_size) / input_patch_size[\n 0]) > self.anisotropy_threshold\n\n plan = {\n 'batch_size': batch_size,\n 'num_pool_per_axis': 
network_num_pool_per_axis,\n 'patch_size': input_patch_size,\n 'median_patient_size_in_voxels': new_median_shape,\n 'current_spacing': current_spacing,\n 'original_spacing': original_spacing,\n 'do_dummy_2D_data_aug': do_dummy_2D_data_aug,\n 'pool_op_kernel_sizes': pool_op_kernel_sizes,\n 'conv_kernel_sizes': conv_kernel_sizes,\n }\n return plan\n\n def plan_experiment(self):\n use_nonzero_mask_for_normalization = self.determine_whether_to_use_mask_for_norm()\n print(\"Are we using the nonzero mask for normalizaion?\", use_nonzero_mask_for_normalization)\n spacings = self.dataset_properties['all_spacings']\n sizes = self.dataset_properties['all_sizes']\n\n all_classes = self.dataset_properties['all_classes']\n modalities = self.dataset_properties['modalities']\n num_modalities = len(list(modalities.keys()))\n\n target_spacing = self.get_target_spacing()\n new_shapes = [np.array(i) / target_spacing * np.array(j) for i, j in zip(spacings, sizes)]\n\n max_spacing_axis = np.argmax(target_spacing)\n remaining_axes = [i for i in list(range(3)) if i != max_spacing_axis]\n self.transpose_forward = [max_spacing_axis] + remaining_axes\n self.transpose_backward = [np.argwhere(np.array(self.transpose_forward) == i)[0][0] for i in range(3)]\n\n # we base our calculations on the median shape of the datasets\n median_shape = np.median(np.vstack(new_shapes), 0)\n print(\"the median shape of the dataset is \", median_shape)\n\n max_shape = np.max(np.vstack(new_shapes), 0)\n print(\"the max shape in the dataset is \", max_shape)\n min_shape = np.min(np.vstack(new_shapes), 0)\n print(\"the min shape in the dataset is \", min_shape)\n\n print(\"we don't want feature maps smaller than \", self.unet_featuremap_min_edge_length, \" in the bottleneck\")\n\n # how many stages will the image pyramid have?\n self.plans_per_stage = list()\n\n target_spacing_transposed = np.array(target_spacing)[self.transpose_forward]\n median_shape_transposed = np.array(median_shape)[self.transpose_forward]\n print(\"the transposed median shape of the dataset is \", median_shape_transposed)\n\n print(\"generating configuration for 3d_fullres\")\n self.plans_per_stage.append(self.get_properties_for_stage(target_spacing_transposed, target_spacing_transposed,\n median_shape_transposed,\n len(self.list_of_cropped_npz_files),\n num_modalities, len(all_classes) + 1))\n\n # thanks Zakiyi (https://github.com/MIC-DKFZ/nnUNet/issues/61) for spotting this bug :-)\n # if np.prod(self.plans_per_stage[-1]['median_patient_size_in_voxels'], dtype=np.int64) / \\\n # architecture_input_voxels < HOW_MUCH_OF_A_PATIENT_MUST_THE_NETWORK_SEE_AT_STAGE0:\n architecture_input_voxels_here = np.prod(self.plans_per_stage[-1]['patch_size'], dtype=np.int64)\n if np.prod(median_shape) / architecture_input_voxels_here < \\\n self.how_much_of_a_patient_must_the_network_see_at_stage0:\n more = False\n else:\n more = True\n\n if more:\n print(\"generating configuration for 3d_lowres\")\n # if we are doing more than one stage then we want the lowest stage to have exactly\n # HOW_MUCH_OF_A_PATIENT_MUST_THE_NETWORK_SEE_AT_STAGE0 (this is 4 by default so the number of voxels in the\n # median shape of the lowest stage must be 4 times as much as the network can process at once (128x128x128 by\n # default). Problem is that we are downsampling higher resolution axes before we start downsampling the\n # out-of-plane axis. 
We could probably/maybe do this analytically but I am lazy, so here\n # we do it the dumb way\n\n lowres_stage_spacing = deepcopy(target_spacing)\n num_voxels = np.prod(median_shape, dtype=np.float64)\n while num_voxels > self.how_much_of_a_patient_must_the_network_see_at_stage0 * architecture_input_voxels_here:\n max_spacing = max(lowres_stage_spacing)\n if np.any((max_spacing / lowres_stage_spacing) > 2):\n lowres_stage_spacing[(max_spacing / lowres_stage_spacing) > 2] \\\n *= 1.01\n else:\n lowres_stage_spacing *= 1.01\n num_voxels = np.prod(target_spacing / lowres_stage_spacing * median_shape, dtype=np.float64)\n\n lowres_stage_spacing_transposed = np.array(lowres_stage_spacing)[self.transpose_forward]\n new = self.get_properties_for_stage(lowres_stage_spacing_transposed, target_spacing_transposed,\n median_shape_transposed,\n len(self.list_of_cropped_npz_files),\n num_modalities, len(all_classes) + 1)\n architecture_input_voxels_here = np.prod(new['patch_size'], dtype=np.int64)\n if 2 * np.prod(new['median_patient_size_in_voxels'], dtype=np.int64) < np.prod(\n self.plans_per_stage[0]['median_patient_size_in_voxels'], dtype=np.int64):\n self.plans_per_stage.append(new)\n\n self.plans_per_stage = self.plans_per_stage[::-1]\n self.plans_per_stage = {i: self.plans_per_stage[i] for i in range(len(self.plans_per_stage))} # convert to dict\n\n print(self.plans_per_stage)\n print(\"transpose forward\", self.transpose_forward)\n print(\"transpose backward\", self.transpose_backward)\n\n normalization_schemes = self.determine_normalization_scheme()\n only_keep_largest_connected_component, min_size_per_class, min_region_size_per_class = None, None, None\n # removed training data based postprocessing. This is deprecated\n\n # these are independent of the stage\n plans = {'num_stages': len(list(self.plans_per_stage.keys())), 'num_modalities': num_modalities,\n 'modalities': modalities, 'normalization_schemes': normalization_schemes,\n 'dataset_properties': self.dataset_properties, 'list_of_npz_files': self.list_of_cropped_npz_files,\n 'original_spacings': spacings, 'original_sizes': sizes,\n 'preprocessed_data_folder': self.preprocessed_output_folder, 'num_classes': len(all_classes),\n 'all_classes': all_classes, 'base_num_features': self.unet_base_num_features,\n 'use_mask_for_norm': use_nonzero_mask_for_normalization,\n 'keep_only_largest_region': only_keep_largest_connected_component,\n 'min_region_size_per_class': min_region_size_per_class, 'min_size_per_class': min_size_per_class,\n 'transpose_forward': self.transpose_forward, 'transpose_backward': self.transpose_backward,\n 'data_identifier': self.data_identifier, 'plans_per_stage': self.plans_per_stage,\n 'preprocessor_name': self.preprocessor_name,\n 'conv_per_stage': self.conv_per_stage,\n }\n\n self.plans = plans\n self.save_my_plans()\n\n def determine_normalization_scheme(self):\n schemes = OrderedDict()\n modalities = self.dataset_properties['modalities']\n num_modalities = len(list(modalities.keys()))\n\n for i in range(num_modalities):\n if modalities[i] == \"CT\" or modalities[i] == 'ct':\n schemes[i] = \"CT\"\n elif modalities[i] == 'noNorm':\n schemes[i] = \"noNorm\"\n else:\n schemes[i] = \"nonCT\"\n return schemes\n\n def save_properties_of_cropped(self, case_identifier, properties):\n with open(join(self.folder_with_cropped_data, \"%s.pkl\" % case_identifier), 'wb') as f:\n pickle.dump(properties, f)\n\n def load_properties_of_cropped(self, case_identifier):\n with open(join(self.folder_with_cropped_data, \"%s.pkl\" % 
case_identifier), 'rb') as f:\n properties = pickle.load(f)\n return properties\n\n def determine_whether_to_use_mask_for_norm(self):\n # only use the nonzero mask for normalization of the cropping based on it resulted in a decrease in\n # image size (this is an indication that the data is something like brats/isles and then we want to\n # normalize in the brain region only)\n modalities = self.dataset_properties['modalities']\n num_modalities = len(list(modalities.keys()))\n use_nonzero_mask_for_norm = OrderedDict()\n\n for i in range(num_modalities):\n if \"CT\" in modalities[i]:\n use_nonzero_mask_for_norm[i] = False\n else:\n all_size_reductions = []\n for k in self.dataset_properties['size_reductions'].keys():\n all_size_reductions.append(self.dataset_properties['size_reductions'][k])\n\n if np.median(all_size_reductions) < 3 / 4.:\n print(\"using nonzero mask for normalization\")\n use_nonzero_mask_for_norm[i] = True\n else:\n print(\"not using nonzero mask for normalization\")\n use_nonzero_mask_for_norm[i] = False\n\n for c in self.list_of_cropped_npz_files:\n case_identifier = get_case_identifier_from_npz(c)\n properties = self.load_properties_of_cropped(case_identifier)\n properties['use_nonzero_mask_for_norm'] = use_nonzero_mask_for_norm\n self.save_properties_of_cropped(case_identifier, properties)\n use_nonzero_mask_for_normalization = use_nonzero_mask_for_norm\n return use_nonzero_mask_for_normalization\n\n def write_normalization_scheme_to_patients(self):\n \"\"\"\n This is used for test set preprocessing\n :return: \n \"\"\"\n for c in self.list_of_cropped_npz_files:\n case_identifier = get_case_identifier_from_npz(c)\n properties = self.load_properties_of_cropped(case_identifier)\n properties['use_nonzero_mask_for_norm'] = self.plans['use_mask_for_norm']\n self.save_properties_of_cropped(case_identifier, properties)\n\n def run_preprocessing(self, num_threads):\n if os.path.isdir(join(self.preprocessed_output_folder, \"gt_segmentations\")):\n shutil.rmtree(join(self.preprocessed_output_folder, \"gt_segmentations\"))\n shutil.copytree(join(self.folder_with_cropped_data, \"gt_segmentations\"),\n join(self.preprocessed_output_folder, \"gt_segmentations\"))\n normalization_schemes = self.plans['normalization_schemes']\n use_nonzero_mask_for_normalization = self.plans['use_mask_for_norm']\n intensityproperties = self.plans['dataset_properties']['intensityproperties']\n preprocessor_class = recursive_find_python_class([join(nnunet.__path__[0], \"preprocessing\")],\n self.preprocessor_name, current_module=\"nnunet.preprocessing\")\n assert preprocessor_class is not None\n preprocessor = preprocessor_class(normalization_schemes, use_nonzero_mask_for_normalization,\n self.transpose_forward,\n intensityproperties)\n target_spacings = [i[\"current_spacing\"] for i in self.plans_per_stage.values()]\n if self.plans['num_stages'] > 1 and not isinstance(num_threads, (list, tuple)):\n num_threads = (default_num_threads, num_threads)\n elif self.plans['num_stages'] == 1 and isinstance(num_threads, (list, tuple)):\n num_threads = num_threads[-1]\n preprocessor.run(target_spacings, self.folder_with_cropped_data, self.preprocessed_output_folder,\n self.plans['data_identifier'], num_threads)\n\ndef maybe_mkdir_p(directory):\n directory = os.path.abspath(directory)\n splits = directory.split(\"\\\\\")[1:]\n base = directory.split('\\\\')[0]\n for i in range(0, len(splits)):\n if not os.path.isdir(join(base, join(\"\\\\\", *splits[:i+1]))):\n try:\n os.mkdir(join(base, join(\"\\\\\", 
*splits[:i+1])))\n except FileExistsError:\n # this can sometimes happen when two jobs try to create the same directory at the same time,\n # especially on network drives.\n print(\"WARNING: Folder %s already existed and does not need to be created\" % directory)\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", \"--task_ids\", nargs=\"+\", help=\"list of int\")\n parser.add_argument(\"-p\", action=\"store_true\", help=\"set this if you actually want to run the preprocessing. If \"\n \"this is not set then this script will only create the plans file\")\n parser.add_argument(\"-tl\", type=int, required=False, default=8, help=\"num_threads_lowres\")\n parser.add_argument(\"-tf\", type=int, required=False, default=8, help=\"num_threads_fullres\")\n\n args = parser.parse_args()\n task_ids = args.task_ids\n run_preprocessing = args.p\n tl = args.tl\n tf = args.tf\n\n tasks = []\n for i in task_ids:\n i = int(i)\n candidates = subdirs(nnUNet_cropped_data, prefix=\"Task%03.0d\" % i, join=False)\n assert len(candidates) == 1\n tasks.append(candidates[0])\n\n for t in tasks:\n try:\n print(\"\\n\\n\\n\", t)\n cropped_out_dir = os.path.join(nnUNet_cropped_data, t)\n preprocessing_output_dir_this_task = os.path.join(preprocessing_output_dir, t)\n splitted_4d_output_dir_task = os.path.join(nnUNet_raw_data, t)\n lists, modalities = create_lists_from_splitted_dataset(splitted_4d_output_dir_task)\n\n dataset_analyzer = DatasetAnalyzer(cropped_out_dir, overwrite=False)\n _ = dataset_analyzer.analyze_dataset() # this will write output files that will be used by the ExperimentPlanner\n\n maybe_mkdir_p(preprocessing_output_dir_this_task)\n shutil.copy(join(cropped_out_dir, \"dataset_properties.pkl\"), preprocessing_output_dir_this_task)\n shutil.copy(join(nnUNet_raw_data, t, \"dataset.json\"), preprocessing_output_dir_this_task)\n\n threads = (tl, tf)\n\n print(\"number of threads: \", threads, \"\\n\")\n\n exp_planner = ExperimentPlanner(cropped_out_dir, preprocessing_output_dir_this_task)\n exp_planner.plan_experiment()\n if run_preprocessing:\n exp_planner.run_preprocessing(threads)\n except Exception as e:\n print(e)\n",
"# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport shutil\nfrom collections import OrderedDict\nfrom multiprocessing import Pool\nfrom time import sleep\nfrom typing import Tuple, List\n\nimport matplotlib\nimport numpy as np\nimport torch\nfrom batchgenerators.utilities.file_and_folder_operations import *\nfrom torch import nn\nfrom torch.optim import lr_scheduler\n\nimport nnunet\nfrom nnunet.configuration import default_num_threads\nfrom nnunet.evaluation.evaluator import aggregate_scores\nfrom nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax\nfrom nnunet.network_architecture.generic_UNet import Generic_UNet\nfrom nnunet.network_architecture.initialization import InitWeights_He\nfrom nnunet.network_architecture.neural_network import SegmentationNetwork\nfrom nnunet.postprocessing.connected_components import determine_postprocessing\nfrom nnunet.training.data_augmentation.default_data_augmentation import default_3D_augmentation_params, \\\n default_2D_augmentation_params, get_default_augmentation, get_patch_size\nfrom nnunet.training.dataloading.dataset_loading import load_dataset, DataLoader3D, DataLoader2D, unpack_dataset\nfrom nnunet.training.loss_functions.dice_loss import DC_and_CE_loss\nfrom nnunet.training.network_training.network_trainer import NetworkTrainer\nfrom nnunet.utilities.nd_softmax import softmax_helper\nfrom nnunet.utilities.tensor_utilities import sum_tensor\n\nmatplotlib.use(\"agg\")\n\ndef maybe_mkdir_p(directory):\n directory = os.path.abspath(directory)\n splits = directory.split(\"\\\\\")[1:]\n base = directory.split('\\\\')[0]\n for i in range(0, len(splits)):\n if not os.path.isdir(join(base, join(\"\\\\\", *splits[:i+1]))):\n try:\n os.mkdir(join(base, join(\"\\\\\", *splits[:i+1])))\n except FileExistsError:\n # this can sometimes happen when two jobs try to create the same directory at the same time,\n # especially on network drives.\n print(\"WARNING: Folder %s already existed and does not need to be created\" % directory)\n\nclass nnUNetTrainer(NetworkTrainer):\n def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,\n unpack_data=True, deterministic=True, fp16=False):\n \"\"\"\n :param deterministic:\n :param fold: can be either [0 ... 5) for cross-validation, 'all' to train on all available training data or\n None if you wish to load some checkpoint and do inference only\n :param plans_file: the pkl file generated by preprocessing. This file will determine all design choices\n :param subfolder_with_preprocessed_data: must be a subfolder of dataset_directory (just the name of the folder,\n not the entire path). This is where the preprocessed data lies that will be used for network training. 
We made\n this explicitly available so that differently preprocessed data can coexist and the user can choose what to use.\n Can be None if you are doing inference only.\n :param output_folder: where to store parameters, plot progress and to the validation\n :param dataset_directory: the parent directory in which the preprocessed Task data is stored. This is required\n because the split information is stored in this directory. For running prediction only this input is not\n required and may be set to None\n :param batch_dice: compute dice loss for each sample and average over all samples in the batch or pretend the\n batch is a pseudo volume?\n :param stage: The plans file may contain several stages (used for lowres / highres / pyramid). Stage must be\n specified for training:\n if stage 1 exists then stage 1 is the high resolution stage, otherwise it's 0\n :param unpack_data: if False, npz preprocessed data will not be unpacked to npy. This consumes less space but\n is considerably slower! Running unpack_data=False with 2d should never be done!\n\n IMPORTANT: If you inherit from nnUNetTrainer and the init args change then you need to redefine self.init_args\n in your init accordingly. Otherwise checkpoints won't load properly!\n \"\"\"\n super(nnUNetTrainer, self).__init__(deterministic, fp16)\n self.unpack_data = unpack_data\n self.init_args = (plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,\n deterministic, fp16)\n # set through arguments from init\n self.stage = stage\n self.experiment_name = self.__class__.__name__\n self.plans_file = plans_file\n self.output_folder = output_folder\n self.dataset_directory = dataset_directory\n self.output_folder_base = self.output_folder\n self.fold = fold\n\n self.plans = None\n\n # if we are running inference only then the self.dataset_directory is set (due to checkpoint loading) but it\n # irrelevant\n if self.dataset_directory is not None and isdir(self.dataset_directory):\n self.gt_niftis_folder = join(self.dataset_directory, \"gt_segmentations\")\n else:\n self.gt_niftis_folder = None\n\n self.folder_with_preprocessed_data = None\n\n # set in self.initialize()\n\n self.dl_tr = self.dl_val = None\n self.num_input_channels = self.num_classes = self.net_pool_per_axis = self.patch_size = self.batch_size = \\\n self.threeD = self.base_num_features = self.intensity_properties = self.normalization_schemes = \\\n self.net_num_pool_op_kernel_sizes = self.net_conv_kernel_sizes = None # loaded automatically from plans_file\n self.basic_generator_patch_size = self.data_aug_params = self.transpose_forward = self.transpose_backward = None\n\n self.batch_dice = batch_dice\n self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {})\n\n self.online_eval_foreground_dc = []\n self.online_eval_tp = []\n self.online_eval_fp = []\n self.online_eval_fn = []\n\n self.classes = self.do_dummy_2D_aug = self.use_mask_for_norm = self.only_keep_largest_connected_component = \\\n self.min_region_size_per_class = self.min_size_per_class = None\n\n self.inference_pad_border_mode = \"constant\"\n self.inference_pad_kwargs = {'constant_values': 0}\n\n self.update_fold(fold)\n self.pad_all_sides = None\n\n self.lr_scheduler_eps = 1e-3\n self.lr_scheduler_patience = 30\n self.initial_lr = 3e-4\n self.weight_decay = 3e-5\n\n self.oversample_foreground_percent = 0.33\n\n self.conv_per_stage = None\n self.regions_class_order = None\n\n def update_fold(self, fold):\n \"\"\"\n used to swap between folds for 
inference (ensemble of models from cross-validation)\n DO NOT USE DURING TRAINING AS THIS WILL NOT UPDATE THE DATASET SPLIT AND THE DATA AUGMENTATION GENERATORS\n :param fold:\n :return:\n \"\"\"\n if fold is not None:\n if isinstance(fold, str):\n assert fold == \"all\", \"if self.fold is a string then it must be \\'all\\'\"\n if self.output_folder.endswith(\"%s\" % str(self.fold)):\n self.output_folder = self.output_folder_base\n self.output_folder = join(self.output_folder, \"%s\" % str(fold))\n else:\n if self.output_folder.endswith(\"fold_%s\" % str(self.fold)):\n self.output_folder = self.output_folder_base\n self.output_folder = join(self.output_folder, \"fold_%s\" % str(fold))\n self.fold = fold\n\n def setup_DA_params(self):\n if self.threeD:\n self.data_aug_params = default_3D_augmentation_params\n if self.do_dummy_2D_aug:\n self.data_aug_params[\"dummy_2D\"] = True\n self.print_to_log_file(\"Using dummy2d data augmentation\")\n self.data_aug_params[\"elastic_deform_alpha\"] = \\\n default_2D_augmentation_params[\"elastic_deform_alpha\"]\n self.data_aug_params[\"elastic_deform_sigma\"] = \\\n default_2D_augmentation_params[\"elastic_deform_sigma\"]\n self.data_aug_params[\"rotation_x\"] = default_2D_augmentation_params[\"rotation_x\"]\n else:\n self.do_dummy_2D_aug = False\n if max(self.patch_size) / min(self.patch_size) > 1.5:\n default_2D_augmentation_params['rotation_x'] = (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi)\n self.data_aug_params = default_2D_augmentation_params\n self.data_aug_params[\"mask_was_used_for_normalization\"] = self.use_mask_for_norm\n\n if self.do_dummy_2D_aug:\n self.basic_generator_patch_size = get_patch_size(self.patch_size[1:],\n self.data_aug_params['rotation_x'],\n self.data_aug_params['rotation_y'],\n self.data_aug_params['rotation_z'],\n self.data_aug_params['scale_range'])\n self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size))\n else:\n self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'],\n self.data_aug_params['rotation_y'],\n self.data_aug_params['rotation_z'],\n self.data_aug_params['scale_range'])\n\n self.data_aug_params['selected_seg_channels'] = [0]\n self.data_aug_params['patch_size_for_spatialtransform'] = self.patch_size\n\n def initialize(self, training=True, force_load_plans=False):\n \"\"\"\n For prediction of test cases just set training=False, this will prevent loading of training data and\n training batchgenerator initialization\n :param training:\n :return:\n \"\"\"\n\n maybe_mkdir_p(self.output_folder)\n\n if force_load_plans or (self.plans is None):\n self.load_plans_file()\n\n self.process_plans(self.plans)\n\n self.setup_DA_params()\n\n if training:\n self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +\n \"_stage%d\" % self.stage)\n\n self.dl_tr, self.dl_val = self.get_basic_generators()\n if self.unpack_data:\n self.print_to_log_file(\"unpacking dataset\")\n unpack_dataset(self.folder_with_preprocessed_data)\n self.print_to_log_file(\"done\")\n else:\n self.print_to_log_file(\n \"INFO: Not unpacking data! Training may be slow due to that. 
Pray you are not using 2d or you \"\n \"will wait all winter for your model to finish!\")\n self.tr_gen, self.val_gen = get_default_augmentation(self.dl_tr, self.dl_val,\n self.data_aug_params[\n 'patch_size_for_spatialtransform'],\n self.data_aug_params)\n self.print_to_log_file(\"TRAINING KEYS:\\n %s\" % (str(self.dataset_tr.keys())),\n also_print_to_console=False)\n self.print_to_log_file(\"VALIDATION KEYS:\\n %s\" % (str(self.dataset_val.keys())),\n also_print_to_console=False)\n else:\n pass\n self.initialize_network()\n self.initialize_optimizer_and_scheduler()\n # assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))\n self.was_initialized = True\n\n def initialize_network(self):\n \"\"\"\n This is specific to the U-Net and must be adapted for other network architectures\n :return:\n \"\"\"\n # self.print_to_log_file(self.net_num_pool_op_kernel_sizes)\n # self.print_to_log_file(self.net_conv_kernel_sizes)\n\n net_numpool = len(self.net_num_pool_op_kernel_sizes)\n\n if self.threeD:\n conv_op = nn.Conv3d\n dropout_op = nn.Dropout3d\n norm_op = nn.InstanceNorm3d\n else:\n conv_op = nn.Conv2d\n dropout_op = nn.Dropout2d\n norm_op = nn.InstanceNorm2d\n\n norm_op_kwargs = {'eps': 1e-5, 'affine': True}\n dropout_op_kwargs = {'p': 0, 'inplace': True}\n net_nonlin = nn.LeakyReLU\n net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}\n self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, net_numpool,\n self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,\n dropout_op_kwargs,\n net_nonlin, net_nonlin_kwargs, False, False, lambda x: x, InitWeights_He(1e-2),\n self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)\n self.network.inference_apply_nonlin = softmax_helper\n\n if torch.cuda.is_available():\n self.network.cuda()\n\n def initialize_optimizer_and_scheduler(self):\n assert self.network is not None, \"self.initialize_network must be called first\"\n self.optimizer = torch.optim.Adam(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,\n amsgrad=True)\n self.lr_scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min', factor=0.2,\n patience=self.lr_scheduler_patience,\n verbose=True, threshold=self.lr_scheduler_eps,\n threshold_mode=\"abs\")\n\n def plot_network_architecture(self):\n try:\n from batchgenerators.utilities.file_and_folder_operations import join\n import hiddenlayer as hl\n if torch.cuda.is_available():\n g = hl.build_graph(self.network, torch.rand((1, self.num_input_channels, *self.patch_size)).cuda(),\n transforms=None)\n else:\n g = hl.build_graph(self.network, torch.rand((1, self.num_input_channels, *self.patch_size)),\n transforms=None)\n dot = g.build_dot()\n dot.format = 'pdf'\n dot.attr(\"graph\", rankdir=\"TD\")\n file_name = \"network_architecture\"\n dot.render(file_name, directory=self.output_folder, cleanup=True)\n del g\n except Exception as e:\n self.print_to_log_file(\"Unable to plot network architecture:\")\n self.print_to_log_file(e)\n\n self.print_to_log_file(\"\\nprinting the network instead:\\n\")\n self.print_to_log_file(self.network)\n self.print_to_log_file(\"\\n\")\n finally:\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n\n def save_debug_information(self):\n # saving some debug information\n dct = OrderedDict()\n for k in self.__dir__():\n if not k.startswith(\"__\"):\n if not callable(getattr(self, k)):\n dct[k] = str(getattr(self, k))\n del dct['plans']\n del dct['intensity_properties']\n 
del dct['dataset']\n del dct['dataset_tr']\n del dct['dataset_val']\n save_json(dct, join(self.output_folder, \"debug.json\"))\n\n import shutil\n\n shutil.copy(self.plans_file, join(self.output_folder_base, \"plans.pkl\"))\n\n def run_training(self):\n self.save_debug_information()\n super(nnUNetTrainer, self).run_training()\n\n def load_plans_file(self):\n \"\"\"\n This is what actually configures the entire experiment. The plans file is generated by experiment planning\n :return:\n \"\"\"\n self.plans = load_pickle(self.plans_file)\n\n def process_plans(self, plans):\n if self.stage is None:\n assert len(list(plans['plans_per_stage'].keys())) == 1, \\\n \"If self.stage is None then there can be only one stage in the plans file. That seems to not be the \" \\\n \"case. Please specify which stage of the cascade must be trained\"\n self.stage = list(plans['plans_per_stage'].keys())[0]\n self.plans = plans\n\n stage_plans = self.plans['plans_per_stage'][self.stage]\n self.batch_size = stage_plans['batch_size']\n self.net_pool_per_axis = stage_plans['num_pool_per_axis']\n self.patch_size = np.array(stage_plans['patch_size']).astype(int)\n self.do_dummy_2D_aug = stage_plans['do_dummy_2D_data_aug']\n\n if 'pool_op_kernel_sizes' not in stage_plans.keys():\n assert 'num_pool_per_axis' in stage_plans.keys()\n self.print_to_log_file(\"WARNING! old plans file with missing pool_op_kernel_sizes. Attempting to fix it...\")\n self.net_num_pool_op_kernel_sizes = []\n for i in range(max(self.net_pool_per_axis)):\n curr = []\n for j in self.net_pool_per_axis:\n if (max(self.net_pool_per_axis) - j) <= i:\n curr.append(2)\n else:\n curr.append(1)\n self.net_num_pool_op_kernel_sizes.append(curr)\n else:\n self.net_num_pool_op_kernel_sizes = stage_plans['pool_op_kernel_sizes']\n\n if 'conv_kernel_sizes' not in stage_plans.keys():\n self.print_to_log_file(\"WARNING! old plans file with missing conv_kernel_sizes. Attempting to fix it...\")\n self.net_conv_kernel_sizes = [[3] * len(self.net_pool_per_axis)] * (max(self.net_pool_per_axis) + 1)\n else:\n self.net_conv_kernel_sizes = stage_plans['conv_kernel_sizes']\n\n self.pad_all_sides = None # self.patch_size\n self.intensity_properties = plans['dataset_properties']['intensityproperties']\n self.normalization_schemes = plans['normalization_schemes']\n self.base_num_features = plans['base_num_features']\n self.num_input_channels = plans['num_modalities']\n self.num_classes = plans['num_classes'] + 1 # background is no longer in num_classes\n self.classes = plans['all_classes']\n self.use_mask_for_norm = plans['use_mask_for_norm']\n self.only_keep_largest_connected_component = plans['keep_only_largest_region']\n self.min_region_size_per_class = plans['min_region_size_per_class']\n self.min_size_per_class = None # DONT USE THIS. plans['min_size_per_class']\n\n if plans.get('transpose_forward') is None or plans.get('transpose_backward') is None:\n print(\"WARNING! You seem to have data that was preprocessed with a previous version of nnU-Net. \"\n \"You should rerun preprocessing. We will proceed and assume that both transpose_foward \"\n \"and transpose_backward are [0, 1, 2]. 
If that is not correct then weird things will happen!\")\n plans['transpose_forward'] = [0, 1, 2]\n plans['transpose_backward'] = [0, 1, 2]\n self.transpose_forward = plans['transpose_forward']\n self.transpose_backward = plans['transpose_backward']\n\n if len(self.patch_size) == 2:\n self.threeD = False\n elif len(self.patch_size) == 3:\n self.threeD = True\n else:\n raise RuntimeError(\"invalid patch size in plans file: %s\" % str(self.patch_size))\n\n if \"conv_per_stage\" in plans.keys(): # this ha sbeen added to the plans only recently\n self.conv_per_stage = plans['conv_per_stage']\n else:\n self.conv_per_stage = 2\n\n def load_dataset(self):\n self.dataset = load_dataset(self.folder_with_preprocessed_data)\n\n def get_basic_generators(self):\n self.load_dataset()\n self.do_split()\n\n if self.threeD:\n dl_tr = DataLoader3D(self.dataset_tr, self.basic_generator_patch_size, self.patch_size, self.batch_size,\n False, oversample_foreground_percent=self.oversample_foreground_percent,\n pad_mode=\"constant\", pad_sides=self.pad_all_sides, memmap_mode='r')\n dl_val = DataLoader3D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size, False,\n oversample_foreground_percent=self.oversample_foreground_percent,\n pad_mode=\"constant\", pad_sides=self.pad_all_sides, memmap_mode='r')\n else:\n dl_tr = DataLoader2D(self.dataset_tr, self.basic_generator_patch_size, self.patch_size, self.batch_size,\n oversample_foreground_percent=self.oversample_foreground_percent,\n pad_mode=\"constant\", pad_sides=self.pad_all_sides, memmap_mode='r')\n dl_val = DataLoader2D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size,\n oversample_foreground_percent=self.oversample_foreground_percent,\n pad_mode=\"constant\", pad_sides=self.pad_all_sides, memmap_mode='r')\n return dl_tr, dl_val\n\n def preprocess_patient(self, input_files):\n \"\"\"\n Used to predict new unseen data. 
Not used for the preprocessing of the training/test data\n :param input_files:\n :return:\n \"\"\"\n from nnunet.training.model_restore import recursive_find_python_class\n preprocessor_name = self.plans.get('preprocessor_name')\n if preprocessor_name is None:\n if self.threeD:\n preprocessor_name = \"GenericPreprocessor\"\n else:\n preprocessor_name = \"PreprocessorFor2D\"\n\n print(\"using preprocessor\", preprocessor_name)\n preprocessor_class = recursive_find_python_class([join(nnunet.__path__[0], \"preprocessing\")],\n preprocessor_name,\n current_module=\"nnunet.preprocessing\")\n assert preprocessor_class is not None, \"Could not find preprocessor %s in nnunet.preprocessing\" % \\\n preprocessor_name\n preprocessor = preprocessor_class(self.normalization_schemes, self.use_mask_for_norm,\n self.transpose_forward, self.intensity_properties)\n\n d, s, properties = preprocessor.preprocess_test_case(input_files,\n self.plans['plans_per_stage'][self.stage][\n 'current_spacing'])\n return d, s, properties\n\n def preprocess_predict_nifti(self, input_files: List[str], output_file: str = None,\n softmax_ouput_file: str = None, mixed_precision: bool = True) -> None:\n \"\"\"\n Use this to predict new data\n :param input_files:\n :param output_file:\n :param softmax_ouput_file:\n :param mixed_precision:\n :return:\n \"\"\"\n print(\"preprocessing...\")\n d, s, properties = self.preprocess_patient(input_files)\n print(\"predicting...\")\n pred = self.predict_preprocessed_data_return_seg_and_softmax(d, do_mirroring=self.data_aug_params[\"do_mirror\"],\n mirror_axes=self.data_aug_params['mirror_axes'],\n use_sliding_window=True, step_size=0.5,\n use_gaussian=True, pad_border_mode='constant',\n pad_kwargs={'constant_values': 0},\n verbose=True, all_in_gpu=False,\n mixed_precision=mixed_precision)[1]\n pred = pred.transpose([0] + [i + 1 for i in self.transpose_backward])\n\n if 'segmentation_export_params' in self.plans.keys():\n force_separate_z = self.plans['segmentation_export_params']['force_separate_z']\n interpolation_order = self.plans['segmentation_export_params']['interpolation_order']\n interpolation_order_z = self.plans['segmentation_export_params']['interpolation_order_z']\n else:\n force_separate_z = None\n interpolation_order = 1\n interpolation_order_z = 0\n\n print(\"resampling to original spacing and nifti export...\")\n save_segmentation_nifti_from_softmax(pred, output_file, properties, interpolation_order,\n self.regions_class_order, None, None, softmax_ouput_file,\n None, force_separate_z=force_separate_z,\n interpolation_order_z=interpolation_order_z)\n print(\"done\")\n\n def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,\n mirror_axes: Tuple[int] = None,\n use_sliding_window: bool = True, step_size: float = 0.5,\n use_gaussian: bool = True, pad_border_mode: str = 'constant',\n pad_kwargs: dict = None, all_in_gpu: bool = False,\n verbose: bool = True, mixed_precision: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n :param data:\n :param do_mirroring:\n :param mirror_axes:\n :param use_sliding_window:\n :param step_size:\n :param use_gaussian:\n :param pad_border_mode:\n :param pad_kwargs:\n :param all_in_gpu:\n :param verbose:\n :return:\n \"\"\"\n if pad_border_mode == 'constant' and pad_kwargs is None:\n pad_kwargs = {'constant_values': 0}\n\n if do_mirroring and mirror_axes is None:\n mirror_axes = self.data_aug_params['mirror_axes']\n\n if do_mirroring:\n assert self.data_aug_params[\"do_mirror\"], \"Cannot do 
mirroring as test time augmentation when training \" \\\n \"was done without mirroring\"\n\n valid = list((SegmentationNetwork, nn.DataParallel))\n assert isinstance(self.network, tuple(valid))\n\n current_mode = self.network.training\n self.network.eval()\n ret = self.network.predict_3D(data, do_mirroring=do_mirroring, mirror_axes=mirror_axes,\n use_sliding_window=use_sliding_window, step_size=step_size,\n patch_size=self.patch_size, regions_class_order=self.regions_class_order,\n use_gaussian=use_gaussian, pad_border_mode=pad_border_mode,\n pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu, verbose=verbose,\n mixed_precision=mixed_precision)\n self.network.train(current_mode)\n return ret\n\n def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5,\n save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,\n validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,\n segmentation_export_kwargs: dict = None, run_postprocessing_on_folds: bool = True):\n \"\"\"\n if debug=True then the temporary files generated for postprocessing determination will be kept\n \"\"\"\n\n current_mode = self.network.training\n self.network.eval()\n\n assert self.was_initialized, \"must initialize, ideally with checkpoint (or train first)\"\n if self.dataset_val is None:\n self.load_dataset()\n self.do_split()\n\n if segmentation_export_kwargs is None:\n if 'segmentation_export_params' in self.plans.keys():\n force_separate_z = self.plans['segmentation_export_params']['force_separate_z']\n interpolation_order = self.plans['segmentation_export_params']['interpolation_order']\n interpolation_order_z = self.plans['segmentation_export_params']['interpolation_order_z']\n else:\n force_separate_z = None\n interpolation_order = 1\n interpolation_order_z = 0\n else:\n force_separate_z = segmentation_export_kwargs['force_separate_z']\n interpolation_order = segmentation_export_kwargs['interpolation_order']\n interpolation_order_z = segmentation_export_kwargs['interpolation_order_z']\n\n # predictions as they come from the network go here\n output_folder = join(self.output_folder, validation_folder_name)\n maybe_mkdir_p(output_folder)\n # this is for debug purposes\n my_input_args = {'do_mirroring': do_mirroring,\n 'use_sliding_window': use_sliding_window,\n 'step_size': step_size,\n 'save_softmax': save_softmax,\n 'use_gaussian': use_gaussian,\n 'overwrite': overwrite,\n 'validation_folder_name': validation_folder_name,\n 'debug': debug,\n 'all_in_gpu': all_in_gpu,\n 'segmentation_export_kwargs': segmentation_export_kwargs,\n }\n save_json(my_input_args, join(output_folder, \"validation_args.json\"))\n\n if do_mirroring:\n if not self.data_aug_params['do_mirror']:\n raise RuntimeError(\"We did not train with mirroring so you cannot do inference with mirroring enabled\")\n mirror_axes = self.data_aug_params['mirror_axes']\n else:\n mirror_axes = ()\n\n pred_gt_tuples = []\n\n export_pool = Pool(default_num_threads)\n results = []\n\n for k in self.dataset_val.keys():\n properties = load_pickle(self.dataset[k]['properties_file'])\n fname = properties['list_of_data_files'][0].split(\"/\")[-1][:-12]\n if overwrite or (not isfile(join(output_folder, fname + \".nii.gz\"))) or \\\n (save_softmax and not isfile(join(output_folder, fname + \".npz\"))):\n data = np.load(self.dataset[k]['data_file'])['data']\n\n print(k, data.shape)\n data[-1][data[-1] == -1] = 0\n\n softmax_pred = 
self.predict_preprocessed_data_return_seg_and_softmax(data[:-1],\n do_mirroring=do_mirroring,\n mirror_axes=mirror_axes,\n use_sliding_window=use_sliding_window,\n step_size=step_size,\n use_gaussian=use_gaussian,\n all_in_gpu=all_in_gpu,\n mixed_precision=self.fp16)[1]\n\n softmax_pred = softmax_pred.transpose([0] + [i + 1 for i in self.transpose_backward])\n\n if save_softmax:\n softmax_fname = join(output_folder, fname + \".npz\")\n else:\n softmax_fname = None\n\n \"\"\"There is a problem with python process communication that prevents us from communicating obejcts\n larger than 2 GB between processes (basically when the length of the pickle string that will be sent is\n communicated by the multiprocessing.Pipe object then the placeholder (\\%i I think) does not allow for long\n enough strings (lol). This could be fixed by changing i to l (for long) but that would require manually\n patching system python code. We circumvent that problem here by saving softmax_pred to a npy file that will\n then be read (and finally deleted) by the Process. save_segmentation_nifti_from_softmax can take either\n filename or np.ndarray and will handle this automatically\"\"\"\n if np.prod(softmax_pred.shape) > (2e9 / 4 * 0.85): # *0.85 just to be save\n np.save(join(output_folder, fname + \".npy\"), softmax_pred)\n softmax_pred = join(output_folder, fname + \".npy\")\n\n results.append(export_pool.starmap_async(save_segmentation_nifti_from_softmax,\n ((softmax_pred, join(output_folder, fname + \".nii.gz\"),\n properties, interpolation_order, self.regions_class_order,\n None, None,\n softmax_fname, None, force_separate_z,\n interpolation_order_z),\n )\n )\n )\n\n pred_gt_tuples.append([join(output_folder, fname + \".nii.gz\"),\n join(self.gt_niftis_folder, fname + \".nii.gz\")])\n\n _ = [i.get() for i in results]\n self.print_to_log_file(\"finished prediction\")\n\n # evaluate raw predictions\n self.print_to_log_file(\"evaluation of raw predictions\")\n task = self.dataset_directory.split(\"\\\\\")[-1]\n job_name = self.experiment_name\n _ = aggregate_scores(pred_gt_tuples, labels=list(range(self.num_classes)),\n json_output_file=join(output_folder, \"summary.json\"),\n json_name=job_name + \" val tiled %s\" % (str(use_sliding_window)),\n json_author=\"Fabian\",\n json_task=task, num_threads=default_num_threads)\n\n if run_postprocessing_on_folds:\n # in the old nnunet we would stop here. Now we add a postprocessing. This postprocessing can remove everything\n # except the largest connected component for each class. To see if this improves results, we do this for all\n # classes and then rerun the evaluation. Those classes for which this resulted in an improved dice score will\n # have this applied during inference as well\n self.print_to_log_file(\"determining postprocessing\")\n determine_postprocessing(self.output_folder, self.gt_niftis_folder, validation_folder_name,\n final_subf_name=validation_folder_name + \"_postprocessed\", debug=debug)\n # after this the final predictions for the vlaidation set can be found in validation_folder_name_base + \"_postprocessed\"\n # They are always in that folder, even if no postprocessing as applied!\n\n # detemining postprocesing on a per-fold basis may be OK for this fold but what if another fold finds another\n # postprocesing to be better? In this case we need to consolidate. 
At the time the consolidation is going to be\n # done we won't know what self.gt_niftis_folder was, so now we copy all the niftis into a separate folder to\n # be used later\n gt_nifti_folder = join(self.output_folder_base, \"gt_niftis\")\n maybe_mkdir_p(gt_nifti_folder)\n for f in subfiles(self.gt_niftis_folder, suffix=\".nii.gz\"):\n success = False\n attempts = 0\n e = None\n while not success and attempts < 10:\n try:\n shutil.copy(f, gt_nifti_folder)\n success = True\n except OSError as e:\n attempts += 1\n sleep(1)\n if not success:\n print(\"Could not copy gt nifti file %s into folder %s\" % (f, gt_nifti_folder))\n if e is not None:\n raise e\n\n self.network.train(current_mode)\n\n def run_online_evaluation(self, output, target):\n with torch.no_grad():\n num_classes = output.shape[1]\n output_softmax = softmax_helper(output)\n output_seg = output_softmax.argmax(1)\n target = target[:, 0]\n axes = tuple(range(1, len(target.shape)))\n tp_hard = torch.zeros((target.shape[0], num_classes - 1)).to(output_seg.device.index)\n fp_hard = torch.zeros((target.shape[0], num_classes - 1)).to(output_seg.device.index)\n fn_hard = torch.zeros((target.shape[0], num_classes - 1)).to(output_seg.device.index)\n for c in range(1, num_classes):\n tp_hard[:, c - 1] = sum_tensor((output_seg == c).float() * (target == c).float(), axes=axes)\n fp_hard[:, c - 1] = sum_tensor((output_seg == c).float() * (target != c).float(), axes=axes)\n fn_hard[:, c - 1] = sum_tensor((output_seg != c).float() * (target == c).float(), axes=axes)\n\n tp_hard = tp_hard.sum(0, keepdim=False).detach().cpu().numpy()\n fp_hard = fp_hard.sum(0, keepdim=False).detach().cpu().numpy()\n fn_hard = fn_hard.sum(0, keepdim=False).detach().cpu().numpy()\n\n self.online_eval_foreground_dc.append(list((2 * tp_hard) / (2 * tp_hard + fp_hard + fn_hard + 1e-8)))\n self.online_eval_tp.append(list(tp_hard))\n self.online_eval_fp.append(list(fp_hard))\n self.online_eval_fn.append(list(fn_hard))\n\n def finish_online_evaluation(self):\n self.online_eval_tp = np.sum(self.online_eval_tp, 0)\n self.online_eval_fp = np.sum(self.online_eval_fp, 0)\n self.online_eval_fn = np.sum(self.online_eval_fn, 0)\n\n global_dc_per_class = [i for i in [2 * i / (2 * i + j + k) for i, j, k in\n zip(self.online_eval_tp, self.online_eval_fp, self.online_eval_fn)]\n if not np.isnan(i)]\n self.all_val_eval_metrics.append(np.mean(global_dc_per_class))\n\n self.print_to_log_file(\"Average global foreground Dice:\", [np.round(i, 4) for i in global_dc_per_class])\n self.print_to_log_file(\"(interpret this as an estimate for the Dice of the different classes. This is not \"\n \"exact.)\")\n\n self.online_eval_foreground_dc = []\n self.online_eval_tp = []\n self.online_eval_fp = []\n self.online_eval_fn = []\n\n def save_checkpoint(self, fname, save_optimizer=True):\n super(nnUNetTrainer, self).save_checkpoint(fname, save_optimizer)\n info = OrderedDict()\n info['init'] = self.init_args\n info['name'] = self.__class__.__name__\n info['class'] = str(self.__class__)\n info['plans'] = self.plans\n\n write_pickle(info, fname + \".pkl\")\n"
] | [
[
"numpy.median",
"numpy.round",
"numpy.argmax",
"numpy.any",
"numpy.prod",
"numpy.argsort",
"numpy.array",
"numpy.vstack"
],
[
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.zeros",
"numpy.isnan",
"matplotlib.use",
"torch.cuda.empty_cache",
"numpy.round",
"torch.no_grad",
"numpy.mean",
"torch.cuda.is_available",
"torch.rand",
"numpy.prod",
"numpy.load",
"numpy.array",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
echasnovski/randomvars | [
"15417b0e3ecd27f185b70471102c158f60d51c28",
"15417b0e3ecd27f185b70471102c158f60d51c28"
] | [
"randomvars/tests/test__continuous.py",
"experiments/from_sample.py"
] | [
"# pylint: disable=missing-function-docstring\n\"\"\"Tests for '_continuous.py' file\"\"\"\nimport numpy as np\nfrom numpy.testing import assert_array_equal, assert_array_almost_equal\nimport scipy.stats.distributions as distrs\nfrom scipy.stats.kde import gaussian_kde\nfrom scipy.integrate import quad\nimport pytest\n\nfrom randomvars._continuous import Cont\nfrom randomvars.tests.commontests import (\n DECIMAL,\n _test_equal_rand,\n _test_equal_seq,\n _test_from_rv_rand,\n _test_from_sample_rand,\n _test_input_coercion,\n _test_log_fun,\n _test_one_value_input,\n _test_rvs_method,\n declass,\n h,\n)\nfrom randomvars.options import config\n\n\nDISTRIBUTIONS_COMMON = {\n \"beta\": distrs.beta(a=10, b=20),\n \"chi_sq\": distrs.chi2(df=10),\n \"expon\": distrs.expon(),\n \"f\": distrs.f(dfn=20, dfd=20),\n \"gamma\": distrs.gamma(a=10),\n \"laplace\": distrs.laplace(),\n \"lognorm\": distrs.lognorm(s=0.5),\n \"norm\": distrs.norm(),\n \"norm2\": distrs.norm(loc=10),\n \"norm3\": distrs.norm(scale=0.1),\n \"norm4\": distrs.norm(scale=10),\n \"norm5\": distrs.norm(loc=10, scale=0.1),\n \"t\": distrs.t(df=10),\n \"uniform\": distrs.uniform(),\n \"uniform2\": distrs.uniform(loc=10, scale=0.1),\n \"weibull_max\": distrs.weibull_max(c=2),\n \"weibull_min\": distrs.weibull_min(c=2),\n}\n\nDISTRIBUTIONS_INF_DENSITY = {\n \"inf_beta_both\": distrs.beta(a=0.4, b=0.6),\n \"inf_beta_left\": distrs.beta(a=0.5, b=2),\n \"inf_beta_right\": distrs.beta(a=2, b=0.5),\n \"inf_chi_sq\": distrs.chi2(df=1),\n \"inf_weibull_max\": distrs.weibull_max(c=0.5),\n \"inf_weibull_min\": distrs.weibull_min(c=0.5),\n}\n\nDISTRIBUTIONS_HEAVY_TAILS = {\n \"heavy_cauchy\": distrs.cauchy(),\n \"heavy_lognorm\": distrs.lognorm(s=1),\n \"heavy_t\": distrs.t(df=2),\n}\n\nDISTRIBUTIONS = {\n **DISTRIBUTIONS_COMMON,\n **DISTRIBUTIONS_HEAVY_TAILS,\n **DISTRIBUTIONS_INF_DENSITY,\n}\n\n\ndef augment_grid(x, n_inner_points):\n test_arr = [\n np.linspace(x[i], x[i + 1], n_inner_points + 1, endpoint=False)\n for i in np.arange(len(x) - 1)\n ]\n test_arr.append([x[-1]])\n return np.concatenate(test_arr)\n\n\ndef from_sample_cdf_max_error(x):\n rv = Cont.from_sample(x)\n density = config.estimator_cont(x)\n\n x_grid = augment_grid(rv.x, 10)\n\n # Efficient way of computing `quad(density, -np.inf, x_grid)`\n x_grid_ext = np.concatenate([[-np.inf], x_grid])\n cdf_intervals = np.array(\n [\n quad(density, x_l, x_r)[0]\n for x_l, x_r in zip(x_grid_ext[:-1], x_grid_ext[1:])\n ]\n )\n cdf_grid = np.cumsum(cdf_intervals)\n\n err = cdf_grid - rv.cdf(x_grid)\n return np.max(np.abs(err))\n\n\ndef circle_fun(x, low, high):\n x = np.array(x)\n center = 0.5 * (high + low)\n radius = 0.5 * (high - low)\n\n res = np.zeros_like(x)\n\n center_dist = np.abs(x - center)\n is_in = center_dist <= radius\n res[is_in] = np.sqrt(radius ** 2 - center_dist[is_in] ** 2)\n\n return res\n\n\ndef make_circ_density(intervals):\n \"\"\"Construct circular density\n\n Density looks like half-circles with diameters lying in elements of\n `intervals`. 
Total integral is equal to 1.\n\n Parameters\n ----------\n intervals : iterable with elements being 2-element iterables\n Iterable of intervals with non-zero density.\n\n Returns\n -------\n density : callable\n Function which returns density values.\n \"\"\"\n\n def density(x):\n res = np.zeros_like(x)\n tot_integral = 0\n for low, high in intervals:\n res += circle_fun(x, low, high)\n # There is only half of circle\n tot_integral += np.pi * (high - low) ** 2 / 8\n\n return res / tot_integral\n\n return density\n\n\nclass TestCont:\n \"\"\"Regression tests for `Cont` class\"\"\"\n\n def test_init_errors(self):\n def check_one_input(def_args, var):\n with pytest.raises(TypeError, match=f\"`{var}`.*numpy array\"):\n def_args[var] = {\"a\": None}\n Cont(**def_args)\n with pytest.raises(TypeError, match=f\"`{var}`.*float\"):\n def_args[var] = [\"a\", \"a\"]\n Cont(**def_args)\n with pytest.raises(TypeError, match=f\"`{var}`.*finite values\"):\n def_args[var] = [0, np.nan]\n Cont(**def_args)\n with pytest.raises(TypeError, match=f\"`{var}`.*finite values\"):\n def_args[var] = [0, np.inf]\n Cont(**def_args)\n with pytest.raises(ValueError, match=f\"`{var}`.*1d array\"):\n def_args[var] = [[0, 1]]\n Cont(**def_args)\n\n check_one_input({\"y\": [1, 1]}, \"x\")\n check_one_input({\"x\": [0, 1]}, \"y\")\n\n with pytest.raises(ValueError, match=\"[Ll]engths.*match\"):\n Cont([0, 1], [1, 1, 1])\n\n with pytest.raises(ValueError, match=\"two\"):\n Cont([1], [1])\n\n with pytest.warns(UserWarning, match=\"`x`.*not sorted.*`x` and `y`\"):\n rv = Cont([1, 0], [0, 2])\n rv_ref = Cont([0, 1], [2, 0])\n _test_equal_rand(rv, rv_ref)\n\n with pytest.raises(ValueError, match=\"`y`.*negative\"):\n Cont([0, 1], [1, -1])\n\n with pytest.raises(ValueError, match=\"`y`.*no positive\"):\n Cont([0, 1], [0, 0])\n\n def test_init(self):\n x_ref = np.array([0, 1, 2])\n y_ref = np.array([0, 1, 0])\n rv_ref = Cont(x_ref, y_ref)\n\n # Simple case with non-numpy input\n rv_1 = Cont(x=x_ref.tolist(), y=y_ref.tolist())\n _test_equal_rand(rv_1, rv_ref)\n\n # Check if `y` is normalized\n rv_2 = Cont(x=x_ref, y=10 * y_ref)\n _test_equal_rand(rv_2, rv_ref)\n\n # Check if `x` and `y` are rearranged if not sorted\n with pytest.warns(UserWarning, match=\"`x`.*not sorted\"):\n rv_3 = Cont(x=x_ref[[1, 0, 2]], y=y_ref[[1, 0, 2]])\n _test_equal_rand(rv_3, rv_ref)\n\n # Check if duplicated values are removed from `x`\n with pytest.warns(UserWarning, match=\"duplicated\"):\n # First pair of xy-grid is taken among duplicates\n rv_4 = Cont(x=x_ref[[0, 1, 1, 2]], y=y_ref[[0, 1, 2, 2]])\n _test_equal_rand(rv_4, rv_ref)\n\n def test_str(self):\n rv = Cont([0, 2, 4], [0, 1, 0])\n assert str(rv) == \"Continuous RV with 2 intervals (support: [0.0, 4.0])\"\n\n # Uses singular noun with one interval\n rv = Cont([0, 1], [1, 1])\n assert str(rv) == \"Continuous RV with 1 interval (support: [0.0, 1.0])\"\n\n def test_properties(self):\n x = np.arange(11)\n y = np.repeat(0.1, 11)\n rv = Cont(x, y)\n\n assert list(rv.params.keys()) == [\"x\", \"y\"]\n assert_array_equal(rv.params[\"x\"], x)\n assert_array_equal(rv.params[\"y\"], y)\n\n assert_array_equal(rv.x, x)\n assert_array_equal(rv.y, y)\n assert rv.a == 0.0\n assert rv.b == 10.0\n\n def test_support(self):\n rv = Cont([0.5, 1.5, 4.5], [0, 0.5, 0])\n assert rv.support() == (0.5, 4.5)\n\n def test_compress(self):\n # Zero tails\n ## Left tail\n _test_equal_rand(\n Cont([0, 1, 2, 3], [0, 0, 0, 2]).compress(), Cont([2, 3], [0, 2])\n )\n _test_equal_rand(\n Cont([0, 1, 2, 3], [0, 0, 1, 0]).compress(), 
Cont([1, 2, 3], [0, 1, 0])\n )\n\n ## Right tail\n _test_equal_rand(\n Cont([0, 1, 2, 3], [2, 0, 0, 0]).compress(), Cont([0, 1], [2, 0])\n )\n _test_equal_rand(\n Cont([0, 1, 2, 3], [0, 1, 0, 0]).compress(), Cont([0, 1, 2], [0, 1, 0])\n )\n\n ## Both tails\n _test_equal_rand(\n Cont([0, 1, 2, 3, 4], [0, 0, 1, 0, 0]).compress(),\n Cont([1, 2, 3], [0, 1, 0]),\n )\n\n # Extra linearity\n ## Non-zero slope\n _test_equal_rand(\n Cont([0, 1, 2, 3, 4], [0.5, 0.25, 0, 0.25, 0.5]).compress(),\n Cont([0, 2, 4], [0.5, 0, 0.5]),\n )\n\n ## Zero slope, non-zero y\n _test_equal_rand(\n Cont([0, 1, 2], [0.5, 0.5, 0.5]).compress(), Cont([0, 2], [0.5, 0.5])\n )\n\n ## Zero slope, zero y, outside of tails\n _test_equal_rand(\n Cont([0, 1, 2, 3, 4], [1, 0, 0, 0, 1]).compress(),\n Cont([0, 1, 3, 4], [1, 0, 0, 1]),\n )\n\n # All features\n _test_equal_rand(\n Cont(np.arange(14), [0, 0, 0, 1, 2, 2, 2, 1, 0, 0, 0, 1, 0, 0]).compress(),\n Cont([2, 4, 6, 8, 10, 11, 12], [0, 2, 2, 0, 0, 1, 0]),\n )\n\n # If nothing to compress, self should be returned\n rv = Cont([0, 1], [1, 1])\n assert rv.compress() is rv\n\n def test_ground(self):\n w = config.small_width\n\n # Basic usage\n rv = Cont([0, 1], [1, 1])\n _test_equal_rand(\n rv.ground(), Cont([-w, 0, w, 1 - w, 1, 1 + w], [0, 0.5, 1, 1, 0.5, 0])\n )\n\n # Argument `direction`\n _test_equal_rand(\n rv.ground(direction=\"both\"),\n Cont([-w, 0, w, 1 - w, 1, 1 + w], [0, 0.5, 1, 1, 0.5, 0]),\n )\n _test_equal_rand(\n rv.ground(direction=\"left\"), Cont([-w, 0, w, 1], [0, 0.5, 1, 1])\n )\n _test_equal_rand(\n rv.ground(direction=\"right\"), Cont([0, 1 - w, 1, 1 + w], [1, 1, 0.5, 0])\n )\n _test_equal_rand(rv.ground(direction=\"none\"), rv)\n\n # Argument `w`\n w2 = 0.1\n _test_equal_rand(\n rv.ground(w=w2, direction=\"both\"),\n Cont([-w2, 0, w2, 1 - w2, 1, 1 + w2], [0, 0.5, 1, 1, 0.5, 0]),\n )\n\n # Close neighbors\n rv2 = Cont([0, 0.25 * w, 0.5, 1 - 0.1 * w, 1], [1, 1, 1, 1, 1])\n rv2_grounded = rv2.ground(direction=\"both\")\n ## Check that only outer points were added\n assert_array_equal(rv2_grounded.x[1:-1], rv2.x)\n ## Check that grounded actually happend\n assert_array_equal(rv2_grounded.y[[0, -1]], 0.0)\n ## Check that non-edge x-values havae same y-values\n assert_array_equal(rv2_grounded.pdf(rv2.x[1:-1]), rv2.pdf(rv2.x[1:-1]))\n\n def test_ground_options(self):\n rv = Cont([0, 1], [1, 1])\n with config.context({\"small_width\": 0.1}):\n w = config.small_width\n _test_equal_rand(\n rv.ground(), Cont([-w, 0, w, 1 - w, 1, 1 + w], [0, 0.5, 1, 1, 0.5, 0])\n )\n\n def test_ground_errors(self):\n rv = Cont([0, 1], [1, 1])\n with pytest.raises(ValueError, match=\"one of\"):\n rv.ground(direction=\"aaa\")\n\n def test__coeffs_by_ind(self):\n # All coefficients are returned if no `ind` is specified\n rv = Cont([0, 1, 2], [0, 1, 0])\n inter, slope = rv._coeffs_by_ind()\n assert_array_equal(inter, [0, 2])\n assert_array_equal(slope, [1, -1])\n\n def test__grid_by_ind(self):\n # All grid elements are returned if no `ind` is specified\n rv = Cont([0, 1, 2], [0, 1, 0])\n x_out, y_out, p_out = rv._grid_by_ind()\n x_ref, y_ref = rv.x, rv.y\n assert_array_equal(x_out, x_ref)\n assert_array_equal(y_out, y_ref)\n\n def test_pdf_coeffs(self):\n rv = Cont([0, 1, 2], [0, 1, 0])\n x = np.array([-1, 0, 0.5, 1, 1.5, 2, 2.5])\n\n with pytest.raises(ValueError, match=\"one of\"):\n rv.pdf_coeffs(x, side=\"a\")\n\n _test_equal_seq(\n rv.pdf_coeffs(x),\n (np.array([0, 0, 0, 2, 2, 2, 0]), np.array([0, 1, 1, -1, -1, -1, 0])),\n )\n _test_equal_seq(\n rv.pdf_coeffs(x, side=\"left\"),\n 
(np.array([0, 0, 0, 0, 2, 2, 0]), np.array([0, 1, 1, 1, -1, -1, 0])),\n )\n _test_equal_seq(\n rv.pdf_coeffs(np.array([-np.inf, np.nan, np.inf])),\n (np.array([0, np.nan, 0]), np.array([0, np.nan, 0])),\n )\n\n def test_from_rv_basic(self):\n uniform = distrs.uniform\n norm = distrs.norm\n\n # Basic usage\n rv_unif = Cont.from_rv(uniform)\n rv_unif_test = Cont(x=[0, 1], y=[1, 1])\n _test_equal_rand(rv_unif, rv_unif_test, decimal=DECIMAL)\n\n # Objects of `Rand` class should be `convert()`ed\n _test_from_rv_rand(cls=Cont, to_class=\"Cont\")\n\n # Forced support edges\n rv_right = Cont.from_rv(uniform, supp=(0.5, None))\n rv_right_test = Cont([0.5, 1], [2, 2])\n _test_equal_rand(rv_right, rv_right_test, decimal=DECIMAL)\n\n rv_left = Cont.from_rv(uniform, supp=(None, 0.5))\n rv_left_test = Cont([0, 0.5], [2, 2])\n _test_equal_rand(rv_left, rv_left_test, decimal=DECIMAL)\n\n rv_mid = Cont.from_rv(uniform, supp=(0.25, 0.75))\n rv_mid_test = Cont([0.25, 0.75], [2, 2])\n _test_equal_rand(rv_mid, rv_mid_test, decimal=DECIMAL)\n\n def test_from_rv_errors(self):\n # Absence of either `cdf` or `ppf` method should result intro error\n class Tmp:\n pass\n\n tmp1 = Tmp()\n tmp1.ppf = lambda x: np.where((0 <= x) & (x <= 1), 1, 0)\n with pytest.raises(ValueError, match=\"cdf\"):\n Cont.from_rv(tmp1)\n\n tmp2 = Tmp()\n tmp2.cdf = lambda x: np.where((0 <= x) & (x <= 1), 1, 0)\n with pytest.raises(ValueError, match=\"ppf\"):\n Cont.from_rv(tmp2)\n\n def test_from_rv_options(self):\n norm = distrs.norm\n\n # Finite support detection and usage of `small_prob` option\n with config.context({\"small_prob\": 1e-6}):\n rv_norm = Cont.from_rv(norm)\n assert_array_almost_equal(\n rv_norm.support(), norm.ppf([1e-6, 1 - 1e-6]), decimal=DECIMAL\n )\n\n with config.context({\"small_prob\": 1e-6}):\n rv_norm_right = Cont.from_rv(norm, supp=(-1, None))\n assert_array_almost_equal(\n rv_norm_right.support(), [-1, norm.ppf(1 - 1e-6)], decimal=DECIMAL\n )\n\n with config.context({\"small_prob\": 1e-6}):\n rv_norm_left = Cont.from_rv(norm, supp=(None, 1))\n assert_array_almost_equal(\n rv_norm_left.support(), [norm.ppf(1e-6), 1], decimal=DECIMAL\n )\n\n # Usage of `n_grid` option\n with config.context({\"n_grid\": 11}):\n rv_norm_small = Cont.from_rv(norm)\n assert len(rv_norm_small.x) <= 20\n\n # Usage of `cdf_tolerance` option\n with config.context({\"cdf_tolerance\": 1e-4}):\n rv_norm_1 = Cont.from_rv(norm)\n with config.context({\"cdf_tolerance\": 1e-1}):\n rv_norm_2 = Cont.from_rv(norm)\n ## Increasing CDF tolerance should lead to decrease of density grid\n assert len(rv_norm_1.x) > len(rv_norm_2.x)\n\n def test_from_sample_basic(self):\n norm = distrs.norm()\n\n rng = np.random.default_rng(101)\n x = norm.rvs(100, random_state=rng)\n rv = Cont.from_sample(x)\n assert isinstance(rv, Cont)\n\n def test_from_sample_errors(self):\n with pytest.raises(TypeError, match=\"numpy array with float\"):\n Cont.from_sample([\"a\"])\n\n with pytest.raises(ValueError, match=\"1d\"):\n Cont.from_sample([[1], [2]])\n\n def test_from_sample_options(self):\n norm = distrs.norm()\n\n rng = np.random.default_rng(101)\n x = norm.rvs(100, random_state=rng)\n\n # \"estimator_cont\"\n def uniform_estimator(x):\n x_min, x_max = x.min(), x.max()\n\n def res(x):\n return np.where((x >= x_min) & (x <= x_max), 1 / (x_max - x_min), 0)\n\n return res\n\n with config.context({\"estimator_cont\": uniform_estimator}):\n rv = Cont.from_sample(x)\n assert len(rv.y) == 2\n assert np.allclose(rv.y, rv.y[0], atol=1e-13)\n\n # \"estimator_cont\" which returns 
allowed classes\n ## `Rand` class should be forwarded to `from_rv()` method\n _test_from_sample_rand(\n cls=Cont,\n sample=x,\n estimator_option=\"estimator_cont\",\n )\n\n ## \"Scipy\" distribution should be forwarded to `Cont.from_rv()`\n rv_norm = distrs.norm()\n with config.context({\"estimator_cont\": lambda x: rv_norm}):\n rv = Cont.from_sample(np.asarray([0, 1, 2]))\n rv_ref = Cont.from_rv(rv_norm)\n _test_equal_rand(rv, rv_ref)\n\n # \"density_mincoverage\"\n with config.context({\"density_mincoverage\": 0.0}):\n rv = Cont.from_sample(x)\n ## With minimal density mincoverage output range should be equal to\n ## sample range\n assert_array_equal(rv.x[[0, -1]], [x.min(), x.max()])\n\n # \"n_grid\"\n with config.context({\"n_grid\": 11}):\n rv = Cont.from_sample(x)\n assert len(rv.x) <= 22\n\n # \"cdf_tolerance\"\n with config.context({\"cdf_tolerance\": 2.0}):\n rv = Cont.from_sample(x)\n ## With very high CDF tolerance downgridding should result into grid\n ## with three elements. That is because CDF is approximated with\n ## simplest quadratic spline with single segment. That requires three\n ## knots.\n assert len(rv.x) == 3\n\n @pytest.mark.slow\n def test_from_sample_single_value(self):\n \"\"\"How well `from_sample()` handles single unique value in sample\n\n Main problem here is how density range is initialized during estimation.\n \"\"\"\n\n zero_vec = np.zeros(10)\n\n # Default density estimator can't handle situation with single unique\n # sample value (gives `LinAlgError: singular matrix`).\n\n # Case when sample width is zero but density is not zero\n density_centered_interval = make_circ_density([(-1, 1)])\n with config.context({\"estimator_cont\": lambda x: density_centered_interval}):\n assert from_sample_cdf_max_error(zero_vec) <= 1e-4\n\n # Case when both sample width and density are zero\n density_shifted_interval = make_circ_density([(10, 20)])\n with config.context({\"estimator_cont\": lambda x: density_shifted_interval}):\n # Here currently the problem is that support is estimated way to\n # wide with very small (~1e-9) non-zero density outside of [10,\n # 20]. 
However, CDFs are still close.\n assert from_sample_cdf_max_error(zero_vec) <= 2e-4\n\n def test_pdf(self):\n rv = Cont([0, 1, 3], [0.5, 0.5, 0])\n\n # Regular checks\n x = np.array([-1, 0, 0.5, 1, 2, 3, 4])\n assert_array_equal(rv.pdf(x), np.array([0, 0.5, 0.5, 0.5, 0.25, 0, 0]))\n\n # Coercion of not ndarray input\n _test_input_coercion(rv.pdf, x)\n\n # Input around edges\n x = np.array([0 - 1e-10, 0 + 1e-10, 3 - 1e-10, 3 + 1e-10])\n assert_array_almost_equal(\n rv.pdf(x), np.array([0, 0.5, 0.25e-10, 0]), decimal=DECIMAL\n )\n\n # Bad input\n x = np.array([-np.inf, np.nan, np.inf])\n assert_array_equal(rv.pdf(x), np.array([0, np.nan, 0]))\n\n # Dirac-like random variable\n rv_dirac = Cont([10 - h, 10, 10 + h], [0, 1, 0])\n x = np.array([10 - h, 10 - 0.5e-8, 10, 10 + 0.5e-8, 10 + h])\n ## Accuracy is of order of 10 due to extreme magnitudes of values\n assert_array_almost_equal(\n rv_dirac.pdf(x), np.array([0, 0.5e8, 1e8, 0.5e8, 0]), decimal=-1\n )\n\n # Broadcasting\n x = np.array([[-1, 0.5], [2, 4]])\n assert_array_equal(rv.pdf(x), np.array([[0.0, 0.5], [0.25, 0.0]]))\n\n # One value input\n _test_one_value_input(rv.pdf, 0.5)\n _test_one_value_input(rv.pdf, -1)\n _test_one_value_input(rv.pdf, np.nan)\n\n def test_logpdf(self):\n rv = Cont([0, 1, 3], [0.5, 0.5, 0])\n _test_log_fun(rv.logpdf, rv.pdf, x_ref=[-1, 0.1, 3, np.inf, np.nan])\n\n def test_pmf(self):\n rv = Cont([0, 1, 3], [0.5, 0.5, 0])\n with pytest.raises(AttributeError, match=r\"Use `pdf\\(\\)`\"):\n rv.pmf(0)\n\n def test_logpmf(self):\n rv = Cont([0, 1, 3], [0.5, 0.5, 0])\n with pytest.raises(AttributeError, match=r\"Use `logpdf\\(\\)`\"):\n rv.logpmf(0)\n\n def test_cdf(self):\n rv_1 = Cont([0, 1, 2], [0, 1, 0])\n\n # Regular checks\n x = np.array([-1, 0, 0.5, 1, 1.5, 2, 3])\n assert_array_equal(rv_1.cdf(x), np.array([0, 0, 0.125, 0.5, 0.875, 1, 1]))\n\n # Coercion of not ndarray input\n _test_input_coercion(rv_1.cdf, x)\n\n # Bad input\n x = np.array([-np.inf, np.nan, np.inf])\n assert_array_equal(rv_1.cdf(x), np.array([0, np.nan, 1]))\n\n # Dirac-like random variable\n rv_dirac = Cont([10 - h, 10, 10 + h], [0, 1, 0])\n x = np.array([10 - h, 10 - 0.5e-8, 10, 10 + 0.5e-8, 10 + h])\n assert_array_almost_equal(\n rv_dirac.cdf(x), np.array([0, 0.125, 0.5, 0.875, 1]), decimal=DECIMAL\n )\n\n # Broadcasting\n x = np.array([[-1, 0.5], [2, 4]])\n assert_array_equal(rv_1.cdf(x), np.array([[0.0, 0.125], [1.0, 1.0]]))\n\n # One value input\n _test_one_value_input(rv_1.cdf, 0.5)\n _test_one_value_input(rv_1.cdf, -1)\n _test_one_value_input(rv_1.cdf, np.nan)\n\n def test_logcdf(self):\n rv = Cont([0, 1, 3], [0.5, 0.5, 0])\n _test_log_fun(rv.logcdf, rv.cdf, x_ref=[-1, 0.1, 3, np.inf, np.nan])\n\n def test_sf(self):\n rv = Cont([0, 1, 3], [0.5, 0.5, 0])\n x_ref = [-1, 0.1, 3, np.inf, np.nan]\n assert_array_equal(rv.sf(x_ref), 1 - rv.cdf(x_ref))\n\n def test_logsf(self):\n rv = Cont([0, 1, 3], [0.5, 0.5, 0])\n _test_log_fun(rv.logsf, rv.sf, x_ref=[-1, 0.1, 3, np.inf, np.nan])\n\n def test_ppf(self):\n # `ppf()` method should be inverse to `cdf()` for every sensible input\n rv_1 = Cont([0, 1, 2], [0, 1, 0])\n\n # Regular checks\n q = np.array([0, 0.125, 0.5, 0.875, 1])\n assert_array_equal(rv_1.ppf(q), np.array([0, 0.5, 1, 1.5, 2]))\n\n # Coercion of not ndarray input\n _test_input_coercion(rv_1.ppf, q)\n\n # Bad input\n q = np.array([-np.inf, -h, np.nan, 1 + h, np.inf])\n assert_array_equal(\n rv_1.ppf(q), np.array([np.nan, np.nan, np.nan, np.nan, np.nan])\n )\n\n # Dirac-like random variable\n rv_dirac = Cont([10 - h, 10, 10 + h], [0, 
1, 0])\n q = np.array([0, 0.125, 0.5, 0.875, 1])\n assert_array_almost_equal(\n rv_dirac.ppf(q),\n np.array([10 - h, 10 - 0.5e-8, 10, 10 + 0.5e-8, 10 + h]),\n decimal=DECIMAL,\n )\n\n # Broadcasting\n q = np.array([[0, 0.5], [0.0, 1.0]])\n assert_array_equal(rv_1.ppf(q), np.array([[0.0, 1.0], [0.0, 2.0]]))\n\n # One value input\n _test_one_value_input(rv_1.ppf, 0.25)\n _test_one_value_input(rv_1.ppf, -1)\n _test_one_value_input(rv_1.ppf, np.nan)\n\n # Should return the smallest x-value in case of zero-density interval(s)\n rv_zero_density = Cont([0, 1, 2, 3, 4, 5, 6], [0, 0.5, 0, 0, 0, 0.5, 0])\n assert rv_zero_density.ppf(0.5) == 2\n\n def test_isf(self):\n rv = Cont([0, 1, 2], [0, 1, 0])\n\n # Regular checks\n q_ref = np.array([0, 0.125, 0.5, 0.875, 1])\n assert_array_equal(rv.sf(rv.isf(q_ref)), q_ref)\n\n def test_rvs(self):\n rv_1 = Cont([0, 1, 2], [0, 1, 0])\n\n _test_rvs_method(rv_1)\n\n def test__cdf_spline(self):\n rv = Cont([0, 1, 2], [0, 1, 0])\n x = [-10, 0, 0.5, 1, 1.5, 2, 10]\n assert_array_equal(rv._cdf_spline(x), rv.cdf(x))\n\n def test_integrate_cdf(self):\n rv = Cont([0, 1, 2], [0, 1, 0])\n assert np.allclose(rv.integrate_cdf(-10, 10), quad(rv.cdf, -10, 10)[0])\n\n def test_convert(self):\n import randomvars._boolean as bool\n import randomvars._discrete as disc\n import randomvars._mixture as mixt\n\n rv = Cont([0, 1, 2], [0, 1, 0])\n\n # By default and supplying `None` should return self\n assert rv.convert() is rv\n assert rv.convert(None) is rv\n\n # Converting to Bool should result into boolean with probability of\n # `False` being 0 (because probability of continuous RV being exactly\n # zero is 0).\n out_bool = rv.convert(\"Bool\")\n assert isinstance(out_bool, bool.Bool)\n assert out_bool.prob_true == 1.0\n\n # Converting to own class should return self\n out_cont = rv.convert(\"Cont\")\n assert out_cont is rv\n\n # Converting to Disc should result into discrete RV with the same `x`\n # values as in input's xy-grid\n out_disc = rv.convert(\"Disc\")\n assert isinstance(out_disc, disc.Disc)\n assert_array_equal(out_disc.x, rv.x)\n\n # Converting to Mixt should result into degenerate mixture with only\n # continuous component\n out_mixt = rv.convert(\"Mixt\")\n assert isinstance(out_mixt, mixt.Mixt)\n assert out_mixt.cont is rv\n assert out_mixt.weight_cont == 1.0\n\n # Any other target class should result into error\n with pytest.raises(ValueError, match=\"one of\"):\n rv.convert(\"aaa\")\n\n\nclass TestFromRVAccuracy:\n \"\"\"Accuracy of `Cont.from_rv()`\"\"\"\n\n # Output of `from_rv()` should have CDF that differs from original CDF by\n # no more than `thres`\n @pytest.mark.slow\n @pytest.mark.parametrize(\n \"distr_dict,thres\",\n [\n (DISTRIBUTIONS_COMMON, 1e-4),\n (DISTRIBUTIONS_INF_DENSITY, 1e-3),\n (DISTRIBUTIONS_HEAVY_TAILS, 5e-3),\n ],\n )\n def test_cdf_maxerror(self, distr_dict, thres):\n test_passed = {\n name: TestFromRVAccuracy.from_rv_cdf_maxerror(distr) <= thres\n for name, distr in distr_dict.items()\n }\n\n assert all(test_passed.values())\n\n def test_detected_support(self):\n \"\"\"Test correct trimming of zero tails\"\"\"\n rv_ref = Cont([0, 1, 2, 3, 4], [0, 0, 1, 0, 0])\n rv_out = Cont.from_rv(declass(rv_ref))\n _test_equal_rand(rv_out, rv_ref.compress(), decimal=4)\n\n @staticmethod\n def from_rv_cdf_maxerror(rv_base, n_inner_points=10, **kwargs):\n rv_test = Cont.from_rv(rv_base, **kwargs)\n x_grid = augment_grid(rv_test.x, n_inner_points)\n err = rv_base.cdf(x_grid) - rv_test.cdf(x_grid)\n return np.max(np.abs(err))\n\n\nclass 
TestFromSampleAccuracy:\n \"\"\"Accuracy of `Cont.from_sample()`\"\"\"\n\n # Output of `from_sample()` should differ from original density estimate by\n # no more than `thres` (with default density estimator)\n @pytest.mark.slow\n @pytest.mark.parametrize(\n \"distr_dict,thres\",\n [\n (DISTRIBUTIONS_COMMON, 1e-4),\n (DISTRIBUTIONS_INF_DENSITY, 1.5e-4),\n (DISTRIBUTIONS_HEAVY_TAILS, 1e-4),\n ],\n )\n def test_close_cdf(self, distr_dict, thres):\n rng = np.random.default_rng(101)\n test_passed = {\n name: TestFromSampleAccuracy.simulated_cdf_error(distr, rng) <= thres\n for name, distr in distr_dict.items()\n }\n\n assert all(test_passed.values())\n\n @pytest.mark.slow\n def test_density_range(self):\n density_mincoverage = config.density_mincoverage\n estimator_cont = config.estimator_cont\n rng = np.random.default_rng(101)\n\n def generate_density_coverage(distr):\n x = distr.rvs(size=100, random_state=rng)\n density = estimator_cont(x)\n rv = Cont.from_sample(x)\n return quad(density, rv.x[0], rv.x[-1])[0]\n\n test_passed = {\n distr_name: generate_density_coverage(distr) >= density_mincoverage\n for distr_name, distr in DISTRIBUTIONS.items()\n }\n\n assert all(test_passed.values())\n\n @staticmethod\n def simulated_cdf_error(distr, rng):\n x = distr.rvs(size=100, random_state=rng)\n\n # Testing with `gaussian_kde` as the most used density estimator. This\n # also enables to use rather fast way of computing CDF of estimated\n # density via `integrate_box_1d` method.\n with config.context({\"estimator_cont\": gaussian_kde}):\n rv = Cont.from_sample(x)\n density = config.estimator_cont(x)\n\n x_grid = augment_grid(rv.x, 10)\n\n # Interestingly enough, direct computation with `-np.inf` as left\n # integration limit is both accurate and more efficient than computing\n # integrals for each segment and then use `np.cumsum()`. Probably this\n # is because integration of gaussian curves with infinite left limit is\n # done directly through gaussian CDF.\n cdf_grid = np.array(\n [density.integrate_box_1d(-np.inf, cur_x) for cur_x in x_grid]\n )\n\n err = cdf_grid - rv.cdf(x_grid)\n return np.max(np.abs(err))\n\n\ndef test__extend_range():\n def extra_estimator(x):\n x_min, x_max = x.min(), x.max()\n prob_height = 1 / (x_max - x_min + 1)\n\n def res(x):\n return np.where(\n ((x_min < x) & (x < x_max)) | ((x_max + 1 < x) & (x < x_max + 2)),\n prob_height,\n 0,\n )\n\n return res\n\n norm = distrs.norm()\n rng = np.random.default_rng(101)\n x = norm.rvs(100, random_state=rng)\n\n with config.context({\"estimator_cont\": extra_estimator}):\n rv = Cont.from_sample(x)\n\n assert (rv.x[0] <= x.min()) and (rv.x[-1] >= x.max())\n",
"import numpy as np\nfrom scipy.stats.distributions import norm, beta\nfrom scipy.integrate import quad\nimport matplotlib.pyplot as plt\n\nfrom randomvars import Cont\nfrom randomvars.options import config\n\n# %% `from_sample()` from `Cont`\ndef sklearn_estimator_cont(*args, **kwargs):\n from sklearn.neighbors import KernelDensity\n\n def estimator_cont(x):\n dens = KernelDensity(*args, **kwargs)\n dens.fit(x.reshape(-1, 1))\n\n def res(x):\n x = np.asarray(x).reshape(-1, 1)\n return np.exp(dens.score_samples(x))\n\n return res\n\n return estimator_cont\n\n\ndef statsmodels_estimator_cont(*args, **kwargs):\n import statsmodels.api as sm\n\n def estimator_cont(x):\n density_class = sm.nonparametric.KDEUnivariate(x)\n density_class.fit()\n\n def res(x):\n return density_class.evaluate(x)\n\n return res\n\n return estimator_cont\n\n\ndef describe_output(rv, sample, name):\n estimator_cont = config.estimator_cont\n density = estimator_cont(sample)\n integral = quad(density, rv.x[0], rv.x[-1])[0]\n print(\n f\"\"\"\n {name}:\n Grid number of elements = {len(rv.x)}\n Integral coverage = {integral}\n Density range = {rv.x[0], rv.x[-1]}\n \"\"\"\n )\n\n\nnp.random.seed(101)\n\n# x = norm().rvs(size=10000)\nx = np.concatenate([norm().rvs(size=500), norm(loc=100).rvs(size=500)])\n# x = np.concatenate([norm().rvs(size=50), norm(loc=100).rvs(size=50)])\n# x = beta(a=99, b=1).rvs(size=10000)\n# beta1 = beta(a=10, b=20)\n# beta2 = beta(a=40, b=10)\n# x = np.concatenate([beta1.rvs(size=500), beta2.rvs(size=500)])\n# true_pdf = lambda x: 0.5 * beta1.pdf(x) + 0.5 * beta2.pdf(x)\n\nconfig.reset(\"estimator_cont\")\nrv_scipy = Cont.from_sample(x)\ndescribe_output(rv_scipy, x, \"Scipy\")\n\nwith config.context({\"estimator_cont\": sklearn_estimator_cont()}):\n rv_sklearn = Cont.from_sample(x)\n describe_output(rv_sklearn, x, \"Sklearn\")\n\nwith config.context({\"estimator_cont\": statsmodels_estimator_cont()}):\n rv_statsmodels = Cont.from_sample(x)\n describe_output(rv_statsmodels, x, \"Statsmodels\")\n\nplt.plot(rv_scipy.x, rv_scipy.y, \"-k\")\nplt.plot(rv_sklearn.x, rv_sklearn.y, \"-b\")\nplt.plot(rv_statsmodels.x, rv_statsmodels.y, \"-m\")\n# plt.plot(rv_scipy.x, true_pdf(rv_scipy.x), \"-r\")\nplt.show()\n\n\n# %% Stress testing\nimport time\n\nimport scipy.stats.distributions as distrs\n\n\nDISTRIBUTIONS = {\n # Common distributions\n \"beta\": distrs.beta(a=10, b=20),\n \"chi_sq\": distrs.chi2(df=10),\n \"expon\": distrs.expon(),\n \"f\": distrs.f(dfn=20, dfd=20),\n \"gamma\": distrs.gamma(a=10),\n \"lognorm\": distrs.lognorm(s=0.5),\n \"norm\": distrs.norm(),\n \"norm2\": distrs.norm(loc=10),\n \"norm3\": distrs.norm(scale=0.1),\n \"norm4\": distrs.norm(scale=10),\n \"norm5\": distrs.norm(loc=10, scale=0.1),\n \"t\": distrs.t(df=10),\n \"uniform\": distrs.uniform(),\n \"uniform2\": distrs.uniform(loc=10, scale=0.1),\n \"weibull_max\": distrs.weibull_max(c=2),\n \"weibull_min\": distrs.weibull_min(c=2),\n # Distributions with infinite density\n \"inf_beta_both\": distrs.beta(a=0.4, b=0.6),\n \"inf_beta_left\": distrs.beta(a=0.5, b=2),\n \"inf_beta_right\": distrs.beta(a=2, b=0.5),\n \"inf_chi_sq\": distrs.chi2(df=1),\n \"inf_weibull_max\": distrs.weibull_max(c=0.5),\n \"inf_weibull_min\": distrs.weibull_min(c=0.5),\n # Distributions with heavy tails\n \"heavy_cauchy\": distrs.cauchy(),\n \"heavy_lognorm\": distrs.lognorm(s=1),\n \"heavy_t\": distrs.t(df=2),\n}\ndistr_names = np.array(list(DISTRIBUTIONS.keys()))\n\n\ndef test_from_sample_accuracy(rng, low, high):\n distr_name = rng.choice(distr_names, 
size=1)[0]\n size = rng.integers(low=low, high=high, size=1)[0]\n x = DISTRIBUTIONS[distr_name].rvs(size=size, random_state=rng)\n\n time_start = time.time()\n rv = Cont.from_sample(x)\n time_end = time.time()\n\n density = config.estimator_cont(x)\n max_diff = np.max(np.abs(density(rv.x) - rv.y)) * 10 ** 5\n\n print(\n f\"Distr: {distr_name:15} Sample size: {len(x):3} \"\n f\"Grid size: {len(rv.x):4} Max. diff.: {max_diff:3.0f}e-5 \"\n f\"Duration: {(time_end-time_start)*1000:4.0f} ms\"\n )\n\n\nrng = np.random.default_rng(1001)\n\nfor _ in range(100):\n test_from_sample_accuracy(rng, low=2, high=10)\n\nfor _ in range(100):\n test_from_sample_accuracy(rng, low=100, high=1001)\n\nfor _ in range(100):\n test_from_sample_accuracy(rng, low=1001, high=1002)\n\nfor _ in range(100):\n test_from_sample_accuracy(rng, low=10001, high=10002)\n"
] | [
[
"numpy.sqrt",
"numpy.linspace",
"scipy.stats.distributions.weibull_min",
"numpy.asarray",
"scipy.stats.distributions.f",
"numpy.cumsum",
"numpy.concatenate",
"scipy.stats.distributions.norm",
"numpy.zeros_like",
"scipy.stats.distributions.t",
"scipy.stats.distributions.weibull_max",
"numpy.where",
"numpy.random.default_rng",
"scipy.stats.distributions.gamma",
"numpy.allclose",
"numpy.arange",
"scipy.stats.distributions.expon",
"numpy.repeat",
"numpy.zeros",
"scipy.stats.distributions.laplace",
"scipy.stats.distributions.beta",
"scipy.integrate.quad",
"numpy.array",
"scipy.stats.distributions.lognorm",
"numpy.abs",
"scipy.stats.distributions.cauchy",
"numpy.testing.assert_array_equal",
"scipy.stats.distributions.uniform",
"scipy.stats.distributions.chi2"
],
[
"scipy.stats.distributions.weibull_min",
"numpy.asarray",
"scipy.stats.distributions.f",
"matplotlib.pyplot.plot",
"scipy.stats.distributions.norm",
"scipy.stats.distributions.t",
"scipy.stats.distributions.weibull_max",
"numpy.random.default_rng",
"scipy.stats.distributions.gamma",
"scipy.stats.distributions.expon",
"scipy.stats.distributions.beta",
"scipy.integrate.quad",
"sklearn.neighbors.KernelDensity",
"matplotlib.pyplot.show",
"scipy.stats.distributions.lognorm",
"numpy.random.seed",
"scipy.stats.distributions.cauchy",
"scipy.stats.distributions.uniform",
"scipy.stats.distributions.chi2"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ChenShawn/Grad-Paper-Experiments | [
"00fe1142dae4077b197e99253cc5a4ab759db2ff"
] | [
"TD3/artest.py"
] | [
"import gym\nimport pybullet_envs\nfrom PIL import Image\nimport argparse\nimport numpy as np\nimport torch\nimport copy\nimport os\nfrom sklearn.preprocessing import normalize as Normalize\n\nfrom models import TD3, TD3_adv2\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(\"TESTING\")\n parser.add_argument('-p', \"--policy\", type=str, default='td3', help=\"td3/adv\")\n parser.add_argument('-e', \"--env\", type=str, default=\"LunarLanderContinuous-v2\", help=\"env name\")\n parser.add_argument('-n', \"--n-episodes\", type=int, default=10, help=\"number of episodes\")\n parser.add_argument(\"--mode\", type=str, default='nr', help=\"nr (default) / pr\")\n parser.add_argument(\"--train-seed\", type=int, default=1, help=\"random seed for training\")\n parser.add_argument(\"--test-seed\", type=int, default=1, help=\"random seed for testing\")\n parser.add_argument(\"--nr-delta\", type=float, default=0.0, help=\"delta for NR-MDP\") \n parser.add_argument(\"--pr-prob\", type=float, default=0.0, help=\"prob of PR-MDP\")\n parser.add_argument(\"--render\", action=\"store_true\", default=False)\n return parser.parse_args()\n\n\n\ndef get_policy(arglist, kwargs, max_action):\n\t# Initialize policy\n\tif arglist.policy == \"td3\":\n\t\t# Target policy smoothing is scaled wrt the action scale\n\t\tkwargs[\"policy_noise\"] = 0.0\n\t\tkwargs[\"noise_clip\"] = 0.0\n\t\tkwargs[\"policy_freq\"] = 2\n\t\tpolicy = TD3.TD3(**kwargs)\n\telif arglist.policy == \"OurDDPG\":\n\t\tpolicy = OurDDPG.DDPG(**kwargs)\n\telif arglist.policy == \"DDPG\":\n\t\tpolicy = DDPG.DDPG(**kwargs)\n\telif arglist.policy == 'adv':\n\t\tkwargs['alpha'] = 0.01\n\t\tkwargs['adv_epsilon'] = 0.01\n\t\tkwargs['logdir'] = f'./tensorboard/{arglist.policy}_{arglist.env}_{arglist.train_seed}/'\n\t\tpolicy = TD3_adv2.TD3(**kwargs)\n\telse:\n\t\traise NotImplementedError\n\treturn policy\n\n\ndef test(arglist):\n env_name = arglist.env\n random_seed = arglist.test_seed\n n_episodes = arglist.n_episodes\n lr = 0.002\n max_timesteps = 3000\n render = arglist.render\n \n filename = \"{}_{}_{}\".format(arglist.policy, env_name, arglist.train_seed)\n directory = \"./train/{}\".format(env_name)\n \n env = gym.make(env_name)\n state_dim = env.observation_space.shape[0]\n action_dim = env.action_space.shape[0]\n max_action = float(env.action_space.high[0])\n\n # Set random seed\n env.seed(random_seed)\n torch.manual_seed(random_seed)\n np.random.seed(random_seed)\n\n kwargs = {\n\t\t\"state_dim\": state_dim,\n\t\t\"action_dim\": action_dim,\n\t\t\"max_action\": max_action,\n\t\t\"discount\": 0.99,\n\t\t\"tau\": 0.005,\n \"policy_noise\": 0.001,\n \"noise_clip\": 1.0,\n \"policy_freq\": 2\n\t}\n policy = get_policy(arglist, kwargs, max_action)\n policy.load(os.path.join(directory, filename))\n \n total_reward_list = []\n for ep in range(1, n_episodes+1):\n ep_reward = 0.0\n state = env.reset()\n for t in range(max_timesteps):\n\n action = policy.select_action(state)\n if arglist.mode == 'nr':\n # use truncated gaussian noise for both nr-mdp and pr-mdp settings\n noise = np.random.normal(0.0, max_action, size=action.shape)\n noise = np.clip(noise, -max_action, max_action)\n adv_action = (1.0 - arglist.nr_delta) * action + arglist.nr_delta * noise\n elif arglist.mode == 'pr':\n adv_action = action\n if np.random.rand() < arglist.pr_prob:\n adv_action = np.random.normal(0.0, action_dim, size=action.shape)\n adv_action = np.clip(adv_action, -max_action, max_action)\n else:\n raise NotImplementedError('invalid mode')\n\n state, reward, done, 
_ = env.step(adv_action)\n ep_reward += reward\n if render:\n env.render()\n if done:\n break\n \n print('Episode: {}\\tReward: {}'.format(ep, int(ep_reward)))\n total_reward_list.append(ep_reward)\n ep_reward = 0.0\n env.close()\n return total_reward_list\n\n\n\nif __name__ == '__main__':\n args = parse_arguments()\n\n reward_list = test(args)\n\n reward_array = np.array(reward_list, dtype=np.float32)\n reward_mean = reward_array.mean()\n reward_half_std = reward_array.std() / 2.0\n loginfo = 'policy={} env={} load_seed={} random_seed={} mode={} pr-prob={} nr-delta={} result={}±{}'\n print(loginfo.format(args.policy, args.env, args.train_seed, args.test_seed, args.mode, args.pr_prob, args.nr_delta, reward_mean, reward_half_std))\n\n"
] | [
[
"numpy.random.seed",
"numpy.clip",
"torch.manual_seed",
"numpy.random.normal",
"numpy.random.rand",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cyanide1x/covid19-dashboard | [
"7da01c2477c0691caf869d7401587dc85cacef29"
] | [
"_notebooks/canada_voc.py"
] | [
"import pandas as pd\nimport plotly.express as px\n\nurl = 'https://health-infobase.canada.ca/src/data/covidLive/covid19-epiSummary-voc.csv' \n\nprov_dict = {\n\t\"AB\" : \"Alberta\",\n\t\"BC\" : \"British Columbia\",\n\t\"CA\" : \"Canada\",\n\t\"MB\" : \"Manitoba\",\t\n\t\"NB\" : \"New Brunswick\",\n\t\"NL\" : \"Newfoundland and Labrador\",\n\t\"NS\" : \"Nova Scotia\",\n\t\"NT\" : \"Northwest Territories\",\n\t\"NU\" : \"Nunavut\",\n\t\"ON\" : \"Ontario\",\n\t\"PE\" : \"Prince Edward Island\",\n\t\"QC\" : \"Quebec\",\n\t\"SK\" : \"Saskatchewan\",\n\t\"YK\" : \"Yukon\",\n\t\"YT\" : \"Yukon\"\n}\n\ncolours = [\"#012169\", \"#E03C31\", \"green\", \"lightgray\"]\n\ndef get_province(prov):\n\ttry:\n\t\treturn prov_dict[prov]\n\texcept:\n\t\treturn prov\n\ndef get_area(prov):\n\tif prov == 'YK':\n\t\treturn 'YT'\n\telse:\n\t\treturn prov\n\n\ndf = pd.read_csv(url).fillna(0)\ndfclean = df[ (df[\"report_date\"] > \"2021\") & (df[\"report_date\"] < \"2023\") & (df[\"b117\"] >= 0) & (df[\"b1351\"] >= 0) & (df[\"p1\"] >= 0) ]\ndfclean[\"Province\"] = dfclean.apply(lambda r: get_province(r[\"prov\"]), axis=1)\ndfclean[\"Area\"] = dfclean.apply(lambda r: get_area(r[\"prov\"]), axis=1)\n\ndfAlpha = dfclean.copy()\ndfAlpha[\"Variant\"] = \"B.1.1.7 (Alpha)\"\ndfAlpha[\"Count\"] = dfAlpha[\"b117\"]\n\ndfBeta = dfclean.copy()\ndfBeta[\"Variant\"] = \"B.1.351 (Beta)\"\ndfBeta[\"Count\"] = dfBeta[\"b1351\"]\n\ndfGamma = dfclean.copy()\ndfGamma[\"Variant\"] = \"P.1 (Gamma)\"\ndfGamma[\"Count\"] = dfGamma[\"p1\"]\n\ndfvoc = dfAlpha.append(dfBeta).append(dfGamma)\n\ndfvocmax = dfvoc.groupby([\"Province\", \"Variant\"]).max().reset_index() \\\n[[\"Province\", \"Variant\", \"Count\"]] \\\n.rename(columns={\"Count\" : \"MaxVocCount\"}) \n\ndfvoc = pd.merge(dfvoc, dfvocmax, how=\"left\", left_on=[\"Province\", \"Variant\"], right_on=[\"Province\", \"Variant\"])\ndfvoc = dfvoc.sort_values(by=[\"Variant\", \"MaxVocCount\", \"Province\", \"report_date\"], ascending=[True, False, True, True])\n\ndfvoc[\"New\"] = dfvoc.groupby([\"Province\", \"Variant\"])[\"Count\"].diff()\n\ndfprov = dfvoc[dfvoc[\"Province\"] != \"Canada\"]\n\nfiglineprov = px.line(dfprov, \n x=\"report_date\", y=\"Count\", color=\"Variant\", facet_col=\"Province\", facet_col_wrap=1,\n labels={\"report_date\" : \"Reported date\", \"Count\" : \"Cumulative cases\", \"Province\" : \"Province/Territory\"},\n title=\"Cumulative cases with a variant of concern<br>by reported date by province/territory by variant\",\n height=5000, template=\"plotly_white\", color_discrete_sequence=colours, facet_row_spacing=0.025\n )\n\nfigbarprovd = px.bar(dfprov, x=\"report_date\", y=\"New\", color=\"Variant\", facet_col=\"Province\", facet_col_wrap=1,\n labels={\"report_date\" : \"Reported date\", \"New\" : \"New cases\", \"Province\" : \"Province/Territory\", \"Variant\" : \"Variant of concern\"},\n hover_name=\"Variant\",\n title=\"New cases with a variant of concern by reported date<br>by province/territory\",\n height=5000, template=\"plotly_white\", color_discrete_sequence=colours, facet_row_spacing=0.025\n )\n \ndfcan = dfvoc[dfvoc[\"Province\"] == \"Canada\"]\n\nfiglinecan_c = px.line(dfcan, \n x=\"report_date\", y=\"Count\", color=\"Variant\", \n labels={\"report_date\" : \"Reported date\", \"Count\" : \"Cumulative cases\"},\n title=\"Cumulative cases in Canada with a variant of concern<br>by reported date by variant\",\n template=\"plotly_white\", color_discrete_sequence=colours\n )\n \n\nfigbarcan_d = px.bar(dfcan, x=\"report_date\", y=\"New\", 
color=\"Variant\",\n labels={\"report_date\" : \"Reported date\", \"New\" : \"New cases\", \"Variant\" : \"Variant of concern\"},\n hover_name=\"Variant\",\n title=\"New cases in Canada with a variant of concern by reported date\",\n template=\"plotly_white\", color_discrete_sequence=colours\n )\n\n# Accessibility\n\ndate_name = \"Date\" \n\n\ndef join(df, area, variant):\n\tdfarea = dfclean[dfclean[\"Area\"] == area][[\"report_date\", variant]].rename(columns={\"report_date\" : date_name, variant : area}) \n\treturn pd.merge(df, dfarea, how=\"left\", left_on=[date_name], right_on=[date_name])\n\ndef create_table(variant):\n\tdate_max = dfclean.max()[\"report_date\"]\n\tdf_max = dfclean[(dfclean[\"Area\"]!=\"CA\") & (dfclean[\"report_date\"] == date_max)][[\"Area\", variant]].sort_values(by=[variant, \"Area\"], ascending=[False, True])\n\tareas = df_max[\"Area\"].tolist()\n\n\tdf_variant = pd.DataFrame()\n\tdf_variant[date_name] = dfclean[dfclean[\"Area\"]==\"CA\"][\"report_date\"]\n\n\tfor area in areas:\n\t df_variant = join(df_variant, area, variant)\n\t \n\tdf_variant = join(df_variant, \"CA\", variant)\n\treturn df_variant.set_index(date_name).sort_values(by=[date_name], ascending=[False]).round().astype(int)\n\t\ndf_Alpha = create_table(\"b117\")\ndf_Beta = create_table(\"b1351\")\ndf_Gamma = create_table(\"p1\")\n\n"
] | [
[
"pandas.merge",
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
yufei1900/cs231n-homework | [
"b7f5a03d5a2b650603074a7c43f203b465b74333"
] | [
"assignment1/cs231n/classifiers/neural_net.py"
] | [
"from __future__ import print_function\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass TwoLayerNet(object):\n \"\"\"\n A two-layer fully-connected neural network. The net has an input dimension of\n N, a hidden layer dimension of H, and performs classification over C classes.\n We train the network with a softmax loss function and L2 regularization on the\n weight matrices. The network uses a ReLU nonlinearity after the first fully\n connected layer.\n\n In other words, the network has the following architecture:\n\n input - fully connected layer - ReLU - fully connected layer - softmax\n\n The outputs of the second fully-connected layer are the scores for each class.\n \"\"\"\n\n def __init__(self, input_size, hidden_size, output_size, std=1e-4):\n \"\"\"\n Initialize the model. Weights are initialized to small random values and\n biases are initialized to zero. Weights and biases are stored in the\n variable self.params, which is a dictionary with the following keys:\n\n W1: First layer weights; has shape (D, H)\n b1: First layer biases; has shape (H,)\n W2: Second layer weights; has shape (H, C)\n b2: Second layer biases; has shape (C,)\n\n Inputs:\n - input_size: The dimension D of the input data.\n - hidden_size: The number of neurons H in the hidden layer.\n - output_size: The number of classes C.\n \"\"\"\n self.params = {}\n self.params['W1'] = std * np.random.randn(input_size, hidden_size)\n self.params['b1'] = np.zeros(hidden_size)\n self.params['W2'] = std * np.random.randn(hidden_size, output_size)\n self.params['b2'] = np.zeros(output_size)\n\n def loss(self, X, y=None, reg=0.0):\n \"\"\"\n Compute the loss and gradients for a two layer fully connected neural\n network.\n\n Inputs:\n - X: Input data of shape (N, D). Each X[i] is a training sample.\n - y: Vector of training labels. y[i] is the label for X[i], and each y[i] is\n an integer in the range 0 <= y[i] < C. This parameter is optional; if it\n is not passed then we only return scores, and if it is passed then we\n instead return the loss and gradients.\n - reg: Regularization strength.\n\n Returns:\n If y is None, return a matrix scores of shape (N, C) where scores[i, c] is\n the score for class c on input X[i].\n\n If y is not None, instead return a tuple of:\n - loss: Loss (data loss and regularization loss) for this batch of training\n samples.\n - grads: Dictionary mapping parameter names to gradients of those parameters\n with respect to the loss function; has the same keys as self.params.\n \"\"\"\n # Unpack variables from the params dictionary\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n N, D = X.shape\n\n # Compute the forward pass\n scores = None\n #############################################################################\n # TODO: Perform the forward pass, computing the class scores for the input. #\n # Store the result in the scores variable, which should be an array of #\n # shape (N, C). 
#\n #############################################################################\n out1 = np.maximum(0, X.dot(W1) + b1) # relu, (N, H)\n scores = out1.dot(W2) + b2 # (N, C)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n \n # If the targets are not given then jump out, we're done\n if y is None:\n return scores\n\n # Compute the loss\n loss = None\n #############################################################################\n # TODO: Finish the forward pass, and compute the loss. This should include #\n # both the data loss and L2 regularization for W1 and W2. Store the result #\n # in the variable loss, which should be a scalar. Use the Softmax #\n # classifier loss. #\n #############################################################################\n correct_class_score = scores[np.arange(N), y].reshape(N, 1)\n exp_sum = np.sum(np.exp(scores), axis=1).reshape(N, 1)\n loss = np.sum(np.log(exp_sum) - correct_class_score)\n loss /= N\n loss += 0.5 * reg * np.sum(W1 * W1)+ 0.5 * reg * np.sum(W2 * W2)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n # Backward pass: compute gradients\n grads = {}\n #############################################################################\n # TODO: Compute the backward pass, computing the derivatives of the weights #\n # and biases. Store the results in the grads dictionary. For example, #\n # grads['W1'] should store the gradient on W1, and be a matrix of same size #\n #############################################################################\n margin = np.exp(scores) / exp_sum\n margin[np.arange(N), y] += -1\n margin /= N #(N, C)\n dW2 = out1.T.dot(margin) #(H ,C)\n dW2 += reg * W2 \n grads['W2'] = dW2\n grads['b2'] = np.sum(margin, axis = 0)\n \n margin1 = margin.dot(W2.T) #(N, H)\n margin1[out1 <= 0] = 0\n dW1 = X.T.dot(margin1) #(D, H)\n dW1 += reg * W1 \n grads['W1'] = dW1\n grads['b1'] = np.sum(margin1, axis = 0)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, grads\n\n def train(self, X, y, X_val, y_val,\n learning_rate=1e-3, learning_rate_decay=0.95,\n reg=5e-6, num_iters=100,\n batch_size=200, verbose=False):\n \"\"\"\n Train this neural network using stochastic gradient descent.\n\n Inputs:\n - X: A numpy array of shape (N, D) giving training data.\n - y: A numpy array f shape (N,) giving training labels; y[i] = c means that\n X[i] has label c, where 0 <= c < C.\n - X_val: A numpy array of shape (N_val, D) giving validation data.\n - y_val: A numpy array of shape (N_val,) giving validation labels.\n - learning_rate: Scalar giving learning rate for optimization.\n - learning_rate_decay: Scalar giving factor used to decay the learning rate\n after each epoch.\n - reg: Scalar giving regularization strength.\n - num_iters: Number of steps to take when optimizing.\n - batch_size: Number of training examples to use per step.\n - verbose: boolean; if true print progress during optimization.\n \"\"\"\n num_train = X.shape[0]\n iterations_per_epoch = max(num_train / batch_size, 1)\n\n # Use SGD to optimize the parameters in self.model\n loss_history = []\n train_acc_history = []\n val_acc_history = []\n\n for it in 
range(num_iters):\n X_batch = None\n y_batch = None\n\n #########################################################################\n # TODO: Create a random minibatch of training data and labels, storing #\n # them in X_batch and y_batch respectively. #\n #########################################################################\n mask = np.random.choice(num_train, batch_size, replace=True)\n X_batch = X[mask]\n y_batch = y[mask]\n #########################################################################\n # END OF YOUR CODE #\n #########################################################################\n\n # Compute loss and gradients using the current minibatch\n loss, grads = self.loss(X_batch, y=y_batch, reg=reg)\n loss_history.append(loss)\n\n #########################################################################\n # TODO: Use the gradients in the grads dictionary to update the #\n # parameters of the network (stored in the dictionary self.params) #\n # using stochastic gradient descent. You'll need to use the gradients #\n # stored in the grads dictionary defined above. #\n #########################################################################\n self.params['W1'] -= learning_rate * grads['W1']\n self.params['W2'] -= learning_rate * grads['W2']\n self.params['b1'] -= learning_rate * grads['b1']\n self.params['b2'] -= learning_rate * grads['b2']\n #########################################################################\n # END OF YOUR CODE #\n #########################################################################\n\n if verbose and it % 100 == 0:\n print('iteration %d / %d: loss %f' % (it, num_iters, loss))\n\n # Every epoch, check train and val accuracy and decay learning rate.\n if it % iterations_per_epoch == 0:\n # Check accuracy\n train_acc = (self.predict(X_batch) == y_batch).mean()\n val_acc = (self.predict(X_val) == y_val).mean()\n train_acc_history.append(train_acc)\n val_acc_history.append(val_acc)\n\n # Decay learning rate\n learning_rate *= learning_rate_decay\n\n return {\n 'loss_history': loss_history,\n 'train_acc_history': train_acc_history,\n 'val_acc_history': val_acc_history,\n }\n\n def predict(self, X):\n \"\"\"\n Use the trained weights of this two-layer network to predict labels for\n data points. For each data point we predict scores for each of the C\n classes, and assign each data point to the class with the highest score.\n\n Inputs:\n - X: A numpy array of shape (N, D) giving N D-dimensional data points to\n classify.\n\n Returns:\n - y_pred: A numpy array of shape (N,) giving predicted labels for each of\n the elements of X. For all i, y_pred[i] = c means that X[i] is predicted\n to have class c, where 0 <= c < C.\n \"\"\"\n y_pred = None\n\n ###########################################################################\n # TODO: Implement this function; it should be VERY simple! #\n ###########################################################################\n out1 = np.maximum(0, X.dot(self.params['W1']) + self.params['b1']) # relu, (N, H)\n y_pred = np.argmax(out1.dot(self.params['W2']) + self.params['b2'],axis = 1) # (N, C)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return y_pred\n\n\n"
] | [
[
"numpy.log",
"numpy.random.choice",
"numpy.arange",
"numpy.random.randn",
"numpy.exp",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MarcoFavorito/breakout-env | [
"b41f9ed1da693874d7d34f83e7200fd51a59c97e"
] | [
"breakout_env/wrappers/wrappers.py"
] | [
"import copy\nfrom gym import Wrapper\nfrom pythogic.base.Symbol import Symbol\nfrom pythogic.base.Alphabet import Alphabet\nfrom pythogic.base.Formula import AtomicFormula, PathExpressionEventually, PathExpressionSequence, And, Not, \\\n LogicalTrue, PathExpressionStar\nfrom pythogic.base.utils import _to_pythomata_dfa\nfrom pythogic.ldlf_empty_traces.LDLf_EmptyTraces import LDLf_EmptyTraces\nimport numpy as np\nfrom pythomata.base.Simulator import Simulator\nfrom pythomata.base.utils import Sink\n\n\nclass BreakoutRABUWrapper(Wrapper):\n \"\"\"Env wrapper for bottom-up rows deletion\"\"\"\n def __init__(self, env):\n super().__init__(env)\n self.row_symbols = [Symbol(r) for r in [\"r0\", \"r1\", \"r2\"]]\n self.dfa = self._build_automata()\n self.goal_reward = 1000\n self.transition_reward = 100\n self.simulator = Simulator(self.dfa)\n self.last_status = None\n\n\n def reset(self):\n self.env.reset()\n self.simulator.reset()\n\n def step(self, action):\n obs, reward, done, _ = self.env.step(action)\n if done:\n # when we lose a life\n return obs, reward, done, _\n\n # overwrite old reward\n # reward = 0\n\n f = self.state2propositional_formula()\n\n old_state = self.simulator.cur_state\n self.simulator.make_transition(f)\n new_state = self.simulator.cur_state\n if new_state==Sink():\n done = True\n reward = -1000\n elif new_state in self.dfa.accepting_states:\n reward = 1000\n elif old_state!=new_state:\n reward = self.transition_reward\n\n return obs, reward, done or self.env.unwrapped.state.terminal, _\n\n\n\n def state2propositional_formula(self):\n e = self.unwrapped\n matrix = e.state.bricks.bricks_status_matrix\n row_status = np.all(matrix==0.0, axis=1)\n result = set()\n for rs, sym in zip(row_status, reversed(self.row_symbols)):\n if rs:\n result.add(sym)\n\n return frozenset(result)\n\n\n def _build_automata(self):\n rows = self.row_symbols\n atoms = [AtomicFormula(r) for r in rows]\n alphabet = Alphabet(set(rows))\n ldlf = LDLf_EmptyTraces(alphabet)\n f = PathExpressionEventually(\n PathExpressionSequence.chain([\n PathExpressionStar(And.chain([Not(atoms[0]), Not(atoms[1]), Not(atoms[2])])),\n PathExpressionStar(And.chain([atoms[0], Not(atoms[1]), Not(atoms[2])])),\n # Not(atoms[3]), Not(atoms[4]), Not(atoms[5])]),\n PathExpressionStar(And.chain([atoms[0], atoms[1], Not(atoms[2])])),\n # Not(atoms[3]), Not(atoms[4]), Not(atoms[5])]),\n # And.chain([atoms[0], atoms[1], atoms[2]]), # Not(atoms[3]), Not(atoms[4]), Not(atoms[5])]),\n # And.chain([atoms[0], atoms[1], atoms[2], atoms[3], Not(atoms[4]), Not(atoms[5])]),\n # And.chain([atoms[0], atoms[1], atoms[2], atoms[3], atoms[4], Not(atoms[5])]),\n # And.chain([atoms[0], atoms[1], atoms[2], atoms[3], atoms[4], atoms[5] ])\n ]),\n And.chain([atoms[0], atoms[1], atoms[2]])\n )\n nfa = ldlf.to_nfa(f)\n dfa = _to_pythomata_dfa(nfa)\n\n return dfa\n\n\n\n\n\n"
] | [
[
"numpy.all"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
salomonw/mixed-traffic-amod-route-rebalance | [
"7f1edeb195a7bfab835e596ad84deead2957943e",
"7f1edeb195a7bfab835e596ad84deead2957943e",
"7f1edeb195a7bfab835e596ad84deead2957943e"
] | [
"experiments/plots_journal_CARS.py",
"src/CARS.py",
"experiments/run_penRate_CARS.py"
] | [
"import src.tnet as tnet\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport pandas as pd\nimport math \n\nplt.style.use(['science','ieee', 'high-vis'])\n\n\ndef txt2list(fname):\n\treturn [line for line in open(fname)]\n\ndef read_result(fname):\n\tdf = pd.read_csv(fname)\n\tresults = df.T.values.tolist()\n\treturn results\n\ndef read_parameters(fname):\n\tdic = {}\n\tfor line in open(fname, 'r').readlines():\n\t\tp,v = line.split()\n\t\tdic[p] = v\n\treturn dic\n\n\ndef plot_topology(netname):\n netFile, gFile, fcoeffs, tstamp, dir_out = tnet.get_network_parameters(net_name=netname,\n\n experiment_name=netname + 'topo_plot')\n tNet = tnet.tNet(netFile=netFile, gFile=gFile, fcoeffs=fcoeffs)\n tNet.read_node_coordinates('data/pos/'+netname+'.txt')\n fig, ax = tnet.plot_network(tNet.G, width=0.3)\n return fig, ax\n\n\ndef plot_convergance(fname_sys, fname_usr):\n\treturn 1\n\n\ndef plot_costPenRate(fname, ax, parameters, k):\n\tj, cavsCost, noncavsCost, totCost, cavsFlow, nonCavsFlow, pedestrianFlow, rebalancingFlow, bikeFlow, subwayFlow = read_result(fname)\n\tif k == 'A':\n\t\tfor i in range(len(cavsCost)):\n\t\t\tcavsCost[i] = max(noncavsCost[i], cavsCost[i])\n\t\t\ttotCost[i] = max(noncavsCost[i], totCost[i])\n\tj = [round(.1 * i, 1) for i in range(11)]\n\tlstyle = ['-', '--', ':']\n\ti = 0\n\talg = 'CARS'+parameters['n:']\n\tax.plot(j, noncavsCost, label='Private', linestyle=lstyle[i], linewidth=2, marker='x')\n\tax.plot(j, cavsCost, label='AMoDs', linestyle=lstyle[i], linewidth=2, marker=\"^\")\n\tax.plot(j, totCost, label='Total', linestyle=lstyle[i], linewidth=2, marker='o')\n\tax.legend()\n\tax.set_xlabel('Penetration Rate')\n\tax.set_ylabel('Avg. Travel Time (min)')\n\tax.set_xlim((0, 1))\n\tax.legend(framealpha=0.8, fontsize='small', frameon=True, facecolor='w', fancybox='False')\n\t#ax.legend.get_frame().set_linewidth(0.2)\n\treturn ax\n\n\ndef plot_flowPenRate(fname, ax, parameters):\n\tn, cavsCost, noncavsCost, totCost, cavsFlow, nonCavsFlow, pedestrianFlow, rebalancingFlow, bikeFlow, subwayFlow = read_result(fname)\n\twidth = 0.9\n\tx_name = [round(.1 * i, 1) for i in range(11)]\n\tx = list(range(len(x_name)))\n\tp1 = ax.bar(x, nonCavsFlow, width, label='Private')\n\tp2 = ax.bar(x, cavsFlow, width,\n\t bottom=nonCavsFlow, label='AMoD')\n\tp3 = ax.bar(x, rebalancingFlow, width,\n\t bottom=[cavsFlow[i] + nonCavsFlow[i] for i in range(len(cavsFlow))], label='Rebalancing')\n\tif sum(subwayFlow)>10:\n\t\tp6 = ax.bar(x, subwayFlow, width,\n\t bottom=[cavsFlow[i] + nonCavsFlow[i] + rebalancingFlow[i] + pedestrianFlow[i] + bikeFlow[i] for i in\n\t range(len(cavsFlow))], label='Subway')\n\tif sum(pedestrianFlow)>10:\n\t\tp4 = ax.bar(x, pedestrianFlow, width,\n\t bottom=[cavsFlow[i] + nonCavsFlow[i] + rebalancingFlow[i] for i in range(len(cavsFlow))], label='Pedestrian')\n\tif sum(bikeFlow)>10:\n\t\tp5 = ax.bar(x, bikeFlow, width,\n\t bottom=[cavsFlow[i] + nonCavsFlow[i] + rebalancingFlow[i] + pedestrianFlow[i] for i in\n\t range(len(cavsFlow))], label='Biking')\n\n\n\tax.set_ylabel('Miles per mode of transport')\n\tax.set_xlabel('Penetration rate')\n\tax.set_xticks(x)\n\tax.set_xticklabels(x_name)\n\tax.legend(framealpha=0.8, fontsize='small', frameon=True, loc=3, facecolor='w', fancybox='False')\n\t#ax.legend.get_frame().set_linewidth(0.2)\n\treturn ax\n\n'''\ndire = '2021-01-08_11:51:44_penRate_NYC_1.5ASB_Reb_True'\nfname = 'results/' + dire + '/results.csv' \nparameters = read_parameters('results/' + dire + '/parameters.txt' )\n#print(read_result(fname))\n\nfig, ax = 
plt.subplots(1 ,figsize=(2.5,2))\nplot_costPenRate(fname, ax, parameters)\nplt.savefig('a.pdf')\n\nfig, ax = plt.subplots(1 ,figsize=(3.6,2))\nplot_flowPenRate(fname, ax, parameters)\nplt.savefig('b.pdf')\n'''\n\n# comparison\n\ndef plot_comparison(fnames, out):\n\tfig, ax = plt.subplots(ncols=2, \n\t\t\t\t\t\t\tnrows=len(fnames), \n\t\t\t\t\t\t#\twidth_ratios=[1,2], \n\t\t\t\t\t\t\tgridspec_kw={'width_ratios':[1,2]},\n\t\t\t\t\t\t\tfigsize=(3.6*1.7, 1.7*len(fnames)),\n\t\t\t\t\t\t\t#sharex=True, \n\t\t\t\t\t\t\tsharey=False)\n\tj = 0\n\tfor f in fnames:\n\t\tfname = 'results/' + f + '/results.csv'\n\t\tparameters = read_parameters('results/' + f + '/parameters.txt' )\n\t\tif out =='1c':\n\t\t\tplot_costPenRate(fname, ax[j,0], parameters, 'A')\n\t\telse:\n\t\t\tplot_costPenRate(fname, ax[j,0], parameters, 'B')\n\t\tplot_flowPenRate(fname, ax[j,1], parameters)\n\t\tj +=1\n\t#plt.legend(frameon=True, fancybox=False)\n\tplt.tight_layout()\n\tplt.savefig(out+'.pdf')\n\t#plt.show()\n\none = '2021-01-08_11/50/19_penRate_NYC_1.0A_Reb_True'.replace('/', ':')\ntwo = '2021-01-08_11/50/08_penRate_NYC_1.5A_Reb_True'.replace('/', ':')\nthree = '2021-01-08_11/51/44_penRate_NYC_2.0A_Reb_True'.replace('/', ':')\nfour = '2021-01-08_11/51/44_penRate_NYC_4.0A_Reb_True'.replace('/', ':')\nfnames = [one, two, three, four]\n\nplot_comparison(fnames,'1c')\n\n\none = '2021-01-08_11/50/19_penRate_NYC_1.0AS_Reb_True'.replace('/', ':')\ntwo = '2021-01-08_11/50/08_penRate_NYC_1.5AS_Reb_True'.replace('/', ':')\nthree = '2021-01-08_11/51/44_penRate_NYC_2.0AS_Reb_True'.replace('/', ':')\nfour = '2021-01-08_11/51/43_penRate_NYC_4.0AS_Reb_True'.replace('/', ':')\nfnames = [one, two, three, four]\n\nplot_comparison(fnames,'1_5c')\n\n\n\n\none = '2021-01-08_11/50/08_penRate_NYC_1.0ASP_Reb_True'.replace('/', ':')\ntwo = '2021-01-08_11/51/48_penRate_NYC_1.5ASP_Reb_True'.replace('/', ':')\t\nthree = '2021-01-08_11/51/44_penRate_NYC_2.0ASP_Reb_True'.replace('/', ':')\nfour = '2021-01-08_11/52/40_penRate_NYC_4.0ASP_Reb_True'.replace('/', ':')\nfnames = [one, two, three, four]\n\nplot_comparison(fnames,'2c')\n\n\n\none = '2021-01-08_11/50/08_penRate_NYC_1.0ASPB_Reb_True'.replace('/', ':')\ntwo = '2021-01-08_11/51/44_penRate_NYC_1.5ASPB_Reb_True'.replace('/', ':')\nthree = '2021-01-12_00:58:41_penRate_NYC_2.0ASPB_Reb_True'.replace('/', ':')\nfour = '2021-01-14_02:00:28_penRate_NYC_4.0ASPB_Reb_True'.replace('/', ':')\nfnames = [one, two, three, four]\n\nplot_comparison(fnames,'4c')\n\none = '2021-01-08_11/51/44_penRate_NYC_2.0A_Reb_True'.replace('/', ':')\ntwo = '2021-01-08_11/51/44_penRate_NYC_2.0AS_Reb_True'.replace('/', ':')\nthree = '2021-01-08_11/51/44_penRate_NYC_2.0ASP_Reb_True'.replace('/', ':')\nfour = '2021-01-12_00:58:41_penRate_NYC_2.0ASPB_Reb_True'.replace('/', ':')\nfnames = [one, two, three, four]\n\nplot_comparison(fnames,'4c')\n\n\n\n\n",
"\nfrom gurobipy import *\n#import pwlf as pw\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport networkx as nx\nfrom networkx.readwrite import json_graph\nimport json\nfrom src.utils import *\nimport src.pwapprox as pw\n\n\ndef get_theta(fun):\n return fun.fit_breaks[1:-1]\n\ndef get_beta(fun):\n return fun.slopes[1:]\n\n\ndef eval_travel_time(x, fcoeffs):\n return sum([fcoeffs[i]*x**i for i in range(len(fcoeffs))])\n\n\ndef eval_pw(a, b, theta, x):\n theta2 = theta.copy()\n theta2.append(1000)\n for i in range(len(theta2)-1):\n if (theta2[i]<=x) and (theta2[i+1]>x):\n y = b[i]+a[i]*x\n return y\n\n\n\ndef get_approx_fun(fcoeffs, range_=[0,2], nlines=3, theta=False, plot=False):\n # Generate data\n x = [i for i in list(np.linspace(range_[0], range_[1], 100))]\n y = [eval_travel_time(i, fcoeffs) for i in x]\n if theta==False:\n pws = pw.pwapprox(x, y, k=nlines)\n pws.fit_convex_boyd(N=30, L=30)\n rms = min(pws.rms_vec)\n i = pws.rms_vec.index(rms)\n a = pws.a_list[i]\n b = pws.b_list[i]\n theta = pws.thetas[i]\n theta.insert(0,0)\n theta.append(range_[1])\n else:\n pws = pw.pwapprox(x, y, k=nlines)\n pws.fit_convex_with_theta(theta)\n theta = theta\n a = pws.a\n rms = 0\n\n if plot == True:\n fig, ax = plt.subplots(2)\n ax[0].plot(x, y , label = 'Original', color='k')\n ypws = [eval_pw(a,b, theta[0:-1], i) for i in x]\n ax[0].plot(x, ypws, label='pwlinear', color='red')\n for th in theta:\n ax[0].axvline(x=th, linestyle=':')\n plt.grid()\n plt.xlabel('x')\n plt.ylabel('t(x)')\n plt.legend()\n plt.tight_layout()\n pws.plot_rms(ax=ax[1])\n plt.show()\n return theta, a, rms\n\n\n@timeit\ndef add_demand_cnstr(m, tnet, x, bush=False):\n # Set Constraints\n if bush==False:\n for j in tnet.G_supergraph.nodes():\n for w, d in tnet.g.items():\n if j == w[0]:\n m.addConstr(quicksum(m.getVarByName('x^'+str(w)+'_'+str(i)+'_'+str(j)) for i,l in tnet.G_supergraph.in_edges(nbunch=j)) + d == quicksum(m.getVarByName('x^'+str(w)+'_'+str(j)+'_'+str(k)) for l,k in tnet.G_supergraph.out_edges(nbunch=j)))\n elif j == w[1]:\n m.addConstr(quicksum(m.getVarByName('x^' + str(w) + '_' + str(i) + '_' + str(j)) for i,l in tnet.G_supergraph.in_edges(nbunch=j)) == quicksum(m.getVarByName('x^' + str(w) + '_' + str(j) + '_' + str(k)) for l,k in tnet.G_supergraph.out_edges(nbunch=j)) + d)\n else:\n m.addConstr(quicksum(m.getVarByName('x^' + str(w) + '_' + str(i) + '_' + str(j)) for i,l in tnet.G_supergraph.in_edges(nbunch=j)) == quicksum(m.getVarByName('x^' + str(w) + '_' + str(j) + '_' + str(k)) for l,k in tnet.G_supergraph.out_edges(nbunch=j)))\n else:\n p = {j:0 for j in tnet.G_supergraph.nodes()}\n for j in tnet.O:\n p[j] = sum([tnet.g[(s,t)] for s,t in tnet.g.keys() if t==j]) - sum([tnet.g[(s,t)] for s,t in tnet.g.keys() if s==j])\n\n # Global\n #[m.addConstr(quicksum([x[(i,j)] for i,l in tnet.G_supergraph.in_edges(nbunch=j)]) - quicksum([x[(j,k)] for l,k in tnet.G_supergraph.out_edges(nbunch=j)]) == p[j] ) for j in tnet.G_supergraph.nodes()]\n # Local\n #'''\n dsum = {s:sum([v for k,v in tnet.g.items() if k[0]==s]) for s in tnet.O}\n D = {s:list([d for o, d in tnet.g.keys() if o == s]) for s in tnet.O}\n #l = {s: [j for j in tnet.G_supergraph.nodes() if j not in set(D[s]) if j != s] for s in tnet.O}\n [m.addConstr(quicksum(m.getVarByName('x^' + str(s) + '_' + str(i) + '_' + str(j)) for i, l in tnet.G_supergraph.in_edges(nbunch=j)) - tnet.g[(s,j)] \\\n == quicksum(m.getVarByName('x^' + str(s) + '_' + str(j) + '_' + str(k)) for l, k in tnet.G_supergraph.out_edges(nbunch=j))) for s in tnet.O for j in D[s]]\n\n 
[m.addConstr(quicksum(m.getVarByName('x^' + str(s) + '_' + str(i) + '_' + str(s)) for i, l in tnet.G_supergraph.in_edges(nbunch=s)) \\\n == quicksum(m.getVarByName('x^' + str(s) + '_' + str(s) + '_' + str(k)) for l, k in tnet.G_supergraph.out_edges(nbunch=s)) - dsum[s]) for s in tnet.O]\n\n [m.addConstr(quicksum(m.getVarByName('x^' + str(s) + '_' + str(i) + '_' + str(j)) for i, l in tnet.G_supergraph.in_edges(nbunch=j)) \\\n == quicksum(m.getVarByName('x^' + str(s) + '_' + str(j) + '_' + str(k)) for l, k in tnet.G_supergraph.out_edges(nbunch=j))) \\\n for s in tnet.O for j in [j for j in tnet.G_supergraph.nodes() if j not in set(D[s]) if j != s]]\n '''\n for s in tnet.O:\n dsum = sum([v for k,v in tnet.g.items() if k[0]==s])\n D = [d for o, d in tnet.g.keys() if o == s]\n\n for j in tnet.G_supergraph.nodes(): \n if j in D:\n d1 = tnet.g[(s,j)]\n m.addConstr(quicksum(m.getVarByName('x^' + str(s) + '_' + str(i) + '_' + str(j)) for i, l in tnet.G_supergraph.in_edges(nbunch=j)) - d1 \\\n == quicksum(m.getVarByName('x^' + str(s) + '_' + str(j) + '_' + str(k)) for l, k in tnet.G_supergraph.out_edges(nbunch=j)))\n elif j == s:\n m.addConstr(quicksum(m.getVarByName('x^' + str(s) + '_' + str(i) + '_' + str(j)) for i, l in tnet.G_supergraph.in_edges(nbunch=j)) \\\n == quicksum(m.getVarByName('x^' + str(s) + '_' + str(j) + '_' + str(k)) for l, k in tnet.G_supergraph.out_edges(nbunch=j)) - dsum)\n else:\n m.addConstr(quicksum(m.getVarByName('x^' + str(s) + '_' + str(i) + '_' + str(j)) for i, l in tnet.G_supergraph.in_edges(nbunch=j)) \\\n == quicksum(m.getVarByName('x^' + str(s) + '_' + str(j) + '_' + str(k)) for l, k in tnet.G_supergraph.out_edges(nbunch=j)) )\n '''\n m.update()\n\n@timeit\ndef add_rebalancing_cnstr(m, tnet, xu):\n [m.addConstr(quicksum(m.getVarByName('x^R' + str(i) + '_' + str(j)) + xu[(i, j)] for i, l in tnet.G.in_edges(nbunch=j)) \\\n == quicksum(m.getVarByName('x^R' + str(j) + '_' + str(k)) + xu[j, k] for l, k in tnet.G.out_edges(nbunch=j))) for j in tnet.G.nodes()]\n\n #[m.addConstr(m.getVarByName('x^R'+str(i)+'_'+str(j))==0) for i,j in tnet.G_supergraph.edges() if (type(i)!=int) or (type(j)!=int)]\n\n m.update()\n\n@timeit\ndef set_optimal_flows(m , tnet, G_exogenous=False, bush=False):\n if bush:\n for i,j in tnet.G_supergraph.edges():\n tnet.G_supergraph[i][j]['flowNoRebalancing'] = sum(m.getVarByName('x^' + str(s) + '_' + str(i) + '_' + str(j)).X for s in tnet.O)\n tnet.G_supergraph[i][j]['flow'] = tnet.G_supergraph[i][j]['flowNoRebalancing']\n if isinstance(i, int) and isinstance(j, int):\n tnet.G_supergraph[i][j]['flowRebalancing'] = m.getVarByName('x^R' + str(i) + '_' + str(j)).X\n tnet.G_supergraph[i][j]['flow'] += tnet.G_supergraph[i][j]['flowRebalancing']\n #else:\n #tnet.G_supergraph[i][j]['flow'] = tnet.G_supergraph[i][j]['flowRebalancing'] + tnet.G_supergraph[i][j]['flowNoRebalancing']\n tnet.G_supergraph[i][j]['t_k'] = travel_time(tnet, i, j, G_exo=G_exogenous)\n else:\n for i,j in tnet.G_supergraph.edges():\n tnet.G_supergraph[i][j]['flowNoRebalancing'] = sum(m.getVarByName('x^' + str(w) + '_' + str(i) + '_' + str(j)).X for w in tnet.g.keys())\n tnet.G_supergraph[i][j]['flow'] = tnet.G_supergraph[i][j]['flowNoRebalancing']\n if isinstance(i, int) and isinstance(j, int):\n tnet.G_supergraph[i][j]['flowRebalancing'] = m.getVarByName('x^R' + str(i) + '_' + str(j)).X\n tnet.G_supergraph[i][j]['flow'] += tnet.G_supergraph[i][j]['flowRebalancing']\n\n #else:\n # tnet.G_supergraph[i][j]['flow'] = m.getVarByName('x^R' + str(i) + '_' + str(j)).X + sum(m.getVarByName('x^' + 
str(w) + '_' + str(i) + '_' + str(j)).X for w in tnet.g.keys())\n #tnet.G_supergraph[i][j]['flowRebalancing'] = m.getVarByName('x^R' + str(i) + '_' + str(j)).X\n #tnet.G_supergraph[i][j]['flowNoRebalancing'] = sum(m.getVarByName('x^' + str(w) + '_' + str(i) + '_' + str(j)).X for w in tnet.g.keys())\n tnet.G_supergraph[i][j]['t_k'] = travel_time(tnet, i, j, G_exo=G_exogenous)\n\n\n\n@timeit\ndef set_optimal_rebalancing_flows(m,tnet):\n for i,j in tnet.G.edges():\n tnet.G_supergraph[i][j]['flow'] += m.getVarByName('x^R' + str(i) + '_' + str(j)).X\n tnet.G_supergraph[i][j]['flowRebalancing'] = m.getVarByName('x^R' + str(i) + '_' + str(j)).X\n tnet.G_supergraph[i][j]['flowNoRebalancing'] = tnet.G[i][j]['flow'] - tnet.G[i][j]['flowRebalancing']\n\n\ndef eval_obj_funct(tnet, G_exogenous):\n Vt, Vd, Ve = set_CARS_par(tnet)\n obj = Vt * get_totalTravelTime_without_Rebalancing(tnet, G_exogenous=G_exogenous)\n obj = obj + sum([(Vd*tnet.G_supergraph[i][j]['t_0'] + Ve *tnet.G_supergraph[i][j]['e']) * (tnet.G_supergraph[i][j]['flow']-tnet.G_supergraph[i][j]['flowNoRebalancing']) \\\n for i,j in tnet.G.edges()])\n return obj/tnet.totalDemand\n\ndef set_CARS_par(tnet):\n # Set obj func parameters\n Vt = 24.4\n Vd = 0.286\n Ve = 0.247\n # Set the electricity constant\n ro = 1.25\n Af = 0.4\n cd = 1\n cr = 0.008\n mv = 750\n g = 9.81\n nu = 0.72\n\n for i,j in tnet.G_supergraph.edges():\n tnet.G_supergraph[i][j]['e'] = (ro/2 *Af*cd * (tnet.G_supergraph[i][j]['t_0']/tnet.G_supergraph[i][j]['length'])**2 *cr * mv * g)* tnet.G_supergraph[i][j]['length']/nu\n return Vt, Vd, Ve\n\n@timeit\ndef set_exogenous_flow(tnet, exogenous_G):\n # Set exogenous flow\n exo_G = tnet.G_supergraph.copy()\n for i, j in tnet.G_supergraph.edges():\n exo_G[i][j]['flow'] = 0\n if exogenous_G != False:\n for i,j in exogenous_G.edges():\n exo_G[i][j]['flow'] = exogenous_G[i][j]['flow']\n return exo_G\n\n\n\n'''\n for i,j in tnet.G_supergraph.edges():\n obj += Vt *tnet.G_supergraph[i][j]['t_0'] * quicksum(m.getVarByName('x^'+str(w)+'_'+str(i)+'_'+str(j)) for w in tnet.g.keys()) \\\n + Vt * (tnet.G_supergraph[i][j]['t_0'] * beta[0]/tnet.G_supergraph[i][j]['capacity']) * m.getVarByName('e^1_'+str(i)+'_'+str(j)) \\\n * (m.getVarByName('e^1_'+str(i)+'_'+str(j)) + theta[0]*tnet.G_supergraph[i][j]['capacity'] - exogenous_G[i][j]['flow']) \\\n + Vt * (tnet.G_supergraph[i][j]['t_0'] * beta[1]/tnet.G_supergraph[i][j]['capacity']) * m.getVarByName('e^2_'+str(i)+'_'+str(j)) \\\n * (m.getVarByName('e^2_'+str(i)+'_'+str(j)) + theta[1]*tnet.G_supergraph[i][j]['capacity'] - exogenous_G[i][j]['flow']) \\\n + Vt * (tnet.G_supergraph[i][j]['t_0'] * beta[0]/tnet.G_supergraph[i][j]['capacity'] * m.getVarByName('e^2_'+str(i)+'_'+str(j)) \\\n * (theta[1]*tnet.G_supergraph[i][j]['capacity'] - theta[0]*tnet.G_supergraph[i][j]['capacity']) )\n\n for i,j in tnet.G.edges():\n obj += (Vd * tnet.G_supergraph[i][j]['t_0'] + Ve * tnet.G_supergraph[i][j]['e']) * ( \\\n #sum(m.getVarByName('x^' + str(w) + '_' + str(i) + '_' + str(j)) for w in tnet.g.keys()) +\\\n m.getVarByName('x^R' + str(i) + '_' + str(j)))\n'''\n@timeit\ndef get_obj_CARSn(m, tnet, xu, theta, a, exogenous_G, linear=False):#, userCentric=False):\n #TODO: this could be written more efficiently, include user-centric approach\n #if linear:\n #userCentric = False\n #if userCentric != True:\n Vt, Vd, Ve = set_CARS_par(tnet)\n if linear == True:\n obj = quicksum(Vt * tnet.G_supergraph[i][j]['t_0'] * xu[(i, j)] for i,j in tnet.G_supergraph.edges())\n obj += quicksum(\\\n quicksum( Vt * 
tnet.G_supergraph[i][j]['t_0'] * a[l]/tnet.G_supergraph[i][j]['capacity'] *( \\\n m.getVarByName('e^'+str(l)+'_'+str(i)+'_'+str(j)) * (0+quicksum(((theta[k + 1] - theta[k])*tnet.G_supergraph[i][j]['capacity']) for k in range(0,l))) \\\n + m.getVarByName('e^'+str(l)+'_'+str(i)+'_'+str(j)) * ( (theta[l + 1] - theta[l])*tnet.G_supergraph[i][j]['capacity'] ) \\\n + (theta[l+1] - theta[l])*tnet.G_supergraph[i][j]['capacity']*(0+quicksum(m.getVarByName('e^'+str(k)+'_'+str(i)+'_'+str(j)) for k in range(l+1, len(theta)-1))) \\\n - m.getVarByName('e^'+str(l)+'_'+str(i)+'_'+str(j)) * exogenous_G[i][j]['flow'] \\\n ) for l in range(len(theta)-1)) \\\n + (Vd * tnet.G_supergraph[i][j]['t_0'] + Ve * tnet.G_supergraph[i][j]['e']) * m.getVarByName('x^R' + str(i) + '_' + str(j))\\\n for i,j in tnet.G.edges())\n '''\n obj = quicksum( Vt * tnet.G_supergraph[i][j]['t_0'] * xu[(i, j)] \\\n + quicksum( Vt * tnet.G_supergraph[i][j]['t_0'] * a[l]/tnet.G_supergraph[i][j]['capacity'] * m.getVarByName('e^'+str(l)+'_'+str(i)+'_'+str(j)) * (quicksum(((theta[k + 1] - theta[k]) * tnet.G_supergraph[i][j]['capacity']) for k in range(l))) \\\n + Vt * tnet.G_supergraph[i][j]['t_0'] * a[l]/tnet.G_supergraph[i][j]['capacity'] * m.getVarByName('e^'+str(l)+'_'+str(i)+'_'+str(j)) * ((theta[l+1]-theta[l])*tnet.G_supergraph[i][j]['capacity']) \\\n + Vt * tnet.G_supergraph[i][j]['t_0'] * a[l]/tnet.G_supergraph[i][j]['capacity'] *(theta[l+1] - theta[l])*tnet.G_supergraph[i][j]['capacity']*(quicksum(m.getVarByName('e^'+str(k)+'_'+str(i)+'_'+str(j)) for k in range(l+1, len(theta)-1))) \\\n - Vt * tnet.G_supergraph[i][j]['t_0'] * a[l]/tnet.G_supergraph[i][j]['capacity'] * m.getVarByName('e^'+str(l)+'_'+str(i)+'_'+str(j)) * exogenous_G[i][j]['flow'] \\\n for l in range(len(theta)-1)) \\\n + (Vd * tnet.G_supergraph[i][j]['t_0'] + Ve * tnet.G_supergraph[i][j]['e']) * m.getVarByName('x^R' + str(i) + '_' + str(j)) \\\n for i,j in tnet.G_supergraph.edges())\n '''\n else:\n obj = quicksum(Vt * tnet.G_supergraph[i][j]['t_0'] * xu[(i, j)] for i,j in tnet.G_supergraph.edges())\n obj = obj+ quicksum(\\\n quicksum( Vt * tnet.G_supergraph[i][j]['t_0'] * a[l]/tnet.G_supergraph[i][j]['capacity'] * m.getVarByName('e^'+str(l)+'_'+str(i)+'_'+str(j)) * (quicksum(((theta[k + 1] - theta[k])*tnet.G_supergraph[i][j]['capacity']) for k in range(0,l))) \\\n + Vt * tnet.G_supergraph[i][j]['t_0'] * a[l]/tnet.G_supergraph[i][j]['capacity'] * m.getVarByName('e^'+str(l)+'_'+str(i)+'_'+str(j)) * ( m.getVarByName('e^'+str(l)+'_'+str(i)+'_'+str(j))) \\\n + Vt * tnet.G_supergraph[i][j]['t_0'] * a[l]/tnet.G_supergraph[i][j]['capacity'] *(theta[l+1] - theta[l])*tnet.G_supergraph[i][j]['capacity']*(quicksum(m.getVarByName('e^'+str(k)+'_'+str(i)+'_'+str(j)) for k in range(l+1, len(theta)-1))) \\\n - Vt * tnet.G_supergraph[i][j]['t_0'] * a[l]/tnet.G_supergraph[i][j]['capacity'] * m.getVarByName('e^'+str(l)+'_'+str(i)+'_'+str(j)) * exogenous_G[i][j]['flow'] \\\n for l in range(len(theta)-1)) \\\n + (Vd * tnet.G_supergraph[i][j]['t_0'] + Ve * tnet.G_supergraph[i][j]['e']) * m.getVarByName('x^R' + str(i) + '_' + str(j))\\\n for i,j in tnet.G.edges())\n\n\n '''\n if linear == True:\n Vt, Vd, Ve = set_CARS_par(tnet)\n obj = quicksum( Vt * tnet.G_supergraph[i][j]['t_0'] * xu[(i, j)] \\\n + quicksum( Vt * tnet.G_supergraph[i][j]['t_0'] * a[l]/tnet.G_supergraph[i][j]['capacity'] * m.getVarByName('e^'+str(l)+'_'+str(i)+'_'+str(j)) * (quicksum(((theta[k + 1] - theta[k]) * tnet.G_supergraph[i][j]['capacity']) for k in range(l))) \\\n + Vt * tnet.G_supergraph[i][j]['t_0'] * 
a[l]/tnet.G_supergraph[i][j]['capacity'] * m.getVarByName('e^'+str(l)+'_'+str(i)+'_'+str(j)) * ( m.getVarByName('e^'+str(l)+'_'+str(i)+'_'+str(j))) \\\n + Vt * tnet.G_supergraph[i][j]['t_0'] * a[l]/tnet.G_supergraph[i][j]['capacity'] *(theta[l+1] - theta[l])*tnet.G_supergraph[i][j]['capacity']*(quicksum(m.getVarByName('e^'+str(k)+'_'+str(i)+'_'+str(j)) for k in range(l+1, len(theta)-1))) \\\n - Vt * tnet.G_supergraph[i][j]['t_0'] * a[l]/tnet.G_supergraph[i][j]['capacity'] * m.getVarByName('e^'+str(l)+'_'+str(i)+'_'+str(j)) * exogenous_G[i][j]['flow'] \\\n for l in range(len(theta)-1)) \\\n + (Vd * tnet.G_supergraph[i][j]['t_0'] + Ve * tnet.G_supergraph[i][j]['e']) * m.getVarByName('x^R' + str(i) + '_' + str(j)) \\\n for i,j in tnet.G_supergraph.edges())\n\n else:\n Vt, Vd, Ve = set_CARS_par(tnet)\n obj = quicksum(Vt * tnet.G_supergraph[i][j]['t_0'] * xu[(i,j)] for i,j in tnet.G_supergraph.edges())\n for i,j in tnet.G_supergraph.edges():\n t0 = tnet.G_supergraph[i][j]['t_0']\n mij = tnet.G_supergraph[i][j]['capacity']\n ue = exogenous_G[i][j]['flow']\n for l in range(len(theta)-1):\n Vtt_0al = Vt * t0 * a[l]/mij\n e_l = m.getVarByName('e^'+str(l)+'_'+str(i)+'_'+str(j))\n obj += Vtt_0al * e_l * (quicksum(((theta[k + 1] - theta[k]) * mij) for k in range(l)))\n obj += Vtt_0al * e_l * ( e_l)\n obj += Vtt_0al *(theta[l+1] - theta[l])*mij*(quicksum(m.getVarByName('e^'+str(k)+'_'+str(i)+'_'+str(j)) for k in range(l+1, len(theta)-1)))\n obj -= Vtt_0al* e_l * ue\n '''\n\n\n ''' \n else:\n #if linear == True:\n Vt, Vd, Ve = set_CARS_par(tnet)\n obj = 0\n obj = quicksum(Vt * tnet.G_supergraph[i][j]['t_0'] for i,j in tnet.G_supergraph.edges())\n for i,j in tnet.G_supergraph.edges():\n t0 = tnet.G_supergraph[i][j]['t_0']\n mij = tnet.G_supergraph[i][j]['capacity']\n ue = exogenous_G[i][j]['flow']\n for l in range(len(theta)-1):\n Vtt_0al = Vt * t0 * a[l] / mij\n e_l = m.getVarByName('e^' + str(l) + '_' + str(i) + '_' + str(j))\n obj += quicksum(Vtt_0al * e_l / ((theta[k + 1] - theta[k]) * mij - ue) for k in range(l))\n obj += Vtt_0al * (1 - e_l / ue) #Vtt_0al * (e_l * e_l - e_l * ue)\n obj += quicksum(Vtt_0al * ((theta[l + 1] - theta[l]) * mij / m.getVarByName(\n 'e^' + str(k) + '_' + str(i) + '_' + str(j)) - e_l * ue) for k in range(l + 1, len(theta) - 1))\n '''\n\n #obj += quicksum((Vd * tnet.G_supergraph[i][j]['t_0'] + Ve * tnet.G_supergraph[i][j]['e']) * m.getVarByName('x^R' + str(i) + '_' + str(j)) for i,j in tnet.G_supergraph.edges())\n return obj\n\n\n\n\n\n\n@timeit\ndef add_epsilon_cnstr(m, tnet, xu, n, theta, exogenous_G):\n [m.addConstr(m.getVarByName('e^'+str(l)+'_'+str(i)+'_'+str(j))\\\n >= xu[(i,j)] \\\n + m.getVarByName('x^R' + str(i) + '_' + str(j)) \\\n + exogenous_G[i][j]['flow'] \\\n - theta[l]*tnet.G_supergraph[i][j]['capacity'] \\\n - quicksum(m.getVarByName('e^'+str(l+k+1)+'_'+str(i)+'_'+str(j)) for k in range(n-l-1))) for i,j in tnet.G.edges() for l in range(n) ]\n #- quicksum(m.getVarByName('e^' + str(l + k) + '_' + str(i) + '_' + str(j)) for k in range(n - l))) for i, j in tnet.G_supergraph.edges() for l in range(n)]\n #[m.addConstr(m.getVarByName('e^'+str(l)+'_'+str(i)+'_'+str(j))\\\n # <= (theta[l+1]-theta[l])*tnet.G_supergraph[i][j]['capacity'] )for i,j in tnet.G_supergraph.edges() for l in range(n-1)]\n # maximum flow\n #[m.addConstr(m.getVarByName('e^' + str(l) + '_' + str(i) + '_' + str(j)) \\\n # <= (theta[l + 1] - theta[l]) * tnet.G_supergraph[i][j]['capacity']) for i, j in\n # tnet.G_supergraph.edges() for l in range(n - 1)]\n\n'''\n@timeit\ndef solve_CARSn(tnet, 
fcoeffs, n=3, exogenous_G=False, rebalancing=True, linear=False, method=-1, theta=False, a=False):\n if (theta==False) or (a==False):\n systemOptimal = True\n if systemOptimal:\n fcoeffs = SO_fcoeffs(fcoeffs)\n print(fcoeffs)\n theta, a, rms = get_approx_fun(fcoeffs=fcoeffs, nlines=n, range_=[0,3], plot=False)\n theta.append(3)\n exogenous_G = set_exogenous_flow(tnet, exogenous_G)\n # Start model\n m = Model('QP')\n m.setParam('OutputFlag',0)\n m.setParam('BarHomogeneous', 1)\n m.setParam('Method', method)\n m.update()\n # Define variables\n [m.addVar(lb=0, name='x^'+str(w)+'_'+str(i)+'_'+str(j)) for i,j in tnet.G_supergraph.edges() for w in tnet.g.keys()]\n if rebalancing == True:\n [m.addVar(lb=0, name='x^R'+str(i)+'_'+str(j)) for i,j in tnet.G_supergraph.edges()]\n else:\n [m.addVar(lb=0, ub=0, name='x^R' + str(i) + '_' + str(j)) for i, j in tnet.G_supergraph.edges()]\n\n [m.addVar(lb=0, name='e^'+str(l)+'_'+str(i)+'_'+str(j)) for i,j in tnet.G_supergraph.edges() for l in range(n)]\n m.update()\n # Set Obj\n obj = get_obj_CARSn(m, tnet, theta, a, exogenous_G, linear=linear)\n # Set Constraints\n add_demand_cnstr(m, tnet)\n if rebalancing==True:\n add_rebalancing_cnstr(m,tnet)\n add_epsilon_cnstr(m, tnet, n, theta, exogenous_G)\n m.update()\n # Solve problem\n m.setObjective(obj, GRB.MINIMIZE)\n m.update()\n m.optimize()\n # saving results\n set_optimal_flows(m, tnet)\n tnet.cars_obj = obj.getValue()\n od_flows = get_OD_result_flows(m, tnet)\n return tnet, m.Runtime, od_flows\n'''\n\ndef add_bike_cnstr(m, tnet, xu):\n [m.addConstr(\n \tquicksum(xu[(i,j)] for i,l in tnet.G_supergraph.in_edges(nbunch=j)) == quicksum(xu[(j,k)] for l,k in tnet.G_supergraph.out_edges(nbunch=j)))\n \t\tfor j in tnet.G_supergraph.nodes() if 'b' in str(j)] \n m.update()\n\n@timeit\ndef solve_bush_CARSn(tnet, fcoeffs, n=3, exogenous_G=False, rebalancing=True, linear=False, LP_method=-1, QP_method=-1, theta=False, a=False, bush=False, theta_n=3, userCentric=False, od_flows_flag=True):\n #TODO: implement option to select between origin or destination\n fc = fcoeffs.copy()\n if (theta==False) or (a==False):\n if userCentric:\n #fc=fc\n #fc.insert(0,0)\n fc = UC_fcoeffs(fc)\n #print(fc)\n #else:\n #fc.insert(0, 0)\n theta, a, rms = get_approx_fun(fcoeffs=fc, nlines=n, range_=[0,theta_n], plot=False)\n #a.append(a[-1])\n exogenous_G = set_exogenous_flow(tnet, exogenous_G)\n # Start model\n m = Model('CARS')\n m.setParam('OutputFlag',0 )\n m.setParam('BarHomogeneous', 1)\n #m.setParam(\"LogToConsole\", 0)\n #m.setParam(\"CSClientLog\", 0)\n if linear:\n m.setParam('Method', LP_method)\n else:\n m.setParam('Method', QP_method)\n m.update()\n\n # Find origins\n tnet.O = list(set([w[0] for w, d in tnet.g.items() if d > 0]))\n\n # Define variables\n if bush == True:\n [m.addVar(lb=0, name='x^'+str(s)+'_'+str(i)+'_'+str(j)) for i,j in tnet.G_supergraph.edges() for s in tnet.O]\n else:\n [m.addVar(lb=0, name='x^' + str(w) + '_' + str(i) + '_' + str(j)) for i, j in tnet.G_supergraph.edges() for w, d in tnet.g.items()]\n\n m.update()\n\n if userCentric==True:\n for i, j in tnet.G_supergraph.edges():\n if isinstance(i, int) and isinstance(j, int):\n continue\n else:\n for s in tnet.O:\n m.addConstr(m.getVarByName('x^'+str(s)+'_'+str(i)+'_'+str(j)) == 0)\n\n if rebalancing == True:\n [m.addVar(lb=0, name='x^R'+str(i)+'_'+str(j)) for i,j in tnet.G.edges()]\n else:\n [m.addVar(lb=0, ub=0, name='x^R' + str(i) + '_' + str(j)) for i, j in tnet.G.edges()]\n\n [m.addVar(name='e^'+str(l)+'_'+str(i)+'_'+str(j), \\\n lb=0)# 
ub=theta[l+1]-theta[l]) \\\n for i,j in tnet.G.edges() for l in range(n)]\n #[m.addVar(name='e^'+str(n+1)+'_'+str(i)+'_'+str(j), lb=0) for i,j in tnet.G.edges()]\n m.update()\n\n if bush==True:\n xu = {(i, j): quicksum(m.getVarByName('x^'+str(s)+'_'+str(i)+'_'+str(j)) for s in tnet.O) for i, j in tnet.G_supergraph.edges()}\n else:\n xu = {(i, j): quicksum(m.getVarByName('x^'+str(w)+'_'+str(i)+'_'+str(j)) for w, d in tnet.g.items()) for i, j in tnet.G_supergraph.edges()}\n\n # Set Obj\n obj = get_obj_CARSn(m, tnet, xu, theta, a, exogenous_G, linear=linear)\n\n # Set Constraints\n add_epsilon_cnstr(m, tnet, xu, n, theta, exogenous_G)\n m.update()\n #print(m.display())\n #pause\n add_demand_cnstr(m, tnet, xu, bush=bush)\n if rebalancing==True:\n add_rebalancing_cnstr(m, tnet, xu)\n\n add_bike_cnstr(m, tnet, xu)\n\n # Solve problem\n m.setObjective(obj, GRB.MINIMIZE)\n m.update()\n m.optimize()\n status = {2:'optimal', 3:'infeasible !', 4:'infeasible or unbounded !', 5:'unbounded', 6:'cutoff', 7:'time limit'}\n #print('solver stats: ' + status[GRB.OPTIMAL])\n\n # saving results\n set_optimal_flows(m, tnet, G_exogenous=exogenous_G, bush=bush)\n tnet.cars_obj = obj.getValue()\n if od_flows_flag==True:\n od_flows = get_OD_result_flows(m, tnet, bush=bush)\n return tnet, m.Runtime, od_flows\n else:\n return tnet, m.Runtime\n\n'''\ndef solve_CARS2(tnet, fcoeffs, exogenous_G=False, rebalancing=True):\n #TODO: add description\n fun = get_approx_fun(fcoeffs, range_=[0,2])\n #beta = get_beta(fun)\n #theta = get_theta(fun)\n #theta = [0.9, 1.5] #BPR EMA\n #beta = [1.0, 1.88] #BPR EMA\n #theta = [0.7, 1.2] #BPR NYC\n #beta = [0.5, 1.88] #BPR NYC\n theta = [0.7, 1.2] #estimated NYC\n beta = [0.5, 2.4] #estimated NYC\n #theta = [1.19, 1.2] #CARS\n #beta = [2.44, 2.45] #CARS\n\n #print(theta)\n #print(beta)\n\n Vt, Vd, Ve = set_CARS_par(tnet)\n\n # Set exogenous flow\n if exogenous_G == False:\n exogenous_G = tnet.G_supergraph.copy()\n for i,j in tnet.G_supergraph.edges():\n exogenous_G[i][j]['flow'] = 0\n\n # Start model\n m = Model('QP')\n m.setParam('OutputFlag',0)\n m.setParam('BarHomogeneous', 1)\n m.setParam('Method', 1)\n m.update()\n\n # Define variables\n [m.addVar(lb=0, name='x^'+str(w)+'_'+str(i)+'_'+str(j)) for i,j in tnet.G_supergraph.edges() for w in tnet.g.keys()]\n\n if rebalancing == True:\n [m.addVar(lb=0, name='x^R'+str(i)+'_'+str(j)) for i,j in tnet.G.edges()]\n else:\n [m.addVar(lb=0, ub=0, name='x^R' + str(i) + '_' + str(j)) for i, j in tnet.G.edges()]\n\n [m.addVar(lb=0, name='e^1_'+str(i)+'_'+str(j)) for i,j in tnet.G_supergraph.edges()]\n [m.addVar(lb=0, name='e^2_'+str(i)+'_'+str(j)) for i,j in tnet.G_supergraph.edges()]\n\n xu = {(i, j): quicksum(m.getVarByName('x^' + str(w) + '_' + str(i) + '_' + str(j)) for w, d in tnet.g.items()) for i, j in tnet.G_supergraph.edges()}\n\n m.update()\n\n # Set objective\n obj = 0\n\n for i,j in tnet.G_supergraph.edges():\n obj += Vt *tnet.G_supergraph[i][j]['t_0'] * quicksum(m.getVarByName('x^'+str(w)+'_'+str(i)+'_'+str(j)) for w in tnet.g.keys()) \\\n + Vt * (tnet.G_supergraph[i][j]['t_0'] * beta[0]/tnet.G_supergraph[i][j]['capacity']) * m.getVarByName('e^1_'+str(i)+'_'+str(j)) \\\n * (m.getVarByName('e^1_'+str(i)+'_'+str(j)) + theta[0]*tnet.G_supergraph[i][j]['capacity'] - exogenous_G[i][j]['flow']) \\\n + Vt * (tnet.G_supergraph[i][j]['t_0'] * beta[1]/tnet.G_supergraph[i][j]['capacity']) * m.getVarByName('e^2_'+str(i)+'_'+str(j)) \\\n * (m.getVarByName('e^2_'+str(i)+'_'+str(j)) + theta[1]*tnet.G_supergraph[i][j]['capacity'] - 
exogenous_G[i][j]['flow']) \\\n + Vt * (tnet.G_supergraph[i][j]['t_0'] * beta[0]/tnet.G_supergraph[i][j]['capacity'] * m.getVarByName('e^2_'+str(i)+'_'+str(j)) \\\n * (theta[1]*tnet.G_supergraph[i][j]['capacity'] - theta[0]*tnet.G_supergraph[i][j]['capacity']) )\n\n for i,j in tnet.G.edges():\n obj += (Vd * tnet.G_supergraph[i][j]['t_0'] + Ve * tnet.G_supergraph[i][j]['e']) * ( \\\n #sum(m.getVarByName('x^' + str(w) + '_' + str(i) + '_' + str(j)) for w in tnet.g.keys()) +\\\n m.getVarByName('x^R' + str(i) + '_' + str(j)))\n m.update()\n\n # Set Constraints\n add_demand_cnstr(m, tnet, xu)\n if rebalancing==True:\n add_rebalancing_cnstr(m,tnet, xu)\n for i,j in tnet.G.edges():\n m.addConstr(m.getVarByName('e^1_'+str(i)+'_'+str(j)) >= quicksum(m.getVarByName('x^' + str(w) + '_' + str(i) + '_' + str(j)) for w in tnet.g.keys()) + m.getVarByName('x^R'+str(i)+'_'+str(j)) + exogenous_G[i][j]['flow'] - theta[0]*tnet.G[i][j]['capacity'] - m.getVarByName('e^2_'+str(i)+'_'+str(j)))\n m.addConstr(m.getVarByName('e^2_'+str(i)+'_'+str(j)) >= quicksum(m.getVarByName('x^' + str(w) + '_' + str(i) + '_' + str(j)) for w in tnet.g.keys()) + m.getVarByName('x^R'+str(i)+'_'+str(j)) + exogenous_G[i][j]['flow'] - theta[1]*tnet.G[i][j]['capacity'])\n m.update()\n\n # Solve problem\n m.setObjective(obj, GRB.MINIMIZE)\n m.update()\n m.optimize()\n\n # saving results\n set_optimal_flows(m, tnet)\n tnet.cars_obj = obj.getValue()\n od_flows = get_OD_result_flows(m, tnet)\n return tnet, m.Runtime, od_flows\n\n\n\ndef solve_CARS(tnet, fcoeffs, exogenous_G=False, rebalancing=True, xa=0.01):\n fun = get_approx_fun(fcoeffs, xa=xa, nlines=2, range_=[0,2])\n beta = get_beta(fun)\n theta = get_theta(fun)\n\n #print(beta)\n #print(theta)\n\n Vt, Vd, Ve = set_CARS_par(tnet)\n # Set exogenous flow\n if exogenous_G == False:\n exogenous_G = tnet.G_supergraph.copy()\n for i,j in tnet.G_supergraph.edges():\n exogenous_G[i][j]['flow'] = 0\n\n m = Model('QP')\n m.setParam('OutputFlag',0)\n m.setParam('BarHomogeneous', 1)\n m.setParam('Method', 1)\n m.update()\n\n # Define variables\n [m.addVar(lb=0, name='x^'+str(w)+'_'+str(i)+'_'+str(j)) for i,j in tnet.G_supergraph.edges() for w in tnet.g.keys()]\n\n if rebalancing == True:\n [m.addVar(lb=0, name='x^R'+str(i)+'_'+str(j)) for i,j in tnet.G.edges()]\n else:\n [m.addVar(lb=0, ub=0, name='x^R' + str(i) + '_' + str(j)) for i, j in tnet.G.edges()]\n\n [m.addVar(lb=0, name='e^1_'+str(i)+'_'+str(j)) for i,j in tnet.G_supergraph.edges()]\n m.update()\n\n xu = {(i, j): quicksum(m.getVarByName('x^' + str(w) + '_' + str(i) + '_' + str(j)) for w, d in tnet.g.items()) for\n i, j in tnet.G_supergraph.edges()}\n\n # Set objective\n obj = 0\n for i,j in tnet.G_supergraph.edges():\n for w in tnet.g.keys():\n obj += Vt * tnet.G_supergraph[i][j]['t_0'] * m.getVarByName('x^'+str(w)+'_'+str(i)+'_'+str(j))\n obj += Vt * (tnet.G_supergraph[i][j]['t_0'] * beta[0]/tnet.G_supergraph[i][j]['capacity']) * m.getVarByName('e^1_'+str(i)+'_'+str(j)) \\\n * (m.getVarByName('e^1_'+str(i)+'_'+str(j)) + theta[0]*tnet.G_supergraph[i][j]['capacity'] - exogenous_G[i][j]['flow'])\n for i,j in tnet.G.edges():\n obj += (Vd * tnet.G_supergraph[i][j]['t_0'] + Ve * tnet.G_supergraph[i][j]['e']) * ( \\\n #sum(m.getVarByName('x^'+str(w)+'_'+str(i)+'_'+str(j)) for w in tnet.g.keys()) \\\n m.getVarByName('x^R'+str(i)+'_'+str(j)))\n m.update()\n\n # Set Constraints\n add_demand_cnstr(m, tnet, xu)\n if rebalancing==True:\n add_rebalancing_cnstr(m,tnet, xu)\n\n for i,j in tnet.G.edges():\n 
m.addConstr(m.getVarByName('e^1_'+str(i)+'_'+str(j)) >= quicksum(m.getVarByName('x^' + str(w) + '_' + str(i) + '_' + str(j)) for w in tnet.g.keys()) + m.getVarByName('x^R'+str(i)+'_'+str(j)) + exogenous_G[i][j]['flow'] - theta[0]* tnet.G[i][j]['capacity'])\n m.update()\n\n m.setObjective(obj, GRB.MINIMIZE)\n m.update()\n m.optimize()\n # saving results\n set_optimal_flows(m, tnet)\n tnet.cars_obj = obj.getValue()\n return tnet, m.Runtime\n\n\n\ndef get_obj_CARSn(m, tnet, xu, theta, a, exogenous_G, linear=True, userCentric=False):\n if userCentric != True:\n if linear == True:\n Vt, Vd, Ve = set_CARS_par(tnet)\n obj = quicksum(Vt * tnet.G_supergraph[i][j]['t_0'] * xu[(i,j)] for i,j in tnet.G_supergraph.edges())\n for i,j in tnet.G_supergraph.edges():\n t0 = tnet.G_supergraph[i][j]['t_0']\n for l in range(len(theta)-1):\n mij = tnet.G_supergraph[i][j]['capacity']\n ue = exogenous_G[i][j]['flow']\n Vtt_0al = Vt * t0 * a[l]/mij\n e_l = m.getVarByName('e^'+str(l)+'_'+str(i)+'_'+str(j))\n obj += quicksum(Vtt_0al * e_l * ((theta[k + 1] - theta[k]) * mij - ue) for k in range(l))\n obj += Vtt_0al * (e_l*(theta[l+1]-theta[l])*mij - e_l*ue)\n obj += quicksum(Vtt_0al * ((theta[l+1] - theta[l])*mij * m.getVarByName('e^'+str(k)+'_'+str(i)+'_'+str(j)) - e_l*ue) for k in range(l+1, len(theta)-1))\n else:\n Vt, Vd, Ve = set_CARS_par(tnet)\n obj = quicksum(Vt * tnet.G_supergraph[i][j]['t_0'] * xu[(i,j)] for i,j in tnet.G_supergraph.edges())\n for i,j in tnet.G_supergraph.edges():\n t0 = tnet.G_supergraph[i][j]['t_0']\n for l in range(len(theta)-1):\n mij = tnet.G_supergraph[i][j]['capacity']\n ue = exogenous_G[i][j]['flow']\n Vtt_0al = Vt * t0 * a[l]/mij\n e_l = m.getVarByName('e^'+str(l)+'_'+str(i)+'_'+str(j))\n obj += quicksum(Vtt_0al * e_l * ((theta[k + 1] - theta[k]) * mij - ue) for k in range(l))\n obj += Vtt_0al * (e_l * e_l - e_l * ue)\n obj += quicksum(Vtt_0al * ((theta[l+1] - theta[l])*mij * m.getVarByName('e^'+str(k)+'_'+str(i)+'_'+str(j)) - e_l*ue) for k in range(l+1, len(theta)-1))\n else:\n #if linear == True:\n Vt, Vd, Ve = set_CARS_par(tnet)\n obj = 0\n obj = quicksum(Vt * tnet.G_supergraph[i][j]['t_0'] for i,j in tnet.G_supergraph.edges())\n for i,j in tnet.G_supergraph.edges():\n t0 = tnet.G_supergraph[i][j]['t_0']\n mij = tnet.G_supergraph[i][j]['capacity']\n ue = exogenous_G[i][j]['flow']\n for l in range(len(theta)-1):\n Vtt_0al = Vt * t0 * a[l] / mij\n e_l = m.getVarByName('e^' + str(l) + '_' + str(i) + '_' + str(j))\n obj += quicksum(Vtt_0al * e_l / ((theta[k + 1] - theta[k]) * mij - ue) for k in range(l))\n obj += Vtt_0al * (1 - e_l / ue)#Vtt_0al * (e_l * e_l - e_l * ue)\n obj += quicksum(Vtt_0al * ((theta[l + 1] - theta[l]) * mij / m.getVarByName(\n 'e^' + str(k) + '_' + str(i) + '_' + str(j)) - e_l * ue) for k in range(l + 1, len(theta) - 1))\n\n\n obj += quicksum((Vd * tnet.G_supergraph[i][j]['t_0'] + Ve * tnet.G_supergraph[i][j]['e']) * m.getVarByName('x^R' + str(i) + '_' + str(j)) for i,j in tnet.G_supergraph.edges())\n return obj\n'''\n\n\ndef get_CARS_obj_val(tnet, G_exogenous):\n Vt, Vd, Ve = set_CARS_par(tnet)\n tt = get_totalTravelTime_without_Rebalancing(tnet, G_exogenous=G_exogenous)\n reb = get_rebalancing_total_cost(tnet)\n obj = Vt * tt + reb\n return obj\n\ndef get_rebalancing_total_cost(tnet):\n Vt, Vd, Ve = set_CARS_par(tnet)\n reb = get_rebalancing_flow(tnet)\n obj = sum((Vd * tnet.G_supergraph[i][j]['t_0'] + Ve * tnet.G_supergraph[i][j]['e']) * tnet.G_supergraph[i][j]['flowRebalancing'] for i,j in tnet.G_supergraph.edges())\n return obj\n\n@timeit\ndef 
get_OD_result_flows(m, tnet, bush=False):\n dic = {}\n if bush==True:\n for s in tnet.O:\n dic[s] = {}\n for i,j in tnet.G_supergraph.edges():\n dic[s][(i,j)] = m.getVarByName('x^' + str(s) + '_' + str(i) + '_' + str(j)).X\n else:\n for w in tnet.g.keys():\n dic[w] = {}\n for i,j in tnet.G_supergraph.edges():\n dic[w][(i,j)] = m.getVarByName('x^' + str(w) + '_' + str(i) + '_' + str(j)).X\n return dic\n\n\ndef solve_rebalancing(tnet, exogenous_G=0):\n Vt, Vd, Ve = set_CARS_par(tnet)\n # Set exogenous flow\n if exogenous_G == 0:\n exogenous_G = tnet.G.copy()\n for i, j in tnet.G.edges():\n exogenous_G[i][j]['flow'] = 0\n\n m = Model('QP')\n m.setParam('OutputFlag', 1)\n m.setParam('BarHomogeneous', 0)\n m.setParam('Method', 1)\n m.update()\n\n # Define variables\n [m.addVar(lb=0, name='x^R' + str(i) + '_' + str(j)) for i, j in tnet.G.edges()]\n m.update()\n\n # Set objective\n #obj = quicksum((Vd * tnet.G[i][j]['t_0'] + Ve * tnet.G[i][j]['e']) * m.getVarByName('x^R' + str(i) + '_' + str(j)) for i,j in tnet.G.edges())\n obj = quicksum((Vt * tnet.G[i][j]['t_k']) * m.getVarByName('x^R' + str(i) + '_' + str(j)) for i, j in\n tnet.G.edges())\n m.update()\n\n # Set Constraints\n for j in tnet.G.nodes():\n m.addConstr(quicksum(m.getVarByName('x^R' + str(i) + '_' + str(j)) + tnet.G[i][l]['flow'] for i, l in\n tnet.G.in_edges(nbunch=j)) \\\n == quicksum(m.getVarByName('x^R' + str(j) + '_' + str(k)) + tnet.G[j][k]['flow'] for l, k in\n tnet.G.out_edges(nbunch=j)))\n m.update()\n m.update()\n\n m.setObjective(obj, GRB.MINIMIZE)\n m.update()\n m.optimize()\n # saving results\n set_optimal_rebalancing_flows(m,tnet)\n return m.Runtime\n\ndef get_totalTravelTime_approx(tnet, fcoeffs, xa):\n fun = get_approx_fun(fcoeffs, xa=xa, nlines=2)\n beta = get_beta(fun)\n theta = get_theta(fun)\n print(beta)\n obj=0\n for i,j in tnet.G_supergraph.edges():\n if tnet.G_supergraph[i][j]['flow']/tnet.G_supergraph[i][j]['capacity'] <= xa:\n obj += tnet.G_supergraph[i][j]['flow'] * tnet.G_supergraph[i][j]['t_0']\n else:\n obj += tnet.G_supergraph[i][j]['flow'] * \\\n (tnet.G_supergraph[i][j]['t_0'] + (beta[0] *tnet.G_supergraph[i][j]['flow'] /tnet.G_supergraph[i][j]['capacity']))\n return obj\n\n\ndef travel_time(tnet, i, j, G_exo=False):\n \"\"\"\n evalute the travel time function for edge i->j\n\n Parameters\n ----------\n tnet: transportation network object\n i: starting node of edge\n j: ending node of edge\n\n Returns\n -------\n float\n\n \"\"\"\n if G_exo == False:\n return sum(\n [tnet.fcoeffs[n] * (tnet.G_supergraph[i][j]['flow'] / tnet.G_supergraph[i][j]['capacity']) ** n for n in\n range(len(tnet.fcoeffs))])\n else:\n return sum([tnet.fcoeffs[n] * ((tnet.G_supergraph[i][j]['flow'] + G_exo[i][j]['flow'])/ tnet.G_supergraph[i][j]['capacity']) ** n for n in range(len(tnet.fcoeffs))])\n\n\ndef get_totalTravelTime(tnet, G_exogenous=False):\n \"\"\"\n evalute the travel time function on the SuperGraph level\n\n Parameters\n ----------\n\n tnet: transportation network object\n\n Returns\n -------\n float\n\n \"\"\"\n if G_exogenous == False:\n return sum([tnet.G_supergraph[i][j]['flow'] * tnet.G_supergraph[i][j]['t_0'] * travel_time(tnet, i, j) for i, j in tnet.G_supergraph.edges()])\n else:\n ret = 0\n for i,j in tnet.G_supergraph.edges():\n if isinstance(tnet.G_supergraph[i][j]['type'], float)==True:\n ret += tnet.G_supergraph[i][j]['flow'] * tnet.G_supergraph[i][j]['t_0'] * travel_time_without_Rebalancing(tnet, i, j, G_exogenous[i][j]['flow'])\n else:\n ret += tnet.G_supergraph[i][j]['flow'] * 
tnet.G_supergraph[i][j]['t_0'] * travel_time_without_Rebalancing(tnet, i, j)\n return ret\n\n\ndef travel_time_without_Rebalancing(tnet, i, j, exo=0):\n \"\"\"\n evalute the travel time function for edge i->j\n\n Parameters\n ----------\n tnet: transportation network object\n i: starting node of edge\n j: ending node of edge\n\n Returns\n -------\n float\n\n \"\"\"\n return sum(\n [tnet.fcoeffs[n] * ((tnet.G_supergraph[i][j]['flowNoRebalancing'] +exo )/ tnet.G_supergraph[i][j]['capacity']) ** n for n in range(len(tnet.fcoeffs))])\n\ndef get_totalTravelTime_without_Rebalancing(tnet, G_exogenous=False):\n \"\"\"\n evalute the travel time function on the SuperGraph level\n\n Parameters\n ----------\n\n tnet: transportation network object\n\n Returns\n -------\n float\n\n \"\"\"\n if G_exogenous==False:\n return sum([tnet.G_supergraph[i][j]['flowNoRebalancing'] * tnet.G_supergraph[i][j][\n 't_0'] * travel_time(tnet, i, j) for i, j in\n tnet.G_supergraph.edges()])\n else:\n return sum([tnet.G_supergraph[i][j]['flowNoRebalancing'] * tnet.G_supergraph[i][j][\n 't_0'] * travel_time(tnet, i, j, G_exo=G_exogenous) for i, j in\n tnet.G_supergraph.edges()])\n\n\n\ndef get_pedestrian_flow(tnet):\n \"\"\"\n get pedestrian flow in a supergraph\n\n Parameters\n ----------\n\n tnet: transportation network object\n\n Returns\n -------\n float\n\n \"\"\"\n return sum([tnet.G_supergraph[i][j]['flow']*tnet.G_supergraph[i][j]['length'] for i,j in tnet.G_supergraph.edges() if tnet.G_supergraph[i][j]['type']=='p'])\n\n\ndef get_layer_flow(tnet, symb=\"'\"):\n \"\"\"\n get flow in a layer of supergraph\n\n Parameters\n ----------\n\n tnet: transportation network object\n\n Returns\n -------\n float\n\n \"\"\"\n return sum([tnet.G_supergraph[i][j]['flow']*tnet.G_supergraph[i][j]['length'] for i,j in tnet.G_supergraph.edges() if tnet.G_supergraph[i][j]['type']==symb])\n\n\ndef get_amod_flow(tnet):\n \"\"\"\n get amod flow in a supergraph\n\n Parameters\n ----------\n\n tnet: transportation network object\n\n Returns\n -------\n float\n\n \"\"\"\n return sum([tnet.G_supergraph[i][j]['flowNoRebalancing']*tnet.G_supergraph[i][j]['length'] for i,j in tnet.G.edges()])\n\ndef get_rebalancing_flow(tnet):\n \"\"\"\n get rebalancing flow in a supergraph\n\n Parameters\n ----------\n\n tnet: transportation network object\n\n Returns\n -------\n float\n\n \"\"\"\n return sum([(tnet.G_supergraph[i][j]['flow']-tnet.G_supergraph[i][j]['flowNoRebalancing'])*tnet.G_supergraph[i][j]['length'] for i,j in tnet.G.edges()])\n\n\ndef UC_fcoeffs(fcoeffs):\n f = [fcoeffs[i]/(i+1) for i in range(len(fcoeffs))]\n return f\n\ndef plot_supergraph_car_flows(tnet, weight='flow', width=3, cmap=plt.cm.Blues):\n #TODO: add explaination\n fig, ax = plt.subplots()\n pos = nx.get_node_attributes(tnet.G, 'pos')\n d = {(i,j): tnet.G_supergraph[i][j][weight] for i,j in tnet.G.edges()}\n edges, weights = zip(*d.items())\n labels = {(i,j): int(tnet.G_supergraph[i][j][weight]) for i,j in tnet.G.edges()}\n nx.draw(tnet.G, pos, node_color='b', edgelist=edges, edge_color=weights, width=width, edge_cmap=cmap)\n nx.draw_networkx_edge_labels(tnet.G, pos=pos, edge_labels=labels)\n return fig, ax\n\ndef plot_supergraph_pedestrian_flows(G, weight='flow', width=3, cmap=plt.cm.Blues):\n\t#TODO: add explaination\n\tfig, ax = plt.subplots()\n\tpos = nx.get_node_attributes(G, 'pos')\n\tedges, weights = zip(*nx.get_edge_attributes(G, weight).items())\n\tnx.draw(G, pos, node_color='b', edgelist=edges, edge_color=weights, width=width, edge_cmap=cmap)\n\treturn fig, 
ax\n\ndef supergraph2G(tnet):\n # TODO: add explaination\n tnet.G = tnet.G_supergraph.subgraph(list([i for i in tnet.G.nodes()]))\n\ndef G2supergraph(tnet):\n # TODO: add explaination\n #tnet.G_supergraph = tnet.G\n for i,j in tnet.G_supergraph.edges():\n try:\n tnet.G_supergraph[i][j]['flow'] = tnet.G[i][j]['flow']\n except:\n tnet.G_supergraph[i][j]['flow'] = 0\n tnet.G_supergraph[i][j]['flowNoRebalancing'] = tnet.G_supergraph[i][j]['flow']\n\ndef add_G_flows_no_rebalancing(array):\n # TODO: add description\n\n G = array[0].copy()\n for tn in array[1:]:\n for i, j in G.edges():\n G[i][j]['flow'] += tn[i][j]['flowNoRebalancing']\n return G\n\n\ndef solveMSAsocialCARS(tnet, exogenous_G=False):\n runtime = time.process_time()\n if exogenous_G == False:\n tnet.solveMSAsocial_supergraph()\n else:\n tnet.solveMSAsocial_supergraph(exogenous_G=exogenous_G)\n t = time.process_time() - runtime\n G2supergraph(tnet)\n return t, tnet.TAP.RG\n\n\ndef nx2json(G, fname, exo=False):\n if exo==False:\n D = G.copy()\n for i,j in D.edges():\n D[i][j]['flow'] = 0\n with open(fname, 'w') as outfile1:\n outfile1.write(json.dumps(json_graph.node_link_data(D)))\n else:\n with open(fname, 'w') as outfile1:\n outfile1.write(json.dumps(json_graph.node_link_data(exo)))\n\n'''\ndef solve_social_Julia(tnet, exogenous_G=False):\n\n # Save to json files\n nx.write_gml(tnet.G, \"tmp/G.txt\")\n nx.write_graphml(tnet.G_supergraph, \"tmp/G_supergraph.txt\")\n if exogenous_G != False:\n nx.write_graphml(exogenous_G, \"tmp/exogenous_G.txt\")\n \n f = open(\"tmp/g.txt\",\"w\")\n f.write( str(tnet.g) )\n f.close()\n\n f = open(\"tmp/fcoeffs.txt\",\"w\")\n f.write( str(tnet.fcoeffs) )\n f.close()\n'''\n\n\ndef juliaJson2nx(tnet, dict, exogenous_G=False):\n d = {}\n for key in dict.keys():\n orig, dest = key.split(',')\n orig = orig.split(\"(\")[1].replace('\"', '').replace(' ', '')\n dest = dest.split(\")\")[0].replace('\"', '').replace(' ', '')\n if \"'\" in orig:\n s = orig\n else:\n s = int(orig)\n if \"'\" in dest:\n t = dest\n else:\n t = int(dest)\n tnet.G_supergraph[s][t]['flow'] = dict[key]['flow']\n tnet.G_supergraph[s][t]['flowNoRebalancing'] = dict[key]['flow']\n if exogenous_G==False:\n tnet.G_supergraph[s][t]['t_k'] = travel_time(tnet,s,t, G_exo=exogenous_G)\n else:\n tnet.G_supergraph[s][t]['t_k'] = travel_time(tnet, s, t, G_exo=exogenous_G)\n\n\ndef solve_social_Julia(tnet, exogenous_G=False):\n # Save to json files\n nx2json(tnet.G, \"tmp/G.json\")\n nx2json(tnet.G_supergraph, \"tmp/G_supergraph.json\")\n nx2json(tnet.G_supergraph, \"tmp/exogenous_G.json\", exo=exogenous_G)\n\n js = json.dumps({str(k):v for k,v in tnet.g.items()})\n f = open(\"tmp/g.json\", \"w\")\n f.write(js)\n f.close()\n\n f = open(\"tmp/fcoeffs.json\", \"w\")\n f.write(str(tnet.fcoeffs))\n f.close()\n\n # Solve system-centric in julia\n shell(\"julia src/CARS.jl\", printOut=True)\n\n # Parse results back\n dict_G = json2dict(\"tmp/out.json\")\n juliaJson2nx(tnet, dict_G, exogenous_G=exogenous_G)\n # Get solve time\n f = open('tmp/solvetime.txt')\n line = f.readline()\n f.close()\n solvetime = float(line)\n shell(\"rm tmp/out.json\", printOut=False)\n shell(\"rm tmp/G.json\", printOut=False)\n shell(\"rm tmp/G_supergraph.json\", printOut=False)\n shell(\"rm tmp/exogenous_G.json\", printOut=False)\n shell(\"rm tmp/g.jsonn\", printOut=False)\n shell(\"rm tmp/fcoeffs.json\", printOut=False)\n shell(\"rm tmp/solvetime.txt\", printOut=False)\n\n return solvetime\n #TODO: add delete funtion of out json\n\n\n\n\ndef 
solve_social_altruistic_Julia(tnet, exogenous_G=False):\n # Save to json files\n nx2json(tnet.G, \"tmp/G.json\")\n nx2json(tnet.G_supergraph, \"tmp/G_supergraph.json\")\n if exogenous_G != False:\n nx2json(exogenous_G, \"tmp/exogenous_G.json\", exo=True)\n else:\n nx2json(tnet.G, \"tmp/exogenous_G.json\", exo=False)\n\n js = json.dumps({str(k):v for k,v in tnet.g.items()})\n f = open(\"tmp/g.json\", \"w\")\n f.write(js)\n f.close()\n\n f = open(\"tmp/fcoeffs.json\", \"w\")\n f.write(str(tnet.fcoeffs))\n f.close()\n\n # Solve system-centric in julia\n shell(\"julia src/CARS_altruistic.jl\", printOut=False)\n\n # Parse results back\n dict_G = json2dict(\"tmp/out.json\")\n juliaJson2nx(tnet, dict_G)\n\n\n\n'''\nimport cvxpy as cp\ndef solve_social_NLP(tnet, exogenous_G=False):\n\n # Build variables\n xc = {}\n for i,j in tnet.G_supergraph.edges():\n xc[(i,j)] = cp.Variable(name='xc('+str(i) + ','+ str(j) + \")\")\n\n # objective\n if exogenous_G != False:\n obj = 0\n for i,j in tnet.G_supergraph.edges():\n for n in range(len(tnet.fcoeffs)):\n obj += tnet.G_supergraph[i][j]['t_0']*xc[(i,j)]*tnet.fcoeffs[n]*cp.power(xc[(i,j)]+exogenous_G[i][j], n)\n else:\n obj = 0\n for i,j in tnet.G_supergraph.edges():\n for n in range(len(tnet.fcoeffs)):\n obj += tnet.G_supergraph[i][j]['t_0']*xc[(i,j)]*tnet.fcoeffs[n]*cp.power(xc[(i,j)], n)\n\n cp.Minimize(obj)\n # constraints\n\n\n cp.Problem(cp.Minimize(obj)).solve(verbose=True)\n\n print(xc.values())\n\n'''\n\ndef hist_flows(G, G_exo=True):\n if G_exo:\n norm_flows = [(G[i][j]['flow'] + G_exo[i][j]['flow']) / G[i][j]['capacity'] for i,j in G.edges()]\n else:\n norm_flows = [G[i][j]['flow'] / G[i][j]['capacity'] for i, j in G.edges()]\n #_ = plt.hist(norm_flows, bins='auto')\n #count, bins = np.histogram(norm_flows, bins=5)\n #print('bins:' + str(bins))\n print('max flow:' + str(round(max(norm_flows),2)))\n\n #fig, axs = plt.subplots(1, 1)\n #axs[0].hist(norm_flows, bins=5)\n #plt.show()\n",
"import src.tnet as tnet\nimport src.CARS as cars\nfrom src.utils import *\nimport numpy as np\nimport copy\nimport matplotlib as mpl\nfrom matplotlib import rc\nimport matplotlib.pyplot as plt\nrc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\nrc('text', usetex=True)\n\n\n\n#netFile, gFile, fcoeffs, tstamp, dir_out = tnet.get_network_parameters('Braess1')\nnetFile, gFile, fcoeffs, tstamp, dir_out = tnet.get_network_parameters('EMA', experiment_name='EMA_penRate_CARS')\n#netFile, gFile, fcoeffs, tstamp, dir_out = tnet.get_network_parameters('NYC_small')\n#netFile, gFile, fcoeffs, tstamp, dir_out = tnet.get_network_parameters('NYC_Uber_small', experiment_name='NYC_Uber_small_penRate_comparison')\nxa = 0.8\n\n\n\n\nprint('Penetration Rate Experiment')\nprint('------------------------------------------------------------------------------------')\nprint('PenRate \\t AMoD \\t Private \\t Total')\nprint('------------------------------------------------------------------------------------')\ncavsCost = []\nnoncavsCost = []\ntotCost = []\ncavsFlow = []\nnonCavsFlow = []\npedestrianFlow = []\nrebalancingFlow = []\nfor penetration_rate in np.linspace(0.01,0.99, 11):\n tNet_cavs = tnet.tNet(netFile=netFile, gFile=gFile, fcoeffs=fcoeffs)\n tNet_non_cavs = copy.deepcopy(tNet_cavs)\n g_cavs = tnet.perturbDemandConstant(tNet_cavs.g, constant=penetration_rate/37)\n g_non_cavs = tnet.perturbDemandConstant(tNet_cavs.g, constant=(1-penetration_rate)/37)\n tNet_cavs.set_g(g_cavs)\n tNet_non_cavs.set_g(g_non_cavs)\n tNet_cavs.build_supergraph()\n tNet_non_cavs.build_supergraph()\n\n it = []\n for i in range(7):\n if i==0:\n #tNet_non_cavs.solveMSA()\n cars.solve_CARS2(tNet_cavs, exogenous_G=False, fcoeffs=fcoeffs, rebalancing=False)\n # cars.solve_CARS(tNet_cavs, exogenous_G=False, fcoeffs=fcoeffs, xa=xa, rebalancing=False)\n else:\n #tNet_non_cavs.solveMSA(exogenous_G=tNet_cavs.G_supergraph)\n cars.solve_CARS2(tNet_cavs, exogenous_G=tNet_non_cavs.G_supergraph, fcoeffs=fcoeffs, rebalancing=False)\n #cars.solve_CARS(tNet_cavs, exogenous_G=tNet_non_cavs.G_supergraph, fcoeffs=fcoeffs, xa=xa, rebalancing=False)\n\n cars.supergraph2G(tNet_cavs)\n tNet_non_cavs.solveMSA(exogenous_G=tNet_cavs.G_supergraph)\n cars.G2supergraph(tNet_non_cavs)\n\n totalCost = tnet.get_totalTravelTime(tNet_non_cavs.G, fcoeffs, G_exogenous=tNet_cavs.G) + cars.get_totalTravelTime_without_Rebalancing(tNet_cavs, G_exogenous=tNet_non_cavs.G)\n it.append(totalCost)\n\n if penetration_rate>.48 and penetration_rate < 0.52:\n mkdir_n('results/' + dir_out)\n plt.figure(num=None, figsize=(3.8, 5))\n plt.plot(it)\n plt.xlabel('Iteration')\n plt.ylabel('Total Objective')\n plt.tight_layout()\n plt.savefig('results/' + dir_out + '/iteration.png')\n\n\n\n cavCost = cars.get_totalTravelTime_without_Rebalancing(tnet=tNet_cavs, G_exogenous=tNet_non_cavs.G_supergraph)\n nonCavCost = tnet.get_totalTravelTime(tNet_non_cavs.G_supergraph, fcoeffs=fcoeffs, G_exogenous=tNet_cavs.G_supergraph)\n total_Cost = cavCost+nonCavCost\n\n cavsCost.append(cavCost/tNet_cavs.totalDemand*60)\n noncavsCost.append(nonCavCost/tNet_non_cavs.totalDemand*60)\n totCost.append(total_Cost/(tNet_cavs.totalDemand+tNet_non_cavs.totalDemand)*60) #TODO: calculate the cost with rebalancing\n\n print(str(round(penetration_rate, 2)) + '\\t' + str(round(cavCost/tNet_cavs.totalDemand*60, 2)) + '\\t' + str(\n round(nonCavCost/tNet_non_cavs.totalDemand*60, 2)) + '\\t' + str(round(total_Cost/(tNet_cavs.totalDemand+tNet_non_cavs.totalDemand)*60, 2)))\n\n 
cavsFlow.append(cars.get_amod_flow(tNet_cavs))\n nonCavsFlow.append(tnet.get_total_G_flow(tNet_non_cavs.G))\n pedestrianFlow.append(cars.get_pedestrian_flow(tNet_cavs))\n rebalancingFlow.append(cars.get_rebalancing_flow(tNet_cavs))\n\n del tNet_cavs, tNet_non_cavs\n\n\nmkdir_n('results/' + dir_out)\nmpl.rc('font',**{'family':'Times New Roman', 'size': 12})\nplt.figure(num=None, figsize=(3.8, 5))\nplt.plot(list(np.linspace(0.0001,1, 11)), totCost, label='Total')\nplt.plot(list(np.linspace(0.0001,1, 11)), cavsCost, label='AMoD')\nplt.plot(list(np.linspace(0.0001,1, 11)), noncavsCost, label='Private Vehicles')\nplt.legend()\nplt.xlabel('Penetration Rate')\nplt.ylabel('Avg. Travel Time (min)')\nplt.tight_layout()\nplt.savefig('results/' + dir_out +'/costs.png')\n\n\nplt.figure(num=None, figsize=(5, 2.8))\nwidth = 0.5\nind = np.arange(11)\np1 = plt.bar(ind, nonCavsFlow, width)\np2 = plt.bar(ind, cavsFlow, width,\n bottom=nonCavsFlow)\np3 = plt.bar(ind, rebalancingFlow, width,\n bottom=[cavsFlow[i] + nonCavsFlow[i] for i in range(len(cavsFlow))])\np4 = plt.bar(ind, pedestrianFlow, width,\n bottom=[cavsFlow[i] + nonCavsFlow[i] + rebalancingFlow[i] for i in range(len(cavsFlow))])\nplt.ylabel('Flow')\nplt.xlabel('Penetration Rate')\nplt.legend((p1[0], p2[0], p3[0], p4[0]), ('Private Vehicles', 'AMoDs', 'Rebalancing', 'Pedestrian'))\nplt.tight_layout()\n\n#plt.show()\n\n\nplt.savefig('results/' + dir_out +'/flow_composition.png')\nplt.show()\n\n\n#TODO: check social solution and if they are solving for their own fleet"
] | [
[
"matplotlib.pyplot.tight_layout",
"pandas.read_csv",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.savefig"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.tight_layout",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.tight_layout",
"numpy.linspace",
"numpy.arange",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.rc",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
FedePeralta/ASVs_Deep_Reinforcement_Learning_with_CNNs | [
"23b9b181499a4b06f2ca2951c002359c1959e727"
] | [
"utils/Paralell_Experience_Generator.py"
] | [
"import random\nimport torch\nimport sys\nfrom contextlib import closing\nfrom torch.multiprocessing import Pool\nfrom random import randint\nfrom exploration_strategies.OUNoise import OrnsteinUhlenbeckActionNoise\n\n\nclass Parallel_Experience_Generator(object):\n \"\"\" Plays n episode in parallel using a fixed agent. \"\"\"\n\n def __init__(self, environment, policy, seed, hyperparameters, action_size, use_GPU=False, action_choice_output_columns=None):\n self.use_GPU = use_GPU\n self.environment = environment\n self.policy = policy\n self.action_choice_output_columns = action_choice_output_columns\n self.hyperparameters = hyperparameters\n self.noise = OrnsteinUhlenbeckActionNoise(mu=[0 for _ in range(self.environment.action_shape[1])],\n sigma=0.15,\n theta=.01,\n dt=1e-2,\n seed=seed)\n\n def play_n_episodes(self, n):\n \"\"\"Plays n episodes in parallel using the fixed policy and returns the data\"\"\"\n\n with closing(Pool(processes=n)) as pool:\n results = pool.map(self, range(n))\n pool.terminate()\n\n states_for_all_episodes = [episode[0] for episode in results]\n actions_for_all_episodes = [episode[1] for episode in results]\n rewards_for_all_episodes = [episode[2] for episode in results]\n\n return states_for_all_episodes, actions_for_all_episodes, rewards_for_all_episodes\n\n def play_1_episode(self, epsilon_exploration):\n \"\"\"Plays 1 episode using the fixed policy and returns the data\"\"\"\n\n state = self.reset_game()\n done = False\n episode_states = []\n episode_actions = []\n episode_rewards = []\n while not done:\n action = self.pick_action(self.policy, state)\n next_state, reward, done, _ = self.environment.step(action)\n episode_states.append(state)\n episode_actions.append(action)\n episode_rewards.append(reward)\n state = next_state\n return episode_states, episode_actions, episode_rewards\n\n def reset_game(self):\n \"\"\"Resets the game environment so it is ready to play a new episode\"\"\"\n seed = randint(0, sys.maxsize)\n torch.manual_seed(seed) # Need to do this otherwise each worker generates same experience\n state = self.environment.reset()\n return state\n\n def pick_action(self, policy, state):\n\n state = torch.from_numpy(state).float().unsqueeze(0)\n actor_output = policy(state)\n\n if self.action_choice_output_columns is not None:\n actor_output = actor_output[:, self.action_choice_output_columns]\n\n action_distribution = self.create_distributions(policy, self.environment.action_size)\n action = action_distribution.sample().cpu()\n\n action += torch.Tensor(self.noise())\n\n return action.detach().numpy()\n\n @staticmethod\n def create_distributions(policy_output, number_of_actions):\n\n means = policy_output[:, :number_of_actions].squeeze(0)\n stds = policy_output[:, number_of_actions:].squeeze(0)\n\n action_distribution = torch.distributions.normal.Normal(means.squeeze(0), torch.abs(stds))\n\n return action_distribution"
] | [
[
"torch.manual_seed",
"torch.abs",
"torch.from_numpy",
"torch.multiprocessing.Pool"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AbdulHoffmann/carla_carissma | [
"9444dce96954c546333d5aecc92a06c3bfd19aa5",
"9444dce96954c546333d5aecc92a06c3bfd19aa5",
"9444dce96954c546333d5aecc92a06c3bfd19aa5",
"8d382769ffa02a6c61a22c57160285505f5ff0a4",
"8d382769ffa02a6c61a22c57160285505f5ff0a4",
"8d382769ffa02a6c61a22c57160285505f5ff0a4",
"8d382769ffa02a6c61a22c57160285505f5ff0a4",
"8d382769ffa02a6c61a22c57160285505f5ff0a4",
"8d382769ffa02a6c61a22c57160285505f5ff0a4",
"9444dce96954c546333d5aecc92a06c3bfd19aa5",
"8d382769ffa02a6c61a22c57160285505f5ff0a4",
"9444dce96954c546333d5aecc92a06c3bfd19aa5",
"8d382769ffa02a6c61a22c57160285505f5ff0a4",
"9444dce96954c546333d5aecc92a06c3bfd19aa5",
"8d382769ffa02a6c61a22c57160285505f5ff0a4"
] | [
"PythonAPI/carissma_project/lib/python3.5/site-packages/mpl_toolkits/axes_grid/__init__.py",
"PythonAPI/carissma_project/lib/python3.5/site-packages/matplotlib/tri/tripcolor.py",
"PythonAPI/carissma_project/lib/python3.5/site-packages/matplotlib/tests/test_style.py",
"PythonAPI/carissma_project/lib/python3.5/site-packages/pandas/tests/indexes/timedeltas/test_setops.py",
"PythonAPI/carissma_project/PID_apply_static_sp.py",
"PythonAPI/carissma_project/lib/python3.5/site-packages/pandas/tests/indexes/timedeltas/test_timedelta.py",
"PythonAPI/carissma_project/lib/python3.5/site-packages/pandas/util/_test_decorators.py",
"PythonAPI/carissma_project/lib/python3.5/site-packages/pandas/tests/indexing/test_indexing.py",
"PythonAPI/carissma_project/lib/python3.5/site-packages/pandas/tests/io/parser/test_parse_dates.py",
"PythonAPI/carissma_project/lib/python3.5/site-packages/matplotlib/bezier.py",
"PythonAPI/carissma_project/lib/python3.5/site-packages/pandas/tests/arrays/categorical/test_repr.py",
"PythonAPI/carissma_project/lib/python3.5/site-packages/matplotlib/tests/test_backend_svg.py",
"PythonAPI/carissma_project/lib/python3.5/site-packages/pandas/tests/indexes/timedeltas/test_timedelta_range.py",
"PythonAPI/carissma_project/lib/python3.5/site-packages/matplotlib/tests/test_colorbar.py",
"PythonAPI/carissma_project/lib/python3.5/site-packages/pandas/plotting/_misc.py"
] | [
"from . import axes_size as Size\nfrom .axes_divider import Divider, SubplotDivider, LocatableAxes, \\\n make_axes_locatable\nfrom .axes_grid import Grid, ImageGrid, AxesGrid\n#from axes_divider import make_axes_locatable\nfrom matplotlib.cbook import warn_deprecated\nwarn_deprecated(since='2.1',\n name='mpl_toolkits.axes_grid',\n alternative='mpl_toolkits.axes_grid1 and'\n ' mpl_toolkits.axisartist, which provide'\n ' the same functionality',\n obj_type='module')\n",
"import numpy as np\n\nfrom matplotlib.collections import PolyCollection, TriMesh\nfrom matplotlib.colors import Normalize\nfrom matplotlib.tri.triangulation import Triangulation\n\n\ndef tripcolor(ax, *args, alpha=1.0, norm=None, cmap=None, vmin=None,\n vmax=None, shading='flat', facecolors=None, **kwargs):\n \"\"\"\n Create a pseudocolor plot of an unstructured triangular grid.\n\n The triangulation can be specified in one of two ways; either::\n\n tripcolor(triangulation, ...)\n\n where triangulation is a :class:`matplotlib.tri.Triangulation`\n object, or\n\n ::\n\n tripcolor(x, y, ...)\n tripcolor(x, y, triangles, ...)\n tripcolor(x, y, triangles=triangles, ...)\n tripcolor(x, y, mask=mask, ...)\n tripcolor(x, y, triangles, mask=mask, ...)\n\n in which case a Triangulation object will be created. See\n :class:`~matplotlib.tri.Triangulation` for a explanation of these\n possibilities.\n\n The next argument must be *C*, the array of color values, either\n one per point in the triangulation if color values are defined at\n points, or one per triangle in the triangulation if color values\n are defined at triangles. If there are the same number of points\n and triangles in the triangulation it is assumed that color\n values are defined at points; to force the use of color values at\n triangles use the kwarg ``facecolors=C`` instead of just ``C``.\n\n *shading* may be 'flat' (the default) or 'gouraud'. If *shading*\n is 'flat' and C values are defined at points, the color values\n used for each triangle are from the mean C of the triangle's\n three points. If *shading* is 'gouraud' then color values must be\n defined at points.\n\n The remaining kwargs are the same as for\n :meth:`~matplotlib.axes.Axes.pcolor`.\n \"\"\"\n if shading not in ['flat', 'gouraud']:\n raise ValueError(\"shading must be one of ['flat', 'gouraud'] \"\n \"not {0}\".format(shading))\n\n tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)\n\n # C is the colors array defined at either points or faces (i.e. 
triangles).\n # If facecolors is None, C are defined at points.\n # If facecolors is not None, C are defined at faces.\n if facecolors is not None:\n C = facecolors\n else:\n C = np.asarray(args[0])\n\n # If there are a different number of points and triangles in the\n # triangulation, can omit facecolors kwarg as it is obvious from\n # length of C whether it refers to points or faces.\n # Do not do this for gouraud shading.\n if (facecolors is None and len(C) == len(tri.triangles) and\n len(C) != len(tri.x) and shading != 'gouraud'):\n facecolors = C\n\n # Check length of C is OK.\n if ((facecolors is None and len(C) != len(tri.x)) or\n (facecolors is not None and len(C) != len(tri.triangles))):\n raise ValueError('Length of color values array must be the same '\n 'as either the number of triangulation points '\n 'or triangles')\n\n # Handling of linewidths, shading, edgecolors and antialiased as\n # in Axes.pcolor\n linewidths = (0.25,)\n if 'linewidth' in kwargs:\n kwargs['linewidths'] = kwargs.pop('linewidth')\n kwargs.setdefault('linewidths', linewidths)\n\n edgecolors = 'none'\n if 'edgecolor' in kwargs:\n kwargs['edgecolors'] = kwargs.pop('edgecolor')\n ec = kwargs.setdefault('edgecolors', edgecolors)\n\n if 'antialiased' in kwargs:\n kwargs['antialiaseds'] = kwargs.pop('antialiased')\n if 'antialiaseds' not in kwargs and ec.lower() == \"none\":\n kwargs['antialiaseds'] = False\n\n if shading == 'gouraud':\n if facecolors is not None:\n raise ValueError('Gouraud shading does not support the use '\n 'of facecolors kwarg')\n if len(C) != len(tri.x):\n raise ValueError('For gouraud shading, the length of color '\n 'values array must be the same as the '\n 'number of triangulation points')\n collection = TriMesh(tri, **kwargs)\n else:\n # Vertices of triangles.\n maskedTris = tri.get_masked_triangles()\n verts = np.stack((tri.x[maskedTris], tri.y[maskedTris]), axis=-1)\n\n # Color values.\n if facecolors is None:\n # One color per triangle, the mean of the 3 vertex color values.\n C = C[maskedTris].mean(axis=1)\n elif tri.mask is not None:\n # Remove color values of masked triangles.\n C = C.compress(1-tri.mask)\n\n collection = PolyCollection(verts, **kwargs)\n\n collection.set_alpha(alpha)\n collection.set_array(C)\n if norm is not None and not isinstance(norm, Normalize):\n raise ValueError(\"'norm' must be an instance of 'Normalize'\")\n collection.set_cmap(cmap)\n collection.set_norm(norm)\n if vmin is not None or vmax is not None:\n collection.set_clim(vmin, vmax)\n else:\n collection.autoscale_None()\n ax.grid(False)\n\n minx = tri.x.min()\n maxx = tri.x.max()\n miny = tri.y.min()\n maxy = tri.y.max()\n corners = (minx, miny), (maxx, maxy)\n ax.update_datalim(corners)\n ax.autoscale_view()\n ax.add_collection(collection)\n return collection\n",
"from collections import OrderedDict\nfrom contextlib import contextmanager\nimport gc\nimport os\nfrom pathlib import Path\nimport shutil\nfrom tempfile import TemporaryDirectory\nimport warnings\n\nimport pytest\n\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt, style\nfrom matplotlib.style.core import USER_LIBRARY_PATHS, STYLE_EXTENSION\n\n\nPARAM = 'image.cmap'\nVALUE = 'pink'\nDUMMY_SETTINGS = {PARAM: VALUE}\n\n\n@contextmanager\ndef temp_style(style_name, settings=None):\n \"\"\"Context manager to create a style sheet in a temporary directory.\"\"\"\n if not settings:\n settings = DUMMY_SETTINGS\n temp_file = '%s.%s' % (style_name, STYLE_EXTENSION)\n try:\n with TemporaryDirectory() as tmpdir:\n # Write style settings to file in the tmpdir.\n Path(tmpdir, temp_file).write_text(\n \"\\n\".join(\"{}: {}\".format(k, v) for k, v in settings.items()))\n # Add tmpdir to style path and reload so we can access this style.\n USER_LIBRARY_PATHS.append(tmpdir)\n style.reload_library()\n yield\n finally:\n style.reload_library()\n\n\ndef test_invalid_rc_warning_includes_filename():\n SETTINGS = {'foo': 'bar'}\n basename = 'basename'\n with warnings.catch_warnings(record=True) as warns:\n with temp_style(basename, SETTINGS):\n # style.reload_library() in temp_style() triggers the warning\n pass\n\n for w in warns:\n assert basename in str(w.message)\n\n\ndef test_available():\n with temp_style('_test_', DUMMY_SETTINGS):\n assert '_test_' in style.available\n\n\ndef test_use():\n mpl.rcParams[PARAM] = 'gray'\n with temp_style('test', DUMMY_SETTINGS):\n with style.context('test'):\n assert mpl.rcParams[PARAM] == VALUE\n\n\[email protected]\ndef test_use_url():\n with temp_style('test', DUMMY_SETTINGS):\n with style.context('https://gist.github.com/adrn/6590261/raw'):\n assert mpl.rcParams['axes.facecolor'] == \"#adeade\"\n\n\ndef test_context():\n mpl.rcParams[PARAM] = 'gray'\n with temp_style('test', DUMMY_SETTINGS):\n with style.context('test'):\n assert mpl.rcParams[PARAM] == VALUE\n # Check that this value is reset after the exiting the context.\n assert mpl.rcParams[PARAM] == 'gray'\n\n\ndef test_context_with_dict():\n original_value = 'gray'\n other_value = 'blue'\n mpl.rcParams[PARAM] = original_value\n with style.context({PARAM: other_value}):\n assert mpl.rcParams[PARAM] == other_value\n assert mpl.rcParams[PARAM] == original_value\n\n\ndef test_context_with_dict_after_namedstyle():\n # Test dict after style name where dict modifies the same parameter.\n original_value = 'gray'\n other_value = 'blue'\n mpl.rcParams[PARAM] = original_value\n with temp_style('test', DUMMY_SETTINGS):\n with style.context(['test', {PARAM: other_value}]):\n assert mpl.rcParams[PARAM] == other_value\n assert mpl.rcParams[PARAM] == original_value\n\n\ndef test_context_with_dict_before_namedstyle():\n # Test dict before style name where dict modifies the same parameter.\n original_value = 'gray'\n other_value = 'blue'\n mpl.rcParams[PARAM] = original_value\n with temp_style('test', DUMMY_SETTINGS):\n with style.context([{PARAM: other_value}, 'test']):\n assert mpl.rcParams[PARAM] == VALUE\n assert mpl.rcParams[PARAM] == original_value\n\n\ndef test_context_with_union_of_dict_and_namedstyle():\n # Test dict after style name where dict modifies the a different parameter.\n original_value = 'gray'\n other_param = 'text.usetex'\n other_value = True\n d = {other_param: other_value}\n mpl.rcParams[PARAM] = original_value\n mpl.rcParams[other_param] = (not other_value)\n with temp_style('test', 
DUMMY_SETTINGS):\n with style.context(['test', d]):\n assert mpl.rcParams[PARAM] == VALUE\n assert mpl.rcParams[other_param] == other_value\n assert mpl.rcParams[PARAM] == original_value\n assert mpl.rcParams[other_param] == (not other_value)\n\n\ndef test_context_with_badparam():\n original_value = 'gray'\n other_value = 'blue'\n d = OrderedDict([(PARAM, original_value), ('badparam', None)])\n with style.context({PARAM: other_value}):\n assert mpl.rcParams[PARAM] == other_value\n x = style.context([d])\n with pytest.raises(KeyError):\n with x:\n pass\n assert mpl.rcParams[PARAM] == other_value\n\n\[email protected]('equiv_styles',\n [('mpl20', 'default'),\n ('mpl15', 'classic')],\n ids=['mpl20', 'mpl15'])\ndef test_alias(equiv_styles):\n rc_dicts = []\n for sty in equiv_styles:\n with style.context(sty):\n rc_dicts.append(dict(mpl.rcParams))\n\n rc_base = rc_dicts[0]\n for nm, rc in zip(equiv_styles[1:], rc_dicts[1:]):\n assert rc_base == rc\n\n\ndef test_xkcd_no_cm():\n assert mpl.rcParams[\"path.sketch\"] is None\n plt.xkcd()\n assert mpl.rcParams[\"path.sketch\"] == (1, 100, 2)\n gc.collect()\n assert mpl.rcParams[\"path.sketch\"] == (1, 100, 2)\n\n\ndef test_xkcd_cm():\n assert mpl.rcParams[\"path.sketch\"] is None\n with plt.xkcd():\n assert mpl.rcParams[\"path.sketch\"] == (1, 100, 2)\n assert mpl.rcParams[\"path.sketch\"] is None\n",
"import numpy as np\n\nimport pandas as pd\nfrom pandas import Int64Index, TimedeltaIndex, timedelta_range\nimport pandas.util.testing as tm\n\n\nclass TestTimedeltaIndex(object):\n\n def test_union(self):\n\n i1 = timedelta_range('1day', periods=5)\n i2 = timedelta_range('3day', periods=5)\n result = i1.union(i2)\n expected = timedelta_range('1day', periods=7)\n tm.assert_index_equal(result, expected)\n\n i1 = Int64Index(np.arange(0, 20, 2))\n i2 = timedelta_range(start='1 day', periods=10, freq='D')\n i1.union(i2) # Works\n i2.union(i1) # Fails with \"AttributeError: can't set attribute\"\n\n def test_union_coverage(self):\n\n idx = TimedeltaIndex(['3d', '1d', '2d'])\n ordered = TimedeltaIndex(idx.sort_values(), freq='infer')\n result = ordered.union(idx)\n tm.assert_index_equal(result, ordered)\n\n result = ordered[:0].union(ordered)\n tm.assert_index_equal(result, ordered)\n assert result.freq == ordered.freq\n\n def test_union_bug_1730(self):\n\n rng_a = timedelta_range('1 day', periods=4, freq='3H')\n rng_b = timedelta_range('1 day', periods=4, freq='4H')\n\n result = rng_a.union(rng_b)\n exp = TimedeltaIndex(sorted(set(list(rng_a)) | set(list(rng_b))))\n tm.assert_index_equal(result, exp)\n\n def test_union_bug_1745(self):\n\n left = TimedeltaIndex(['1 day 15:19:49.695000'])\n right = TimedeltaIndex(['2 day 13:04:21.322000',\n '1 day 15:27:24.873000',\n '1 day 15:31:05.350000'])\n\n result = left.union(right)\n exp = TimedeltaIndex(sorted(set(list(left)) | set(list(right))))\n tm.assert_index_equal(result, exp)\n\n def test_union_bug_4564(self):\n\n left = timedelta_range(\"1 day\", \"30d\")\n right = left + pd.offsets.Minute(15)\n\n result = left.union(right)\n exp = TimedeltaIndex(sorted(set(list(left)) | set(list(right))))\n tm.assert_index_equal(result, exp)\n\n def test_intersection_bug_1708(self):\n index_1 = timedelta_range('1 day', periods=4, freq='h')\n index_2 = index_1 + pd.offsets.Hour(5)\n\n result = index_1 & index_2\n assert len(result) == 0\n\n index_1 = timedelta_range('1 day', periods=4, freq='h')\n index_2 = index_1 + pd.offsets.Hour(1)\n\n result = index_1 & index_2\n expected = timedelta_range('1 day 01:00:00', periods=3, freq='h')\n tm.assert_index_equal(result, expected)\n",
"#!/usr/bin/env python\n# file trying to apply and test the pid controller on carla.\n\nimport glob\nimport os\nimport sys\nimport time\nimport matplotlib.pyplot as plt\nfrom PID_controller import PID\nimport numpy as np\nimport speed_profile_reader as spr\n\ntry:\n sys.path.append(glob.glob('../**/*%d.%d-%s.egg' % (\n sys.version_info.major,\n sys.version_info.minor,\n 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])\nexcept IndexError:\n pass\n\nimport carla\n\nimport random\nimport time\n\n\nclass TestData:\n\n def __init__(self, total_duration, time_increment):\n self._iter_num = 0\n self.time = np.empty([int(total_duration / time_increment) + 1, 1])\n self.setpoint = np.empty([int(total_duration / time_increment) + 1, 1])\n self.actual_velocity = np.empty([int(total_duration / time_increment) + 1, 1])\n self.error = np.empty([int(total_duration / time_increment) + 1, 1])\n\n def append_data(self, t, sp, vel, error):\n self.time[self._iter_num] = t\n self.setpoint[self._iter_num] = sp\n self.actual_velocity[self._iter_num] = vel\n self.error[self._iter_num] = error\n self._iter_num+=1\n\n def plot(self):\n plt.figure()\n plt.plot(self.time, self.setpoint)\n plt.plot(self.time, self.actual_velocity)\n plt.xlabel('Time (s)')\n plt.ylabel('Velocity (m/s)')\n plt.title(\"PID Result\")\n plt.figure()\n plt.plot(self.time, self.error, 'r--', label='error', alpha=0.75, linewidth=0.5)\n plt.plot(self.time, np.zeros(len(self.time)), 'k--', linewidth=0.5)\n plt.title(\"Controller Error\")\n plt.show()\n\nclass DataInit:\n K = {\n \"Kp\": 0.055734,\n \"Ki\": 0.0114169,\n \"Kd\": .00006\n\n # For 10 m/s\n # \"Kp\": 0.055734,\n # \"Ki\": 0.0130169,\n # \"Kd\": .000006\n\n # \"Kp\": 1,\n # \"Ki\": 0.0112,\n # \"Kd\": 0.000006\n }\n total_duration = 20\n sampling_period = 0.025\n\ndef main():\n\n actor_list = []\n verboseIsEnabled = None\n try:\n \"\"\"\n Section for starting the client and connecting to the server\n \"\"\"\n client = carla.Client('localhost', 2000)\n client.set_timeout(2.0)\n\n for arg in sys.argv:\n if (arg == '--verbose'):\n verboseIsEnabled = True\n\n if verboseIsEnabled:\n print('client version: %s' % client.get_client_version())\n print('server version: %s' % client.get_server_version())\n print('client to server connection status: {}'.format(client.get_server_version()))\n\n print('Retrieving the world data from server...')\n\n world = client.get_world()\n if verboseIsEnabled:\n print('{} \\n'.format(world))\n\n \"\"\"\n Section for retrieving the blueprints and spawn the actors\n \"\"\"\n blueprint_library = world.get_blueprint_library()\n if verboseIsEnabled:\n print('\\nRetrieving CARLA blueprint library...')\n print('\\nobject: %s\\n\\nblueprint methods: %s\\n\\nblueprint list:' % (type(blueprint_library), dir(blueprint_library)) )\n for blueprint in blueprint_library:\n print(blueprint)\n\n audi_blueprint = blueprint_library.find('vehicle.audi.tt')\n print('\\n%s\\n' % audi_blueprint)\n\n color = '191,191,191'\n audi_blueprint.set_attribute('color', color)\n\n transform = carla.Transform(\n\t\t\tcarla.Location(\n x=10.5, y=-1.8,\n z=38.5),carla.Rotation(yaw=0.0)\n\t\t)\n\n vehicleEgo = world.spawn_actor(audi_blueprint, transform)\n actor_list.append(vehicleEgo)\n print('created %s' % vehicleEgo.type_id)\n\n color = random.choice(audi_blueprint.get_attribute('color').recommended_values)\n audi_blueprint.set_attribute('color', color)\n\n \"\"\"\n Section for initializing the PID testing\n \"\"\"\n user_input_sp = None\n while (not isinstance(user_input_sp, int)) and 
(not isinstance(user_input_sp, float)):\n user_input_sp = input('Enter the desired Setpoint:\\n')\n data = TestData(DataInit.total_duration, DataInit.sampling_period)\n start = time.time()\n\n print('\\nStarting test:\\n\\n' + 'Time(s) current_vel(m/s) setpoint_vel(m/s) throttle(%) pid_demand')\n time.sleep(2.5)\n print('.................................................................\\n')\n time.sleep(1)\n\n # raise SystemExit\n\n p = PID(\n DataInit.K['Kp'], \n DataInit.K['Ki'],\n DataInit.K['Kd']\n )\n p.setPoint(user_input_sp)\n p.Integrator_min = -5\n p.Integrator_max = 40\n pid = 0\n for _ in range(int(DataInit.total_duration / DataInit.sampling_period) + 1):\n measurement_value = vehicleEgo.get_velocity().x\n vehicleEgo.apply_control(carla.VehicleControl(pid)) if 1 > pid > 0 else vehicleEgo.apply_control(carla.VehicleControl(1))\n if 0 > pid: vehicleEgo.apply_control(carla.VehicleControl(brake=abs(pid)))\n pid = p.update(measurement_value)\n data.append_data(round(time.time() - start, 2), p.getSetPoint(), round(vehicleEgo.get_velocity().x, 5), p.getError())\n time.sleep(DataInit.sampling_period)\n\n print('%0.3f\\t%0.2f\\t\\t\\t%0.2f\\t\\t%0.2f\\t%0.2f' % (time.time() - start,\n vehicleEgo.get_velocity().x,\n p.set_point,\n vehicleEgo.get_control().throttle,\n pid))\n\n data.plot()\n print('\\nError Mean (Steady State):\\n' + \n str(round(np.absolute(np.mean(data.error[data.error.shape[0]/2:data.error.shape[0]])), 5)*100) + \n '%\\n')\n\n finally:\n print('destroying actors')\n for actor in actor_list:\n actor.destroy()\n print('done.')\n\nif __name__ == '__main__':\n\n main()\n",
"from datetime import timedelta\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n DataFrame, Index, Int64Index, Series, Timedelta, TimedeltaIndex,\n date_range, timedelta_range)\nimport pandas.util.testing as tm\nfrom pandas.util.testing import (\n assert_almost_equal, assert_index_equal, assert_series_equal)\n\nfrom ..datetimelike import DatetimeLike\n\nrandn = np.random.randn\n\n\nclass TestTimedeltaIndex(DatetimeLike):\n _holder = TimedeltaIndex\n\n def setup_method(self, method):\n self.indices = dict(index=tm.makeTimedeltaIndex(10))\n self.setup_indices()\n\n def create_index(self):\n return pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)\n\n def test_numeric_compat(self):\n # Dummy method to override super's version; this test is now done\n # in test_arithmetic.py\n pass\n\n def test_shift(self):\n pass # this is handled in test_arithmetic.py\n\n def test_pickle_compat_construction(self):\n pass\n\n def test_fillna_timedelta(self):\n # GH 11343\n idx = pd.TimedeltaIndex(['1 day', pd.NaT, '3 day'])\n\n exp = pd.TimedeltaIndex(['1 day', '2 day', '3 day'])\n tm.assert_index_equal(idx.fillna(pd.Timedelta('2 day')), exp)\n\n exp = pd.TimedeltaIndex(['1 day', '3 hour', '3 day'])\n idx.fillna(pd.Timedelta('3 hour'))\n\n exp = pd.Index(\n [pd.Timedelta('1 day'), 'x', pd.Timedelta('3 day')], dtype=object)\n tm.assert_index_equal(idx.fillna('x'), exp)\n\n @pytest.mark.parametrize(\"sort\", [None, False])\n def test_difference_freq(self, sort):\n # GH14323: Difference of TimedeltaIndex should not preserve frequency\n\n index = timedelta_range(\"0 days\", \"5 days\", freq=\"D\")\n\n other = timedelta_range(\"1 days\", \"4 days\", freq=\"D\")\n expected = TimedeltaIndex([\"0 days\", \"5 days\"], freq=None)\n idx_diff = index.difference(other, sort)\n tm.assert_index_equal(idx_diff, expected)\n tm.assert_attr_equal('freq', idx_diff, expected)\n\n other = timedelta_range(\"2 days\", \"5 days\", freq=\"D\")\n idx_diff = index.difference(other, sort)\n expected = TimedeltaIndex([\"0 days\", \"1 days\"], freq=None)\n tm.assert_index_equal(idx_diff, expected)\n tm.assert_attr_equal('freq', idx_diff, expected)\n\n @pytest.mark.parametrize(\"sort\", [None, False])\n def test_difference_sort(self, sort):\n\n index = pd.TimedeltaIndex([\"5 days\", \"3 days\", \"2 days\", \"4 days\",\n \"1 days\", \"0 days\"])\n\n other = timedelta_range(\"1 days\", \"4 days\", freq=\"D\")\n idx_diff = index.difference(other, sort)\n\n expected = TimedeltaIndex([\"5 days\", \"0 days\"], freq=None)\n\n if sort is None:\n expected = expected.sort_values()\n\n tm.assert_index_equal(idx_diff, expected)\n tm.assert_attr_equal('freq', idx_diff, expected)\n\n other = timedelta_range(\"2 days\", \"5 days\", freq=\"D\")\n idx_diff = index.difference(other, sort)\n expected = TimedeltaIndex([\"1 days\", \"0 days\"], freq=None)\n\n if sort is None:\n expected = expected.sort_values()\n\n tm.assert_index_equal(idx_diff, expected)\n tm.assert_attr_equal('freq', idx_diff, expected)\n\n def test_isin(self):\n\n index = tm.makeTimedeltaIndex(4)\n result = index.isin(index)\n assert result.all()\n\n result = index.isin(list(index))\n assert result.all()\n\n assert_almost_equal(index.isin([index[2], 5]),\n np.array([False, False, True, False]))\n\n def test_factorize(self):\n idx1 = TimedeltaIndex(['1 day', '1 day', '2 day', '2 day', '3 day',\n '3 day'])\n\n exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)\n exp_idx = TimedeltaIndex(['1 day', '2 day', '3 day'])\n\n arr, idx = idx1.factorize()\n 
tm.assert_numpy_array_equal(arr, exp_arr)\n tm.assert_index_equal(idx, exp_idx)\n\n arr, idx = idx1.factorize(sort=True)\n tm.assert_numpy_array_equal(arr, exp_arr)\n tm.assert_index_equal(idx, exp_idx)\n\n # freq must be preserved\n idx3 = timedelta_range('1 day', periods=4, freq='s')\n exp_arr = np.array([0, 1, 2, 3], dtype=np.intp)\n arr, idx = idx3.factorize()\n tm.assert_numpy_array_equal(arr, exp_arr)\n tm.assert_index_equal(idx, idx3)\n\n def test_join_self(self, join_type):\n index = timedelta_range('1 day', periods=10)\n joined = index.join(index, how=join_type)\n tm.assert_index_equal(index, joined)\n\n def test_does_not_convert_mixed_integer(self):\n df = tm.makeCustomDataframe(10, 10,\n data_gen_f=lambda *args, **kwargs: randn(),\n r_idx_type='i', c_idx_type='td')\n str(df)\n\n cols = df.columns.join(df.index, how='outer')\n joined = cols.join(df.columns)\n assert cols.dtype == np.dtype('O')\n assert cols.dtype == joined.dtype\n tm.assert_index_equal(cols, joined)\n\n def test_sort_values(self):\n\n idx = TimedeltaIndex(['4d', '1d', '2d'])\n\n ordered = idx.sort_values()\n assert ordered.is_monotonic\n\n ordered = idx.sort_values(ascending=False)\n assert ordered[::-1].is_monotonic\n\n ordered, dexer = idx.sort_values(return_indexer=True)\n assert ordered.is_monotonic\n\n tm.assert_numpy_array_equal(dexer, np.array([1, 2, 0]),\n check_dtype=False)\n\n ordered, dexer = idx.sort_values(return_indexer=True, ascending=False)\n assert ordered[::-1].is_monotonic\n\n tm.assert_numpy_array_equal(dexer, np.array([0, 2, 1]),\n check_dtype=False)\n\n def test_get_duplicates(self):\n idx = TimedeltaIndex(['1 day', '2 day', '2 day', '3 day', '3day',\n '4day'])\n\n with tm.assert_produces_warning(FutureWarning):\n # Deprecated - see GH20239\n result = idx.get_duplicates()\n\n ex = TimedeltaIndex(['2 day', '3day'])\n tm.assert_index_equal(result, ex)\n\n def test_argmin_argmax(self):\n idx = TimedeltaIndex(['1 day 00:00:05', '1 day 00:00:01',\n '1 day 00:00:02'])\n assert idx.argmin() == 1\n assert idx.argmax() == 0\n\n def test_misc_coverage(self):\n\n rng = timedelta_range('1 day', periods=5)\n result = rng.groupby(rng.days)\n assert isinstance(list(result.values())[0][0], Timedelta)\n\n idx = TimedeltaIndex(['3d', '1d', '2d'])\n assert not idx.equals(list(idx))\n\n non_td = Index(list('abc'))\n assert not idx.equals(list(non_td))\n\n def test_map(self):\n # test_map_dictlike generally tests\n\n rng = timedelta_range('1 day', periods=10)\n\n f = lambda x: x.days\n result = rng.map(f)\n exp = Int64Index([f(x) for x in rng])\n tm.assert_index_equal(result, exp)\n\n def test_pass_TimedeltaIndex_to_index(self):\n\n rng = timedelta_range('1 days', '10 days')\n idx = Index(rng, dtype=object)\n\n expected = Index(rng.to_pytimedelta(), dtype=object)\n\n tm.assert_numpy_array_equal(idx.values, expected.values)\n\n def test_pickle(self):\n\n rng = timedelta_range('1 days', periods=10)\n rng_p = tm.round_trip_pickle(rng)\n tm.assert_index_equal(rng, rng_p)\n\n def test_hash_error(self):\n index = timedelta_range('1 days', periods=10)\n with pytest.raises(TypeError, match=(\"unhashable type: %r\" %\n type(index).__name__)):\n hash(index)\n\n def test_append_join_nondatetimeindex(self):\n rng = timedelta_range('1 days', periods=10)\n idx = Index(['a', 'b', 'c', 'd'])\n\n result = rng.append(idx)\n assert isinstance(result[0], Timedelta)\n\n # it works\n rng.join(idx, how='outer')\n\n def test_append_numpy_bug_1681(self):\n\n td = timedelta_range('1 days', '10 days', freq='2D')\n a = DataFrame()\n c = 
DataFrame({'A': 'foo', 'B': td}, index=td)\n str(c)\n\n result = a.append(c)\n assert (result['B'] == td).all()\n\n def test_fields(self):\n rng = timedelta_range('1 days, 10:11:12.100123456', periods=2,\n freq='s')\n tm.assert_index_equal(rng.days, Index([1, 1], dtype='int64'))\n tm.assert_index_equal(\n rng.seconds,\n Index([10 * 3600 + 11 * 60 + 12, 10 * 3600 + 11 * 60 + 13],\n dtype='int64'))\n tm.assert_index_equal(\n rng.microseconds,\n Index([100 * 1000 + 123, 100 * 1000 + 123], dtype='int64'))\n tm.assert_index_equal(rng.nanoseconds,\n Index([456, 456], dtype='int64'))\n\n pytest.raises(AttributeError, lambda: rng.hours)\n pytest.raises(AttributeError, lambda: rng.minutes)\n pytest.raises(AttributeError, lambda: rng.milliseconds)\n\n # with nat\n s = Series(rng)\n s[1] = np.nan\n\n tm.assert_series_equal(s.dt.days, Series([1, np.nan], index=[0, 1]))\n tm.assert_series_equal(s.dt.seconds, Series(\n [10 * 3600 + 11 * 60 + 12, np.nan], index=[0, 1]))\n\n # preserve name (GH15589)\n rng.name = 'name'\n assert rng.days.name == 'name'\n\n def test_freq_conversion(self):\n\n # doc example\n\n # series\n td = Series(date_range('20130101', periods=4)) - \\\n Series(date_range('20121201', periods=4))\n td[2] += timedelta(minutes=5, seconds=3)\n td[3] = np.nan\n\n result = td / np.timedelta64(1, 'D')\n expected = Series([31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan\n ])\n assert_series_equal(result, expected)\n\n result = td.astype('timedelta64[D]')\n expected = Series([31, 31, 31, np.nan])\n assert_series_equal(result, expected)\n\n result = td / np.timedelta64(1, 's')\n expected = Series([31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3,\n np.nan])\n assert_series_equal(result, expected)\n\n result = td.astype('timedelta64[s]')\n assert_series_equal(result, expected)\n\n # tdi\n td = TimedeltaIndex(td)\n\n result = td / np.timedelta64(1, 'D')\n expected = Index([31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan])\n assert_index_equal(result, expected)\n\n result = td.astype('timedelta64[D]')\n expected = Index([31, 31, 31, np.nan])\n assert_index_equal(result, expected)\n\n result = td / np.timedelta64(1, 's')\n expected = Index([31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3,\n np.nan])\n assert_index_equal(result, expected)\n\n result = td.astype('timedelta64[s]')\n assert_index_equal(result, expected)\n\n\nclass TestTimeSeries(object):\n\n def test_series_box_timedelta(self):\n rng = timedelta_range('1 day 1 s', periods=5, freq='h')\n s = Series(rng)\n assert isinstance(s[1], Timedelta)\n assert isinstance(s.iat[2], Timedelta)\n",
"\"\"\"\nThis module provides decorator functions which can be applied to test objects\nin order to skip those objects when certain conditions occur. A sample use case\nis to detect if the platform is missing ``matplotlib``. If so, any test objects\nwhich require ``matplotlib`` and decorated with ``@td.skip_if_no_mpl`` will be\nskipped by ``pytest`` during the execution of the test suite.\n\nTo illustrate, after importing this module:\n\nimport pandas.util._test_decorators as td\n\nThe decorators can be applied to classes:\n\[email protected]_if_some_reason\nclass Foo():\n ...\n\nOr individual functions:\n\[email protected]_if_some_reason\ndef test_foo():\n ...\n\nFor more information, refer to the ``pytest`` documentation on ``skipif``.\n\"\"\"\nfrom distutils.version import LooseVersion\nimport locale\n\nimport pytest\n\nfrom pandas.compat import (\n PY3, import_lzma, is_platform_32bit, is_platform_windows)\nfrom pandas.compat.numpy import _np_version_under1p15\n\nfrom pandas.core.computation.expressions import (\n _NUMEXPR_INSTALLED, _USE_NUMEXPR)\n\n\ndef safe_import(mod_name, min_version=None):\n \"\"\"\n Parameters:\n -----------\n mod_name : str\n Name of the module to be imported\n min_version : str, default None\n Minimum required version of the specified mod_name\n\n Returns:\n --------\n object\n The imported module if successful, or False\n \"\"\"\n try:\n mod = __import__(mod_name)\n except ImportError:\n return False\n\n if not min_version:\n return mod\n else:\n import sys\n try:\n version = getattr(sys.modules[mod_name], '__version__')\n except AttributeError:\n # xlrd uses a capitalized attribute name\n version = getattr(sys.modules[mod_name], '__VERSION__')\n if version:\n from distutils.version import LooseVersion\n if LooseVersion(version) >= LooseVersion(min_version):\n return mod\n\n return False\n\n\ndef _skip_if_no_mpl():\n mod = safe_import(\"matplotlib\")\n if mod:\n mod.use(\"Agg\", warn=False)\n else:\n return True\n\n\ndef _skip_if_mpl_2_2():\n mod = safe_import(\"matplotlib\")\n\n if mod:\n v = mod.__version__\n if LooseVersion(v) > LooseVersion('2.1.2'):\n return True\n else:\n mod.use(\"Agg\", warn=False)\n\n\ndef _skip_if_has_locale():\n lang, _ = locale.getlocale()\n if lang is not None:\n return True\n\n\ndef _skip_if_not_us_locale():\n lang, _ = locale.getlocale()\n if lang != 'en_US':\n return True\n\n\ndef _skip_if_no_scipy():\n return not (safe_import('scipy.stats') and\n safe_import('scipy.sparse') and\n safe_import('scipy.interpolate') and\n safe_import('scipy.signal'))\n\n\ndef _skip_if_no_lzma():\n try:\n import_lzma()\n except ImportError:\n return True\n\n\ndef skip_if_no(package, min_version=None):\n \"\"\"\n Generic function to help skip test functions when required packages are not\n present on the testing system.\n\n Intended for use as a decorator, this function will wrap the decorated\n function with a pytest ``skip_if`` mark. During a pytest test suite\n execution, that mark will attempt to import the specified ``package`` and\n optionally ensure it meets the ``min_version``. 
If the import and version\n check are unsuccessful, then the decorated function will be skipped.\n\n Parameters\n ----------\n package: str\n The name of the package required by the decorated function\n min_version: str or None, default None\n Optional minimum version of the package required by the decorated\n function\n\n Returns\n -------\n decorated_func: function\n The decorated function wrapped within a pytest ``skip_if`` mark\n \"\"\"\n def decorated_func(func):\n msg = \"Could not import '{}'\".format(package)\n if min_version:\n msg += \" satisfying a min_version of {}\".format(min_version)\n return pytest.mark.skipif(\n not safe_import(package, min_version=min_version), reason=msg\n )(func)\n return decorated_func\n\n\nskip_if_no_mpl = pytest.mark.skipif(_skip_if_no_mpl(),\n reason=\"Missing matplotlib dependency\")\nskip_if_np_lt_115 = pytest.mark.skipif(_np_version_under1p15,\n reason=\"NumPy 1.15 or greater required\")\nskip_if_mpl = pytest.mark.skipif(not _skip_if_no_mpl(),\n reason=\"matplotlib is present\")\nxfail_if_mpl_2_2 = pytest.mark.xfail(_skip_if_mpl_2_2(),\n reason=\"matplotlib 2.2\",\n strict=False)\nskip_if_32bit = pytest.mark.skipif(is_platform_32bit(),\n reason=\"skipping for 32 bit\")\nskip_if_windows = pytest.mark.skipif(is_platform_windows(),\n reason=\"Running on Windows\")\nskip_if_windows_python_3 = pytest.mark.skipif(is_platform_windows() and PY3,\n reason=(\"not used on python3/\"\n \"win32\"))\nskip_if_has_locale = pytest.mark.skipif(_skip_if_has_locale(),\n reason=\"Specific locale is set {lang}\"\n .format(lang=locale.getlocale()[0]))\nskip_if_not_us_locale = pytest.mark.skipif(_skip_if_not_us_locale(),\n reason=\"Specific locale is set \"\n \"{lang}\".format(\n lang=locale.getlocale()[0]))\nskip_if_no_scipy = pytest.mark.skipif(_skip_if_no_scipy(),\n reason=\"Missing SciPy requirement\")\nskip_if_no_lzma = pytest.mark.skipif(_skip_if_no_lzma(),\n reason=\"need backports.lzma to run\")\nskip_if_no_ne = pytest.mark.skipif(not _USE_NUMEXPR,\n reason=\"numexpr enabled->{enabled}, \"\n \"installed->{installed}\".format(\n enabled=_USE_NUMEXPR,\n installed=_NUMEXPR_INSTALLED))\n\n\ndef parametrize_fixture_doc(*args):\n \"\"\"\n Intended for use as a decorator for parametrized fixture,\n this function will wrap the decorated function with a pytest\n ``parametrize_fixture_doc`` mark. That mark will format\n initial fixture docstring by replacing placeholders {0}, {1} etc\n with parameters passed as arguments.\n\n Parameters:\n ----------\n args: iterable\n Positional arguments for docstring.\n\n Returns:\n -------\n documented_fixture: function\n The decorated function wrapped within a pytest\n ``parametrize_fixture_doc`` mark\n \"\"\"\n def documented_fixture(fixture):\n fixture.__doc__ = fixture.__doc__.format(*args)\n return fixture\n return documented_fixture\n",
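A short usage sketch for the decorators defined above, consumed the way the module docstring describes (the test names here are hypothetical):

import pandas.util._test_decorators as td


@td.skip_if_no_scipy
def test_needs_scipy():
    # runs only when scipy.stats/sparse/interpolate/signal all import
    import scipy.stats  # noqa: F401


@td.skip_if_no("xlrd", min_version="1.0.0")
def test_needs_xlrd():
    # skipped with a "Could not import 'xlrd' ..." reason when unavailable
    import xlrd  # noqa: F401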
"# -*- coding: utf-8 -*-\n# pylint: disable-msg=W0612,E1101\n\n\"\"\" test fancy indexing & misc \"\"\"\n\nfrom datetime import datetime\nfrom warnings import catch_warnings, simplefilter\nimport weakref\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import PY2, lrange, range\n\nfrom pandas.core.dtypes.common import is_float_dtype, is_integer_dtype\n\nimport pandas as pd\nfrom pandas import DataFrame, Index, NaT, Series\nfrom pandas.core.indexing import (\n _maybe_numeric_slice, _non_reducing_slice, validate_indices)\nfrom pandas.tests.indexing.common import Base, _mklbl\nimport pandas.util.testing as tm\n\n# ------------------------------------------------------------------------\n# Indexing test cases\n\n\nclass TestFancy(Base):\n \"\"\" pure get/set item & fancy indexing \"\"\"\n\n def test_setitem_ndarray_1d(self):\n # GH5508\n\n # len of indexer vs length of the 1d ndarray\n df = DataFrame(index=Index(lrange(1, 11)))\n df['foo'] = np.zeros(10, dtype=np.float64)\n df['bar'] = np.zeros(10, dtype=np.complex)\n\n # invalid\n with pytest.raises(ValueError):\n df.loc[df.index[2:5], 'bar'] = np.array([2.33j, 1.23 + 0.1j,\n 2.2, 1.0])\n\n # valid\n df.loc[df.index[2:6], 'bar'] = np.array([2.33j, 1.23 + 0.1j,\n 2.2, 1.0])\n\n result = df.loc[df.index[2:6], 'bar']\n expected = Series([2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6],\n name='bar')\n tm.assert_series_equal(result, expected)\n\n # dtype getting changed?\n df = DataFrame(index=Index(lrange(1, 11)))\n df['foo'] = np.zeros(10, dtype=np.float64)\n df['bar'] = np.zeros(10, dtype=np.complex)\n\n with pytest.raises(ValueError):\n df[2:5] = np.arange(1, 4) * 1j\n\n def test_inf_upcast(self):\n # GH 16957\n # We should be able to use np.inf as a key\n # np.inf should cause an index to convert to float\n\n # Test with np.inf in rows\n df = DataFrame(columns=[0])\n df.loc[1] = 1\n df.loc[2] = 2\n df.loc[np.inf] = 3\n\n # make sure we can look up the value\n assert df.loc[np.inf, 0] == 3\n\n result = df.index\n expected = pd.Float64Index([1, 2, np.inf])\n tm.assert_index_equal(result, expected)\n\n # Test with np.inf in columns\n df = DataFrame()\n df.loc[0, 0] = 1\n df.loc[1, 1] = 2\n df.loc[0, np.inf] = 3\n\n result = df.columns\n expected = pd.Float64Index([0, 1, np.inf])\n tm.assert_index_equal(result, expected)\n\n def test_setitem_dtype_upcast(self):\n\n # GH3216\n df = DataFrame([{\"a\": 1}, {\"a\": 3, \"b\": 2}])\n df['c'] = np.nan\n assert df['c'].dtype == np.float64\n\n df.loc[0, 'c'] = 'foo'\n expected = DataFrame([{\"a\": 1, \"c\": 'foo'},\n {\"a\": 3, \"b\": 2, \"c\": np.nan}])\n tm.assert_frame_equal(df, expected)\n\n # GH10280\n df = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),\n index=list('ab'),\n columns=['foo', 'bar', 'baz'])\n\n for val in [3.14, 'wxyz']:\n left = df.copy()\n left.loc['a', 'bar'] = val\n right = DataFrame([[0, val, 2], [3, 4, 5]], index=list('ab'),\n columns=['foo', 'bar', 'baz'])\n\n tm.assert_frame_equal(left, right)\n assert is_integer_dtype(left['foo'])\n assert is_integer_dtype(left['baz'])\n\n left = DataFrame(np.arange(6, dtype='int64').reshape(2, 3) / 10.0,\n index=list('ab'),\n columns=['foo', 'bar', 'baz'])\n left.loc['a', 'bar'] = 'wxyz'\n\n right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]], index=list('ab'),\n columns=['foo', 'bar', 'baz'])\n\n tm.assert_frame_equal(left, right)\n assert is_float_dtype(left['foo'])\n assert is_float_dtype(left['baz'])\n\n def test_dups_fancy_indexing(self):\n\n # GH 3455\n from pandas.util.testing import makeCustomDataframe as mkdf\n df = 
mkdf(10, 3)\n df.columns = ['a', 'a', 'b']\n result = df[['b', 'a']].columns\n expected = Index(['b', 'a', 'a'])\n tm.assert_index_equal(result, expected)\n\n # across dtypes\n df = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']],\n columns=list('aaaaaaa'))\n df.head()\n str(df)\n result = DataFrame([[1, 2, 1., 2., 3., 'foo', 'bar']])\n result.columns = list('aaaaaaa')\n\n # TODO(wesm): unused?\n df_v = df.iloc[:, 4] # noqa\n res_v = result.iloc[:, 4] # noqa\n\n tm.assert_frame_equal(df, result)\n\n # GH 3561, dups not in selected order\n df = DataFrame(\n {'test': [5, 7, 9, 11],\n 'test1': [4., 5, 6, 7],\n 'other': list('abcd')}, index=['A', 'A', 'B', 'C'])\n rows = ['C', 'B']\n expected = DataFrame(\n {'test': [11, 9],\n 'test1': [7., 6],\n 'other': ['d', 'c']}, index=rows)\n result = df.loc[rows]\n tm.assert_frame_equal(result, expected)\n\n result = df.loc[Index(rows)]\n tm.assert_frame_equal(result, expected)\n\n rows = ['C', 'B', 'E']\n expected = DataFrame(\n {'test': [11, 9, np.nan],\n 'test1': [7., 6, np.nan],\n 'other': ['d', 'c', np.nan]}, index=rows)\n\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n result = df.loc[rows]\n tm.assert_frame_equal(result, expected)\n\n # see GH5553, make sure we use the right indexer\n rows = ['F', 'G', 'H', 'C', 'B', 'E']\n expected = DataFrame({'test': [np.nan, np.nan, np.nan, 11, 9, np.nan],\n 'test1': [np.nan, np.nan, np.nan, 7., 6, np.nan],\n 'other': [np.nan, np.nan, np.nan,\n 'd', 'c', np.nan]},\n index=rows)\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n result = df.loc[rows]\n tm.assert_frame_equal(result, expected)\n\n # List containing only missing label\n dfnu = DataFrame(np.random.randn(5, 3), index=list('AABCD'))\n with pytest.raises(KeyError):\n dfnu.loc[['E']]\n\n # ToDo: check_index_type can be True after GH 11497\n\n # GH 4619; duplicate indexer with missing label\n df = DataFrame({\"A\": [0, 1, 2]})\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n result = df.loc[[0, 8, 0]]\n expected = DataFrame({\"A\": [0, np.nan, 0]}, index=[0, 8, 0])\n tm.assert_frame_equal(result, expected, check_index_type=False)\n\n df = DataFrame({\"A\": list('abc')})\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n result = df.loc[[0, 8, 0]]\n expected = DataFrame({\"A\": ['a', np.nan, 'a']}, index=[0, 8, 0])\n tm.assert_frame_equal(result, expected, check_index_type=False)\n\n # non unique with non unique selector\n df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])\n expected = DataFrame(\n {'test': [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n result = df.loc[['A', 'A', 'E']]\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.skipif(PY2,\n reason=\"GH-20770. 
Py2 unreliable warnings catching.\")\n def test_dups_fancy_indexing2(self):\n # GH 5835\n # dups on index and missing values\n df = DataFrame(\n np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A'])\n\n expected = pd.concat(\n [df.loc[:, ['A', 'B']], DataFrame(np.nan, columns=['C'],\n index=df.index)], axis=1)\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n result = df.loc[:, ['A', 'B', 'C']]\n tm.assert_frame_equal(result, expected)\n\n # GH 6504, multi-axis indexing\n df = DataFrame(np.random.randn(9, 2),\n index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=['a', 'b'])\n\n expected = df.iloc[0:6]\n result = df.loc[[1, 2]]\n tm.assert_frame_equal(result, expected)\n\n expected = df\n result = df.loc[:, ['a', 'b']]\n tm.assert_frame_equal(result, expected)\n\n expected = df.iloc[0:6, :]\n result = df.loc[[1, 2], ['a', 'b']]\n tm.assert_frame_equal(result, expected)\n\n def test_indexing_mixed_frame_bug(self):\n\n # GH3492\n df = DataFrame({'a': {1: 'aaa', 2: 'bbb', 3: 'ccc'},\n 'b': {1: 111, 2: 222, 3: 333}})\n\n # this works, new column is created correctly\n df['test'] = df['a'].apply(lambda x: '_' if x == 'aaa' else x)\n\n # this does not work, ie column test is not changed\n idx = df['test'] == '_'\n temp = df.loc[idx, 'a'].apply(lambda x: '-----' if x == 'aaa' else x)\n df.loc[idx, 'test'] = temp\n assert df.iloc[0, 2] == '-----'\n\n # if I look at df, then element [0,2] equals '_'. If instead I type\n # df.ix[idx,'test'], I get '-----', finally by typing df.iloc[0,2] I\n # get '_'.\n\n def test_multitype_list_index_access(self):\n # GH 10610\n df = DataFrame(np.random.random((10, 5)),\n columns=[\"a\"] + [20, 21, 22, 23])\n\n with pytest.raises(KeyError):\n df[[22, 26, -8]]\n assert df[21].shape[0] == df.shape[0]\n\n def test_set_index_nan(self):\n\n # GH 3586\n df = DataFrame({'PRuid': {17: 'nonQC',\n 18: 'nonQC',\n 19: 'nonQC',\n 20: '10',\n 21: '11',\n 22: '12',\n 23: '13',\n 24: '24',\n 25: '35',\n 26: '46',\n 27: '47',\n 28: '48',\n 29: '59',\n 30: '10'},\n 'QC': {17: 0.0,\n 18: 0.0,\n 19: 0.0,\n 20: np.nan,\n 21: np.nan,\n 22: np.nan,\n 23: np.nan,\n 24: 1.0,\n 25: np.nan,\n 26: np.nan,\n 27: np.nan,\n 28: np.nan,\n 29: np.nan,\n 30: np.nan},\n 'data': {17: 7.9544899999999998,\n 18: 8.0142609999999994,\n 19: 7.8591520000000008,\n 20: 0.86140349999999999,\n 21: 0.87853110000000001,\n 22: 0.8427041999999999,\n 23: 0.78587700000000005,\n 24: 0.73062459999999996,\n 25: 0.81668560000000001,\n 26: 0.81927080000000008,\n 27: 0.80705009999999999,\n 28: 0.81440240000000008,\n 29: 0.80140849999999997,\n 30: 0.81307740000000006},\n 'year': {17: 2006,\n 18: 2007,\n 19: 2008,\n 20: 1985,\n 21: 1985,\n 22: 1985,\n 23: 1985,\n 24: 1985,\n 25: 1985,\n 26: 1985,\n 27: 1985,\n 28: 1985,\n 29: 1985,\n 30: 1986}}).reset_index()\n\n result = df.set_index(['year', 'PRuid', 'QC']).reset_index().reindex(\n columns=df.columns)\n tm.assert_frame_equal(result, df)\n\n def test_multi_assign(self):\n\n # GH 3626, an assignment of a sub-df to a df\n df = DataFrame({'FC': ['a', 'b', 'a', 'b', 'a', 'b'],\n 'PF': [0, 0, 0, 0, 1, 1],\n 'col1': lrange(6),\n 'col2': lrange(6, 12)})\n df.iloc[1, 0] = np.nan\n df2 = df.copy()\n\n mask = ~df2.FC.isna()\n cols = ['col1', 'col2']\n\n dft = df2 * 2\n dft.iloc[3, 3] = np.nan\n\n expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],\n 'PF': [0, 0, 0, 0, 1, 1],\n 'col1': Series([0, 1, 4, 6, 8, 10]),\n 'col2': [12, 7, 16, np.nan, 20, 22]})\n\n # frame on rhs\n df2.loc[mask, cols] = dft.loc[mask, cols]\n tm.assert_frame_equal(df2, 
expected)\n\n df2.loc[mask, cols] = dft.loc[mask, cols]\n tm.assert_frame_equal(df2, expected)\n\n # with an ndarray on rhs\n # coerces to float64 because values has float64 dtype\n # GH 14001\n expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'],\n 'PF': [0, 0, 0, 0, 1, 1],\n 'col1': [0., 1., 4., 6., 8., 10.],\n 'col2': [12, 7, 16, np.nan, 20, 22]})\n df2 = df.copy()\n df2.loc[mask, cols] = dft.loc[mask, cols].values\n tm.assert_frame_equal(df2, expected)\n df2.loc[mask, cols] = dft.loc[mask, cols].values\n tm.assert_frame_equal(df2, expected)\n\n # broadcasting on the rhs is required\n df = DataFrame(dict(A=[1, 2, 0, 0, 0], B=[0, 0, 0, 10, 11], C=[\n 0, 0, 0, 10, 11], D=[3, 4, 5, 6, 7]))\n\n expected = df.copy()\n mask = expected['A'] == 0\n for col in ['A', 'B']:\n expected.loc[mask, col] = df['D']\n\n df.loc[df['A'] == 0, ['A', 'B']] = df['D']\n tm.assert_frame_equal(df, expected)\n\n def test_setitem_list(self):\n\n # GH 6043\n # ix with a list\n df = DataFrame(index=[0, 1], columns=[0])\n with catch_warnings(record=True):\n simplefilter(\"ignore\")\n df.ix[1, 0] = [1, 2, 3]\n df.ix[1, 0] = [1, 2]\n\n result = DataFrame(index=[0, 1], columns=[0])\n with catch_warnings(record=True):\n simplefilter(\"ignore\")\n result.ix[1, 0] = [1, 2]\n\n tm.assert_frame_equal(result, df)\n\n # ix with an object\n class TO(object):\n\n def __init__(self, value):\n self.value = value\n\n def __str__(self):\n return \"[{0}]\".format(self.value)\n\n __repr__ = __str__\n\n def __eq__(self, other):\n return self.value == other.value\n\n def view(self):\n return self\n\n df = DataFrame(index=[0, 1], columns=[0])\n with catch_warnings(record=True):\n simplefilter(\"ignore\")\n df.ix[1, 0] = TO(1)\n df.ix[1, 0] = TO(2)\n\n result = DataFrame(index=[0, 1], columns=[0])\n with catch_warnings(record=True):\n simplefilter(\"ignore\")\n result.ix[1, 0] = TO(2)\n\n tm.assert_frame_equal(result, df)\n\n # remains object dtype even after setting it back\n df = DataFrame(index=[0, 1], columns=[0])\n with catch_warnings(record=True):\n simplefilter(\"ignore\")\n df.ix[1, 0] = TO(1)\n df.ix[1, 0] = np.nan\n result = DataFrame(index=[0, 1], columns=[0])\n\n tm.assert_frame_equal(result, df)\n\n def test_string_slice(self):\n # GH 14424\n # string indexing against datetimelike with object\n # dtype should properly raises KeyError\n df = DataFrame([1], Index([pd.Timestamp('2011-01-01')], dtype=object))\n assert df.index.is_all_dates\n with pytest.raises(KeyError):\n df['2011']\n\n with pytest.raises(KeyError):\n df.loc['2011', 0]\n\n df = DataFrame()\n assert not df.index.is_all_dates\n with pytest.raises(KeyError):\n df['2011']\n\n with pytest.raises(KeyError):\n df.loc['2011', 0]\n\n def test_astype_assignment(self):\n\n # GH4312 (iloc)\n df_orig = DataFrame([['1', '2', '3', '.4', 5, 6., 'foo']],\n columns=list('ABCDEFG'))\n\n df = df_orig.copy()\n df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)\n expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],\n columns=list('ABCDEFG'))\n tm.assert_frame_equal(df, expected)\n\n df = df_orig.copy()\n df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)\n expected = DataFrame([[1, 2, '3', '.4', 5, 6., 'foo']],\n columns=list('ABCDEFG'))\n tm.assert_frame_equal(df, expected)\n\n # GH5702 (loc)\n df = df_orig.copy()\n df.loc[:, 'A'] = df.loc[:, 'A'].astype(np.int64)\n expected = DataFrame([[1, '2', '3', '.4', 5, 6., 'foo']],\n columns=list('ABCDEFG'))\n tm.assert_frame_equal(df, expected)\n\n df = df_orig.copy()\n df.loc[:, ['B', 'C']] = df.loc[:, 
['B', 'C']].astype(np.int64)\n expected = DataFrame([['1', 2, 3, '.4', 5, 6., 'foo']],\n columns=list('ABCDEFG'))\n tm.assert_frame_equal(df, expected)\n\n # full replacements / no nans\n df = DataFrame({'A': [1., 2., 3., 4.]})\n df.iloc[:, 0] = df['A'].astype(np.int64)\n expected = DataFrame({'A': [1, 2, 3, 4]})\n tm.assert_frame_equal(df, expected)\n\n df = DataFrame({'A': [1., 2., 3., 4.]})\n df.loc[:, 'A'] = df['A'].astype(np.int64)\n expected = DataFrame({'A': [1, 2, 3, 4]})\n tm.assert_frame_equal(df, expected)\n\n @pytest.mark.parametrize(\"index,val\", [\n (Index([0, 1, 2]), 2),\n (Index([0, 1, '2']), '2'),\n (Index([0, 1, 2, np.inf, 4]), 4),\n (Index([0, 1, 2, np.nan, 4]), 4),\n (Index([0, 1, 2, np.inf]), np.inf),\n (Index([0, 1, 2, np.nan]), np.nan),\n ])\n def test_index_contains(self, index, val):\n assert val in index\n\n @pytest.mark.parametrize(\"index,val\", [\n (Index([0, 1, 2]), '2'),\n (Index([0, 1, '2']), 2),\n (Index([0, 1, 2, np.inf]), 4),\n (Index([0, 1, 2, np.nan]), 4),\n (Index([0, 1, 2, np.inf]), np.nan),\n (Index([0, 1, 2, np.nan]), np.inf),\n # Checking if np.inf in Int64Index should not cause an OverflowError\n # Related to GH 16957\n (pd.Int64Index([0, 1, 2]), np.inf),\n (pd.Int64Index([0, 1, 2]), np.nan),\n (pd.UInt64Index([0, 1, 2]), np.inf),\n (pd.UInt64Index([0, 1, 2]), np.nan),\n ])\n def test_index_not_contains(self, index, val):\n assert val not in index\n\n @pytest.mark.parametrize(\"index,val\", [\n (Index([0, 1, '2']), 0),\n (Index([0, 1, '2']), '2'),\n ])\n def test_mixed_index_contains(self, index, val):\n # GH 19860\n assert val in index\n\n @pytest.mark.parametrize(\"index,val\", [\n (Index([0, 1, '2']), '1'),\n (Index([0, 1, '2']), 2),\n ])\n def test_mixed_index_not_contains(self, index, val):\n # GH 19860\n assert val not in index\n\n def test_contains_with_float_index(self):\n # GH#22085\n integer_index = pd.Int64Index([0, 1, 2, 3])\n uinteger_index = pd.UInt64Index([0, 1, 2, 3])\n float_index = pd.Float64Index([0.1, 1.1, 2.2, 3.3])\n\n for index in (integer_index, uinteger_index):\n assert 1.1 not in index\n assert 1.0 in index\n assert 1 in index\n\n assert 1.1 in float_index\n assert 1.0 not in float_index\n assert 1 not in float_index\n\n def test_index_type_coercion(self):\n\n with catch_warnings(record=True):\n simplefilter(\"ignore\")\n\n # GH 11836\n # if we have an index type and set it with something that looks\n # to numpy like the same, but is actually, not\n # (e.g. 
setting with a float or string '0')\n # then we need to coerce to object\n\n # integer indexes\n for s in [Series(range(5)),\n Series(range(5), index=range(1, 6))]:\n\n assert s.index.is_integer()\n\n for indexer in [lambda x: x.ix,\n lambda x: x.loc,\n lambda x: x]:\n s2 = s.copy()\n indexer(s2)[0.1] = 0\n assert s2.index.is_floating()\n assert indexer(s2)[0.1] == 0\n\n s2 = s.copy()\n indexer(s2)[0.0] = 0\n exp = s.index\n if 0 not in s:\n exp = Index(s.index.tolist() + [0])\n tm.assert_index_equal(s2.index, exp)\n\n s2 = s.copy()\n indexer(s2)['0'] = 0\n assert s2.index.is_object()\n\n for s in [Series(range(5), index=np.arange(5.))]:\n\n assert s.index.is_floating()\n\n for idxr in [lambda x: x.ix,\n lambda x: x.loc,\n lambda x: x]:\n\n s2 = s.copy()\n idxr(s2)[0.1] = 0\n assert s2.index.is_floating()\n assert idxr(s2)[0.1] == 0\n\n s2 = s.copy()\n idxr(s2)[0.0] = 0\n tm.assert_index_equal(s2.index, s.index)\n\n s2 = s.copy()\n idxr(s2)['0'] = 0\n assert s2.index.is_object()\n\n\nclass TestMisc(Base):\n\n def test_float_index_to_mixed(self):\n df = DataFrame({0.0: np.random.rand(10), 1.0: np.random.rand(10)})\n df['a'] = 10\n tm.assert_frame_equal(DataFrame({0.0: df[0.0],\n 1.0: df[1.0],\n 'a': [10] * 10}),\n df)\n\n def test_float_index_non_scalar_assignment(self):\n df = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]}, index=[1., 2., 3.])\n df.loc[df.index[:2]] = 1\n expected = DataFrame({'a': [1, 1, 3], 'b': [1, 1, 5]}, index=df.index)\n tm.assert_frame_equal(expected, df)\n\n df = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]}, index=[1., 2., 3.])\n df2 = df.copy()\n df.loc[df.index] = df.loc[df.index]\n tm.assert_frame_equal(df, df2)\n\n def test_float_index_at_iat(self):\n s = Series([1, 2, 3], index=[0.1, 0.2, 0.3])\n for el, item in s.iteritems():\n assert s.at[el] == item\n for i in range(len(s)):\n assert s.iat[i] == i + 1\n\n def test_mixed_index_assignment(self):\n # GH 19860\n s = Series([1, 2, 3, 4, 5], index=['a', 'b', 'c', 1, 2])\n s.at['a'] = 11\n assert s.iat[0] == 11\n s.at[1] = 22\n assert s.iat[3] == 22\n\n def test_mixed_index_no_fallback(self):\n # GH 19860\n s = Series([1, 2, 3, 4, 5], index=['a', 'b', 'c', 1, 2])\n with pytest.raises(KeyError):\n s.at[0]\n with pytest.raises(KeyError):\n s.at[4]\n\n def test_rhs_alignment(self):\n # GH8258, tests that both rows & columns are aligned to what is\n # assigned to. 
covers both uniform data-type & multi-type cases\n def run_tests(df, rhs, right):\n # label, index, slice\n lbl_one, idx_one, slice_one = list('bcd'), [1, 2, 3], slice(1, 4)\n lbl_two, idx_two, slice_two = ['joe', 'jolie'], [1, 2], slice(1, 3)\n\n left = df.copy()\n left.loc[lbl_one, lbl_two] = rhs\n tm.assert_frame_equal(left, right)\n\n left = df.copy()\n left.iloc[idx_one, idx_two] = rhs\n tm.assert_frame_equal(left, right)\n\n left = df.copy()\n with catch_warnings(record=True):\n # XXX: finer-filter here.\n simplefilter(\"ignore\")\n left.ix[slice_one, slice_two] = rhs\n tm.assert_frame_equal(left, right)\n\n left = df.copy()\n with catch_warnings(record=True):\n simplefilter(\"ignore\")\n left.ix[idx_one, idx_two] = rhs\n tm.assert_frame_equal(left, right)\n\n left = df.copy()\n with catch_warnings(record=True):\n simplefilter(\"ignore\")\n left.ix[lbl_one, lbl_two] = rhs\n tm.assert_frame_equal(left, right)\n\n xs = np.arange(20).reshape(5, 4)\n cols = ['jim', 'joe', 'jolie', 'joline']\n df = DataFrame(xs, columns=cols, index=list('abcde'))\n\n # right hand side; permute the indices and multiplpy by -2\n rhs = -2 * df.iloc[3:0:-1, 2:0:-1]\n\n # expected `right` result; just multiply by -2\n right = df.copy()\n right.iloc[1:4, 1:3] *= -2\n\n # run tests with uniform dtypes\n run_tests(df, rhs, right)\n\n # make frames multi-type & re-run tests\n for frame in [df, rhs, right]:\n frame['joe'] = frame['joe'].astype('float64')\n frame['jolie'] = frame['jolie'].map('@{0}'.format)\n\n run_tests(df, rhs, right)\n\n def test_str_label_slicing_with_negative_step(self):\n SLC = pd.IndexSlice\n\n def assert_slices_equivalent(l_slc, i_slc):\n tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])\n\n if not idx.is_integer:\n # For integer indices, ix and plain getitem are position-based.\n tm.assert_series_equal(s[l_slc], s.iloc[i_slc])\n tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])\n\n for idx in [_mklbl('A', 20), np.arange(20) + 100,\n np.linspace(100, 150, 20)]:\n idx = Index(idx)\n s = Series(np.arange(20), index=idx)\n assert_slices_equivalent(SLC[idx[9]::-1], SLC[9::-1])\n assert_slices_equivalent(SLC[:idx[9]:-1], SLC[:8:-1])\n assert_slices_equivalent(SLC[idx[13]:idx[9]:-1], SLC[13:8:-1])\n assert_slices_equivalent(SLC[idx[9]:idx[13]:-1], SLC[:0])\n\n def test_slice_with_zero_step_raises(self):\n s = Series(np.arange(20), index=_mklbl('A', 20))\n with pytest.raises(ValueError, match='slice step cannot be zero'):\n s[::0]\n with pytest.raises(ValueError, match='slice step cannot be zero'):\n s.loc[::0]\n with catch_warnings(record=True):\n simplefilter(\"ignore\")\n with pytest.raises(ValueError, match='slice step cannot be zero'):\n s.ix[::0]\n\n def test_indexing_assignment_dict_already_exists(self):\n df = DataFrame({'x': [1, 2, 6],\n 'y': [2, 2, 8],\n 'z': [-5, 0, 5]}).set_index('z')\n expected = df.copy()\n rhs = dict(x=9, y=99)\n df.loc[5] = rhs\n expected.loc[5] = [9, 99]\n tm.assert_frame_equal(df, expected)\n\n def test_indexing_dtypes_on_empty(self):\n # Check that .iloc and .ix return correct dtypes GH9983\n df = DataFrame({'a': [1, 2, 3], 'b': ['b', 'b2', 'b3']})\n with catch_warnings(record=True):\n simplefilter(\"ignore\")\n df2 = df.ix[[], :]\n\n assert df2.loc[:, 'a'].dtype == np.int64\n tm.assert_series_equal(df2.loc[:, 'a'], df2.iloc[:, 0])\n with catch_warnings(record=True):\n simplefilter(\"ignore\")\n tm.assert_series_equal(df2.loc[:, 'a'], df2.ix[:, 0])\n\n def test_range_in_series_indexing(self):\n # range can cause an indexing error\n # GH 11652\n for x in [5, 
999999, 1000000]:\n s = Series(index=range(x))\n s.loc[range(1)] = 42\n tm.assert_series_equal(s.loc[range(1)], Series(42.0, index=[0]))\n\n s.loc[range(2)] = 43\n tm.assert_series_equal(s.loc[range(2)], Series(43.0, index=[0, 1]))\n\n def test_non_reducing_slice(self):\n df = DataFrame([[0, 1], [2, 3]])\n\n slices = [\n # pd.IndexSlice[:, :],\n pd.IndexSlice[:, 1],\n pd.IndexSlice[1, :],\n pd.IndexSlice[[1], [1]],\n pd.IndexSlice[1, [1]],\n pd.IndexSlice[[1], 1],\n pd.IndexSlice[1],\n pd.IndexSlice[1, 1],\n slice(None, None, None),\n [0, 1],\n np.array([0, 1]),\n Series([0, 1])\n ]\n for slice_ in slices:\n tslice_ = _non_reducing_slice(slice_)\n assert isinstance(df.loc[tslice_], DataFrame)\n\n def test_list_slice(self):\n # like dataframe getitem\n slices = [['A'], Series(['A']), np.array(['A'])]\n df = DataFrame({'A': [1, 2], 'B': [3, 4]}, index=['A', 'B'])\n expected = pd.IndexSlice[:, ['A']]\n for subset in slices:\n result = _non_reducing_slice(subset)\n tm.assert_frame_equal(df.loc[result], df.loc[expected])\n\n def test_maybe_numeric_slice(self):\n df = DataFrame({'A': [1, 2], 'B': ['c', 'd'], 'C': [True, False]})\n result = _maybe_numeric_slice(df, slice_=None)\n expected = pd.IndexSlice[:, ['A']]\n assert result == expected\n\n result = _maybe_numeric_slice(df, None, include_bool=True)\n expected = pd.IndexSlice[:, ['A', 'C']]\n result = _maybe_numeric_slice(df, [1])\n expected = [1]\n assert result == expected\n\n def test_partial_boolean_frame_indexing(self):\n # GH 17170\n df = DataFrame(np.arange(9.).reshape(3, 3),\n index=list('abc'), columns=list('ABC'))\n index_df = DataFrame(1, index=list('ab'), columns=list('AB'))\n result = df[index_df.notnull()]\n expected = DataFrame(np.array([[0., 1., np.nan],\n [3., 4., np.nan],\n [np.nan] * 3]),\n index=list('abc'),\n columns=list('ABC'))\n tm.assert_frame_equal(result, expected)\n\n def test_no_reference_cycle(self):\n df = DataFrame({'a': [0, 1], 'b': [2, 3]})\n for name in ('loc', 'iloc', 'at', 'iat'):\n getattr(df, name)\n with catch_warnings(record=True):\n simplefilter(\"ignore\")\n getattr(df, 'ix')\n wr = weakref.ref(df)\n del df\n assert wr() is None\n\n\nclass TestSeriesNoneCoercion(object):\n EXPECTED_RESULTS = [\n # For numeric series, we should coerce to NaN.\n ([1, 2, 3], [np.nan, 2, 3]),\n ([1.0, 2.0, 3.0], [np.nan, 2.0, 3.0]),\n\n # For datetime series, we should coerce to NaT.\n ([datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],\n [NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)]),\n\n # For objects, we should preserve the None value.\n ([\"foo\", \"bar\", \"baz\"], [None, \"bar\", \"baz\"]),\n ]\n\n def test_coercion_with_setitem(self):\n for start_data, expected_result in self.EXPECTED_RESULTS:\n start_series = Series(start_data)\n start_series[0] = None\n\n expected_series = Series(expected_result)\n tm.assert_series_equal(start_series, expected_series)\n\n def test_coercion_with_loc_setitem(self):\n for start_data, expected_result in self.EXPECTED_RESULTS:\n start_series = Series(start_data)\n start_series.loc[0] = None\n\n expected_series = Series(expected_result)\n tm.assert_series_equal(start_series, expected_series)\n\n def test_coercion_with_setitem_and_series(self):\n for start_data, expected_result in self.EXPECTED_RESULTS:\n start_series = Series(start_data)\n start_series[start_series == start_series[0]] = None\n\n expected_series = Series(expected_result)\n tm.assert_series_equal(start_series, expected_series)\n\n def test_coercion_with_loc_and_series(self):\n for start_data, 
expected_result in self.EXPECTED_RESULTS:\n start_series = Series(start_data)\n start_series.loc[start_series == start_series[0]] = None\n\n expected_series = Series(expected_result)\n tm.assert_series_equal(start_series, expected_series)\n\n\nclass TestDataframeNoneCoercion(object):\n EXPECTED_SINGLE_ROW_RESULTS = [\n # For numeric series, we should coerce to NaN.\n ([1, 2, 3], [np.nan, 2, 3]),\n ([1.0, 2.0, 3.0], [np.nan, 2.0, 3.0]),\n\n # For datetime series, we should coerce to NaT.\n ([datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],\n [NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)]),\n\n # For objects, we should preserve the None value.\n ([\"foo\", \"bar\", \"baz\"], [None, \"bar\", \"baz\"]),\n ]\n\n def test_coercion_with_loc(self):\n for start_data, expected_result, in self.EXPECTED_SINGLE_ROW_RESULTS:\n start_dataframe = DataFrame({'foo': start_data})\n start_dataframe.loc[0, ['foo']] = None\n\n expected_dataframe = DataFrame({'foo': expected_result})\n tm.assert_frame_equal(start_dataframe, expected_dataframe)\n\n def test_coercion_with_setitem_and_dataframe(self):\n for start_data, expected_result, in self.EXPECTED_SINGLE_ROW_RESULTS:\n start_dataframe = DataFrame({'foo': start_data})\n start_dataframe[start_dataframe['foo'] == start_dataframe['foo'][\n 0]] = None\n\n expected_dataframe = DataFrame({'foo': expected_result})\n tm.assert_frame_equal(start_dataframe, expected_dataframe)\n\n def test_none_coercion_loc_and_dataframe(self):\n for start_data, expected_result, in self.EXPECTED_SINGLE_ROW_RESULTS:\n start_dataframe = DataFrame({'foo': start_data})\n start_dataframe.loc[start_dataframe['foo'] == start_dataframe[\n 'foo'][0]] = None\n\n expected_dataframe = DataFrame({'foo': expected_result})\n tm.assert_frame_equal(start_dataframe, expected_dataframe)\n\n def test_none_coercion_mixed_dtypes(self):\n start_dataframe = DataFrame({\n 'a': [1, 2, 3],\n 'b': [1.0, 2.0, 3.0],\n 'c': [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1,\n 3)],\n 'd': ['a', 'b', 'c']\n })\n start_dataframe.iloc[0] = None\n\n exp = DataFrame({'a': [np.nan, 2, 3],\n 'b': [np.nan, 2.0, 3.0],\n 'c': [NaT, datetime(2000, 1, 2),\n datetime(2000, 1, 3)],\n 'd': [None, 'b', 'c']})\n tm.assert_frame_equal(start_dataframe, exp)\n\n\ndef test_validate_indices_ok():\n indices = np.asarray([0, 1])\n validate_indices(indices, 2)\n validate_indices(indices[:0], 0)\n validate_indices(np.array([-1, -1]), 0)\n\n\ndef test_validate_indices_low():\n indices = np.asarray([0, -2])\n with pytest.raises(ValueError, match=\"'indices' contains\"):\n validate_indices(indices, 2)\n\n\ndef test_validate_indices_high():\n indices = np.asarray([0, 1, 2])\n with pytest.raises(IndexError, match=\"indices are out\"):\n validate_indices(indices, 2)\n\n\ndef test_validate_indices_empty():\n with pytest.raises(IndexError, match=\"indices are out\"):\n validate_indices(np.array([0, 1]), 0)\n\n\ndef test_extension_array_cross_section():\n # A cross-section of a homogeneous EA should be an EA\n df = pd.DataFrame({\n \"A\": pd.core.arrays.integer_array([1, 2]),\n \"B\": pd.core.arrays.integer_array([3, 4])\n }, index=['a', 'b'])\n expected = pd.Series(pd.core.arrays.integer_array([1, 3]),\n index=['A', 'B'], name='a')\n result = df.loc['a']\n tm.assert_series_equal(result, expected)\n\n result = df.iloc[0]\n tm.assert_series_equal(result, expected)\n\n\ndef test_extension_array_cross_section_converts():\n df = pd.DataFrame({\n \"A\": pd.core.arrays.integer_array([1, 2]),\n \"B\": np.array([1, 2]),\n }, 
index=['a', 'b'])\n result = df.loc['a']\n expected = pd.Series([1, 1], dtype=object, index=['A', 'B'], name='a')\n tm.assert_series_equal(result, expected)\n\n result = df.iloc[0]\n tm.assert_series_equal(result, expected)\n",
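A compact sketch of the None-coercion rules asserted by the last two test classes above (a standalone illustration, not part of the archived test module):

from datetime import datetime
import pandas as pd

s_num = pd.Series([1, 2, 3])
s_num[0] = None            # numeric: coerces None to NaN (dtype becomes float64)
assert pd.isna(s_num[0])

s_dt = pd.Series([datetime(2000, 1, 1), datetime(2000, 1, 2)])
s_dt.loc[0] = None         # datetime-like: coerces None to NaT
assert s_dt[0] is pd.NaT

s_obj = pd.Series(["foo", "bar"])
s_obj[0] = None            # object dtype: the None value is preserved
assert s_obj[0] is None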
"# -*- coding: utf-8 -*-\n\n\"\"\"\nTests date parsing functionality for all of the\nparsers defined in parsers.py\n\"\"\"\n\nfrom datetime import date, datetime\n\nimport numpy as np\nimport pytest\nimport pytz\n\nfrom pandas._libs.tslib import Timestamp\nfrom pandas._libs.tslibs import parsing\nfrom pandas.compat import StringIO, lrange, parse_date\nfrom pandas.compat.numpy import np_array_datetime64_compat\n\nimport pandas as pd\nfrom pandas import DataFrame, DatetimeIndex, Index, MultiIndex\nfrom pandas.core.indexes.datetimes import date_range\nimport pandas.util.testing as tm\n\nimport pandas.io.date_converters as conv\nimport pandas.io.parsers as parsers\n\n\ndef test_separator_date_conflict(all_parsers):\n # Regression test for gh-4678\n #\n # Make sure thousands separator and\n # date parsing do not conflict.\n parser = all_parsers\n data = \"06-02-2013;13:00;1-000.215\"\n expected = DataFrame([[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],\n columns=[\"Date\", 2])\n\n df = parser.read_csv(StringIO(data), sep=\";\", thousands=\"-\",\n parse_dates={\"Date\": [0, 1]}, header=None)\n tm.assert_frame_equal(df, expected)\n\n\[email protected](\"keep_date_col\", [True, False])\ndef test_multiple_date_col_custom(all_parsers, keep_date_col):\n data = \"\"\"\\\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\n\"\"\"\n parser = all_parsers\n\n def date_parser(*date_cols):\n \"\"\"\n Test date parser.\n\n Parameters\n ----------\n date_cols : args\n The list of data columns to parse.\n\n Returns\n -------\n parsed : Series\n \"\"\"\n return parsing.try_parse_dates(parsers._concat_date_cols(date_cols))\n\n result = parser.read_csv(StringIO(data), header=None,\n date_parser=date_parser, prefix=\"X\",\n parse_dates={\"actual\": [1, 2],\n \"nominal\": [1, 3]},\n keep_date_col=keep_date_col)\n expected = DataFrame([\n [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56),\n \"KORD\", \"19990127\", \" 19:00:00\", \" 18:56:00\",\n 0.81, 2.81, 7.2, 0.0, 280.0],\n [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56),\n \"KORD\", \"19990127\", \" 20:00:00\", \" 19:56:00\",\n 0.01, 2.21, 7.2, 0.0, 260.0],\n [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 20, 56),\n \"KORD\", \"19990127\", \" 21:00:00\", \" 20:56:00\",\n -0.59, 2.21, 5.7, 0.0, 280.0],\n [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 18),\n \"KORD\", \"19990127\", \" 21:00:00\", \" 21:18:00\",\n -0.99, 2.01, 3.6, 0.0, 270.0],\n [datetime(1999, 1, 27, 22, 0), datetime(1999, 1, 27, 21, 56),\n \"KORD\", \"19990127\", \" 22:00:00\", \" 21:56:00\",\n -0.59, 1.71, 5.1, 0.0, 290.0],\n [datetime(1999, 1, 27, 23, 0), datetime(1999, 1, 27, 22, 56),\n \"KORD\", \"19990127\", \" 23:00:00\", \" 22:56:00\",\n -0.59, 1.71, 4.6, 0.0, 280.0],\n ], columns=[\"actual\", \"nominal\", \"X0\", \"X1\", \"X2\",\n \"X3\", \"X4\", \"X5\", \"X6\", \"X7\", \"X8\"])\n\n if not keep_date_col:\n expected = expected.drop([\"X1\", \"X2\", \"X3\"], axis=1)\n elif parser.engine == \"python\":\n expected[\"X1\"] = expected[\"X1\"].astype(np.int64)\n\n # Python can sometimes be flaky about how\n # the aggregated columns are entered, so\n 
# this standardizes the order.\n result = result[expected.columns]\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"keep_date_col\", [True, False])\ndef test_multiple_date_col(all_parsers, keep_date_col):\n data = \"\"\"\\\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\n\"\"\"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), header=None,\n prefix=\"X\", parse_dates=[[1, 2], [1, 3]],\n keep_date_col=keep_date_col)\n expected = DataFrame([\n [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56),\n \"KORD\", \"19990127\", \" 19:00:00\", \" 18:56:00\",\n 0.81, 2.81, 7.2, 0.0, 280.0],\n [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56),\n \"KORD\", \"19990127\", \" 20:00:00\", \" 19:56:00\",\n 0.01, 2.21, 7.2, 0.0, 260.0],\n [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 20, 56),\n \"KORD\", \"19990127\", \" 21:00:00\", \" 20:56:00\",\n -0.59, 2.21, 5.7, 0.0, 280.0],\n [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 18),\n \"KORD\", \"19990127\", \" 21:00:00\", \" 21:18:00\",\n -0.99, 2.01, 3.6, 0.0, 270.0],\n [datetime(1999, 1, 27, 22, 0), datetime(1999, 1, 27, 21, 56),\n \"KORD\", \"19990127\", \" 22:00:00\", \" 21:56:00\",\n -0.59, 1.71, 5.1, 0.0, 290.0],\n [datetime(1999, 1, 27, 23, 0), datetime(1999, 1, 27, 22, 56),\n \"KORD\", \"19990127\", \" 23:00:00\", \" 22:56:00\",\n -0.59, 1.71, 4.6, 0.0, 280.0],\n ], columns=[\"X1_X2\", \"X1_X3\", \"X0\", \"X1\", \"X2\",\n \"X3\", \"X4\", \"X5\", \"X6\", \"X7\", \"X8\"])\n\n if not keep_date_col:\n expected = expected.drop([\"X1\", \"X2\", \"X3\"], axis=1)\n elif parser.engine == \"python\":\n expected[\"X1\"] = expected[\"X1\"].astype(np.int64)\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_date_col_as_index_col(all_parsers):\n data = \"\"\"\\\nKORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\n\"\"\"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), header=None, prefix=\"X\",\n parse_dates=[1], index_col=1)\n\n index = Index([datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 20, 0),\n datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 0),\n datetime(1999, 1, 27, 22, 0)], name=\"X1\")\n expected = DataFrame([\n [\"KORD\", \" 18:56:00\", 0.81, 2.81, 7.2, 0.0, 280.0],\n [\"KORD\", \" 19:56:00\", 0.01, 2.21, 7.2, 0.0, 260.0],\n [\"KORD\", \" 20:56:00\", -0.59, 2.21, 5.7, 0.0, 280.0],\n [\"KORD\", \" 21:18:00\", -0.99, 2.01, 3.6, 0.0, 270.0],\n [\"KORD\", \" 21:56:00\", -0.59, 1.71, 5.1, 0.0, 290.0],\n ], columns=[\"X0\", \"X2\", \"X3\", \"X4\", \"X5\", \"X6\", \"X7\"], index=index)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_multiple_date_cols_int_cast(all_parsers):\n data = (\"KORD,19990127, 19:00:00, 18:56:00, 0.8100\\n\"\n \"KORD,19990127, 20:00:00, 19:56:00, 0.0100\\n\"\n \"KORD,19990127, 21:00:00, 
20:56:00, -0.5900\\n\"\n \"KORD,19990127, 21:00:00, 21:18:00, -0.9900\\n\"\n \"KORD,19990127, 22:00:00, 21:56:00, -0.5900\\n\"\n \"KORD,19990127, 23:00:00, 22:56:00, -0.5900\")\n parse_dates = {\"actual\": [1, 2], \"nominal\": [1, 3]}\n parser = all_parsers\n\n result = parser.read_csv(StringIO(data), header=None,\n date_parser=conv.parse_date_time,\n parse_dates=parse_dates, prefix=\"X\")\n expected = DataFrame([\n [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56),\n \"KORD\", 0.81],\n [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56),\n \"KORD\", 0.01],\n [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 20, 56),\n \"KORD\", -0.59],\n [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 18),\n \"KORD\", -0.99],\n [datetime(1999, 1, 27, 22, 0), datetime(1999, 1, 27, 21, 56),\n \"KORD\", -0.59],\n [datetime(1999, 1, 27, 23, 0), datetime(1999, 1, 27, 22, 56),\n \"KORD\", -0.59],\n ], columns=[\"actual\", \"nominal\", \"X0\", \"X4\"])\n\n # Python can sometimes be flaky about how\n # the aggregated columns are entered, so\n # this standardizes the order.\n result = result[expected.columns]\n tm.assert_frame_equal(result, expected)\n\n\ndef test_multiple_date_col_timestamp_parse(all_parsers):\n parser = all_parsers\n data = \"\"\"05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25\n05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25\"\"\"\n\n result = parser.read_csv(StringIO(data), parse_dates=[[0, 1]],\n header=None, date_parser=Timestamp)\n expected = DataFrame([\n [Timestamp(\"05/31/2012, 15:30:00.029\"),\n 1306.25, 1, \"E\", 0, np.nan, 1306.25],\n [Timestamp(\"05/31/2012, 15:30:00.029\"),\n 1306.25, 8, \"E\", 0, np.nan, 1306.25]\n ], columns=[\"0_1\", 2, 3, 4, 5, 6, 7])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_multiple_date_cols_with_header(all_parsers):\n parser = all_parsers\n data = \"\"\"\\\nID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\"\"\"\n\n result = parser.read_csv(StringIO(data), parse_dates={\"nominal\": [1, 2]})\n expected = DataFrame([\n [datetime(1999, 1, 27, 19, 0), \"KORD\", \" 18:56:00\",\n 0.81, 2.81, 7.2, 0.0, 280.0],\n [datetime(1999, 1, 27, 20, 0), \"KORD\", \" 19:56:00\",\n 0.01, 2.21, 7.2, 0.0, 260.0],\n [datetime(1999, 1, 27, 21, 0), \"KORD\", \" 20:56:00\",\n -0.59, 2.21, 5.7, 0.0, 280.0],\n [datetime(1999, 1, 27, 21, 0), \"KORD\", \" 21:18:00\",\n -0.99, 2.01, 3.6, 0.0, 270.0],\n [datetime(1999, 1, 27, 22, 0), \"KORD\", \" 21:56:00\",\n -0.59, 1.71, 5.1, 0.0, 290.0],\n [datetime(1999, 1, 27, 23, 0), \"KORD\", \" 22:56:00\",\n -0.59, 1.71, 4.6, 0.0, 280.0],\n ], columns=[\"nominal\", \"ID\", \"ActualTime\", \"TDew\",\n \"TAir\", \"Windspeed\", \"Precip\", \"WindDir\"])\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"data,parse_dates,msg\", [\n (\"\"\"\\\ndate_NominalTime,date,NominalTime\nKORD1,19990127, 19:00:00\nKORD2,19990127, 20:00:00\"\"\", [[1, 2]], (\"New date column already \"\n \"in dict date_NominalTime\")),\n (\"\"\"\\\nID,date,nominalTime\nKORD,19990127, 19:00:00\nKORD,19990127, 20:00:00\"\"\", dict(ID=[1, 2]), \"Date 
column ID already in dict\")\n])\ndef test_multiple_date_col_name_collision(all_parsers, data, parse_dates, msg):\n parser = all_parsers\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), parse_dates=parse_dates)\n\n\ndef test_date_parser_int_bug(all_parsers):\n # see gh-3071\n parser = all_parsers\n data = (\"posix_timestamp,elapsed,sys,user,queries,query_time,rows,\"\n \"accountid,userid,contactid,level,silo,method\\n\"\n \"1343103150,0.062353,0,4,6,0.01690,3,\"\n \"12345,1,-1,3,invoice_InvoiceResource,search\\n\")\n\n result = parser.read_csv(\n StringIO(data), index_col=0, parse_dates=[0],\n date_parser=lambda x: datetime.utcfromtimestamp(int(x)))\n expected = DataFrame([[0.062353, 0, 4, 6, 0.01690, 3, 12345, 1, -1,\n 3, \"invoice_InvoiceResource\", \"search\"]],\n columns=[\"elapsed\", \"sys\", \"user\", \"queries\",\n \"query_time\", \"rows\", \"accountid\",\n \"userid\", \"contactid\", \"level\",\n \"silo\", \"method\"],\n index=Index([Timestamp(\"2012-07-24 04:12:30\")],\n name=\"posix_timestamp\"))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_nat_parse(all_parsers):\n # see gh-3062\n parser = all_parsers\n df = DataFrame(dict({\"A\": np.asarray(lrange(10), dtype=\"float64\"),\n \"B\": pd.Timestamp(\"20010101\")}))\n df.iloc[3:6, :] = np.nan\n\n with tm.ensure_clean(\"__nat_parse_.csv\") as path:\n df.to_csv(path)\n\n result = parser.read_csv(path, index_col=0, parse_dates=[\"B\"])\n tm.assert_frame_equal(result, df)\n\n\ndef test_csv_custom_parser(all_parsers):\n data = \"\"\"A,B,C\n20090101,a,1,2\n20090102,b,3,4\n20090103,c,4,5\n\"\"\"\n parser = all_parsers\n result = parser.read_csv(\n StringIO(data),\n date_parser=lambda x: datetime.strptime(x, \"%Y%m%d\"))\n expected = parser.read_csv(StringIO(data), parse_dates=True)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_dates_implicit_first_col(all_parsers):\n data = \"\"\"A,B,C\n20090101,a,1,2\n20090102,b,3,4\n20090103,c,4,5\n\"\"\"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), parse_dates=True)\n\n expected = parser.read_csv(StringIO(data), index_col=0,\n parse_dates=True)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_dates_string(all_parsers):\n data = \"\"\"date,A,B,C\n20090101,a,1,2\n20090102,b,3,4\n20090103,c,4,5\n\"\"\"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), index_col=\"date\",\n parse_dates=[\"date\"])\n index = date_range(\"1/1/2009\", periods=3)\n index.name = \"date\"\n\n expected = DataFrame({\"A\": [\"a\", \"b\", \"c\"], \"B\": [1, 3, 4],\n \"C\": [2, 4, 5]}, index=index)\n tm.assert_frame_equal(result, expected)\n\n\n# Bug in https://github.com/dateutil/dateutil/issues/217\n# has been addressed, but we just don't pass in the `yearfirst`\[email protected](reason=\"yearfirst is not surfaced in read_*\")\[email protected](\"parse_dates\", [\n [[\"date\", \"time\"]],\n [[0, 1]]\n])\ndef test_yy_format_with_year_first(all_parsers, parse_dates):\n data = \"\"\"date,time,B,C\n090131,0010,1,2\n090228,1020,3,4\n090331,0830,5,6\n\"\"\"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), index_col=0,\n parse_dates=parse_dates)\n index = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),\n datetime(2009, 2, 28, 10, 20, 0),\n datetime(2009, 3, 31, 8, 30, 0)],\n dtype=object, name=\"date_time\")\n expected = DataFrame({\"B\": [1, 3, 5], \"C\": [2, 4, 6]}, index=index)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"parse_dates\", [[0, 2], [\"a\", \"c\"]])\ndef 
test_parse_dates_column_list(all_parsers, parse_dates):\n data = \"a,b,c\\n01/01/2010,1,15/02/2010\"\n parser = all_parsers\n\n expected = DataFrame({\"a\": [datetime(2010, 1, 1)], \"b\": [1],\n \"c\": [datetime(2010, 2, 15)]})\n expected = expected.set_index([\"a\", \"b\"])\n\n result = parser.read_csv(StringIO(data), index_col=[0, 1],\n parse_dates=parse_dates, dayfirst=True)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"index_col\", [[0, 1], [1, 0]])\ndef test_multi_index_parse_dates(all_parsers, index_col):\n data = \"\"\"index1,index2,A,B,C\n20090101,one,a,1,2\n20090101,two,b,3,4\n20090101,three,c,4,5\n20090102,one,a,1,2\n20090102,two,b,3,4\n20090102,three,c,4,5\n20090103,one,a,1,2\n20090103,two,b,3,4\n20090103,three,c,4,5\n\"\"\"\n parser = all_parsers\n index = MultiIndex.from_product([\n (datetime(2009, 1, 1), datetime(2009, 1, 2),\n datetime(2009, 1, 3)), (\"one\", \"two\", \"three\")],\n names=[\"index1\", \"index2\"])\n\n # Out of order.\n if index_col == [1, 0]:\n index = index.swaplevel(0, 1)\n\n expected = DataFrame([[\"a\", 1, 2], [\"b\", 3, 4], [\"c\", 4, 5],\n [\"a\", 1, 2], [\"b\", 3, 4], [\"c\", 4, 5],\n [\"a\", 1, 2], [\"b\", 3, 4], [\"c\", 4, 5]],\n columns=[\"A\", \"B\", \"C\"], index=index)\n result = parser.read_csv(StringIO(data), index_col=index_col,\n parse_dates=True)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"kwargs\", [\n dict(dayfirst=True), dict(day_first=True)\n])\ndef test_parse_dates_custom_euro_format(all_parsers, kwargs):\n parser = all_parsers\n data = \"\"\"foo,bar,baz\n31/01/2010,1,2\n01/02/2010,1,NA\n02/02/2010,1,2\n\"\"\"\n if \"dayfirst\" in kwargs:\n df = parser.read_csv(StringIO(data), names=[\"time\", \"Q\", \"NTU\"],\n date_parser=lambda d: parse_date(d, **kwargs),\n header=0, index_col=0, parse_dates=True,\n na_values=[\"NA\"])\n exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),\n datetime(2010, 2, 2)], name=\"time\")\n expected = DataFrame({\"Q\": [1, 1, 1], \"NTU\": [2, np.nan, 2]},\n index=exp_index, columns=[\"Q\", \"NTU\"])\n tm.assert_frame_equal(df, expected)\n else:\n msg = \"got an unexpected keyword argument 'day_first'\"\n with pytest.raises(TypeError, match=msg):\n parser.read_csv(StringIO(data), names=[\"time\", \"Q\", \"NTU\"],\n date_parser=lambda d: parse_date(d, **kwargs),\n skiprows=[0], index_col=0, parse_dates=True,\n na_values=[\"NA\"])\n\n\ndef test_parse_tz_aware(all_parsers):\n # See gh-1693\n parser = all_parsers\n data = \"Date,x\\n2012-06-13T01:39:00Z,0.5\"\n\n result = parser.read_csv(StringIO(data), index_col=0,\n parse_dates=True)\n expected = DataFrame({\"x\": [0.5]}, index=Index([Timestamp(\n \"2012-06-13 01:39:00+00:00\")], name=\"Date\"))\n tm.assert_frame_equal(result, expected)\n assert result.index.tz is pytz.utc\n\n\[email protected](\"parse_dates,index_col\", [\n ({\"nominal\": [1, 2]}, \"nominal\"),\n ({\"nominal\": [1, 2]}, 0),\n ([[1, 2]], 0),\n])\ndef test_multiple_date_cols_index(all_parsers, parse_dates, index_col):\n parser = all_parsers\n data = \"\"\"\nID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\nKORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD6,19990127, 23:00:00, 22:56:00, 
-0.5900, 1.7100, 4.6000, 0.0000, 280.0000\n\"\"\"\n expected = DataFrame([\n [datetime(1999, 1, 27, 19, 0), \"KORD1\", \" 18:56:00\",\n 0.81, 2.81, 7.2, 0.0, 280.0],\n [datetime(1999, 1, 27, 20, 0), \"KORD2\", \" 19:56:00\",\n 0.01, 2.21, 7.2, 0.0, 260.0],\n [datetime(1999, 1, 27, 21, 0), \"KORD3\", \" 20:56:00\",\n -0.59, 2.21, 5.7, 0.0, 280.0],\n [datetime(1999, 1, 27, 21, 0), \"KORD4\", \" 21:18:00\",\n -0.99, 2.01, 3.6, 0.0, 270.0],\n [datetime(1999, 1, 27, 22, 0), \"KORD5\", \" 21:56:00\",\n -0.59, 1.71, 5.1, 0.0, 290.0],\n [datetime(1999, 1, 27, 23, 0), \"KORD6\", \" 22:56:00\",\n -0.59, 1.71, 4.6, 0.0, 280.0],\n ], columns=[\"nominal\", \"ID\", \"ActualTime\", \"TDew\",\n \"TAir\", \"Windspeed\", \"Precip\", \"WindDir\"])\n expected = expected.set_index(\"nominal\")\n\n if not isinstance(parse_dates, dict):\n expected.index.name = \"date_NominalTime\"\n\n result = parser.read_csv(StringIO(data), parse_dates=parse_dates,\n index_col=index_col)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_multiple_date_cols_chunked(all_parsers):\n parser = all_parsers\n data = \"\"\"\\\nID,date,nominalTime,actualTime,A,B,C,D,E\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\n\"\"\"\n\n expected = DataFrame([\n [datetime(1999, 1, 27, 19, 0), \"KORD\", \" 18:56:00\",\n 0.81, 2.81, 7.2, 0.0, 280.0],\n [datetime(1999, 1, 27, 20, 0), \"KORD\", \" 19:56:00\",\n 0.01, 2.21, 7.2, 0.0, 260.0],\n [datetime(1999, 1, 27, 21, 0), \"KORD\", \" 20:56:00\",\n -0.59, 2.21, 5.7, 0.0, 280.0],\n [datetime(1999, 1, 27, 21, 0), \"KORD\", \" 21:18:00\",\n -0.99, 2.01, 3.6, 0.0, 270.0],\n [datetime(1999, 1, 27, 22, 0), \"KORD\", \" 21:56:00\",\n -0.59, 1.71, 5.1, 0.0, 290.0],\n [datetime(1999, 1, 27, 23, 0), \"KORD\", \" 22:56:00\",\n -0.59, 1.71, 4.6, 0.0, 280.0],\n ], columns=[\"nominal\", \"ID\", \"actualTime\", \"A\", \"B\", \"C\", \"D\", \"E\"])\n expected = expected.set_index(\"nominal\")\n\n reader = parser.read_csv(StringIO(data), parse_dates={\"nominal\": [1, 2]},\n index_col=\"nominal\", chunksize=2)\n chunks = list(reader)\n\n tm.assert_frame_equal(chunks[0], expected[:2])\n tm.assert_frame_equal(chunks[1], expected[2:4])\n tm.assert_frame_equal(chunks[2], expected[4:])\n\n\ndef test_multiple_date_col_named_index_compat(all_parsers):\n parser = all_parsers\n data = \"\"\"\\\nID,date,nominalTime,actualTime,A,B,C,D,E\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\n\"\"\"\n\n with_indices = parser.read_csv(StringIO(data),\n parse_dates={\"nominal\": [1, 2]},\n index_col=\"nominal\")\n with_names = parser.read_csv(StringIO(data), index_col=\"nominal\",\n parse_dates={\"nominal\": [\n \"date\", \"nominalTime\"]})\n tm.assert_frame_equal(with_indices, with_names)\n\n\ndef 
test_multiple_date_col_multiple_index_compat(all_parsers):\n parser = all_parsers\n data = \"\"\"\\\nID,date,nominalTime,actualTime,A,B,C,D,E\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\n\"\"\"\n result = parser.read_csv(StringIO(data), index_col=[\"nominal\", \"ID\"],\n parse_dates={\"nominal\": [1, 2]})\n expected = parser.read_csv(StringIO(data),\n parse_dates={\"nominal\": [1, 2]})\n\n expected = expected.set_index([\"nominal\", \"ID\"])\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"kwargs\", [dict(), dict(index_col=\"C\")])\ndef test_read_with_parse_dates_scalar_non_bool(all_parsers, kwargs):\n # see gh-5636\n parser = all_parsers\n msg = (\"Only booleans, lists, and dictionaries \"\n \"are accepted for the 'parse_dates' parameter\")\n data = \"\"\"A,B,C\n 1,2,2003-11-1\"\"\"\n\n with pytest.raises(TypeError, match=msg):\n parser.read_csv(StringIO(data), parse_dates=\"C\", **kwargs)\n\n\[email protected](\"parse_dates\", [\n (1,), np.array([4, 5]), {1, 3, 3}\n])\ndef test_read_with_parse_dates_invalid_type(all_parsers, parse_dates):\n parser = all_parsers\n msg = (\"Only booleans, lists, and dictionaries \"\n \"are accepted for the 'parse_dates' parameter\")\n data = \"\"\"A,B,C\n 1,2,2003-11-1\"\"\"\n\n with pytest.raises(TypeError, match=msg):\n parser.read_csv(StringIO(data), parse_dates=(1,))\n\n\ndef test_parse_dates_empty_string(all_parsers):\n # see gh-2263\n parser = all_parsers\n data = \"Date,test\\n2012-01-01,1\\n,2\"\n result = parser.read_csv(StringIO(data), parse_dates=[\"Date\"],\n na_filter=False)\n\n expected = DataFrame([[datetime(2012, 1, 1), 1], [pd.NaT, 2]],\n columns=[\"Date\", \"test\"])\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"data,kwargs,expected\", [\n (\"a\\n04.15.2016\", dict(parse_dates=[\"a\"]),\n DataFrame([datetime(2016, 4, 15)], columns=[\"a\"])),\n (\"a\\n04.15.2016\", dict(parse_dates=True, index_col=0),\n DataFrame(index=DatetimeIndex([\"2016-04-15\"], name=\"a\"))),\n (\"a,b\\n04.15.2016,09.16.2013\", dict(parse_dates=[\"a\", \"b\"]),\n DataFrame([[datetime(2016, 4, 15), datetime(2013, 9, 16)]],\n columns=[\"a\", \"b\"])),\n (\"a,b\\n04.15.2016,09.16.2013\", dict(parse_dates=True, index_col=[0, 1]),\n DataFrame(index=MultiIndex.from_tuples(\n [(datetime(2016, 4, 15), datetime(2013, 9, 16))], names=[\"a\", \"b\"]))),\n])\ndef test_parse_dates_no_convert_thousands(all_parsers, data, kwargs, expected):\n # see gh-14066\n parser = all_parsers\n\n result = parser.read_csv(StringIO(data), thousands=\".\", **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_date_time_multi_level_column_name(all_parsers):\n data = \"\"\"\\\nD,T,A,B\ndate, time,a,b\n2001-01-05, 09:00:00, 0.0, 10.\n2001-01-06, 00:00:00, 1.0, 11.\n\"\"\"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), header=[0, 1],\n parse_dates={\"date_time\": [0, 1]},\n date_parser=conv.parse_date_time)\n\n expected_data = [[datetime(2001, 1, 5, 9, 0, 0), 0., 10.],\n [datetime(2001, 1, 6, 0, 0, 0), 1., 11.]]\n expected = DataFrame(expected_data,\n columns=[\"date_time\", (\"A\", \"a\"), (\"B\", \"b\")])\n 
tm.assert_frame_equal(result, expected)\n\n\[email protected](\"data,kwargs,expected\", [\n (\"\"\"\\\ndate,time,a,b\n2001-01-05, 10:00:00, 0.0, 10.\n2001-01-05, 00:00:00, 1., 11.\n\"\"\", dict(header=0, parse_dates={\"date_time\": [0, 1]}),\n DataFrame([[datetime(2001, 1, 5, 10, 0, 0), 0.0, 10],\n [datetime(2001, 1, 5, 0, 0, 0), 1.0, 11.0]],\n columns=[\"date_time\", \"a\", \"b\"])),\n ((\"KORD,19990127, 19:00:00, 18:56:00, 0.8100\\n\"\n \"KORD,19990127, 20:00:00, 19:56:00, 0.0100\\n\"\n \"KORD,19990127, 21:00:00, 20:56:00, -0.5900\\n\"\n \"KORD,19990127, 21:00:00, 21:18:00, -0.9900\\n\"\n \"KORD,19990127, 22:00:00, 21:56:00, -0.5900\\n\"\n \"KORD,19990127, 23:00:00, 22:56:00, -0.5900\"),\n dict(header=None, parse_dates={\"actual\": [1, 2], \"nominal\": [1, 3]}),\n DataFrame([\n [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56),\n \"KORD\", 0.81],\n [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56),\n \"KORD\", 0.01],\n [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 20, 56),\n \"KORD\", -0.59],\n [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 18),\n \"KORD\", -0.99],\n [datetime(1999, 1, 27, 22, 0), datetime(1999, 1, 27, 21, 56),\n \"KORD\", -0.59],\n [datetime(1999, 1, 27, 23, 0), datetime(1999, 1, 27, 22, 56),\n \"KORD\", -0.59]], columns=[\"actual\", \"nominal\", 0, 4])),\n])\ndef test_parse_date_time(all_parsers, data, kwargs, expected):\n parser = all_parsers\n result = parser.read_csv(StringIO(data), date_parser=conv.parse_date_time,\n **kwargs)\n\n # Python can sometimes be flaky about how\n # the aggregated columns are entered, so\n # this standardizes the order.\n result = result[expected.columns]\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_date_fields(all_parsers):\n parser = all_parsers\n data = (\"year,month,day,a\\n2001,01,10,10.\\n\"\n \"2001,02,1,11.\")\n result = parser.read_csv(StringIO(data), header=0,\n parse_dates={\"ymd\": [0, 1, 2]},\n date_parser=conv.parse_date_fields)\n\n expected = DataFrame([[datetime(2001, 1, 10), 10.],\n [datetime(2001, 2, 1), 11.]], columns=[\"ymd\", \"a\"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_date_all_fields(all_parsers):\n parser = all_parsers\n data = \"\"\"\\\nyear,month,day,hour,minute,second,a,b\n2001,01,05,10,00,0,0.0,10.\n2001,01,5,10,0,00,1.,11.\n\"\"\"\n result = parser.read_csv(StringIO(data), header=0,\n date_parser=conv.parse_all_fields,\n parse_dates={\"ymdHMS\": [0, 1, 2, 3, 4, 5]})\n expected = DataFrame([[datetime(2001, 1, 5, 10, 0, 0), 0.0, 10.0],\n [datetime(2001, 1, 5, 10, 0, 0), 1.0, 11.0]],\n columns=[\"ymdHMS\", \"a\", \"b\"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_datetime_fractional_seconds(all_parsers):\n parser = all_parsers\n data = \"\"\"\\\nyear,month,day,hour,minute,second,a,b\n2001,01,05,10,00,0.123456,0.0,10.\n2001,01,5,10,0,0.500000,1.,11.\n\"\"\"\n result = parser.read_csv(StringIO(data), header=0,\n date_parser=conv.parse_all_fields,\n parse_dates={\"ymdHMS\": [0, 1, 2, 3, 4, 5]})\n expected = DataFrame([[datetime(2001, 1, 5, 10, 0, 0,\n microsecond=123456), 0.0, 10.0],\n [datetime(2001, 1, 5, 10, 0, 0,\n microsecond=500000), 1.0, 11.0]],\n columns=[\"ymdHMS\", \"a\", \"b\"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_generic(all_parsers):\n parser = all_parsers\n data = \"year,month,day,a\\n2001,01,10,10.\\n2001,02,1,11.\"\n\n result = parser.read_csv(StringIO(data), header=0,\n parse_dates={\"ym\": [0, 1]},\n date_parser=lambda y, m: date(year=int(y),\n month=int(m),\n day=1))\n expected = 
DataFrame([[date(2001, 1, 1), 10, 10.],\n [date(2001, 2, 1), 1, 11.]],\n columns=[\"ym\", \"day\", \"a\"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_date_parser_resolution_if_not_ns(all_parsers):\n # see gh-10245\n parser = all_parsers\n data = \"\"\"\\\ndate,time,prn,rxstatus\n2013-11-03,19:00:00,126,00E80000\n2013-11-03,19:00:00,23,00E80000\n2013-11-03,19:00:00,13,00E80000\n\"\"\"\n\n def date_parser(dt, time):\n return np_array_datetime64_compat(dt + \"T\" + time + \"Z\",\n dtype=\"datetime64[s]\")\n\n result = parser.read_csv(StringIO(data), date_parser=date_parser,\n parse_dates={\"datetime\": [\"date\", \"time\"]},\n index_col=[\"datetime\", \"prn\"])\n\n datetimes = np_array_datetime64_compat([\"2013-11-03T19:00:00Z\"] * 3,\n dtype=\"datetime64[s]\")\n expected = DataFrame(data={\"rxstatus\": [\"00E80000\"] * 3},\n index=MultiIndex.from_tuples(\n [(datetimes[0], 126), (datetimes[1], 23),\n (datetimes[2], 13)], names=[\"datetime\", \"prn\"]))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_date_column_with_empty_string(all_parsers):\n # see gh-6428\n parser = all_parsers\n data = \"case,opdate\\n7,10/18/2006\\n7,10/18/2008\\n621, \"\n result = parser.read_csv(StringIO(data), parse_dates=[\"opdate\"])\n\n expected_data = [[7, \"10/18/2006\"],\n [7, \"10/18/2008\"],\n [621, \" \"]]\n expected = DataFrame(expected_data, columns=[\"case\", \"opdate\"])\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"data,expected\", [\n (\"a\\n135217135789158401\\n1352171357E+5\",\n DataFrame({\"a\": [135217135789158401,\n 135217135700000]}, dtype=\"float64\")),\n (\"a\\n99999999999\\n123456789012345\\n1234E+0\",\n DataFrame({\"a\": [99999999999,\n 123456789012345,\n 1234]}, dtype=\"float64\"))\n])\[email protected](\"parse_dates\", [True, False])\ndef test_parse_date_float(all_parsers, data, expected, parse_dates):\n # see gh-2697\n #\n # Date parsing should fail, so we leave the data untouched\n # (i.e. float precision should remain unchanged).\n parser = all_parsers\n\n result = parser.read_csv(StringIO(data), parse_dates=parse_dates)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_timezone(all_parsers):\n # see gh-22256\n parser = all_parsers\n data = \"\"\"dt,val\n 2018-01-04 09:01:00+09:00,23350\n 2018-01-04 09:02:00+09:00,23400\n 2018-01-04 09:03:00+09:00,23400\n 2018-01-04 09:04:00+09:00,23400\n 2018-01-04 09:05:00+09:00,23400\"\"\"\n result = parser.read_csv(StringIO(data), parse_dates=[\"dt\"])\n\n dti = pd.date_range(start=\"2018-01-04 09:01:00\",\n end=\"2018-01-04 09:05:00\", freq=\"1min\",\n tz=pytz.FixedOffset(540))\n expected_data = {\"dt\": dti, \"val\": [23350, 23400, 23400, 23400, 23400]}\n\n expected = DataFrame(expected_data)\n tm.assert_frame_equal(result, expected)\n",
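The tests in the record above exercise pandas' ability to merge several raw columns into one parsed datetime column via `parse_dates`. As a minimal, self-contained sketch of the behaviour under test (independent of the test harness; it assumes a pandas version where `parse_dates` accepts a dict mapping a new column name to the positions of the columns to combine):

```python
from io import StringIO

import pandas as pd

data = (
    "ID,date,nominalTime,A\n"
    "KORD,19990127, 19:00:00, 0.81\n"
    "KORD,19990127, 20:00:00, 0.01\n"
)

# parse_dates as a dict merges the listed column positions (here "date" and
# "nominalTime") into a single datetime column named "nominal"; index_col may
# then refer to that newly created column, as the tests above do.
df = pd.read_csv(StringIO(data),
                 parse_dates={"nominal": [1, 2]},
                 index_col="nominal")
print(df.index)  # DatetimeIndex(['1999-01-27 19:00:00', '1999-01-27 20:00:00'], ...)
```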
"\"\"\"\nA module providing some utility functions regarding bezier path manipulation.\n\"\"\"\n\nimport warnings\n\nimport numpy as np\nfrom matplotlib.path import Path\n\n\nclass NonIntersectingPathException(ValueError):\n pass\n\n# some functions\n\n\ndef get_intersection(cx1, cy1, cos_t1, sin_t1,\n cx2, cy2, cos_t2, sin_t2):\n \"\"\" return a intersecting point between a line through (cx1, cy1)\n and having angle t1 and a line through (cx2, cy2) and angle t2.\n \"\"\"\n\n # line1 => sin_t1 * (x - cx1) - cos_t1 * (y - cy1) = 0.\n # line1 => sin_t1 * x + cos_t1 * y = sin_t1*cx1 - cos_t1*cy1\n\n line1_rhs = sin_t1 * cx1 - cos_t1 * cy1\n line2_rhs = sin_t2 * cx2 - cos_t2 * cy2\n\n # rhs matrix\n a, b = sin_t1, -cos_t1\n c, d = sin_t2, -cos_t2\n\n ad_bc = a * d - b * c\n if np.abs(ad_bc) < 1.0e-12:\n raise ValueError(\"Given lines do not intersect. Please verify that \"\n \"the angles are not equal or differ by 180 degrees.\")\n\n # rhs_inverse\n a_, b_ = d, -b\n c_, d_ = -c, a\n a_, b_, c_, d_ = [k / ad_bc for k in [a_, b_, c_, d_]]\n\n x = a_ * line1_rhs + b_ * line2_rhs\n y = c_ * line1_rhs + d_ * line2_rhs\n\n return x, y\n\n\ndef get_normal_points(cx, cy, cos_t, sin_t, length):\n \"\"\"\n For a line passing through (*cx*, *cy*) and having a angle *t*, return\n locations of the two points located along its perpendicular line at the\n distance of *length*.\n \"\"\"\n\n if length == 0.:\n return cx, cy, cx, cy\n\n cos_t1, sin_t1 = sin_t, -cos_t\n cos_t2, sin_t2 = -sin_t, cos_t\n\n x1, y1 = length * cos_t1 + cx, length * sin_t1 + cy\n x2, y2 = length * cos_t2 + cx, length * sin_t2 + cy\n\n return x1, y1, x2, y2\n\n\n# BEZIER routines\n\n# subdividing bezier curve\n# http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/Bezier/bezier-sub.html\n\n\ndef _de_casteljau1(beta, t):\n next_beta = beta[:-1] * (1 - t) + beta[1:] * t\n return next_beta\n\n\ndef split_de_casteljau(beta, t):\n \"\"\"split a bezier segment defined by its controlpoints *beta*\n into two separate segment divided at *t* and return their control points.\n\n \"\"\"\n beta = np.asarray(beta)\n beta_list = [beta]\n while True:\n beta = _de_casteljau1(beta, t)\n beta_list.append(beta)\n if len(beta) == 1:\n break\n left_beta = [beta[0] for beta in beta_list]\n right_beta = [beta[-1] for beta in reversed(beta_list)]\n\n return left_beta, right_beta\n\n\n# FIXME spelling mistake in the name of the parameter ``tolerence``\ndef find_bezier_t_intersecting_with_closedpath(bezier_point_at_t,\n inside_closedpath,\n t0=0., t1=1., tolerence=0.01):\n \"\"\" Find a parameter t0 and t1 of the given bezier path which\n bounds the intersecting points with a provided closed\n path(*inside_closedpath*). Search starts from *t0* and *t1* and it\n uses a simple bisecting algorithm therefore one of the end point\n must be inside the path while the orther doesn't. 
The search stop\n when |t0-t1| gets smaller than the given tolerence.\n value for\n\n - bezier_point_at_t : a function which returns x, y coordinates at *t*\n\n - inside_closedpath : return True if the point is inside the path\n\n \"\"\"\n # inside_closedpath : function\n\n start = bezier_point_at_t(t0)\n end = bezier_point_at_t(t1)\n\n start_inside = inside_closedpath(start)\n end_inside = inside_closedpath(end)\n\n if start_inside == end_inside and start != end:\n raise NonIntersectingPathException(\n \"Both points are on the same side of the closed path\")\n\n while True:\n\n # return if the distance is smaller than the tolerence\n if np.hypot(start[0] - end[0], start[1] - end[1]) < tolerence:\n return t0, t1\n\n # calculate the middle point\n middle_t = 0.5 * (t0 + t1)\n middle = bezier_point_at_t(middle_t)\n middle_inside = inside_closedpath(middle)\n\n if start_inside ^ middle_inside:\n t1 = middle_t\n end = middle\n end_inside = middle_inside\n else:\n t0 = middle_t\n start = middle\n start_inside = middle_inside\n\n\nclass BezierSegment(object):\n \"\"\"\n A simple class of a 2-dimensional bezier segment\n \"\"\"\n\n # Higher order bezier lines can be supported by simplying adding\n # corresponding values.\n _binom_coeff = {1: np.array([1., 1.]),\n 2: np.array([1., 2., 1.]),\n 3: np.array([1., 3., 3., 1.])}\n\n def __init__(self, control_points):\n \"\"\"\n *control_points* : location of contol points. It needs have a\n shpae of n * 2, where n is the order of the bezier line. 1<=\n n <= 3 is supported.\n \"\"\"\n _o = len(control_points)\n self._orders = np.arange(_o)\n\n _coeff = BezierSegment._binom_coeff[_o - 1]\n xx, yy = np.asarray(control_points).T\n self._px = xx * _coeff\n self._py = yy * _coeff\n\n def point_at_t(self, t):\n \"evaluate a point at t\"\n tt = ((1 - t) ** self._orders)[::-1] * t ** self._orders\n _x = np.dot(tt, self._px)\n _y = np.dot(tt, self._py)\n return _x, _y\n\n\ndef split_bezier_intersecting_with_closedpath(bezier,\n inside_closedpath,\n tolerence=0.01):\n\n \"\"\"\n bezier : control points of the bezier segment\n inside_closedpath : a function which returns true if the point is inside\n the path\n \"\"\"\n\n bz = BezierSegment(bezier)\n bezier_point_at_t = bz.point_at_t\n\n t0, t1 = find_bezier_t_intersecting_with_closedpath(bezier_point_at_t,\n inside_closedpath,\n tolerence=tolerence)\n\n _left, _right = split_de_casteljau(bezier, (t0 + t1) / 2.)\n return _left, _right\n\n\ndef find_r_to_boundary_of_closedpath(inside_closedpath, xy,\n cos_t, sin_t,\n rmin=0., rmax=1., tolerence=0.01):\n \"\"\"\n Find a radius r (centered at *xy*) between *rmin* and *rmax* at\n which it intersect with the path.\n\n inside_closedpath : function\n cx, cy : center\n cos_t, sin_t : cosine and sine for the angle\n rmin, rmax :\n \"\"\"\n\n cx, cy = xy\n\n def _f(r):\n return cos_t * r + cx, sin_t * r + cy\n\n find_bezier_t_intersecting_with_closedpath(_f, inside_closedpath,\n t0=rmin, t1=rmax,\n tolerence=tolerence)\n\n# matplotlib specific\n\n\ndef split_path_inout(path, inside, tolerence=0.01, reorder_inout=False):\n \"\"\" divide a path into two segment at the point where inside(x, y)\n becomes False.\n \"\"\"\n\n path_iter = path.iter_segments()\n\n ctl_points, command = next(path_iter)\n begin_inside = inside(ctl_points[-2:]) # true if begin point is inside\n\n ctl_points_old = ctl_points\n\n concat = np.concatenate\n\n iold = 0\n i = 1\n\n for ctl_points, command in path_iter:\n iold = i\n i += len(ctl_points) // 2\n if inside(ctl_points[-2:]) != begin_inside:\n 
bezier_path = concat([ctl_points_old[-2:], ctl_points])\n break\n ctl_points_old = ctl_points\n else:\n raise ValueError(\"The path does not intersect with the patch\")\n\n bp = bezier_path.reshape((-1, 2))\n left, right = split_bezier_intersecting_with_closedpath(\n bp, inside, tolerence)\n if len(left) == 2:\n codes_left = [Path.LINETO]\n codes_right = [Path.MOVETO, Path.LINETO]\n elif len(left) == 3:\n codes_left = [Path.CURVE3, Path.CURVE3]\n codes_right = [Path.MOVETO, Path.CURVE3, Path.CURVE3]\n elif len(left) == 4:\n codes_left = [Path.CURVE4, Path.CURVE4, Path.CURVE4]\n codes_right = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]\n else:\n raise AssertionError(\"This should never be reached\")\n\n verts_left = left[1:]\n verts_right = right[:]\n\n if path.codes is None:\n path_in = Path(concat([path.vertices[:i], verts_left]))\n path_out = Path(concat([verts_right, path.vertices[i:]]))\n\n else:\n path_in = Path(concat([path.vertices[:iold], verts_left]),\n concat([path.codes[:iold], codes_left]))\n\n path_out = Path(concat([verts_right, path.vertices[i:]]),\n concat([codes_right, path.codes[i:]]))\n\n if reorder_inout and begin_inside is False:\n path_in, path_out = path_out, path_in\n\n return path_in, path_out\n\n\ndef inside_circle(cx, cy, r):\n r2 = r ** 2\n\n def _f(xy):\n x, y = xy\n return (x - cx) ** 2 + (y - cy) ** 2 < r2\n return _f\n\n\n# quadratic bezier lines\n\ndef get_cos_sin(x0, y0, x1, y1):\n dx, dy = x1 - x0, y1 - y0\n d = (dx * dx + dy * dy) ** .5\n # Account for divide by zero\n if d == 0:\n return 0.0, 0.0\n return dx / d, dy / d\n\n\ndef check_if_parallel(dx1, dy1, dx2, dy2, tolerence=1.e-5):\n \"\"\" returns\n * 1 if two lines are parralel in same direction\n * -1 if two lines are parralel in opposite direction\n * 0 otherwise\n \"\"\"\n theta1 = np.arctan2(dx1, dy1)\n theta2 = np.arctan2(dx2, dy2)\n dtheta = np.abs(theta1 - theta2)\n if dtheta < tolerence:\n return 1\n elif np.abs(dtheta - np.pi) < tolerence:\n return -1\n else:\n return False\n\n\ndef get_parallels(bezier2, width):\n \"\"\"\n Given the quadratic bezier control points *bezier2*, returns\n control points of quadratic bezier lines roughly parallel to given\n one separated by *width*.\n \"\"\"\n\n # The parallel bezier lines are constructed by following ways.\n # c1 and c2 are control points representing the begin and end of the\n # bezier line.\n # cm is the middle point\n\n c1x, c1y = bezier2[0]\n cmx, cmy = bezier2[1]\n c2x, c2y = bezier2[2]\n\n parallel_test = check_if_parallel(c1x - cmx, c1y - cmy,\n cmx - c2x, cmy - c2y)\n\n if parallel_test == -1:\n warnings.warn(\n \"Lines do not intersect. A straight line is used instead.\")\n cos_t1, sin_t1 = get_cos_sin(c1x, c1y, c2x, c2y)\n cos_t2, sin_t2 = cos_t1, sin_t1\n else:\n # t1 and t2 is the angle between c1 and cm, cm, c2. They are\n # also a angle of the tangential line of the path at c1 and c2\n cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)\n cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c2x, c2y)\n\n # find c1_left, c1_right which are located along the lines\n # through c1 and perpendicular to the tangential lines of the\n # bezier path at a distance of width. 
Same thing for c2_left and\n # c2_right with respect to c2.\n c1x_left, c1y_left, c1x_right, c1y_right = (\n get_normal_points(c1x, c1y, cos_t1, sin_t1, width)\n )\n c2x_left, c2y_left, c2x_right, c2y_right = (\n get_normal_points(c2x, c2y, cos_t2, sin_t2, width)\n )\n\n # find cm_left which is the intersectng point of a line through\n # c1_left with angle t1 and a line through c2_left with angle\n # t2. Same with cm_right.\n if parallel_test != 0:\n # a special case for a straight line, i.e., angle between two\n # lines are smaller than some (arbitrtay) value.\n cmx_left, cmy_left = (\n 0.5 * (c1x_left + c2x_left), 0.5 * (c1y_left + c2y_left)\n )\n cmx_right, cmy_right = (\n 0.5 * (c1x_right + c2x_right), 0.5 * (c1y_right + c2y_right)\n )\n else:\n cmx_left, cmy_left = get_intersection(c1x_left, c1y_left, cos_t1,\n sin_t1, c2x_left, c2y_left,\n cos_t2, sin_t2)\n\n cmx_right, cmy_right = get_intersection(c1x_right, c1y_right, cos_t1,\n sin_t1, c2x_right, c2y_right,\n cos_t2, sin_t2)\n\n # the parallel bezier lines are created with control points of\n # [c1_left, cm_left, c2_left] and [c1_right, cm_right, c2_right]\n path_left = [(c1x_left, c1y_left),\n (cmx_left, cmy_left),\n (c2x_left, c2y_left)]\n path_right = [(c1x_right, c1y_right),\n (cmx_right, cmy_right),\n (c2x_right, c2y_right)]\n\n return path_left, path_right\n\n\ndef find_control_points(c1x, c1y, mmx, mmy, c2x, c2y):\n \"\"\" Find control points of the bezier line through c1, mm, c2. We\n simply assume that c1, mm, c2 which have parametric value 0, 0.5, and 1.\n \"\"\"\n\n cmx = .5 * (4 * mmx - (c1x + c2x))\n cmy = .5 * (4 * mmy - (c1y + c2y))\n\n return [(c1x, c1y), (cmx, cmy), (c2x, c2y)]\n\n\ndef make_wedged_bezier2(bezier2, width, w1=1., wm=0.5, w2=0.):\n \"\"\"\n Being similar to get_parallels, returns control points of two quadrativ\n bezier lines having a width roughly parallel to given one separated by\n *width*.\n \"\"\"\n\n # c1, cm, c2\n c1x, c1y = bezier2[0]\n cmx, cmy = bezier2[1]\n c3x, c3y = bezier2[2]\n\n # t1 and t2 is the angle between c1 and cm, cm, c3.\n # They are also a angle of the tangential line of the path at c1 and c3\n cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)\n cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c3x, c3y)\n\n # find c1_left, c1_right which are located along the lines\n # through c1 and perpendicular to the tangential lines of the\n # bezier path at a distance of width. 
Same thing for c3_left and\n # c3_right with respect to c3.\n c1x_left, c1y_left, c1x_right, c1y_right = (\n get_normal_points(c1x, c1y, cos_t1, sin_t1, width * w1)\n )\n c3x_left, c3y_left, c3x_right, c3y_right = (\n get_normal_points(c3x, c3y, cos_t2, sin_t2, width * w2)\n )\n\n # find c12, c23 and c123 which are middle points of c1-cm, cm-c3 and\n # c12-c23\n c12x, c12y = (c1x + cmx) * .5, (c1y + cmy) * .5\n c23x, c23y = (cmx + c3x) * .5, (cmy + c3y) * .5\n c123x, c123y = (c12x + c23x) * .5, (c12y + c23y) * .5\n\n # tangential angle of c123 (angle between c12 and c23)\n cos_t123, sin_t123 = get_cos_sin(c12x, c12y, c23x, c23y)\n\n c123x_left, c123y_left, c123x_right, c123y_right = (\n get_normal_points(c123x, c123y, cos_t123, sin_t123, width * wm)\n )\n\n path_left = find_control_points(c1x_left, c1y_left,\n c123x_left, c123y_left,\n c3x_left, c3y_left)\n path_right = find_control_points(c1x_right, c1y_right,\n c123x_right, c123y_right,\n c3x_right, c3y_right)\n\n return path_left, path_right\n\n\ndef make_path_regular(p):\n \"\"\"\n fill in the codes if None.\n \"\"\"\n c = p.codes\n if c is None:\n c = np.empty(p.vertices.shape[:1], \"i\")\n c.fill(Path.LINETO)\n c[0] = Path.MOVETO\n\n return Path(p.vertices, c)\n else:\n return p\n\n\ndef concatenate_paths(paths):\n \"\"\"\n concatenate list of paths into a single path.\n \"\"\"\n\n vertices = []\n codes = []\n for p in paths:\n p = make_path_regular(p)\n vertices.append(p.vertices)\n codes.append(p.codes)\n\n _path = Path(np.concatenate(vertices),\n np.concatenate(codes))\n return _path\n",
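The bezier module in the record above splits curves with de Casteljau subdivision (`split_de_casteljau`). The same algorithm can be shown in isolation; this is a standalone sketch with an illustrative helper name of my own, not matplotlib's public API:

```python
import numpy as np


def de_casteljau_split(control_points, t):
    """Split a Bezier segment at parameter t; return (left, right) control points."""
    beta = np.asarray(control_points, dtype=float)
    levels = [beta]
    while len(beta) > 1:
        # One blending step: interpolate each adjacent pair of points at t.
        beta = beta[:-1] * (1 - t) + beta[1:] * t
        levels.append(beta)
    left = [lvl[0] for lvl in levels]              # first point of each level
    right = [lvl[-1] for lvl in reversed(levels)]  # last point of each level, reversed
    return left, right


# Quadratic segment from (0, 0) through (1, 2) to (2, 0), split at t = 0.5.
left, right = de_casteljau_split([(0, 0), (1, 2), (2, 0)], 0.5)
print(left[-1], right[0])  # both are the on-curve point at t = 0.5: [1. 1.]
```

The shared point `left[-1] == right[0]` is the curve value at the split parameter, which is exactly what `split_bezier_intersecting_with_closedpath` relies on when it bisects toward a path boundary.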
"# -*- coding: utf-8 -*-\n\nimport numpy as np\n\nfrom pandas.compat import PY3, u\n\nfrom pandas import (\n Categorical, CategoricalIndex, Series, date_range, period_range,\n timedelta_range)\nfrom pandas.core.config import option_context\nfrom pandas.tests.arrays.categorical.common import TestCategorical\n\n\nclass TestCategoricalReprWithFactor(TestCategorical):\n\n def test_print(self):\n expected = [\"[a, b, b, a, a, c, c, c]\",\n \"Categories (3, object): [a < b < c]\"]\n expected = \"\\n\".join(expected)\n actual = repr(self.factor)\n assert actual == expected\n\n\nclass TestCategoricalRepr(object):\n\n def test_big_print(self):\n factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],\n fastpath=True)\n expected = [\"[a, b, c, a, b, ..., b, c, a, b, c]\", \"Length: 600\",\n \"Categories (3, object): [a, b, c]\"]\n expected = \"\\n\".join(expected)\n\n actual = repr(factor)\n\n assert actual == expected\n\n def test_empty_print(self):\n factor = Categorical([], [\"a\", \"b\", \"c\"])\n expected = (\"[], Categories (3, object): [a, b, c]\")\n actual = repr(factor)\n assert actual == expected\n\n assert expected == actual\n factor = Categorical([], [\"a\", \"b\", \"c\"], ordered=True)\n expected = (\"[], Categories (3, object): [a < b < c]\")\n actual = repr(factor)\n assert expected == actual\n\n factor = Categorical([], [])\n expected = (\"[], Categories (0, object): []\")\n assert expected == repr(factor)\n\n def test_print_none_width(self):\n # GH10087\n a = Series(Categorical([1, 2, 3, 4]))\n exp = u(\"0 1\\n1 2\\n2 3\\n3 4\\n\" +\n \"dtype: category\\nCategories (4, int64): [1, 2, 3, 4]\")\n\n with option_context(\"display.width\", None):\n assert exp == repr(a)\n\n def test_unicode_print(self):\n if PY3:\n _rep = repr\n else:\n _rep = unicode # noqa\n\n c = Categorical(['aaaaa', 'bb', 'cccc'] * 20)\n expected = u\"\"\"\\\n[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]\nLength: 60\nCategories (3, object): [aaaaa, bb, cccc]\"\"\"\n\n assert _rep(c) == expected\n\n c = Categorical([u'ああああ', u'いいいいい', u'ううううううう'] * 20)\n expected = u\"\"\"\\\n[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]\nLength: 60\nCategories (3, object): [ああああ, いいいいい, ううううううう]\"\"\" # noqa\n\n assert _rep(c) == expected\n\n # unicode option should not affect to Categorical, as it doesn't care\n # the repr width\n with option_context('display.unicode.east_asian_width', True):\n\n c = Categorical([u'ああああ', u'いいいいい', u'ううううううう'] * 20)\n expected = u\"\"\"[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]\nLength: 60\nCategories (3, object): [ああああ, いいいいい, ううううううう]\"\"\" # noqa\n\n assert _rep(c) == expected\n\n def test_categorical_repr(self):\n c = Categorical([1, 2, 3])\n exp = \"\"\"[1, 2, 3]\nCategories (3, int64): [1, 2, 3]\"\"\"\n\n assert repr(c) == exp\n\n c = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])\n exp = \"\"\"[1, 2, 3, 1, 2, 3]\nCategories (3, int64): [1, 2, 3]\"\"\"\n\n assert repr(c) == exp\n\n c = Categorical([1, 2, 3, 4, 5] * 10)\n exp = \"\"\"[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]\nLength: 50\nCategories (5, int64): [1, 2, 3, 4, 5]\"\"\"\n\n assert repr(c) == exp\n\n c = Categorical(np.arange(20))\n exp = \"\"\"[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]\nLength: 20\nCategories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]\"\"\"\n\n assert repr(c) == exp\n\n def test_categorical_repr_ordered(self):\n c = Categorical([1, 2, 3], ordered=True)\n exp = \"\"\"[1, 2, 3]\nCategories (3, int64): [1 < 2 < 3]\"\"\"\n\n 
assert repr(c) == exp\n\n c = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3], ordered=True)\n exp = \"\"\"[1, 2, 3, 1, 2, 3]\nCategories (3, int64): [1 < 2 < 3]\"\"\"\n\n assert repr(c) == exp\n\n c = Categorical([1, 2, 3, 4, 5] * 10, ordered=True)\n exp = \"\"\"[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]\nLength: 50\nCategories (5, int64): [1 < 2 < 3 < 4 < 5]\"\"\"\n\n assert repr(c) == exp\n\n c = Categorical(np.arange(20), ordered=True)\n exp = \"\"\"[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]\nLength: 20\nCategories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]\"\"\"\n\n assert repr(c) == exp\n\n def test_categorical_repr_datetime(self):\n idx = date_range('2011-01-01 09:00', freq='H', periods=5)\n c = Categorical(idx)\n\n # TODO(wesm): exceeding 80 characters in the console is not good\n # behavior\n exp = (\n \"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, \"\n \"2011-01-01 12:00:00, 2011-01-01 13:00:00]\\n\"\n \"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, \"\n \"2011-01-01 10:00:00, 2011-01-01 11:00:00,\\n\"\n \" 2011-01-01 12:00:00, \"\n \"2011-01-01 13:00:00]\"\"\")\n assert repr(c) == exp\n\n c = Categorical(idx.append(idx), categories=idx)\n exp = (\n \"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, \"\n \"2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, \"\n \"2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, \"\n \"2011-01-01 13:00:00]\\n\"\n \"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, \"\n \"2011-01-01 10:00:00, 2011-01-01 11:00:00,\\n\"\n \" 2011-01-01 12:00:00, \"\n \"2011-01-01 13:00:00]\")\n\n assert repr(c) == exp\n\n idx = date_range('2011-01-01 09:00', freq='H', periods=5,\n tz='US/Eastern')\n c = Categorical(idx)\n exp = (\n \"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, \"\n \"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, \"\n \"2011-01-01 13:00:00-05:00]\\n\"\n \"Categories (5, datetime64[ns, US/Eastern]): \"\n \"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\\n\"\n \" \"\n \"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\\n\"\n \" \"\n \"2011-01-01 13:00:00-05:00]\")\n\n assert repr(c) == exp\n\n c = Categorical(idx.append(idx), categories=idx)\n exp = (\n \"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, \"\n \"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, \"\n \"2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, \"\n \"2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, \"\n \"2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\\n\"\n \"Categories (5, datetime64[ns, US/Eastern]): \"\n \"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\\n\"\n \" \"\n \"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\\n\"\n \" \"\n \"2011-01-01 13:00:00-05:00]\")\n\n assert repr(c) == exp\n\n def test_categorical_repr_datetime_ordered(self):\n idx = date_range('2011-01-01 09:00', freq='H', periods=5)\n c = Categorical(idx, ordered=True)\n exp = \"\"\"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]\nCategories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <\n 2011-01-01 12:00:00 < 2011-01-01 13:00:00]\"\"\" # noqa\n\n assert repr(c) == exp\n\n c = Categorical(idx.append(idx), categories=idx, ordered=True)\n exp = \"\"\"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]\nCategories 
(5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <\n 2011-01-01 12:00:00 < 2011-01-01 13:00:00]\"\"\" # noqa\n\n assert repr(c) == exp\n\n idx = date_range('2011-01-01 09:00', freq='H', periods=5,\n tz='US/Eastern')\n c = Categorical(idx, ordered=True)\n exp = \"\"\"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\nCategories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <\n 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <\n 2011-01-01 13:00:00-05:00]\"\"\" # noqa\n\n assert repr(c) == exp\n\n c = Categorical(idx.append(idx), categories=idx, ordered=True)\n exp = \"\"\"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\nCategories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <\n 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <\n 2011-01-01 13:00:00-05:00]\"\"\" # noqa\n\n assert repr(c) == exp\n\n def test_categorical_repr_int_with_nan(self):\n c = Categorical([1, 2, np.nan])\n c_exp = \"\"\"[1, 2, NaN]\\nCategories (2, int64): [1, 2]\"\"\"\n assert repr(c) == c_exp\n\n s = Series([1, 2, np.nan], dtype=\"object\").astype(\"category\")\n s_exp = \"\"\"0 1\\n1 2\\n2 NaN\ndtype: category\nCategories (2, int64): [1, 2]\"\"\"\n assert repr(s) == s_exp\n\n def test_categorical_repr_period(self):\n idx = period_range('2011-01-01 09:00', freq='H', periods=5)\n c = Categorical(idx)\n exp = \"\"\"[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]\nCategories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,\n 2011-01-01 13:00]\"\"\" # noqa\n\n assert repr(c) == exp\n\n c = Categorical(idx.append(idx), categories=idx)\n exp = \"\"\"[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]\nCategories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,\n 2011-01-01 13:00]\"\"\" # noqa\n\n assert repr(c) == exp\n\n idx = period_range('2011-01', freq='M', periods=5)\n c = Categorical(idx)\n exp = \"\"\"[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]\nCategories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]\"\"\"\n\n assert repr(c) == exp\n\n c = Categorical(idx.append(idx), categories=idx)\n exp = \"\"\"[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]\nCategories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]\"\"\" # noqa\n\n assert repr(c) == exp\n\n def test_categorical_repr_period_ordered(self):\n idx = period_range('2011-01-01 09:00', freq='H', periods=5)\n c = Categorical(idx, ordered=True)\n exp = \"\"\"[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]\nCategories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <\n 2011-01-01 13:00]\"\"\" # noqa\n\n assert repr(c) == exp\n\n c = Categorical(idx.append(idx), categories=idx, ordered=True)\n exp = \"\"\"[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 
2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]\nCategories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <\n 2011-01-01 13:00]\"\"\" # noqa\n\n assert repr(c) == exp\n\n idx = period_range('2011-01', freq='M', periods=5)\n c = Categorical(idx, ordered=True)\n exp = \"\"\"[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]\nCategories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]\"\"\"\n\n assert repr(c) == exp\n\n c = Categorical(idx.append(idx), categories=idx, ordered=True)\n exp = \"\"\"[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]\nCategories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]\"\"\" # noqa\n\n assert repr(c) == exp\n\n def test_categorical_repr_timedelta(self):\n idx = timedelta_range('1 days', periods=5)\n c = Categorical(idx)\n exp = \"\"\"[1 days, 2 days, 3 days, 4 days, 5 days]\nCategories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]\"\"\"\n\n assert repr(c) == exp\n\n c = Categorical(idx.append(idx), categories=idx)\n exp = \"\"\"[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]\nCategories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]\"\"\" # noqa\n\n assert repr(c) == exp\n\n idx = timedelta_range('1 hours', periods=20)\n c = Categorical(idx)\n exp = \"\"\"[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]\nLength: 20\nCategories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,\n 3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,\n 18 days 01:00:00, 19 days 01:00:00]\"\"\" # noqa\n\n assert repr(c) == exp\n\n c = Categorical(idx.append(idx), categories=idx)\n exp = \"\"\"[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]\nLength: 40\nCategories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,\n 3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,\n 18 days 01:00:00, 19 days 01:00:00]\"\"\" # noqa\n\n assert repr(c) == exp\n\n def test_categorical_repr_timedelta_ordered(self):\n idx = timedelta_range('1 days', periods=5)\n c = Categorical(idx, ordered=True)\n exp = \"\"\"[1 days, 2 days, 3 days, 4 days, 5 days]\nCategories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]\"\"\" # noqa\n\n assert repr(c) == exp\n\n c = Categorical(idx.append(idx), categories=idx, ordered=True)\n exp = \"\"\"[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]\nCategories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]\"\"\" # noqa\n\n assert repr(c) == exp\n\n idx = timedelta_range('1 hours', periods=20)\n c = Categorical(idx, ordered=True)\n exp = \"\"\"[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]\nLength: 20\nCategories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <\n 3 days 01:00:00 ... 
16 days 01:00:00 < 17 days 01:00:00 <\n 18 days 01:00:00 < 19 days 01:00:00]\"\"\" # noqa\n\n assert repr(c) == exp\n\n c = Categorical(idx.append(idx), categories=idx, ordered=True)\n exp = \"\"\"[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]\nLength: 40\nCategories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <\n 3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <\n 18 days 01:00:00 < 19 days 01:00:00]\"\"\" # noqa\n\n assert repr(c) == exp\n\n def test_categorical_index_repr(self):\n idx = CategoricalIndex(Categorical([1, 2, 3]))\n exp = \"\"\"CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')\"\"\" # noqa\n assert repr(idx) == exp\n\n i = CategoricalIndex(Categorical(np.arange(10)))\n exp = \"\"\"CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')\"\"\" # noqa\n assert repr(i) == exp\n\n def test_categorical_index_repr_ordered(self):\n i = CategoricalIndex(Categorical([1, 2, 3], ordered=True))\n exp = \"\"\"CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')\"\"\" # noqa\n assert repr(i) == exp\n\n i = CategoricalIndex(Categorical(np.arange(10), ordered=True))\n exp = \"\"\"CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')\"\"\" # noqa\n assert repr(i) == exp\n\n def test_categorical_index_repr_datetime(self):\n idx = date_range('2011-01-01 09:00', freq='H', periods=5)\n i = CategoricalIndex(Categorical(idx))\n exp = \"\"\"CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',\n '2011-01-01 11:00:00', '2011-01-01 12:00:00',\n '2011-01-01 13:00:00'],\n categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')\"\"\" # noqa\n\n assert repr(i) == exp\n\n idx = date_range('2011-01-01 09:00', freq='H', periods=5,\n tz='US/Eastern')\n i = CategoricalIndex(Categorical(idx))\n exp = \"\"\"CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',\n '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',\n '2011-01-01 13:00:00-05:00'],\n categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')\"\"\" # noqa\n\n assert repr(i) == exp\n\n def test_categorical_index_repr_datetime_ordered(self):\n idx = date_range('2011-01-01 09:00', freq='H', periods=5)\n i = CategoricalIndex(Categorical(idx, ordered=True))\n exp = \"\"\"CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',\n '2011-01-01 11:00:00', '2011-01-01 12:00:00',\n '2011-01-01 13:00:00'],\n categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')\"\"\" # noqa\n\n assert repr(i) == exp\n\n idx = date_range('2011-01-01 09:00', freq='H', periods=5,\n tz='US/Eastern')\n i = CategoricalIndex(Categorical(idx, ordered=True))\n exp = \"\"\"CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',\n '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',\n '2011-01-01 13:00:00-05:00'],\n categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 
13:00:00-05:00], ordered=True, dtype='category')\"\"\" # noqa\n\n assert repr(i) == exp\n\n i = CategoricalIndex(Categorical(idx.append(idx), ordered=True))\n exp = \"\"\"CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',\n '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',\n '2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',\n '2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',\n '2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],\n categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')\"\"\" # noqa\n\n assert repr(i) == exp\n\n def test_categorical_index_repr_period(self):\n # test all length\n idx = period_range('2011-01-01 09:00', freq='H', periods=1)\n i = CategoricalIndex(Categorical(idx))\n exp = \"\"\"CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')\"\"\" # noqa\n assert repr(i) == exp\n\n idx = period_range('2011-01-01 09:00', freq='H', periods=2)\n i = CategoricalIndex(Categorical(idx))\n exp = \"\"\"CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')\"\"\" # noqa\n assert repr(i) == exp\n\n idx = period_range('2011-01-01 09:00', freq='H', periods=3)\n i = CategoricalIndex(Categorical(idx))\n exp = \"\"\"CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')\"\"\" # noqa\n assert repr(i) == exp\n\n idx = period_range('2011-01-01 09:00', freq='H', periods=5)\n i = CategoricalIndex(Categorical(idx))\n exp = \"\"\"CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',\n '2011-01-01 12:00', '2011-01-01 13:00'],\n categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')\"\"\" # noqa\n\n assert repr(i) == exp\n\n i = CategoricalIndex(Categorical(idx.append(idx)))\n exp = \"\"\"CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',\n '2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',\n '2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',\n '2011-01-01 13:00'],\n categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')\"\"\" # noqa\n\n assert repr(i) == exp\n\n idx = period_range('2011-01', freq='M', periods=5)\n i = CategoricalIndex(Categorical(idx))\n exp = \"\"\"CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')\"\"\" # noqa\n assert repr(i) == exp\n\n def test_categorical_index_repr_period_ordered(self):\n idx = period_range('2011-01-01 09:00', freq='H', periods=5)\n i = CategoricalIndex(Categorical(idx, ordered=True))\n exp = \"\"\"CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',\n '2011-01-01 12:00', '2011-01-01 13:00'],\n categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')\"\"\" # noqa\n\n assert repr(i) == exp\n\n idx = period_range('2011-01', freq='M', periods=5)\n i = CategoricalIndex(Categorical(idx, ordered=True))\n exp = \"\"\"CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], 
categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')\"\"\" # noqa\n assert repr(i) == exp\n\n def test_categorical_index_repr_timedelta(self):\n idx = timedelta_range('1 days', periods=5)\n i = CategoricalIndex(Categorical(idx))\n exp = \"\"\"CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')\"\"\" # noqa\n assert repr(i) == exp\n\n idx = timedelta_range('1 hours', periods=10)\n i = CategoricalIndex(Categorical(idx))\n exp = \"\"\"CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',\n '3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',\n '6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',\n '9 days 01:00:00'],\n categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')\"\"\" # noqa\n\n assert repr(i) == exp\n\n def test_categorical_index_repr_timedelta_ordered(self):\n idx = timedelta_range('1 days', periods=5)\n i = CategoricalIndex(Categorical(idx, ordered=True))\n exp = \"\"\"CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')\"\"\" # noqa\n assert repr(i) == exp\n\n idx = timedelta_range('1 hours', periods=10)\n i = CategoricalIndex(Categorical(idx, ordered=True))\n exp = \"\"\"CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',\n '3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',\n '6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',\n '9 days 01:00:00'],\n categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')\"\"\" # noqa\n\n assert repr(i) == exp\n",
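The record above pins down the `repr` of `Categorical` and `CategoricalIndex` objects. A quick way to see the truncation behaviour being asserted (a sketch only; the exact quoting and wrapping of the output depends on the installed pandas version):

```python
import pandas as pd

c = pd.Categorical(["a", "b", "c"] * 20, ordered=True)
print(repr(c))
# Long categoricals are elided in the middle and report their length, roughly:
#   [a, b, c, a, b, ..., b, c, a, b, c]
#   Length: 60
#   Categories (3, object): [a < b < c]
```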
"import numpy as np\nfrom io import BytesIO\nimport os\nimport tempfile\nimport xml.parsers.expat\n\nimport pytest\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.testing.decorators import image_comparison\nimport matplotlib\nfrom matplotlib import dviread\n\n\nneeds_usetex = pytest.mark.skipif(\n not matplotlib.checkdep_usetex(True),\n reason=\"This test needs a TeX installation\")\n\n\ndef test_visibility():\n fig, ax = plt.subplots()\n\n x = np.linspace(0, 4 * np.pi, 50)\n y = np.sin(x)\n yerr = np.ones_like(y)\n\n a, b, c = ax.errorbar(x, y, yerr=yerr, fmt='ko')\n for artist in b:\n artist.set_visible(False)\n\n fd = BytesIO()\n fig.savefig(fd, format='svg')\n\n fd.seek(0)\n buf = fd.read()\n fd.close()\n\n parser = xml.parsers.expat.ParserCreate()\n parser.Parse(buf) # this will raise ExpatError if the svg is invalid\n\n\n@image_comparison(baseline_images=['fill_black_with_alpha'], remove_text=True,\n extensions=['svg'])\ndef test_fill_black_with_alpha():\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.scatter(x=[0, 0.1, 1], y=[0, 0, 0], c='k', alpha=0.1, s=10000)\n\n\n@image_comparison(baseline_images=['noscale'], remove_text=True)\ndef test_noscale():\n X, Y = np.meshgrid(np.arange(-5, 5, 1), np.arange(-5, 5, 1))\n Z = np.sin(Y ** 2)\n\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.imshow(Z, cmap='gray', interpolation='none')\n\n\ndef test_text_urls():\n fig = plt.figure()\n\n test_url = \"http://test_text_urls.matplotlib.org\"\n fig.suptitle(\"test_text_urls\", url=test_url)\n\n fd = BytesIO()\n fig.savefig(fd, format='svg')\n fd.seek(0)\n buf = fd.read().decode()\n fd.close()\n\n expected = '<a xlink:href=\"{0}\">'.format(test_url)\n assert expected in buf\n\n\n@image_comparison(baseline_images=['bold_font_output'], extensions=['svg'])\ndef test_bold_font_output():\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(np.arange(10), np.arange(10))\n ax.set_xlabel('nonbold-xlabel')\n ax.set_ylabel('bold-ylabel', fontweight='bold')\n ax.set_title('bold-title', fontweight='bold')\n\n\n@image_comparison(baseline_images=['bold_font_output_with_none_fonttype'],\n extensions=['svg'])\ndef test_bold_font_output_with_none_fonttype():\n plt.rcParams['svg.fonttype'] = 'none'\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(np.arange(10), np.arange(10))\n ax.set_xlabel('nonbold-xlabel')\n ax.set_ylabel('bold-ylabel', fontweight='bold')\n ax.set_title('bold-title', fontweight='bold')\n\n\ndef _test_determinism_save(filename, usetex):\n # This function is mostly copy&paste from \"def test_visibility\"\n # To require no GUI, we use Figure and FigureCanvasSVG\n # instead of plt.figure and fig.savefig\n from matplotlib.figure import Figure\n from matplotlib.backends.backend_svg import FigureCanvasSVG\n from matplotlib import rc\n rc('svg', hashsalt='asdf')\n rc('text', usetex=usetex)\n\n fig = Figure()\n ax = fig.add_subplot(111)\n\n x = np.linspace(0, 4 * np.pi, 50)\n y = np.sin(x)\n yerr = np.ones_like(y)\n\n a, b, c = ax.errorbar(x, y, yerr=yerr, fmt='ko')\n for artist in b:\n artist.set_visible(False)\n ax.set_title('A string $1+2+\\\\sigma$')\n ax.set_xlabel('A string $1+2+\\\\sigma$')\n ax.set_ylabel('A string $1+2+\\\\sigma$')\n\n FigureCanvasSVG(fig).print_svg(filename)\n\n\[email protected](\n \"filename, usetex\",\n # unique filenames to allow for parallel testing\n [(\"determinism_notex.svg\", False),\n pytest.param(\"determinism_tex.svg\", True, marks=needs_usetex)])\ndef test_determinism(filename, usetex):\n import sys\n from subprocess 
import check_output, STDOUT, CalledProcessError\n plots = []\n for i in range(3):\n # Using check_output and setting stderr to STDOUT will capture the real\n # problem in the output property of the exception\n try:\n check_output(\n [sys.executable, '-R', '-c',\n 'import matplotlib; '\n 'matplotlib._called_from_pytest = True; '\n 'matplotlib.use(\"svg\", force=True); '\n 'from matplotlib.tests.test_backend_svg '\n 'import _test_determinism_save;'\n '_test_determinism_save(%r, %r)' % (filename, usetex)],\n stderr=STDOUT)\n except CalledProcessError as e:\n # it's easier to use utf8 and ask for forgiveness than try\n # to figure out what the current console has as an\n # encoding :-/\n print(e.output.decode(encoding=\"utf-8\", errors=\"ignore\"))\n raise e\n else:\n with open(filename, 'rb') as fd:\n plots.append(fd.read())\n finally:\n os.unlink(filename)\n for p in plots[1:]:\n assert p == plots[0]\n\n\n@needs_usetex\ndef test_missing_psfont(monkeypatch):\n \"\"\"An error is raised if a TeX font lacks a Type-1 equivalent\"\"\"\n from matplotlib import rc\n\n def psfont(*args, **kwargs):\n return dviread.PsFont(texname='texfont', psname='Some Font',\n effects=None, encoding=None, filename=None)\n\n monkeypatch.setattr(dviread.PsfontsMap, '__getitem__', psfont)\n rc('text', usetex=True)\n fig, ax = plt.subplots()\n ax.text(0.5, 0.5, 'hello')\n with tempfile.TemporaryFile() as tmpfile, pytest.raises(ValueError):\n fig.savefig(tmpfile, format='svg')\n",
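The SVG backend tests above repeatedly use the same validation trick: render to an in-memory buffer and let expat confirm the output is well-formed XML. A minimal sketch of that pattern, using `Figure`/`FigureCanvasSVG` directly so no GUI backend is needed (as the determinism helper in the record also does):

```python
import xml.parsers.expat
from io import BytesIO

from matplotlib.figure import Figure
from matplotlib.backends.backend_svg import FigureCanvasSVG

fig = Figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot([0, 1, 2], [0, 1, 0])

buf = BytesIO()
FigureCanvasSVG(fig).print_svg(buf)

parser = xml.parsers.expat.ParserCreate()
parser.Parse(buf.getvalue())  # raises ExpatError if the SVG is not valid XML
```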
"import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import timedelta_range, to_timedelta\nimport pandas.util.testing as tm\n\nfrom pandas.tseries.offsets import Day, Second\n\n\nclass TestTimedeltas(object):\n\n def test_timedelta_range(self):\n\n expected = to_timedelta(np.arange(5), unit='D')\n result = timedelta_range('0 days', periods=5, freq='D')\n tm.assert_index_equal(result, expected)\n\n expected = to_timedelta(np.arange(11), unit='D')\n result = timedelta_range('0 days', '10 days', freq='D')\n tm.assert_index_equal(result, expected)\n\n expected = to_timedelta(np.arange(5), unit='D') + Second(2) + Day()\n result = timedelta_range('1 days, 00:00:02', '5 days, 00:00:02',\n freq='D')\n tm.assert_index_equal(result, expected)\n\n expected = to_timedelta([1, 3, 5, 7, 9], unit='D') + Second(2)\n result = timedelta_range('1 days, 00:00:02', periods=5, freq='2D')\n tm.assert_index_equal(result, expected)\n\n expected = to_timedelta(np.arange(50), unit='T') * 30\n result = timedelta_range('0 days', freq='30T', periods=50)\n tm.assert_index_equal(result, expected)\n\n # GH 11776\n arr = np.arange(10).reshape(2, 5)\n df = pd.DataFrame(np.arange(10).reshape(2, 5))\n for arg in (arr, df):\n with pytest.raises(TypeError, match=\"1-d array\"):\n to_timedelta(arg)\n for errors in ['ignore', 'raise', 'coerce']:\n with pytest.raises(TypeError, match=\"1-d array\"):\n to_timedelta(arg, errors=errors)\n\n # issue10583\n df = pd.DataFrame(np.random.normal(size=(10, 4)))\n df.index = pd.timedelta_range(start='0s', periods=10, freq='s')\n expected = df.loc[pd.Timedelta('0s'):, :]\n result = df.loc['0s':, :]\n tm.assert_frame_equal(expected, result)\n\n @pytest.mark.parametrize('periods, freq', [\n (3, '2D'), (5, 'D'), (6, '19H12T'), (7, '16H'), (9, '12H')])\n def test_linspace_behavior(self, periods, freq):\n # GH 20976\n result = timedelta_range(start='0 days', end='4 days', periods=periods)\n expected = timedelta_range(start='0 days', end='4 days', freq=freq)\n tm.assert_index_equal(result, expected)\n\n def test_errors(self):\n # not enough params\n msg = ('Of the four parameters: start, end, periods, and freq, '\n 'exactly three must be specified')\n with pytest.raises(ValueError, match=msg):\n timedelta_range(start='0 days')\n\n with pytest.raises(ValueError, match=msg):\n timedelta_range(end='5 days')\n\n with pytest.raises(ValueError, match=msg):\n timedelta_range(periods=2)\n\n with pytest.raises(ValueError, match=msg):\n timedelta_range()\n\n # too many params\n with pytest.raises(ValueError, match=msg):\n timedelta_range(start='0 days', end='5 days', periods=10, freq='H')\n",
"import numpy as np\nimport pytest\n\nfrom matplotlib import rc_context\nfrom matplotlib.testing.decorators import image_comparison\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import BoundaryNorm, LogNorm, PowerNorm\nfrom matplotlib.cm import get_cmap\nfrom matplotlib.colorbar import ColorbarBase\nfrom matplotlib.ticker import LogLocator, LogFormatter\n\n\ndef _get_cmap_norms():\n \"\"\"\n Define a colormap and appropriate norms for each of the four\n possible settings of the extend keyword.\n\n Helper function for _colorbar_extension_shape and\n colorbar_extension_length.\n \"\"\"\n # Create a color map and specify the levels it represents.\n cmap = get_cmap(\"RdBu\", lut=5)\n clevs = [-5., -2.5, -.5, .5, 1.5, 3.5]\n # Define norms for the color maps.\n norms = dict()\n norms['neither'] = BoundaryNorm(clevs, len(clevs) - 1)\n norms['min'] = BoundaryNorm([-10] + clevs[1:], len(clevs) - 1)\n norms['max'] = BoundaryNorm(clevs[:-1] + [10], len(clevs) - 1)\n norms['both'] = BoundaryNorm([-10] + clevs[1:-1] + [10], len(clevs) - 1)\n return cmap, norms\n\n\ndef _colorbar_extension_shape(spacing):\n '''\n Produce 4 colorbars with rectangular extensions for either uniform\n or proportional spacing.\n\n Helper function for test_colorbar_extension_shape.\n '''\n # Get a colormap and appropriate norms for each extension type.\n cmap, norms = _get_cmap_norms()\n # Create a figure and adjust whitespace for subplots.\n fig = plt.figure()\n fig.subplots_adjust(hspace=4)\n for i, extension_type in enumerate(('neither', 'min', 'max', 'both')):\n # Get the appropriate norm and use it to get colorbar boundaries.\n norm = norms[extension_type]\n boundaries = values = norm.boundaries\n # Create a subplot.\n cax = fig.add_subplot(4, 1, i + 1)\n # Generate the colorbar.\n cb = ColorbarBase(cax, cmap=cmap, norm=norm,\n boundaries=boundaries, values=values,\n extend=extension_type, extendrect=True,\n orientation='horizontal', spacing=spacing)\n # Turn off text and ticks.\n cax.tick_params(left=False, labelleft=False,\n bottom=False, labelbottom=False)\n # Return the figure to the caller.\n return fig\n\n\ndef _colorbar_extension_length(spacing):\n '''\n Produce 12 colorbars with variable length extensions for either\n uniform or proportional spacing.\n\n Helper function for test_colorbar_extension_length.\n '''\n # Get a colormap and appropriate norms for each extension type.\n cmap, norms = _get_cmap_norms()\n # Create a figure and adjust whitespace for subplots.\n fig = plt.figure()\n fig.subplots_adjust(hspace=.6)\n for i, extension_type in enumerate(('neither', 'min', 'max', 'both')):\n # Get the appropriate norm and use it to get colorbar boundaries.\n norm = norms[extension_type]\n boundaries = values = norm.boundaries\n for j, extendfrac in enumerate((None, 'auto', 0.1)):\n # Create a subplot.\n cax = fig.add_subplot(12, 1, i*3 + j + 1)\n # Generate the colorbar.\n ColorbarBase(cax, cmap=cmap, norm=norm,\n boundaries=boundaries, values=values,\n extend=extension_type, extendfrac=extendfrac,\n orientation='horizontal', spacing=spacing)\n # Turn off text and ticks.\n cax.tick_params(left=False, labelleft=False,\n bottom=False, labelbottom=False)\n # Return the figure to the caller.\n return fig\n\n\n@image_comparison(\n baseline_images=['colorbar_extensions_shape_uniform',\n 'colorbar_extensions_shape_proportional'],\n extensions=['png'])\ndef test_colorbar_extension_shape():\n '''Test rectangular colorbar extensions.'''\n # Create figures for uniform and proportionally spaced colorbars.\n 
_colorbar_extension_shape('uniform')\n _colorbar_extension_shape('proportional')\n\n\n@image_comparison(baseline_images=['colorbar_extensions_uniform',\n 'colorbar_extensions_proportional'],\n extensions=['png'])\ndef test_colorbar_extension_length():\n '''Test variable length colorbar extensions.'''\n # Create figures for uniform and proportionally spaced colorbars.\n _colorbar_extension_length('uniform')\n _colorbar_extension_length('proportional')\n\n\n@image_comparison(baseline_images=['cbar_with_orientation',\n 'cbar_locationing',\n 'double_cbar',\n 'cbar_sharing',\n ],\n extensions=['png'], remove_text=True,\n savefig_kwarg={'dpi': 40})\ndef test_colorbar_positioning():\n data = np.arange(1200).reshape(30, 40)\n levels = [0, 200, 400, 600, 800, 1000, 1200]\n\n # -------------------\n plt.figure()\n plt.contourf(data, levels=levels)\n plt.colorbar(orientation='horizontal', use_gridspec=False)\n\n locations = ['left', 'right', 'top', 'bottom']\n plt.figure()\n for i, location in enumerate(locations):\n plt.subplot(2, 2, i + 1)\n plt.contourf(data, levels=levels)\n plt.colorbar(location=location, use_gridspec=False)\n\n # -------------------\n plt.figure()\n # make some other data (random integers)\n data_2nd = np.array([[2, 3, 2, 3], [1.5, 2, 2, 3], [2, 3, 3, 4]])\n # make the random data expand to the shape of the main data\n data_2nd = np.repeat(np.repeat(data_2nd, 10, axis=1), 10, axis=0)\n\n color_mappable = plt.contourf(data, levels=levels, extend='both')\n # test extend frac here\n hatch_mappable = plt.contourf(data_2nd, levels=[1, 2, 3], colors='none',\n hatches=['/', 'o', '+'], extend='max')\n plt.contour(hatch_mappable, colors='black')\n\n plt.colorbar(color_mappable, location='left', label='variable 1',\n use_gridspec=False)\n plt.colorbar(hatch_mappable, location='right', label='variable 2',\n use_gridspec=False)\n\n # -------------------\n plt.figure()\n ax1 = plt.subplot(211, anchor='NE', aspect='equal')\n plt.contourf(data, levels=levels)\n ax2 = plt.subplot(223)\n plt.contourf(data, levels=levels)\n ax3 = plt.subplot(224)\n plt.contourf(data, levels=levels)\n\n plt.colorbar(ax=[ax2, ax3, ax1], location='right', pad=0.0, shrink=0.5,\n panchor=False, use_gridspec=False)\n plt.colorbar(ax=[ax2, ax3, ax1], location='left', shrink=0.5,\n panchor=False, use_gridspec=False)\n plt.colorbar(ax=[ax1], location='bottom', panchor=False,\n anchor=(0.8, 0.5), shrink=0.6, use_gridspec=False)\n\n\n@image_comparison(baseline_images=['cbar_with_subplots_adjust'],\n extensions=['png'], remove_text=True,\n savefig_kwarg={'dpi': 40})\ndef test_gridspec_make_colorbar():\n plt.figure()\n data = np.arange(1200).reshape(30, 40)\n levels = [0, 200, 400, 600, 800, 1000, 1200]\n\n plt.subplot(121)\n plt.contourf(data, levels=levels)\n plt.colorbar(use_gridspec=True, orientation='vertical')\n\n plt.subplot(122)\n plt.contourf(data, levels=levels)\n plt.colorbar(use_gridspec=True, orientation='horizontal')\n\n plt.subplots_adjust(top=0.95, right=0.95, bottom=0.2, hspace=0.25)\n\n\n@image_comparison(baseline_images=['colorbar_single_scatter'],\n extensions=['png'], remove_text=True,\n savefig_kwarg={'dpi': 40})\ndef test_colorbar_single_scatter():\n # Issue #2642: if a path collection has only one entry,\n # the norm scaling within the colorbar must ensure a\n # finite range, otherwise a zero denominator will occur in _locate.\n plt.figure()\n x = np.arange(4)\n y = x.copy()\n z = np.ma.masked_greater(np.arange(50, 54), 50)\n cmap = plt.get_cmap('jet', 16)\n cs = plt.scatter(x, y, z, c=z, cmap=cmap)\n 
plt.colorbar(cs)\n\n\[email protected]('use_gridspec', [False, True],\n ids=['no gridspec', 'with gridspec'])\ndef test_remove_from_figure(use_gridspec):\n \"\"\"\n Test `remove_from_figure` with the specified ``use_gridspec`` setting\n \"\"\"\n fig, ax = plt.subplots()\n sc = ax.scatter([1, 2], [3, 4], cmap=\"spring\")\n sc.set_array(np.array([5, 6]))\n pre_figbox = np.array(ax.figbox)\n cb = fig.colorbar(sc, use_gridspec=use_gridspec)\n fig.subplots_adjust()\n cb.remove()\n fig.subplots_adjust()\n post_figbox = np.array(ax.figbox)\n assert (pre_figbox == post_figbox).all()\n\n\ndef test_colorbarbase():\n # smoke test from #3805\n ax = plt.gca()\n ColorbarBase(ax, plt.cm.bone)\n\n\n@image_comparison(\n baseline_images=['colorbar_closed_patch'],\n remove_text=True)\ndef test_colorbar_closed_patch():\n fig = plt.figure(figsize=(8, 6))\n ax1 = fig.add_axes([0.05, 0.85, 0.9, 0.1])\n ax2 = fig.add_axes([0.1, 0.65, 0.75, 0.1])\n ax3 = fig.add_axes([0.05, 0.45, 0.9, 0.1])\n ax4 = fig.add_axes([0.05, 0.25, 0.9, 0.1])\n ax5 = fig.add_axes([0.05, 0.05, 0.9, 0.1])\n\n cmap = get_cmap(\"RdBu\", lut=5)\n\n im = ax1.pcolormesh(np.linspace(0, 10, 16).reshape((4, 4)), cmap=cmap)\n values = np.linspace(0, 10, 5)\n\n with rc_context({'axes.linewidth': 16}):\n plt.colorbar(im, cax=ax2, cmap=cmap, orientation='horizontal',\n extend='both', extendfrac=0.5, values=values)\n plt.colorbar(im, cax=ax3, cmap=cmap, orientation='horizontal',\n extend='both', values=values)\n plt.colorbar(im, cax=ax4, cmap=cmap, orientation='horizontal',\n extend='both', extendrect=True, values=values)\n plt.colorbar(im, cax=ax5, cmap=cmap, orientation='horizontal',\n extend='neither', values=values)\n\n\ndef test_colorbar_ticks():\n # test fix for #5673\n fig, ax = plt.subplots()\n x = np.arange(-3.0, 4.001)\n y = np.arange(-4.0, 3.001)\n X, Y = np.meshgrid(x, y)\n Z = X * Y\n clevs = np.array([-12, -5, 0, 5, 12], dtype=float)\n colors = ['r', 'g', 'b', 'c']\n cs = ax.contourf(X, Y, Z, clevs, colors=colors)\n cbar = fig.colorbar(cs, ax=ax, extend='neither',\n orientation='horizontal', ticks=clevs)\n assert len(cbar.ax.xaxis.get_ticklocs()) == len(clevs)\n\n\ndef test_colorbar_minorticks_on_off():\n # test for github issue #11510 and PR #11584\n np.random.seed(seed=12345)\n data = np.random.randn(20, 20)\n with rc_context({'_internal.classic_mode': False}):\n fig, ax = plt.subplots()\n # purposefully setting vmin and vmax to odd fractions\n # so as to check for the correct locations of the minor ticks\n im = ax.pcolormesh(data, vmin=-2.3, vmax=3.3)\n\n cbar = fig.colorbar(im, extend='both')\n cbar.minorticks_on()\n correct_minorticklocs = np.array([-2.2, -1.8, -1.6, -1.4, -1.2, -0.8,\n -0.6, -0.4, -0.2, 0.2, 0.4, 0.6,\n 0.8, 1.2, 1.4, 1.6, 1.8, 2.2, 2.4,\n 2.6, 2.8, 3.2])\n # testing after minorticks_on()\n np.testing.assert_almost_equal(cbar.ax.yaxis.get_minorticklocs(),\n correct_minorticklocs)\n cbar.minorticks_off()\n # testing after minorticks_off()\n np.testing.assert_almost_equal(cbar.ax.yaxis.get_minorticklocs(),\n np.array([]))\n\n im.set_clim(vmin=-1.2, vmax=1.2)\n cbar.minorticks_on()\n correct_minorticklocs = np.array([-1.2, -1.1, -0.9, -0.8, -0.7, -0.6,\n -0.4, -0.3, -0.2, -0.1, 0.1, 0.2,\n 0.3, 0.4, 0.6, 0.7, 0.8, 0.9,\n 1.1, 1.2])\n np.testing.assert_almost_equal(cbar.ax.yaxis.get_minorticklocs(),\n correct_minorticklocs)\n\n\ndef test_colorbar_autoticks():\n # Test new autotick modes. 
Needs to be classic because\n # non-classic doesn't go this route.\n with rc_context({'_internal.classic_mode': False}):\n fig, ax = plt.subplots(2, 1)\n x = np.arange(-3.0, 4.001)\n y = np.arange(-4.0, 3.001)\n X, Y = np.meshgrid(x, y)\n Z = X * Y\n pcm = ax[0].pcolormesh(X, Y, Z)\n cbar = fig.colorbar(pcm, ax=ax[0], extend='both',\n orientation='vertical')\n\n pcm = ax[1].pcolormesh(X, Y, Z)\n cbar2 = fig.colorbar(pcm, ax=ax[1], extend='both',\n orientation='vertical', shrink=0.4)\n np.testing.assert_almost_equal(cbar.ax.yaxis.get_ticklocs(),\n np.arange(-10, 11., 5.))\n np.testing.assert_almost_equal(cbar2.ax.yaxis.get_ticklocs(),\n np.arange(-10, 11., 10.))\n\n\ndef test_colorbar_autotickslog():\n # Test new autotick modes...\n with rc_context({'_internal.classic_mode': False}):\n fig, ax = plt.subplots(2, 1)\n x = np.arange(-3.0, 4.001)\n y = np.arange(-4.0, 3.001)\n X, Y = np.meshgrid(x, y)\n Z = X * Y\n pcm = ax[0].pcolormesh(X, Y, 10**Z, norm=LogNorm())\n cbar = fig.colorbar(pcm, ax=ax[0], extend='both',\n orientation='vertical')\n\n pcm = ax[1].pcolormesh(X, Y, 10**Z, norm=LogNorm())\n cbar2 = fig.colorbar(pcm, ax=ax[1], extend='both',\n orientation='vertical', shrink=0.4)\n np.testing.assert_almost_equal(cbar.ax.yaxis.get_ticklocs(),\n 10**np.arange(-12, 12.2, 4.))\n np.testing.assert_almost_equal(cbar2.ax.yaxis.get_ticklocs(),\n 10**np.arange(-12, 13., 12.))\n\n\ndef test_colorbar_get_ticks():\n # test feature for #5792\n plt.figure()\n data = np.arange(1200).reshape(30, 40)\n levels = [0, 200, 400, 600, 800, 1000, 1200]\n\n plt.subplot()\n plt.contourf(data, levels=levels)\n\n # testing getter for user set ticks\n userTicks = plt.colorbar(ticks=[0, 600, 1200])\n assert userTicks.get_ticks().tolist() == [0, 600, 1200]\n\n # testing for getter after calling set_ticks\n userTicks.set_ticks([600, 700, 800])\n assert userTicks.get_ticks().tolist() == [600, 700, 800]\n\n # testing for getter after calling set_ticks with some ticks out of bounds\n userTicks.set_ticks([600, 1300, 1400, 1500])\n assert userTicks.get_ticks().tolist() == [600]\n\n # testing getter when no ticks are assigned\n defTicks = plt.colorbar(orientation='horizontal')\n assert defTicks.get_ticks().tolist() == levels\n\n\ndef test_colorbar_lognorm_extension():\n # Test that colorbar with lognorm is extended correctly\n f, ax = plt.subplots()\n cb = ColorbarBase(ax, norm=LogNorm(vmin=0.1, vmax=1000.0),\n orientation='vertical', extend='both')\n assert cb._values[0] >= 0.0\n\n\ndef test_colorbar_powernorm_extension():\n # Test that colorbar with powernorm is extended correctly\n f, ax = plt.subplots()\n cb = ColorbarBase(ax, norm=PowerNorm(gamma=0.5, vmin=0.0, vmax=1.0),\n orientation='vertical', extend='both')\n assert cb._values[0] >= 0.0\n\n\ndef test_colorbar_axes_kw():\n # test fix for #8493: This does only test, that axes-related keywords pass\n # and do not raise an exception.\n plt.figure()\n plt.imshow(([[1, 2], [3, 4]]))\n plt.colorbar(orientation='horizontal', fraction=0.2, pad=0.2, shrink=0.5,\n aspect=10, anchor=(0., 0.), panchor=(0., 1.))\n\n\ndef test_colorbar_log_minortick_labels():\n with rc_context({'_internal.classic_mode': False}):\n fig, ax = plt.subplots()\n pcm = ax.imshow([[10000, 50000]], norm=LogNorm())\n cb = fig.colorbar(pcm)\n fig.canvas.draw()\n lb = cb.ax.yaxis.get_ticklabels(which='both')\n expected = [r'$\\mathdefault{10^{4}}$',\n r'$\\mathdefault{2\\times10^{4}}$',\n r'$\\mathdefault{3\\times10^{4}}$',\n r'$\\mathdefault{4\\times10^{4}}$']\n for l, exp in zip(lb, expected):\n assert 
l.get_text() == exp\n\n\ndef test_colorbar_renorm():\n x, y = np.ogrid[-4:4:31j, -4:4:31j]\n z = 120000*np.exp(-x**2 - y**2)\n\n fig, ax = plt.subplots()\n im = ax.imshow(z)\n cbar = fig.colorbar(im)\n\n norm = LogNorm(z.min(), z.max())\n im.set_norm(norm)\n cbar.set_norm(norm)\n cbar.locator = LogLocator()\n cbar.formatter = LogFormatter()\n cbar.update_normal(im)\n assert np.isclose(cbar.vmin, z.min())\n\n norm = LogNorm(z.min() * 1000, z.max() * 1000)\n im.set_norm(norm)\n cbar.set_norm(norm)\n cbar.update_normal(im)\n assert np.isclose(cbar.vmin, z.min() * 1000)\n assert np.isclose(cbar.vmax, z.max() * 1000)\n\n\ndef test_colorbar_get_ticks():\n with rc_context({'_internal.classic_mode': False}):\n\n fig, ax = plt. subplots()\n np.random.seed(19680801)\n pc = ax.pcolormesh(np.random.rand(30, 30))\n cb = fig.colorbar(pc)\n np.testing.assert_allclose(cb.get_ticks(), [0.2, 0.4, 0.6, 0.8])\n",
"# being a bit too dynamic\n# pylint: disable=E1101\nfrom __future__ import division\n\nimport numpy as np\n\nfrom pandas.compat import lmap, lrange, range, zip\nfrom pandas.util._decorators import deprecate_kwarg\n\nfrom pandas.core.dtypes.missing import notna\n\nfrom pandas.io.formats.printing import pprint_thing\nfrom pandas.plotting._style import _get_standard_colors\nfrom pandas.plotting._tools import _set_ticks_props, _subplots\n\n\ndef scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,\n diagonal='hist', marker='.', density_kwds=None,\n hist_kwds=None, range_padding=0.05, **kwds):\n \"\"\"\n Draw a matrix of scatter plots.\n\n Parameters\n ----------\n frame : DataFrame\n alpha : float, optional\n amount of transparency applied\n figsize : (float,float), optional\n a tuple (width, height) in inches\n ax : Matplotlib axis object, optional\n grid : bool, optional\n setting this to True will show the grid\n diagonal : {'hist', 'kde'}\n pick between 'kde' and 'hist' for\n either Kernel Density Estimation or Histogram\n plot in the diagonal\n marker : str, optional\n Matplotlib marker type, default '.'\n hist_kwds : other plotting keyword arguments\n To be passed to hist function\n density_kwds : other plotting keyword arguments\n To be passed to kernel density estimate plot\n range_padding : float, optional\n relative extension of axis range in x and y\n with respect to (x_max - x_min) or (y_max - y_min),\n default 0.05\n kwds : other plotting keyword arguments\n To be passed to scatter function\n\n Examples\n --------\n >>> df = pd.DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])\n >>> scatter_matrix(df, alpha=0.2)\n \"\"\"\n\n df = frame._get_numeric_data()\n n = df.columns.size\n naxes = n * n\n fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax,\n squeeze=False)\n\n # no gaps between subplots\n fig.subplots_adjust(wspace=0, hspace=0)\n\n mask = notna(df)\n\n marker = _get_marker_compat(marker)\n\n hist_kwds = hist_kwds or {}\n density_kwds = density_kwds or {}\n\n # GH 14855\n kwds.setdefault('edgecolors', 'none')\n\n boundaries_list = []\n for a in df.columns:\n values = df[a].values[mask[a].values]\n rmin_, rmax_ = np.min(values), np.max(values)\n rdelta_ext = (rmax_ - rmin_) * range_padding / 2.\n boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext))\n\n for i, a in zip(lrange(n), df.columns):\n for j, b in zip(lrange(n), df.columns):\n ax = axes[i, j]\n\n if i == j:\n values = df[a].values[mask[a].values]\n\n # Deal with the diagonal by drawing a histogram there.\n if diagonal == 'hist':\n ax.hist(values, **hist_kwds)\n\n elif diagonal in ('kde', 'density'):\n from scipy.stats import gaussian_kde\n y = values\n gkde = gaussian_kde(y)\n ind = np.linspace(y.min(), y.max(), 1000)\n ax.plot(ind, gkde.evaluate(ind), **density_kwds)\n\n ax.set_xlim(boundaries_list[i])\n\n else:\n common = (mask[a] & mask[b]).values\n\n ax.scatter(df[b][common], df[a][common],\n marker=marker, alpha=alpha, **kwds)\n\n ax.set_xlim(boundaries_list[j])\n ax.set_ylim(boundaries_list[i])\n\n ax.set_xlabel(b)\n ax.set_ylabel(a)\n\n if j != 0:\n ax.yaxis.set_visible(False)\n if i != n - 1:\n ax.xaxis.set_visible(False)\n\n if len(df.columns) > 1:\n lim1 = boundaries_list[0]\n locs = axes[0][1].yaxis.get_majorticklocs()\n locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])]\n adj = (locs - lim1[0]) / (lim1[1] - lim1[0])\n\n lim0 = axes[0][0].get_ylim()\n adj = adj * (lim0[1] - lim0[0]) + lim0[0]\n axes[0][0].yaxis.set_ticks(adj)\n\n if np.all(locs == 
locs.astype(int)):\n # if all ticks are int\n locs = locs.astype(int)\n axes[0][0].yaxis.set_ticklabels(locs)\n\n _set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)\n\n return axes\n\n\ndef _get_marker_compat(marker):\n import matplotlib.lines as mlines\n if marker not in mlines.lineMarkers:\n return 'o'\n return marker\n\n\ndef radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds):\n \"\"\"\n Plot a multidimensional dataset in 2D.\n\n Each Series in the DataFrame is represented as a evenly distributed\n slice on a circle. Each data point is rendered in the circle according to\n the value on each Series. Highly correlated `Series` in the `DataFrame`\n are placed closer on the unit circle.\n\n RadViz allow to project a N-dimensional data set into a 2D space where the\n influence of each dimension can be interpreted as a balance between the\n influence of all dimensions.\n\n More info available at the `original article\n <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.889>`_\n describing RadViz.\n\n Parameters\n ----------\n frame : `DataFrame`\n Pandas object holding the data.\n class_column : str\n Column name containing the name of the data point category.\n ax : :class:`matplotlib.axes.Axes`, optional\n A plot instance to which to add the information.\n color : list[str] or tuple[str], optional\n Assign a color to each category. Example: ['blue', 'green'].\n colormap : str or :class:`matplotlib.colors.Colormap`, default None\n Colormap to select colors from. If string, load colormap with that\n name from matplotlib.\n kwds : optional\n Options to pass to matplotlib scatter plotting method.\n\n Returns\n -------\n axes : :class:`matplotlib.axes.Axes`\n\n See Also\n --------\n pandas.plotting.andrews_curves : Plot clustering visualization.\n\n Examples\n --------\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame({\n ... 'SepalLength': [6.5, 7.7, 5.1, 5.8, 7.6, 5.0, 5.4, 4.6,\n ... 6.7, 4.6],\n ... 'SepalWidth': [3.0, 3.8, 3.8, 2.7, 3.0, 2.3, 3.0, 3.2,\n ... 3.3, 3.6],\n ... 'PetalLength': [5.5, 6.7, 1.9, 5.1, 6.6, 3.3, 4.5, 1.4,\n ... 5.7, 1.0],\n ... 'PetalWidth': [1.8, 2.2, 0.4, 1.9, 2.1, 1.0, 1.5, 0.2,\n ... 2.1, 0.2],\n ... 'Category': ['virginica', 'virginica', 'setosa',\n ... 'virginica', 'virginica', 'versicolor',\n ... 'versicolor', 'setosa', 'virginica',\n ... 'setosa']\n ... 
})\n >>> rad_viz = pd.plotting.radviz(df, 'Category') # doctest: +SKIP\n \"\"\"\n import matplotlib.pyplot as plt\n import matplotlib.patches as patches\n\n def normalize(series):\n a = min(series)\n b = max(series)\n return (series - a) / (b - a)\n\n n = len(frame)\n classes = frame[class_column].drop_duplicates()\n class_col = frame[class_column]\n df = frame.drop(class_column, axis=1).apply(normalize)\n\n if ax is None:\n ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1])\n\n to_plot = {}\n colors = _get_standard_colors(num_colors=len(classes), colormap=colormap,\n color_type='random', color=color)\n\n for kls in classes:\n to_plot[kls] = [[], []]\n\n m = len(frame.columns) - 1\n s = np.array([(np.cos(t), np.sin(t))\n for t in [2.0 * np.pi * (i / float(m))\n for i in range(m)]])\n\n for i in range(n):\n row = df.iloc[i].values\n row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)\n y = (s * row_).sum(axis=0) / row.sum()\n kls = class_col.iat[i]\n to_plot[kls][0].append(y[0])\n to_plot[kls][1].append(y[1])\n\n for i, kls in enumerate(classes):\n ax.scatter(to_plot[kls][0], to_plot[kls][1], color=colors[i],\n label=pprint_thing(kls), **kwds)\n ax.legend()\n\n ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor='none'))\n\n for xy, name in zip(s, df.columns):\n\n ax.add_patch(patches.Circle(xy, radius=0.025, facecolor='gray'))\n\n if xy[0] < 0.0 and xy[1] < 0.0:\n ax.text(xy[0] - 0.025, xy[1] - 0.025, name,\n ha='right', va='top', size='small')\n elif xy[0] < 0.0 and xy[1] >= 0.0:\n ax.text(xy[0] - 0.025, xy[1] + 0.025, name,\n ha='right', va='bottom', size='small')\n elif xy[0] >= 0.0 and xy[1] < 0.0:\n ax.text(xy[0] + 0.025, xy[1] - 0.025, name,\n ha='left', va='top', size='small')\n elif xy[0] >= 0.0 and xy[1] >= 0.0:\n ax.text(xy[0] + 0.025, xy[1] + 0.025, name,\n ha='left', va='bottom', size='small')\n\n ax.axis('equal')\n return ax\n\n\n@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')\ndef andrews_curves(frame, class_column, ax=None, samples=200, color=None,\n colormap=None, **kwds):\n \"\"\"\n Generates a matplotlib plot of Andrews curves, for visualising clusters of\n multivariate data.\n\n Andrews curves have the functional form:\n\n f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) +\n x_4 sin(2t) + x_5 cos(2t) + ...\n\n Where x coefficients correspond to the values of each dimension and t is\n linearly spaced between -pi and +pi. Each row of frame then corresponds to\n a single curve.\n\n Parameters\n ----------\n frame : DataFrame\n Data to be plotted, preferably normalized to (0.0, 1.0)\n class_column : Name of the column containing class names\n ax : matplotlib axes object, default None\n samples : Number of points to plot in each curve\n color : list or tuple, optional\n Colors to use for the different classes\n colormap : str or matplotlib colormap object, default None\n Colormap to select colors from. If string, load colormap with that name\n from matplotlib.\n kwds : keywords\n Options to pass to matplotlib plotting method\n\n Returns\n -------\n ax : Matplotlib axis object\n\n \"\"\"\n from math import sqrt, pi\n import matplotlib.pyplot as plt\n\n def function(amplitudes):\n def f(t):\n x1 = amplitudes[0]\n result = x1 / sqrt(2.0)\n\n # Take the rest of the coefficients and resize them\n # appropriately. 
Take a copy of amplitudes as otherwise numpy\n # deletes the element from amplitudes itself.\n coeffs = np.delete(np.copy(amplitudes), 0)\n coeffs.resize(int((coeffs.size + 1) / 2), 2)\n\n # Generate the harmonics and arguments for the sin and cos\n # functions.\n harmonics = np.arange(0, coeffs.shape[0]) + 1\n trig_args = np.outer(harmonics, t)\n\n result += np.sum(coeffs[:, 0, np.newaxis] * np.sin(trig_args) +\n coeffs[:, 1, np.newaxis] * np.cos(trig_args),\n axis=0)\n return result\n return f\n\n n = len(frame)\n class_col = frame[class_column]\n classes = frame[class_column].drop_duplicates()\n df = frame.drop(class_column, axis=1)\n t = np.linspace(-pi, pi, samples)\n used_legends = set()\n\n color_values = _get_standard_colors(num_colors=len(classes),\n colormap=colormap, color_type='random',\n color=color)\n colors = dict(zip(classes, color_values))\n if ax is None:\n ax = plt.gca(xlim=(-pi, pi))\n for i in range(n):\n row = df.iloc[i].values\n f = function(row)\n y = f(t)\n kls = class_col.iat[i]\n label = pprint_thing(kls)\n if label not in used_legends:\n used_legends.add(label)\n ax.plot(t, y, color=colors[kls], label=label, **kwds)\n else:\n ax.plot(t, y, color=colors[kls], **kwds)\n\n ax.legend(loc='upper right')\n ax.grid()\n return ax\n\n\ndef bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):\n \"\"\"\n Bootstrap plot on mean, median and mid-range statistics.\n\n The bootstrap plot is used to estimate the uncertainty of a statistic\n by relaying on random sampling with replacement [1]_. This function will\n generate bootstrapping plots for mean, median and mid-range statistics\n for the given number of samples of the given size.\n\n .. [1] \"Bootstrapping (statistics)\" in \\\n https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29\n\n Parameters\n ----------\n series : pandas.Series\n Pandas Series from where to get the samplings for the bootstrapping.\n fig : matplotlib.figure.Figure, default None\n If given, it will use the `fig` reference for plotting instead of\n creating a new one with default parameters.\n size : int, default 50\n Number of data points to consider during each sampling. It must be\n greater or equal than the length of the `series`.\n samples : int, default 500\n Number of times the bootstrap procedure is performed.\n **kwds :\n Options to pass to matplotlib plotting method.\n\n Returns\n -------\n fig : matplotlib.figure.Figure\n Matplotlib figure\n\n See Also\n --------\n pandas.DataFrame.plot : Basic plotting for DataFrame objects.\n pandas.Series.plot : Basic plotting for Series objects.\n\n Examples\n --------\n\n .. 
plot::\n :context: close-figs\n\n >>> s = pd.Series(np.random.uniform(size=100))\n >>> fig = pd.plotting.bootstrap_plot(s) # doctest: +SKIP\n \"\"\"\n import random\n import matplotlib.pyplot as plt\n\n # random.sample(ndarray, int) fails on python 3.3, sigh\n data = list(series.values)\n samplings = [random.sample(data, size) for _ in range(samples)]\n\n means = np.array([np.mean(sampling) for sampling in samplings])\n medians = np.array([np.median(sampling) for sampling in samplings])\n midranges = np.array([(min(sampling) + max(sampling)) * 0.5\n for sampling in samplings])\n if fig is None:\n fig = plt.figure()\n x = lrange(samples)\n axes = []\n ax1 = fig.add_subplot(2, 3, 1)\n ax1.set_xlabel(\"Sample\")\n axes.append(ax1)\n ax1.plot(x, means, **kwds)\n ax2 = fig.add_subplot(2, 3, 2)\n ax2.set_xlabel(\"Sample\")\n axes.append(ax2)\n ax2.plot(x, medians, **kwds)\n ax3 = fig.add_subplot(2, 3, 3)\n ax3.set_xlabel(\"Sample\")\n axes.append(ax3)\n ax3.plot(x, midranges, **kwds)\n ax4 = fig.add_subplot(2, 3, 4)\n ax4.set_xlabel(\"Mean\")\n axes.append(ax4)\n ax4.hist(means, **kwds)\n ax5 = fig.add_subplot(2, 3, 5)\n ax5.set_xlabel(\"Median\")\n axes.append(ax5)\n ax5.hist(medians, **kwds)\n ax6 = fig.add_subplot(2, 3, 6)\n ax6.set_xlabel(\"Midrange\")\n axes.append(ax6)\n ax6.hist(midranges, **kwds)\n for axis in axes:\n plt.setp(axis.get_xticklabels(), fontsize=8)\n plt.setp(axis.get_yticklabels(), fontsize=8)\n return fig\n\n\n@deprecate_kwarg(old_arg_name='colors', new_arg_name='color')\n@deprecate_kwarg(old_arg_name='data', new_arg_name='frame', stacklevel=3)\ndef parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,\n use_columns=False, xticks=None, colormap=None,\n axvlines=True, axvlines_kwds=None, sort_labels=False,\n **kwds):\n \"\"\"Parallel coordinates plotting.\n\n Parameters\n ----------\n frame : DataFrame\n class_column : str\n Column name containing class names\n cols : list, optional\n A list of column names to use\n ax : matplotlib.axis, optional\n matplotlib axis object\n color : list or tuple, optional\n Colors to use for the different classes\n use_columns : bool, optional\n If true, columns will be used as xticks\n xticks : list or tuple, optional\n A list of values to use for xticks\n colormap : str or matplotlib colormap, default None\n Colormap to use for line colors.\n axvlines : bool, optional\n If true, vertical lines will be added at each xtick\n axvlines_kwds : keywords, optional\n Options to be passed to axvline method for vertical lines\n sort_labels : bool, False\n Sort class_column labels, useful when assigning colors\n\n .. 
versionadded:: 0.20.0\n\n kwds : keywords\n Options to pass to matplotlib plotting method\n\n Returns\n -------\n ax: matplotlib axis object\n\n Examples\n --------\n >>> from matplotlib import pyplot as plt\n >>> df = pd.read_csv('https://raw.github.com/pandas-dev/pandas/master'\n '/pandas/tests/data/iris.csv')\n >>> pd.plotting.parallel_coordinates(\n df, 'Name',\n color=('#556270', '#4ECDC4', '#C7F464'))\n >>> plt.show()\n \"\"\"\n if axvlines_kwds is None:\n axvlines_kwds = {'linewidth': 1, 'color': 'black'}\n import matplotlib.pyplot as plt\n\n n = len(frame)\n classes = frame[class_column].drop_duplicates()\n class_col = frame[class_column]\n\n if cols is None:\n df = frame.drop(class_column, axis=1)\n else:\n df = frame[cols]\n\n used_legends = set()\n\n ncols = len(df.columns)\n\n # determine values to use for xticks\n if use_columns is True:\n if not np.all(np.isreal(list(df.columns))):\n raise ValueError('Columns must be numeric to be used as xticks')\n x = df.columns\n elif xticks is not None:\n if not np.all(np.isreal(xticks)):\n raise ValueError('xticks specified must be numeric')\n elif len(xticks) != ncols:\n raise ValueError('Length of xticks must match number of columns')\n x = xticks\n else:\n x = lrange(ncols)\n\n if ax is None:\n ax = plt.gca()\n\n color_values = _get_standard_colors(num_colors=len(classes),\n colormap=colormap, color_type='random',\n color=color)\n\n if sort_labels:\n classes = sorted(classes)\n color_values = sorted(color_values)\n colors = dict(zip(classes, color_values))\n\n for i in range(n):\n y = df.iloc[i].values\n kls = class_col.iat[i]\n label = pprint_thing(kls)\n if label not in used_legends:\n used_legends.add(label)\n ax.plot(x, y, color=colors[kls], label=label, **kwds)\n else:\n ax.plot(x, y, color=colors[kls], **kwds)\n\n if axvlines:\n for i in x:\n ax.axvline(i, **axvlines_kwds)\n\n ax.set_xticks(x)\n ax.set_xticklabels(df.columns)\n ax.set_xlim(x[0], x[-1])\n ax.legend(loc='upper right')\n ax.grid()\n return ax\n\n\ndef lag_plot(series, lag=1, ax=None, **kwds):\n \"\"\"Lag plot for time series.\n\n Parameters\n ----------\n series : Time series\n lag : lag of the scatter plot, default 1\n ax : Matplotlib axis object, optional\n kwds : Matplotlib scatter method keyword arguments, optional\n\n Returns\n -------\n ax: Matplotlib axis object\n \"\"\"\n import matplotlib.pyplot as plt\n\n # workaround because `c='b'` is hardcoded in matplotlibs scatter method\n kwds.setdefault('c', plt.rcParams['patch.facecolor'])\n\n data = series.values\n y1 = data[:-lag]\n y2 = data[lag:]\n if ax is None:\n ax = plt.gca()\n ax.set_xlabel(\"y(t)\")\n ax.set_ylabel(\"y(t + {lag})\".format(lag=lag))\n ax.scatter(y1, y2, **kwds)\n return ax\n\n\ndef autocorrelation_plot(series, ax=None, **kwds):\n \"\"\"Autocorrelation plot for time series.\n\n Parameters:\n -----------\n series: Time series\n ax: Matplotlib axis object, optional\n kwds : keywords\n Options to pass to matplotlib plotting method\n\n Returns:\n -----------\n ax: Matplotlib axis object\n \"\"\"\n import matplotlib.pyplot as plt\n n = len(series)\n data = np.asarray(series)\n if ax is None:\n ax = plt.gca(xlim=(1, n), ylim=(-1.0, 1.0))\n mean = np.mean(data)\n c0 = np.sum((data - mean) ** 2) / float(n)\n\n def r(h):\n return ((data[:n - h] - mean) *\n (data[h:] - mean)).sum() / float(n) / c0\n x = np.arange(n) + 1\n y = lmap(r, x)\n z95 = 1.959963984540054\n z99 = 2.5758293035489004\n ax.axhline(y=z99 / np.sqrt(n), linestyle='--', color='grey')\n ax.axhline(y=z95 / np.sqrt(n), color='grey')\n 
ax.axhline(y=0.0, color='black')\n ax.axhline(y=-z95 / np.sqrt(n), color='grey')\n ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey')\n ax.set_xlabel(\"Lag\")\n ax.set_ylabel(\"Autocorrelation\")\n ax.plot(x, y, **kwds)\n if 'label' in kwds:\n ax.legend()\n ax.grid()\n return ax\n"
] | [
[
"matplotlib.cbook.warn_deprecated"
],
[
"matplotlib.tri.triangulation.Triangulation.get_from_args_and_kwargs",
"numpy.asarray",
"matplotlib.collections.TriMesh",
"numpy.stack",
"matplotlib.collections.PolyCollection"
],
[
"matplotlib.style.reload_library",
"matplotlib.pyplot.xkcd",
"matplotlib.style.context",
"matplotlib.style.core.USER_LIBRARY_PATHS.append"
],
[
"pandas.timedelta_range",
"pandas.TimedeltaIndex",
"numpy.arange",
"pandas.offsets.Minute",
"pandas.util.testing.assert_index_equal",
"pandas.offsets.Hour"
],
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.plot",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"pandas.timedelta_range",
"pandas.util.testing.assert_attr_equal",
"pandas.util.testing.assert_numpy_array_equal",
"pandas.offsets.Hour",
"pandas.TimedeltaIndex",
"pandas.Series",
"pandas.util.testing.round_trip_pickle",
"pandas.util.testing.assert_produces_warning",
"pandas.Index",
"pandas.DataFrame",
"pandas.util.testing.assert_series_equal",
"pandas.Timedelta",
"numpy.dtype",
"pandas.util.testing.assert_index_equal",
"numpy.timedelta64",
"pandas.date_range",
"pandas.util.testing.makeTimedeltaIndex",
"numpy.array"
],
[
"pandas.compat.import_lzma",
"pandas.compat.is_platform_32bit",
"pandas.compat.is_platform_windows"
],
[
"pandas.core.indexing.validate_indices",
"pandas.Series",
"numpy.linspace",
"numpy.asarray",
"pandas.util.testing.assert_produces_warning",
"pandas.DataFrame",
"pandas.util.testing.assert_frame_equal",
"pandas.util.testing.assert_index_equal",
"pandas.tests.indexing.common._mklbl",
"numpy.random.randn",
"pandas.core.indexing._non_reducing_slice",
"numpy.arange",
"pandas.util.testing.assert_series_equal",
"pandas.Index",
"pandas.core.arrays.integer_array",
"pandas.Int64Index",
"pandas.core.dtypes.common.is_float_dtype",
"numpy.zeros",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.Float64Index",
"pandas.UInt64Index",
"pandas.core.indexing._maybe_numeric_slice",
"numpy.random.rand",
"numpy.array",
"pandas.util.testing.makeCustomDataframe",
"numpy.random.random",
"pandas.Timestamp",
"pandas.compat.lrange",
"pandas.compat.range"
],
[
"pandas.util.testing.ensure_clean",
"pandas.Timestamp",
"pandas.compat.StringIO",
"pandas.MultiIndex.from_tuples",
"pandas.compat.numpy.np_array_datetime64_compat",
"pandas.DataFrame",
"pandas.util.testing.assert_frame_equal",
"pandas.compat.parse_date",
"pandas.DatetimeIndex",
"pandas.core.indexes.datetimes.date_range",
"pandas._libs.tslib.Timestamp",
"pandas.io.parsers._concat_date_cols",
"numpy.array",
"pandas.compat.lrange"
],
[
"numpy.dot",
"numpy.abs",
"numpy.asarray",
"numpy.arange",
"matplotlib.path.Path",
"numpy.arctan2",
"numpy.concatenate",
"numpy.hypot",
"numpy.array",
"numpy.empty"
],
[
"pandas.timedelta_range",
"pandas.compat.u",
"pandas.Series",
"pandas.period_range",
"pandas.Categorical",
"numpy.arange",
"pandas.core.config.option_context",
"pandas.date_range"
],
[
"numpy.ones_like",
"numpy.linspace",
"matplotlib.figure.Figure",
"numpy.arange",
"matplotlib.checkdep_usetex",
"matplotlib.pyplot.subplots",
"numpy.sin",
"matplotlib.testing.decorators.image_comparison",
"matplotlib.backends.backend_svg.FigureCanvasSVG",
"matplotlib.dviread.PsFont",
"matplotlib.rc",
"matplotlib.pyplot.figure"
],
[
"pandas.timedelta_range",
"pandas.tseries.offsets.Day",
"numpy.arange",
"pandas.Timedelta",
"pandas.util.testing.assert_frame_equal",
"pandas.tseries.offsets.Second",
"pandas.util.testing.assert_index_equal",
"numpy.random.normal",
"pandas.to_timedelta"
],
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.contourf",
"numpy.linspace",
"matplotlib.pyplot.get_cmap",
"numpy.random.randn",
"numpy.exp",
"matplotlib.pyplot.gca",
"matplotlib.colors.PowerNorm",
"numpy.arange",
"matplotlib.testing.decorators.image_comparison",
"matplotlib.colorbar.ColorbarBase",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.ticker.LogFormatter",
"numpy.repeat",
"matplotlib.pyplot.figure",
"numpy.random.rand",
"numpy.meshgrid",
"numpy.array",
"matplotlib.rc_context",
"matplotlib.colors.LogNorm",
"matplotlib.pyplot.scatter",
"numpy.random.seed",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.colorbar",
"matplotlib.ticker.LogLocator",
"matplotlib.pyplot.contour",
"matplotlib.cm.get_cmap"
],
[
"numpy.expand_dims",
"pandas.util._decorators.deprecate_kwarg",
"numpy.linspace",
"numpy.sqrt",
"numpy.asarray",
"pandas.core.dtypes.missing.notna",
"numpy.max",
"scipy.stats.gaussian_kde",
"numpy.mean",
"matplotlib.pyplot.gca",
"pandas.plotting._tools._subplots",
"numpy.arange",
"numpy.sin",
"numpy.copy",
"pandas.compat.lmap",
"numpy.outer",
"matplotlib.pyplot.figure",
"numpy.min",
"pandas.plotting._tools._set_ticks_props",
"numpy.median",
"matplotlib.patches.Circle",
"numpy.isreal",
"numpy.sum",
"numpy.cos",
"pandas.compat.zip",
"pandas.io.formats.printing.pprint_thing",
"pandas.compat.lrange",
"pandas.compat.range"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20",
"1.0",
"0.25"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
DexiongYung/robustnav_AE | [
"f2b1b5bb8780e4e6ae5f81c127b7589cfc949801",
"f2b1b5bb8780e4e6ae5f81c127b7589cfc949801"
] | [
"domain_adaptation/corruptions/corruptions.py",
"projects/robustnav_baselines/experiments/robustnav_eval/pointnav_robothor_vanilla_rgb_resnet_rot_pred_ddppo.py"
] | [
"# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom PIL import Image\n\n# /////////////// Corruption Helpers ///////////////\n\nimport skimage as sk\nfrom torchvision import transforms\nimport torchvision.transforms.functional as F\nfrom skimage.filters import gaussian\nfrom io import BytesIO\nfrom wand.image import Image as WandImage\nfrom wand.api import library as wandlibrary\nimport wand.color as WandColor\nimport ctypes\nfrom PIL import Image as PILImage\nimport cv2\nfrom scipy.ndimage import zoom as scizoom\nfrom scipy.ndimage.interpolation import map_coordinates\nimport warnings\nimport os\nfrom pkg_resources import resource_filename\n\nwarnings.simplefilter(\"ignore\", UserWarning)\n\n\ndef disk(radius, alias_blur=0.1, dtype=np.float32):\n # 17 x 17 kernel causes seg fault in opencv\n # if radius <= 8:\n # L = np.arange(-8, 8 + 1)\n # ksize = (3, 3) \n if radius <= 5:\n L = np.arange(-5, 5 + 1)\n ksize = (3, 3)\n else:\n L = np.arange(-radius, radius + 1)\n ksize = (5, 5)\n X, Y = np.meshgrid(L, L)\n aliased_disk = np.array((X ** 2 + Y ** 2) <= radius ** 2, dtype=dtype)\n aliased_disk /= np.sum(aliased_disk)\n # supersample disk to antialias\n return cv2.GaussianBlur(aliased_disk, ksize=ksize, sigmaX=alias_blur)\n\n\n# Tell Python about the C method\nwandlibrary.MagickMotionBlurImage.argtypes = (ctypes.c_void_p, # wand\n ctypes.c_double, # radius\n ctypes.c_double, # sigma\n ctypes.c_double) # angle\n\n\n# Extend wand.image.Image class to include method signature\nclass MotionImage(WandImage):\n def motion_blur(self, radius=0.0, sigma=0.0, angle=0.0):\n wandlibrary.MagickMotionBlurImage(self.wand, radius, sigma, angle)\n\n\n# modification of https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py\ndef plasma_fractal(mapsize=512, wibbledecay=3):\n \"\"\"\n Generate a heightmap using diamond-square algorithm.\n Return square 2d array, side length 'mapsize', of floats in range 0-255.\n 'mapsize' must be a power of two.\n \"\"\"\n assert (mapsize & (mapsize - 1) == 0)\n maparray = np.empty((mapsize, mapsize), dtype=np.float_)\n maparray[0, 0] = 0\n stepsize = mapsize\n wibble = 100\n\n def wibbledmean(array):\n return array / 4 + wibble * np.random.uniform(-wibble, wibble, array.shape)\n\n def fillsquares():\n \"\"\"For each square of points stepsize apart,\n calculate middle value as mean of points + wibble\"\"\"\n cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]\n squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)\n squareaccum += np.roll(squareaccum, shift=-1, axis=1)\n maparray[stepsize // 2:mapsize:stepsize,\n stepsize // 2:mapsize:stepsize] = wibbledmean(squareaccum)\n\n def filldiamonds():\n \"\"\"For each diamond of points stepsize apart,\n calculate middle value as mean of points + wibble\"\"\"\n mapsize = maparray.shape[0]\n drgrid = maparray[stepsize // 2:mapsize:stepsize, stepsize // 2:mapsize:stepsize]\n ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]\n ldrsum = drgrid + np.roll(drgrid, 1, axis=0)\n lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)\n ltsum = ldrsum + lulsum\n maparray[0:mapsize:stepsize, stepsize // 2:mapsize:stepsize] = wibbledmean(ltsum)\n tdrsum = drgrid + np.roll(drgrid, 1, axis=1)\n tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)\n ttsum = tdrsum + tulsum\n maparray[stepsize // 2:mapsize:stepsize, 0:mapsize:stepsize] = wibbledmean(ttsum)\n\n while stepsize >= 2:\n fillsquares()\n filldiamonds()\n stepsize //= 2\n wibble /= wibbledecay\n\n maparray -= maparray.min()\n return maparray / maparray.max()\n\n\ndef 
clipped_zoom(img, zoom_factor):\n h = img.shape[0]\n # ceil crop height(= crop width)\n ch = int(np.ceil(h / float(zoom_factor)))\n\n top = (h - ch) // 2\n img = scizoom(img[top:top + ch, top:top + ch], (zoom_factor, zoom_factor, 1), order=1)\n # trim off any extra pixels\n trim_top = (img.shape[0] - h) // 2\n\n return img[trim_top:trim_top + h, trim_top:trim_top + h]\n\n\n# /////////////// End Corruption Helpers ///////////////\n\n\n# /////////////// Corruptions ///////////////\n\ndef gaussian_noise(x, severity=1):\n # c = [.08, .12, 0.18, 0.26, 0.38][severity - 1]\n c = [0.04, 0.06, .08, .09, .10][severity - 1]\n\n x = np.array(x) / 255.\n return np.clip(x + np.random.normal(size=x.shape, scale=c), 0, 1) * 255\n\n\ndef shot_noise(x, severity=1):\n # c = [60, 25, 12, 5, 3][severity - 1]\n c = [500, 250, 100, 75, 50][severity - 1]\n\n x = np.array(x) / 255.\n return np.clip(np.random.poisson(x * c) / float(c), 0, 1) * 255\n\n\ndef impulse_noise(x, severity=1):\n # c = [.03, .06, .09, 0.17, 0.27][severity - 1]\n c = [.01, .02, .03, .05, .07][severity - 1]\n\n x = sk.util.random_noise(np.array(x) / 255., mode='s&p', amount=c)\n return np.clip(x, 0, 1) * 255\n\n\ndef speckle_noise(x, severity=1):\n # c = [.15, .2, 0.35, 0.45, 0.6][severity - 1]\n c = [.06, .1, .12, .16, .2][severity - 1]\n\n x = np.array(x) / 255.\n return np.clip(x + x * np.random.normal(size=x.shape, scale=c), 0, 1) * 255\n\n\ndef gaussian_blur(x, severity=1):\n # c = [1, 2, 3, 4, 6][severity - 1]\n c = [.4, .6, 0.7, .8, 1][severity - 1]\n\n x = gaussian(np.array(x) / 255., sigma=c, multichannel=True)\n return np.clip(x, 0, 1) * 255\n\n\ndef glass_blur(x, severity=1):\n # sigma, max_delta, iterations\n # c = [(0.7, 1, 2), (0.9, 2, 1), (1, 2, 3), (1.1, 3, 2), (1.5, 4, 2)][severity - 1]\n c = [(0.05,1,1), (0.25,1,1), (0.4,1,1), (0.25,1,2), (0.4,1,2)][severity - 1]\n\n x = np.uint8(gaussian(np.array(x) / 255., sigma=c[0], multichannel=True) * 255)\n size = x.shape[0]\n # locally shuffle pixels\n for i in range(c[2]):\n for h in range(size - c[1], c[1], -1):\n for w in range(size - c[1], c[1], -1):\n dx, dy = np.random.randint(-c[1], c[1], size=(2,))\n h_prime, w_prime = h + dy, w + dx\n # swap\n x[h, w], x[h_prime, w_prime] = x[h_prime, w_prime], x[h, w]\n\n return np.clip(gaussian(x / 255., sigma=c[0], multichannel=True), 0, 1) * 255\n\n\ndef defocus_blur(x, severity=1):\n # c = [(3, 0.1), (4, 0.5), (6, 0.5), (8, 0.5), (10, 0.5)][severity - 1]\n c = [(0.3, 0.4), (0.4, 0.5), (0.5, 0.6), (1, 0.2), (1.5, 0.1)][severity - 1]\n\n x = np.array(x) / 255.\n kernel = disk(radius=c[0], alias_blur=c[1])\n\n channels = []\n for d in range(3):\n channels.append(cv2.filter2D(x[:, :, d].astype(np.float32), -1, kernel))\n channels = np.array(channels).transpose((1, 2, 0)) # 3x32x32 -> 32x32x3\n\n return np.clip(channels, 0, 1) * 255\n\ndef motion_blur(x, severity=1):\n # c = [(10, 3), (15, 5), (15, 8), (15, 12), (20, 15)][severity - 1]\n c = [(6,1), (6,1.5), (6,2), (8,2), (9,2.5)][severity - 1]\n\n output = BytesIO()\n Image.fromarray(x).save(output, format='PNG')\n x = MotionImage(blob=output.getvalue())\n\n x.motion_blur(radius=c[0], sigma=c[1], angle=np.random.uniform(-45, 45))\n\n x = cv2.imdecode(np.fromstring(x.make_blob(), np.uint8),\n cv2.IMREAD_UNCHANGED)\n\n if x.shape != (512, 512):\n return np.clip(x[..., [2, 1, 0]], 0, 255) # BGR to RGB\n else: # greyscale to RGB\n return np.clip(np.array([x, x, x]).transpose((1, 2, 0)), 0, 255)\n\n\ndef zoom_blur(x, severity=1):\n # c = [np.arange(1, 1.11, 0.01),\n # np.arange(1, 1.16, 0.01),\n 
# np.arange(1, 1.21, 0.02),\n # np.arange(1, 1.26, 0.02),\n # np.arange(1, 1.31, 0.03)][severity - 1]\n c = [np.arange(1, 1.06, 0.01), np.arange(1, 1.11, 0.01), np.arange(1, 1.16, 0.01),\n np.arange(1, 1.21, 0.01), np.arange(1, 1.26, 0.01)][severity - 1]\n\n x = (np.array(x) / 255.).astype(np.float32)\n out = np.zeros_like(x)\n for zoom_factor in c:\n out += clipped_zoom(x, zoom_factor)\n\n x = (x + out) / (len(c) + 1)\n return np.clip(x, 0, 1) * 255\n\n\ndef fog(x, severity=1):\n # c = [(1.5, 2), (2., 2), (2.5, 1.7), (2.5, 1.5), (3., 1.4)][severity - 1]\n c = [(.2,3), (.5,3), (0.75,2.5), (1,2), (1.5,1.75)][severity - 1]\n size = x.shape[0]\n x = np.array(x) / 255.\n max_val = x.max()\n x += c[0] * plasma_fractal(wibbledecay=c[1])[:size, :size][..., np.newaxis]\n return np.clip(x * max_val / (max_val + c[0]), 0, 1) # *255\n\n\ndef frost(x, severity=1):\n size = x.shape[0]\n # c = [(1, 0.4),\n # (0.8, 0.6),\n # (0.7, 0.7),\n # (0.65, 0.7),\n # (0.6, 0.75)][severity - 1]\n c = [(1, 0.2), (1, 0.3), (0.9, 0.4), (0.85, 0.4), (0.75, 0.45)][severity - 1]\n idx = np.random.randint(5)\n filename = [resource_filename(__name__, 'frost/frost1.png'),\n resource_filename(__name__, 'frost/frost2.png'),\n resource_filename(__name__, 'frost/frost3.png'),\n resource_filename(__name__, 'frost/frost4.jpg'),\n resource_filename(__name__, 'frost/frost5.jpg'),\n resource_filename(__name__, 'frost/frost6.jpg')][idx]\n frost = cv2.imread(filename)\n # randomly crop and convert to rgb\n\n x_start, y_start = np.random.randint(0, frost.shape[0] - size), np.random.randint(0, frost.shape[1] - size)\n\n frost = frost[x_start:x_start + size, y_start:y_start + size][..., [2, 1, 0]]\n\n return np.clip(c[0] * np.array(x) + c[1] * frost, 0, 255) / 255\n\n\ndef snow(x, severity=1):\n # c = [(0.1, 0.3, 3, 0.5, 10, 4, 0.8),\n # (0.2, 0.3, 2, 0.5, 12, 4, 0.7),\n # (0.55, 0.3, 4, 0.9, 12, 8, 0.7),\n # (0.55, 0.3, 4.5, 0.85, 12, 8, 0.65),\n # (0.55, 0.3, 2.5, 0.85, 12, 12, 0.55)][severity - 1]\n\n c = [(0.1,0.2,1,0.6,8,3,0.95),\n (0.1,0.2,1,0.5,10,4,0.9),\n (0.15,0.3,1.75,0.55,10,4,0.9),\n (0.25,0.3,2.25,0.6,12,6,0.85),\n (0.3,0.3,1.25,0.65,14,12,0.8)][severity - 1]\n\n size = x.shape[0]\n x = np.array(x, dtype=np.float32) / 255.\n snow_layer = np.random.normal(size=x.shape[:2], loc=c[0], scale=c[1]) # [:2] for monochrome\n\n snow_layer = clipped_zoom(snow_layer[..., np.newaxis], c[2])\n snow_layer[snow_layer < c[3]] = 0\n\n snow_layer = PILImage.fromarray((np.clip(snow_layer.squeeze(), 0, 1) * 255).astype(np.uint8), mode='L')\n output = BytesIO()\n snow_layer.save(output, format='PNG')\n snow_layer = MotionImage(blob=output.getvalue())\n\n snow_layer.motion_blur(radius=c[4], sigma=c[5], angle=np.random.uniform(-135, -45))\n\n snow_layer = cv2.imdecode(np.fromstring(snow_layer.make_blob(), np.uint8),\n cv2.IMREAD_UNCHANGED) / 255.\n snow_layer = snow_layer[..., np.newaxis]\n\n x = c[6] * x + (1 - c[6]) * np.maximum(x, cv2.cvtColor(x, code = cv2.COLOR_RGB2GRAY).reshape(size, size, 1) * 1.5 + 0.5)\n return np.clip(x + snow_layer + np.rot90(snow_layer, k=2), 0, 1) * 255\n\n\ndef spatter(x, severity=1):\n # c = [(0.65, 0.3, 4, 0.69, 0.6, 0),\n # (0.65, 0.3, 3, 0.68, 0.6, 0),\n # (0.65, 0.3, 2, 0.68, 0.5, 0),\n # (0.65, 0.3, 1, 0.65, 1.5, 1),\n # (0.67, 0.4, 1, 0.65, 1.5, 1)][severity - 1]\n c = [(0.62,0.1,0.7,0.7,0.5,0),\n (0.65,0.1,0.8,0.7,0.5,0),\n (0.65,0.3,1,0.69,0.5,0),\n (0.65,0.1,0.7,0.69,0.6,1),\n (0.65,0.1,0.5,0.68,0.6,1)][severity - 1]\n\n x = np.array(x, dtype=np.float32) / 255.\n\n liquid_layer = 
np.random.normal(size=x.shape[:2], loc=c[0], scale=c[1])\n\n liquid_layer = gaussian(liquid_layer, sigma=c[2])\n liquid_layer[liquid_layer < c[3]] = 0\n if c[5] == 0:\n liquid_layer = (liquid_layer * 255).astype(np.uint8)\n dist = 255 - cv2.Canny(liquid_layer, 50, 150)\n dist = cv2.distanceTransform(dist, cv2.DIST_L2, 5)\n _, dist = cv2.threshold(dist, 20, 20, cv2.THRESH_TRUNC)\n dist = cv2.blur(dist, (3, 3)).astype(np.uint8)\n dist = cv2.equalizeHist(dist)\n ker = np.array([[-2, -1, 0], [-1, 1, 1], [0, 1, 2]])\n dist = cv2.filter2D(dist, cv2.CV_8U, ker)\n dist = cv2.blur(dist, (3, 3)).astype(np.float32)\n\n m = cv2.cvtColor(liquid_layer * dist, cv2.COLOR_GRAY2BGRA)\n m /= np.max(m, axis=(0, 1))\n m *= c[4]\n\n # water is pale turqouise\n color = np.concatenate((175 / 255. * np.ones_like(m[..., :1]),\n 238 / 255. * np.ones_like(m[..., :1]),\n 238 / 255. * np.ones_like(m[..., :1])), axis=2)\n\n color = cv2.cvtColor(color, cv2.COLOR_BGR2BGRA)\n x = cv2.cvtColor(x, cv2.COLOR_BGR2BGRA)\n\n return cv2.cvtColor(np.clip(x + m * color, 0, 1), cv2.COLOR_BGRA2BGR) * 255\n else:\n m = np.where(liquid_layer > c[3], 1, 0)\n m = gaussian(m.astype(np.float32), sigma=c[4])\n m[m < 0.8] = 0\n\n # mud brown\n color = np.concatenate((63 / 255. * np.ones_like(x[..., :1]),\n 42 / 255. * np.ones_like(x[..., :1]),\n 20 / 255. * np.ones_like(x[..., :1])), axis=2)\n\n color *= m[..., np.newaxis]\n x *= (1 - m[..., np.newaxis])\n\n return np.clip(x + color, 0, 1) * 255\n\n\ndef contrast(x, severity=1):\n # c = [0.4, .3, .2, .1, .05][severity - 1]\n c = [.75, .5, .4, .3, 0.15][severity - 1]\n\n x = np.array(x) / 255.\n means = np.mean(x, axis=(0, 1), keepdims=True)\n return np.clip((x - means) * c + means, 0, 1) * 255\n\n\ndef generate_random_lines(imshape,slant,drop_length,rain_type):\n drops=[]\n area=imshape[0]*imshape[1]\n no_of_drops=area//600\n\n if rain_type.lower()=='drizzle':\n no_of_drops=area//770\n drop_length=10\n elif rain_type.lower()=='heavy':\n drop_length=30\n elif rain_type.lower()=='torrential':\n no_of_drops=area//500\n drop_length=60\n\n for i in range(no_of_drops): ## If You want heavy rain, try increasing this\n if slant<0:\n x= np.random.randint(slant,imshape[1])\n else:\n x= np.random.randint(0,imshape[1]-slant)\n y= np.random.randint(0,imshape[0]-drop_length)\n drops.append((x,y))\n return drops,drop_length\n\n\ndef rain_process(image,slant,drop_length,drop_color,drop_width,rain_drops):\n imshape = image.shape \n image_t = image.copy()\n for rain_drop in rain_drops:\n cv2.line(image_t,(rain_drop[0],rain_drop[1]),(rain_drop[0]+slant,rain_drop[1]+drop_length),drop_color,drop_width)\n image= cv2.blur(image_t,(7,7)) ## rainy view are blurry\n brightness_coefficient = 0.7 ## rainy days are usually shady \n image_HLS = hls(image) ## Conversion to HLS\n image_HLS[:,:,1] = image_HLS[:,:,1]*brightness_coefficient ## scale pixel values down for channel 1(Lightness)\n image_RGB= rgb(image_HLS,'hls') ## Conversion to RGB\n return image_RGB\n\n\ndef hls(image,src='RGB'):\n image_HLS = eval('cv2.cvtColor(image,cv2.COLOR_'+src.upper()+'2HLS)')\n return image_HLS\n\n\ndef rgb(image, src='BGR'):\n image_RGB= eval('cv2.cvtColor(image,cv2.COLOR_'+src.upper()+'2RGB)')\n return image_RGB\n\n\ndef rain(image, slant=-1,drop_length=20,drop_width=1,drop_color=(200,200,200),rain_type='torrential'): ## (200,200,200) a shade of gray\n # verify_image(image)\n slant_extreme=slant\n # if not(is_numeric(slant_extreme) and (slant_extreme>=-20 and slant_extreme<=20)or slant_extreme==-1):\n # raise 
Exception(err_rain_slant)\n # if not(is_numeric(drop_width) and drop_width>=1 and drop_width<=5):\n # raise Exception(err_rain_width)\n # if not(is_numeric(drop_length) and drop_length>=0 and drop_length<=100):\n # raise Exception(err_rain_length)\n\n imshape = image.shape\n if slant_extreme==-1:\n slant= np.random.randint(-10,10) ##generate random slant if no slant value is given\n rain_drops, drop_length= generate_random_lines(imshape,slant,drop_length,rain_type)\n output = rain_process(image,slant_extreme,drop_length,drop_color,drop_width,rain_drops)\n return output\n\n\ndef brightness(x, severity=1):\n # c = [.1, .2, .3, .4, .5][severity - 1]\n c = [.05, .1, .15, .2, .3][severity - 1]\n\n x = np.array(x) / 255.\n x = sk.color.rgb2hsv(x)\n x[:, :, 2] = np.clip(x[:, :, 2] + c, 0, 1)\n x = sk.color.hsv2rgb(x)\n\n return np.clip(x, 0, 1) * 255\n\n\ndef saturate(x, severity=1):\n # c = [(0.3, 0), (0.1, 0), (2, 0), (5, 0.1), (20, 0.2)][severity - 1]\n c = [(0.3, 0), (0.1, 0), (1.5, 0), (2, 0.1), (2.5, 0.2)][severity - 1]\n x = np.array(x) / 255.\n x = sk.color.rgb2hsv(x)\n x[:, :, 1] = np.clip(x[:, :, 1] * c[0] + c[1], 0, 1)\n x = sk.color.hsv2rgb(x)\n\n return np.clip(x, 0, 1) * 255\n\n\ndef jpeg_compression(x, severity=1):\n # c = [25, 18, 15, 10, 7][severity - 1]\n c = [80, 65, 58, 50, 40][severity - 1]\n\n output = BytesIO()\n\n Image.fromarray(x).save(output, 'JPEG', quality=c)\n x = np.array(PILImage.open(output))\n\n return x\n\n\ndef pixelate(x, severity=1):\n size = x.shape[0]\n # c = [0.6, 0.5, 0.4, 0.3, 0.25][severity - 1]\n c = [0.95, 0.9, 0.85, 0.75, 0.65][severity - 1]\n\n x = Image.fromarray(x)\n x = x.resize((int(size * c), int(size * c)),resample=Image.BILINEAR)\n x = x.resize((size, size),Image.NEAREST)\n\n return np.array(x)\n\n\n# mod of https://gist.github.com/erniejunior/601cdf56d2b424757de5\ndef elastic_transform(image, severity=1):\n c = [(244 * 2, 244 * 0.7, 244 * 0.1), # 244 should have been 512, but ultimately nothing is incorrect\n (244 * 2, 244 * 0.08, 244 * 0.2),\n (244 * 0.05, 244 * 0.01, 244 * 0.02),\n (244 * 0.07, 244 * 0.01, 244 * 0.02),\n (244 * 0.12, 244 * 0.01, 244 * 0.02)][severity - 1]\n\n image = np.array(image, dtype=np.float32) / 255.\n shape = image.shape\n shape_size = shape[:2]\n\n # random affine\n center_square = np.float32(shape_size) // 2\n square_size = min(shape_size) // 3\n pts1 = np.float32([center_square + square_size,\n [center_square[0] + square_size, center_square[1] - square_size],\n center_square - square_size])\n pts2 = pts1 + np.random.uniform(-c[2], c[2], size=pts1.shape).astype(np.float32)\n M = cv2.getAffineTransform(pts1, pts2)\n image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)\n\n dx = (gaussian(np.random.uniform(-1, 1, size=shape[:2]),\n c[1], mode='reflect', truncate=3) * c[0]).astype(np.float32)\n dy = (gaussian(np.random.uniform(-1, 1, size=shape[:2]),\n c[1], mode='reflect', truncate=3) * c[0]).astype(np.float32)\n dx, dy = dx[..., np.newaxis], dy[..., np.newaxis]\n\n x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))\n indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1)), np.reshape(z, (-1, 1))\n return np.clip(map_coordinates(image, indices, order=1, mode='reflect').reshape(shape), 0, 1) * 255\n\n\ndef blackoutNoise(image, severity=1):\n image = np.zeros(image.shape, dtype=np.uint8)\n m = (severity, severity, severity)\n s = (severity, severity, severity)\n\n image = np.clip(cv2.randn(image, m, s), 0, 255)\n\n return image\n\n\ndef 
additiveGaussianNoise(image, severity=1):\n m = (severity, severity, severity)\n s = (severity, severity, severity)\n corr = cv2.randn(np.zeros(image.shape, dtype=np.uint8), m, s)\n\n image = np.clip(image.copy() + corr, 0, 255)\n return image\n\n\ndef occlusion(image, severity=1):\n mask = np.ones(image.shape, dtype=np.uint8)\n\n x = int(image.shape[0] * np.random.rand())\n y = int(image.shape[1] * np.random.rand())\n r = int((min(image.shape[:2]) / 4) * np.random.rand() + (min(image.shape[:2]) / 4))\n\n cv2.circle(mask, (x, y), r, 0, -1)\n\n image = np.clip(image.copy() * mask, 0, 255)\n return image\n",
"\"\"\"\nExperiment config to evaluate a PointNav RGB policy\ntrained with Nav. Loss + Rotation Prediction\n\nSupports \"Clean\" and the following visual corruptions\n- Defocus Blur\n- Motion Blur\n- Spatter\n- Low Lighting\n- Speckle\n\"\"\"\n\n# Required imports\nimport glob\nimport os\nfrom abc import ABC\nfrom math import ceil\nfrom typing import Dict, Any, List, Optional, Sequence, Union\n\nimport gym\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim.lr_scheduler import LambdaLR\nfrom torchvision import models\n\nfrom allenact.base_abstractions.experiment_config import ExperimentConfig\nfrom allenact.base_abstractions.preprocessor import Preprocessor\nfrom allenact.base_abstractions.sensor import Sensor, RotationSensor\nfrom allenact.base_abstractions.experiment_config import MachineParams\nfrom allenact.base_abstractions.preprocessor import SensorPreprocessorGraph\nfrom allenact.base_abstractions.sensor import SensorSuite, ExpertActionSensor\nfrom allenact.base_abstractions.task import TaskSampler\n\nfrom allenact.utils.experiment_utils import evenly_distribute_count_into_bins\nfrom allenact.utils.system import get_logger\nfrom allenact.utils.experiment_utils import (\n Builder,\n PipelineStage,\n TrainingPipeline,\n LinearDecay,\n)\n\nfrom allenact_plugins.ithor_plugin.ithor_sensors import RGBSensorThor\nfrom allenact_plugins.robothor_plugin.robothor_sensors import DepthSensorThor\nfrom allenact_plugins.robothor_plugin.robothor_sensors import GPSCompassSensorRoboThor\nfrom allenact_plugins.ithor_plugin.ithor_util import horizontal_to_vertical_fov\n\nfrom allenact_plugins.robothor_plugin.robothor_task_samplers import (\n PointNavDatasetTaskSampler,\n)\nfrom allenact_plugins.robothor_plugin.robothor_tasks import ObjectNavTask\nfrom allenact_plugins.robothor_plugin.robothor_tasks import PointNavTask\n\nfrom allenact.embodiedai.preprocessors.resnet import ResNetPreprocessor\n\nfrom allenact.algorithms.onpolicy_sync.losses import PPO, RotationPred\nfrom allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig\nfrom allenact.algorithms.onpolicy_sync.losses.rotation_pred import RotPredConfig\n\nfrom projects.pointnav_baselines.models.point_nav_models import (\n PointNavActorCriticSimpleConvRNN,\n ResnetTensorPointNavActorCritic,\n ResnetTensorAuxPointNavActorCritic,\n)\n\nfrom allenact.base_abstractions.sensor import DepthSensor, RGBSensor\n\n\nclass PointNavS2SRGBResNetDDPPO(ExperimentConfig, ABC):\n \"\"\"A PointNav Experiment Config using RGB sensors and DDPPO\"\"\"\n\n def __init__(self):\n super().__init__()\n\n # Task Parameters\n self.ADVANCE_SCENE_ROLLOUT_PERIOD: Optional[int] = None\n\n self.STEP_SIZE = 0.25\n self.ROTATION_DEGREES = 30.0\n self.DISTANCE_TO_GOAL = 0.2\n self.STOCHASTIC = True\n self.HORIZONTAL_FIELD_OF_VIEW = 79\n\n self.CAMERA_WIDTH = 400\n self.CAMERA_HEIGHT = 300\n self.SCREEN_SIZE = 224\n self.MAX_STEPS = 300\n\n # Random crop specifications for data augmentations\n self.CROP_WIDTH = 320\n self.CROP_HEIGHT = 240\n\n self.REWARD_CONFIG = {\n \"step_penalty\": -0.01,\n \"goal_success_reward\": 10.0,\n \"failed_stop_reward\": 0.0,\n \"reached_max_steps_reward\": 0.0,\n \"shaping_weight\": 1.0,\n }\n\n self.NUM_PROCESSES = 60\n\n self.TRAIN_GPU_IDS = list(range(torch.cuda.device_count()))\n self.VALID_GPU_IDS = [torch.cuda.device_count() - 1]\n self.TEST_GPU_IDS = [torch.cuda.device_count() - 1]\n\n self.PREPROCESSORS = [\n Builder(\n ResNetPreprocessor,\n {\n \"input_height\": self.SCREEN_SIZE,\n 
\"input_width\": self.SCREEN_SIZE,\n \"output_width\": 7,\n \"output_height\": 7,\n \"output_dims\": 512,\n \"pool\": False,\n \"torchvision_resnet_model\": models.resnet18,\n \"input_uuids\": [\"rgb_lowres\"],\n \"output_uuid\": \"rgb_resnet\",\n },\n ),\n ]\n\n OBSERVATIONS = [\n \"rgb_resnet\",\n \"target_coordinates_ind\",\n \"rot_label\",\n ]\n\n self.ENV_ARGS = dict(\n width=self.CAMERA_WIDTH,\n height=self.CAMERA_HEIGHT,\n continuousMode=True,\n applyActionNoise=self.STOCHASTIC,\n agentType=\"stochastic\",\n rotateStepDegrees=self.ROTATION_DEGREES,\n gridSize=self.STEP_SIZE,\n snapToGrid=False,\n agentMode=\"locobot\",\n fieldOfView=horizontal_to_vertical_fov(\n horizontal_fov_in_degrees=self.HORIZONTAL_FIELD_OF_VIEW,\n width=self.CAMERA_WIDTH,\n height=self.CAMERA_HEIGHT,\n ),\n include_private_scenes=False,\n renderDepthImage=False,\n )\n\n @classmethod\n def tag(cls):\n return \"Pointnav-RoboTHOR-Vanilla-RGB-ResNet-Rot-Pred-DDPPO\"\n\n def monkey_patch_datasets(self, train_dataset, val_dataset, test_dataset):\n if train_dataset is not None:\n self.TRAIN_DATASET_DIR = os.path.join(os.getcwd(), train_dataset)\n else:\n self.TRAIN_DATASET_DIR = os.path.join(\n os.getcwd(), \"datasets/robothor-pointnav/train\"\n )\n\n if val_dataset is not None:\n self.VAL_DATASET_DIR = os.path.join(os.getcwd(), val_dataset)\n else:\n self.VAL_DATASET_DIR = os.path.join(\n os.getcwd(), \"datasets/robothor-pointnav/robustnav_eval\"\n )\n\n if test_dataset is not None:\n self.TEST_DATASET_DIR = os.path.join(os.getcwd(), test_dataset)\n else:\n self.TEST_DATASET_DIR = os.path.join(\n os.getcwd(), \"datasets/robothor-pointnav/robustnav_eval\"\n )\n\n def monkey_patch_sensor(\n self,\n corruptions=None,\n severities=None,\n random_crop=False,\n color_jitter=False,\n random_shift=False,\n ):\n self.SENSORS = [\n RGBSensorThor(\n height=self.SCREEN_SIZE,\n width=self.SCREEN_SIZE,\n use_resnet_normalization=True,\n uuid=\"rgb_lowres\",\n corruptions=corruptions,\n severities=severities,\n random_crop=random_crop,\n random_translate=random_shift,\n crop_height=self.CROP_HEIGHT,\n crop_width=self.CROP_WIDTH,\n color_jitter=color_jitter,\n # rotate=True,\n ),\n GPSCompassSensorRoboThor(),\n RotationSensor(uuid=\"rot_label\"),\n ]\n\n # DD-PPO Base\n def training_pipeline(self, **kwargs):\n ppo_steps = int(75000000)\n lr = 3e-4\n num_mini_batch = 1\n update_repeats = 4\n num_steps = 128\n save_interval = 5000000\n log_interval = 10000 if torch.cuda.is_available() else 1\n gamma = 0.99\n use_gae = True\n gae_lambda = 0.95\n max_grad_norm = 0.5\n return TrainingPipeline(\n save_interval=save_interval,\n metric_accumulate_interval=log_interval,\n optimizer_builder=Builder(optim.Adam, dict(lr=lr)),\n num_mini_batch=num_mini_batch,\n update_repeats=update_repeats,\n max_grad_norm=max_grad_norm,\n num_steps=num_steps,\n named_losses={\n \"ppo_loss\": PPO(**PPOConfig),\n \"rotation_pred_loss\": RotationPred(**RotPredConfig),\n },\n gamma=gamma,\n use_gae=use_gae,\n gae_lambda=gae_lambda,\n advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,\n pipeline_stages=[\n PipelineStage(\n loss_names=[\"ppo_loss\", \"rotation_pred_loss\"],\n max_stage_steps=ppo_steps,\n loss_weights=[1.0, 0.01],\n )\n ],\n lr_scheduler_builder=Builder(\n LambdaLR, {\"lr_lambda\": LinearDecay(steps=ppo_steps)}\n ),\n )\n\n # Model base requirements\n @classmethod\n def create_model(cls, **kwargs) -> nn.Module:\n rgb_uuid = \"rgb_resnet\"\n goal_sensor_uuid = \"target_coordinates_ind\"\n\n return ResnetTensorAuxPointNavActorCritic(\n 
action_space=gym.spaces.Discrete(len(PointNavTask.class_action_names())),\n observation_space=kwargs[\"sensor_preprocessor_graph\"].observation_spaces,\n goal_sensor_uuid=goal_sensor_uuid,\n rgb_resnet_preprocessor_uuid=rgb_uuid,\n hidden_size=512,\n goal_dims=32,\n aux_mode=True,\n rot_mode=True,\n )\n\n def machine_params(self, mode=\"train\", **kwargs):\n sampler_devices: Sequence[int] = []\n if mode == \"train\":\n workers_per_device = 1\n gpu_ids = (\n []\n if not torch.cuda.is_available()\n else self.TRAIN_GPU_IDS * workers_per_device\n )\n nprocesses = (\n 1\n if not torch.cuda.is_available()\n else evenly_distribute_count_into_bins(self.NUM_PROCESSES, len(gpu_ids))\n )\n sampler_devices = self.TRAIN_GPU_IDS\n elif mode == \"valid\":\n nprocesses = 1 if torch.cuda.is_available() else 0\n gpu_ids = [] if not torch.cuda.is_available() else self.VALID_GPU_IDS\n elif mode == \"test\":\n nprocesses = 15\n gpu_ids = [] if not torch.cuda.is_available() else self.TEST_GPU_IDS\n else:\n raise NotImplementedError(\"mode must be 'train', 'valid', or 'test'.\")\n\n sensor_preprocessor_graph = (\n SensorPreprocessorGraph(\n source_observation_spaces=SensorSuite(self.SENSORS).observation_spaces,\n preprocessors=self.PREPROCESSORS,\n )\n if mode == \"train\"\n or (\n (isinstance(nprocesses, int) and nprocesses > 0)\n or (isinstance(nprocesses, Sequence) and sum(nprocesses) > 0)\n )\n else None\n )\n\n return MachineParams(\n nprocesses=nprocesses,\n devices=gpu_ids,\n sampler_devices=sampler_devices\n if mode == \"train\"\n else gpu_ids, # ignored with > 1 gpu_ids\n sensor_preprocessor_graph=sensor_preprocessor_graph,\n )\n\n @classmethod\n def make_sampler_fn(cls, **kwargs) -> TaskSampler:\n return PointNavDatasetTaskSampler(**kwargs)\n\n @staticmethod\n def _partition_inds(n: int, num_parts: int):\n return np.round(np.linspace(0, n, num_parts + 1, endpoint=True)).astype(\n np.int32\n )\n\n def _get_sampler_args_for_scene_split(\n self,\n scenes_dir: str,\n process_ind: int,\n total_processes: int,\n devices: Optional[List[int]],\n seeds: Optional[List[int]],\n deterministic_cudnn: bool,\n include_expert_sensor: bool = True,\n ) -> Dict[str, Any]:\n path = os.path.join(scenes_dir, \"*.json.gz\")\n scenes = [scene.split(\"/\")[-1].split(\".\")[0] for scene in glob.glob(path)]\n if len(scenes) == 0:\n raise RuntimeError(\n (\n \"Could find no scene dataset information in directory {}.\"\n \" Are you sure you've downloaded them? 
\"\n \" If not, see https://allenact.org/installation/download-datasets/ information\"\n \" on how this can be done.\"\n ).format(scenes_dir)\n )\n\n oversample_warning = (\n f\"Warning: oversampling some of the scenes ({scenes}) to feed all processes ({total_processes}).\"\n \" You can avoid this by setting a number of workers divisible by the number of scenes\"\n )\n if total_processes > len(scenes): # oversample some scenes -> bias\n if total_processes % len(scenes) != 0:\n get_logger().warning(oversample_warning)\n scenes = scenes * int(ceil(total_processes / len(scenes)))\n scenes = scenes[: total_processes * (len(scenes) // total_processes)]\n elif len(scenes) % total_processes != 0:\n get_logger().warning(oversample_warning)\n\n inds = self._partition_inds(len(scenes), total_processes)\n\n return {\n \"scenes\": scenes[inds[process_ind] : inds[process_ind + 1]],\n \"max_steps\": self.MAX_STEPS,\n \"sensors\": [\n s\n for s in self.SENSORS\n if (include_expert_sensor or not isinstance(s, ExpertActionSensor))\n ],\n \"action_space\": gym.spaces.Discrete(len(PointNavTask.class_action_names())),\n \"seed\": seeds[process_ind] if seeds is not None else None,\n \"deterministic_cudnn\": deterministic_cudnn,\n \"rewards_config\": self.REWARD_CONFIG,\n \"env_args\": {\n **self.ENV_ARGS,\n \"x_display\": (\n f\"0.{devices[process_ind % len(devices)]}\"\n if devices is not None\n and len(devices) > 0\n and devices[process_ind % len(devices)] >= 0\n else None\n ),\n },\n }\n\n def train_task_sampler_args(\n self,\n process_ind: int,\n total_processes: int,\n devices: Optional[List[int]] = None,\n seeds: Optional[List[int]] = None,\n deterministic_cudnn: bool = False,\n ) -> Dict[str, Any]:\n res = self._get_sampler_args_for_scene_split(\n os.path.join(self.TRAIN_DATASET_DIR, \"episodes\"),\n process_ind,\n total_processes,\n devices=devices,\n seeds=seeds,\n deterministic_cudnn=deterministic_cudnn,\n )\n res[\"scene_directory\"] = self.TRAIN_DATASET_DIR\n res[\"loop_dataset\"] = True\n res[\"allow_flipping\"] = True\n return res\n\n def valid_task_sampler_args(\n self,\n process_ind: int,\n total_processes: int,\n devices: Optional[List[int]] = None,\n seeds: Optional[List[int]] = None,\n deterministic_cudnn: bool = False,\n ) -> Dict[str, Any]:\n res = self._get_sampler_args_for_scene_split(\n os.path.join(self.VAL_DATASET_DIR, \"episodes\"),\n process_ind,\n total_processes,\n devices=devices,\n seeds=seeds,\n deterministic_cudnn=deterministic_cudnn,\n include_expert_sensor=False,\n )\n res[\"scene_directory\"] = self.VAL_DATASET_DIR\n res[\"loop_dataset\"] = False\n return res\n\n def test_task_sampler_args(\n self,\n process_ind: int,\n total_processes: int,\n devices: Optional[List[int]] = None,\n seeds: Optional[List[int]] = None,\n deterministic_cudnn: bool = False,\n ) -> Dict[str, Any]:\n res = self._get_sampler_args_for_scene_split(\n scenes_dir=os.path.join(self.TEST_DATASET_DIR, \"episodes\"),\n process_ind=process_ind,\n total_processes=total_processes,\n devices=devices,\n seeds=seeds,\n deterministic_cudnn=deterministic_cudnn,\n include_expert_sensor=False,\n )\n res[\"scene_directory\"] = self.TEST_DATASET_DIR\n res[\"loop_dataset\"] = False\n return res\n"
] | [
[
"scipy.ndimage.interpolation.map_coordinates",
"numpy.max",
"numpy.zeros_like",
"numpy.mean",
"numpy.where",
"numpy.roll",
"numpy.random.randint",
"numpy.ones_like",
"numpy.clip",
"numpy.reshape",
"numpy.arange",
"scipy.ndimage.zoom",
"numpy.random.poisson",
"numpy.float32",
"numpy.zeros",
"numpy.rot90",
"numpy.random.rand",
"numpy.meshgrid",
"numpy.array",
"numpy.sum",
"numpy.ones",
"numpy.random.normal",
"numpy.random.uniform",
"numpy.empty"
],
[
"torch.cuda.device_count",
"torch.cuda.is_available",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.10",
"1.3",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
simondlevy/TinyNEF | [
"2e42754cf22996c86f1e35780d77591ec2bbb658"
] | [
"gym/pendulum_test.py"
] | [
"#!/usr/bin/env python3\n'''\nUse the Neural Engineering framework to solve Pendulum via an elitist GA\n\nCopyright (C) 2020 Simon D. Levy\n\nMIT License\n'''\n\nfrom lib import NefGym\nfrom sys import argv\nimport pickle\nimport numpy as np\n\nfrom sueap.algorithms.elitist import Elitist\n\nclass NefPendulum(NefGym):\n\n def __init__(self, neurons=20, seed=None):\n\n NefGym.__init__(self, 'Pendulum-v0', neurons, seed)\n\n def activate(self, x):\n\n return np.clip(x, -2, +2)\n\nif __name__ == '__main__':\n\n if len(argv) < 2:\n print('Usage: python3 %s FILE' % argv[0])\n exit(0)\n \n problem = NefPendulum()\n net = pickle.load(open(argv[1], 'rb'))\n print('Got reward %.3f in %d steps' % problem.test(net))\n"
] | [
[
"numpy.clip"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
astrax/astro2019 | [
"c1f5309415c80fbd986d6760bcb8bc095898beda"
] | [
"docs/.src/programs/skyplot_proj/skyplotv1.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\n\nimport astropy\nfrom scipy.spatial import cKDTree\n\nimport numpy as np\nimport matplotlib.pyplot as plt\ndata=np.genfromtxt('ybs.degbv',names=True)\nmessier=np.genfromtxt('Messierdec.txt',names=True)\n\nvlim=4.5\nmagscale=10\nstarsize=magscale*(vlim-data['v'])\n#norm = ((-data['v'])-( (-data['v'])).min())/(data['v'].max()-data['v'].min())\n#starsize=vlim+norm*starsize\n\nimport astropy\n\nfrom astropy import units as u\nfrom astropy.time import Time\nfrom astropy.coordinates import SkyCoord, EarthLocation, AltAz\n\nstarcoords=SkyCoord(ra=data['ra']*u.degree,dec=data['dec']*u.degree)\nmcoords=SkyCoord(ra=messier['Mra']*15.*u.degree,dec=messier['Mdec']*u.degree)\n\nCT=EarthLocation(lat=-30.159*u.deg,lon=-70.809*u.deg,height=2207.*u.m)\nKP=EarthLocation(lat=31.98*u.deg,lon=-111.60*u.deg,height=2097.*u.m)\nRM=EarthLocation(lat=28.7569*u.deg,lon=-17.8925*u.deg,height=2267.*u.m)\nsitecodes=['CT','KP','RM']\nsitenames=['Cerro Tololo','Kitt Peak', 'La Palma']\n\n\nfor site in range(0,2):\n if site==0:\n obsloc=CT\n if site==1:\n obsloc=KP\n utcoffset=-5.0*u.hour\n showtime = Time('2015-7-21 22:00:00') - utcoffset\n showtime=Time.now()\n print(showtime.iso)\n staraltaz=starcoords.transform_to(AltAz(obstime=showtime,location=obsloc))\n az2plot=np.pi/2.+np.array((3.1415926/180.)*u.degree*staraltaz.az)\n zd2plot=np.array(90.*u.degree-staraltaz.alt)\n #pos4kd=np.array([[az2plot],[zd2plot]])\n upind=(zd2plot < 90.).nonzero()\n plt.clf()\n plt.figure(site+1)\n ax=plt.subplot(111,polar=True)\n ax.grid(False)\n ax.set_xticklabels(['W', '', 'N', '', 'E', '', 'S', ''])\n \n #plt.fill_between([0,90],[0,0],[360,360],facecolor='0')\n plt.scatter(az2plot[upind],zd2plot[upind],s=starsize[upind],c=data['bv'][upind],cmap='rainbow',linewidth=0,vmax=1.2,vmin=-0.5)\n plt.ylim([0.,90.])\n cb=plt.colorbar(pad=0.10)\n cb.set_label('Star color, B-V')\n #plt.tick_params(axis='x',labelbottom='off')\n plt.tick_params(axis='y',labelleft='off')\n ax.set_xticklabels(['W', '', 'N', '', 'E', '', 'S', ''])\n # add parallels of declination every 30 degrees\n for jdec in range(5):\n pardeg=60.-30.*jdec\n parra=np.array(range(361))\n skpar=SkyCoord(ra=parra*u.degree,dec=pardeg*u.degree)\n paraltaz=skpar.transform_to(AltAz(obstime=showtime,location=obsloc))\n paraz2plot=np.pi/2.+np.array((3.14159265/180.)*u.degree*paraltaz.az)\n parzd2plot=np.array(90.*u.degree-paraltaz.alt)\n plt.plot(paraz2plot,parzd2plot,linewidth=1,color='gray',linestyle=':')\n \n # plot Messier objects\n maltaz=mcoords.transform_to(AltAz(obstime=showtime,location=obsloc))\n maz2plot=np.pi/2.+np.array((3.1415926/180.)*u.degree*maltaz.az)\n mzd2plot=np.array(90.*u.degree-maltaz.alt)\n upm=(mzd2plot < 90.).nonzero()\n \n #plt.scatter(maz2plot[upm],mzd2plot[upm],s=100,c=messier['Mclass'][upm],cmap='rainbow',alpha=0.4,linewidth=0)\n plt.title(str(sitenames[site])+' '+showtime.iso+' UT\\n')\n labelcolors=np.array(['blue','blue','green','orange','red'])\n mlabels=np.array(['{0}'.format(i+1) for i in range(110)])\n for j in range(110):\n plt.annotate(mlabels[j],xy=(maz2plot[j],mzd2plot[j]),xytext=(0,0),textcoords='offset points',color=labelcolors[messier['Mclass'][j]],size='small')\n #add Magellanic clouds\n sklmc=SkyCoord(ra=15.0*5.25*u.degree,dec=-68.7*u.degree)\n sksmc=SkyCoord(ra=0.77*15.0*u.degree,dec=-73.0*u.degree)\n lmcaltaz=sklmc.transform_to(AltAz(obstime=showtime,location=obsloc))\n smcaltaz=sksmc.transform_to(AltAz(obstime=showtime,location=obsloc))\n 
plt.scatter(np.pi/2.+np.array((3.1415926/180.)*u.degree*lmcaltaz.az),90.*u.degree-lmcaltaz.alt,s=250,c='green',alpha=0.3)\n plt.scatter(np.pi/2.+np.array((3.1415926/180.)*u.degree*smcaltaz.az),90.*u.degree-smcaltaz.alt,s=120,c='green',alpha=0.3)\n \n #add constellation lines\n conlines=np.genfromtxt('constellations.txt',names=\"star1, star2\")\n nstar1=np.array(conlines['star1'])\n nstar2=np.array(conlines['star2'])\n nstars=nstar1.size\n starnumbers=np.array(data['starnum'])\n for jstar in range(nstars):\n indexstar1=np.where(starnumbers==nstar1[jstar])[0]\n indexstar2=np.where(data['starnum']==nstar2[jstar])[0]\n plotx=np.array((az2plot[indexstar1],az2plot[indexstar2]))\n ploty=np.array((zd2plot[indexstar1],zd2plot[indexstar2]))\n plt.plot(plotx,ploty,linewidth=1,color='black',zorder=0)\n \n plt.annotate('Messier Objects:',xy=(0.04,0.18),xycoords='figure fraction')\n plt.annotate('Nebula',xy=(0.05,0.145),xycoords='figure fraction',color='blue')\n plt.annotate('Galaxy',xy=(0.05,0.11),xycoords='figure fraction',color='green')\n plt.annotate('Open cluster',xy=(0.05,0.075),xycoords='figure fraction',color='orange')\n plt.annotate('Globular cluster',xy=(0.05,0.04),xycoords='figure fraction',color='red')\n plt.show()\n if site==0:\n plt.savefig('SkyplotCTIO.png')\n if site==1:\n plt.savefig('SkyplotKPNO.png')\n\n\n \n \n"
] | [
[
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.annotate",
"matplotlib.pyplot.savefig",
"numpy.genfromtxt",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.clf",
"numpy.array",
"matplotlib.pyplot.tick_params",
"numpy.where",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ClementRolinat/stable-baselines | [
"333c59379f23e1f5c5c9e8bf93cbfa56ac52d13b"
] | [
"stable_baselines/a2c/a2c.py"
] | [
"import time\nfrom collections import deque\n\nimport gym\nimport numpy as np\nimport tensorflow as tf\n\nfrom stable_baselines import logger\nfrom stable_baselines.common import explained_variance, tf_util, ActorCriticRLModel, SetVerbosity, TensorboardWriter\nfrom stable_baselines.common.policies import ActorCriticPolicy, RecurrentActorCriticPolicy\nfrom stable_baselines.common.runners import AbstractEnvRunner\nfrom stable_baselines.a2c.utils import discount_with_dones, Scheduler, find_trainable_variables, mse, \\\n total_episode_reward_logger\nfrom stable_baselines.ppo2.ppo2 import safe_mean\n\nclass A2C(ActorCriticRLModel):\n \"\"\"\n The A2C (Advantage Actor Critic) model class, https://arxiv.org/abs/1602.01783\n\n :param policy: (ActorCriticPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, CnnLstmPolicy, ...)\n :param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)\n :param gamma: (float) Discount factor\n :param n_steps: (int) The number of steps to run for each environment per update\n (i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel)\n :param vf_coef: (float) Value function coefficient for the loss calculation\n :param ent_coef: (float) Entropy coefficient for the loss caculation\n :param max_grad_norm: (float) The maximum value for the gradient clipping\n :param learning_rate: (float) The learning rate\n :param alpha: (float) RMSProp decay parameter (default: 0.99)\n :param epsilon: (float) RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update)\n (default: 1e-5)\n :param lr_schedule: (str) The type of scheduler for the learning rate update ('linear', 'constant',\n 'double_linear_con', 'middle_drop' or 'double_middle_drop')\n :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug\n :param tensorboard_log: (str) the log location for tensorboard (if None, no logging)\n :param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance\n (used only for loading)\n :param policy_kwargs: (dict) additional arguments to be passed to the policy on creation\n :param full_tensorboard_log: (bool) enable additional logging when using tensorboard\n WARNING: this logging can take a lot of space quickly\n \"\"\"\n\n def __init__(self, policy, env, gamma=0.99, n_steps=5, vf_coef=0.25, ent_coef=0.01, max_grad_norm=0.5,\n learning_rate=7e-4, alpha=0.99, epsilon=1e-5, lr_schedule='constant', verbose=0, tensorboard_log=None,\n _init_setup_model=True, policy_kwargs=None, full_tensorboard_log=False):\n\n super(A2C, self).__init__(policy=policy, env=env, verbose=verbose, requires_vec_env=True,\n _init_setup_model=_init_setup_model, policy_kwargs=policy_kwargs)\n\n self.n_steps = n_steps\n self.gamma = gamma\n self.vf_coef = vf_coef\n self.ent_coef = ent_coef\n self.max_grad_norm = max_grad_norm\n self.alpha = alpha\n self.epsilon = epsilon\n self.lr_schedule = lr_schedule\n self.learning_rate = learning_rate\n self.tensorboard_log = tensorboard_log\n self.full_tensorboard_log = full_tensorboard_log\n\n self.graph = None\n self.sess = None\n self.learning_rate_ph = None\n self.n_batch = None\n self.actions_ph = None\n self.advs_ph = None\n self.rewards_ph = None\n self.pg_loss = None\n self.vf_loss = None\n self.entropy = None\n self.params = None\n self.apply_backprop = None\n self.train_model = None\n self.step_model = None\n self.step = None\n self.proba_step = None\n self.value = None\n 
self.initial_state = None\n self.learning_rate_schedule = None\n self.summary = None\n self.episode_reward = None\n\n # if we are loading, it is possible the environment is not known, however the obs and action space are known\n if _init_setup_model:\n self.setup_model()\n\n def _get_pretrain_placeholders(self):\n policy = self.train_model\n if isinstance(self.action_space, gym.spaces.Discrete):\n return policy.obs_ph, self.actions_ph, policy.policy\n return policy.obs_ph, self.actions_ph, policy.deterministic_action\n\n def setup_model(self):\n with SetVerbosity(self.verbose):\n\n assert issubclass(self.policy, ActorCriticPolicy), \"Error: the input policy for the A2C model must be an \" \\\n \"instance of common.policies.ActorCriticPolicy.\"\n\n self.graph = tf.Graph()\n with self.graph.as_default():\n self.sess = tf_util.make_session(graph=self.graph)\n\n self.n_batch = self.n_envs * self.n_steps\n\n n_batch_step = None\n n_batch_train = None\n if issubclass(self.policy, RecurrentActorCriticPolicy):\n n_batch_step = self.n_envs\n n_batch_train = self.n_envs * self.n_steps\n\n step_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,\n n_batch_step, reuse=False, **self.policy_kwargs)\n\n with tf.variable_scope(\"train_model\", reuse=True,\n custom_getter=tf_util.outer_scope_getter(\"train_model\")):\n train_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs,\n self.n_steps, n_batch_train, reuse=True, **self.policy_kwargs)\n\n with tf.variable_scope(\"loss\", reuse=False):\n self.actions_ph = train_model.pdtype.sample_placeholder([None], name=\"action_ph\")\n self.advs_ph = tf.placeholder(tf.float32, [None], name=\"advs_ph\")\n self.rewards_ph = tf.placeholder(tf.float32, [None], name=\"rewards_ph\")\n self.learning_rate_ph = tf.placeholder(tf.float32, [], name=\"learning_rate_ph\")\n\n neglogpac = train_model.proba_distribution.neglogp(self.actions_ph)\n self.entropy = tf.reduce_mean(train_model.proba_distribution.entropy())\n self.pg_loss = tf.reduce_mean(self.advs_ph * neglogpac)\n self.vf_loss = mse(tf.squeeze(train_model.value_flat), self.rewards_ph)\n # https://arxiv.org/pdf/1708.04782.pdf#page=9, https://arxiv.org/pdf/1602.01783.pdf#page=4\n # and https://github.com/dennybritz/reinforcement-learning/issues/34\n # suggest to add an entropy component in order to improve exploration.\n loss = self.pg_loss - self.entropy * self.ent_coef + self.vf_loss * self.vf_coef\n\n tf.summary.scalar('entropy_loss', self.entropy)\n tf.summary.scalar('policy_gradient_loss', self.pg_loss)\n tf.summary.scalar('value_function_loss', self.vf_loss)\n tf.summary.scalar('loss', loss)\n\n self.params = find_trainable_variables(\"model\")\n grads = tf.gradients(loss, self.params)\n if self.max_grad_norm is not None:\n grads, _ = tf.clip_by_global_norm(grads, self.max_grad_norm)\n grads = list(zip(grads, self.params))\n\n with tf.variable_scope(\"input_info\", reuse=False):\n tf.summary.scalar('discounted_rewards', tf.reduce_mean(self.rewards_ph))\n tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate))\n tf.summary.scalar('advantage', tf.reduce_mean(self.advs_ph))\n if self.full_tensorboard_log:\n tf.summary.histogram('discounted_rewards', self.rewards_ph)\n tf.summary.histogram('learning_rate', self.learning_rate)\n tf.summary.histogram('advantage', self.advs_ph)\n if tf_util.is_image(self.observation_space):\n tf.summary.image('observation', train_model.obs_ph)\n else:\n tf.summary.histogram('observation', 
train_model.obs_ph)\n\n trainer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate_ph, decay=self.alpha,\n epsilon=self.epsilon)\n self.apply_backprop = trainer.apply_gradients(grads)\n\n self.train_model = train_model\n self.step_model = step_model\n self.step = step_model.step\n self.proba_step = step_model.proba_step\n self.value = step_model.value\n self.initial_state = step_model.initial_state\n tf.global_variables_initializer().run(session=self.sess)\n\n self.summary = tf.summary.merge_all()\n\n def _train_step(self, obs, states, rewards, masks, actions, values, update, writer=None):\n \"\"\"\n applies a training step to the model\n\n :param obs: ([float]) The input observations\n :param states: ([float]) The states (used for recurrent policies)\n :param rewards: ([float]) The rewards from the environment\n :param masks: ([bool]) Whether or not the episode is over (used for recurrent policies)\n :param actions: ([float]) The actions taken\n :param values: ([float]) The logits values\n :param update: (int) the current step iteration\n :param writer: (TensorFlow Summary.writer) the writer for tensorboard\n :return: (float, float, float) policy loss, value loss, policy entropy\n \"\"\"\n advs = rewards - values\n cur_lr = None\n for _ in range(len(obs)):\n cur_lr = self.learning_rate_schedule.value()\n assert cur_lr is not None, \"Error: the observation input array cannon be empty\"\n\n td_map = {self.train_model.obs_ph: obs, self.actions_ph: actions, self.advs_ph: advs,\n self.rewards_ph: rewards, self.learning_rate_ph: cur_lr}\n if states is not None:\n td_map[self.train_model.states_ph] = states\n td_map[self.train_model.dones_ph] = masks\n\n if writer is not None:\n # run loss backprop with summary, but once every 10 runs save the metadata (memory, compute time, ...)\n if self.full_tensorboard_log and (1 + update) % 10 == 0:\n run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata()\n summary, policy_loss, value_loss, policy_entropy, _ = self.sess.run(\n [self.summary, self.pg_loss, self.vf_loss, self.entropy, self.apply_backprop],\n td_map, options=run_options, run_metadata=run_metadata)\n writer.add_run_metadata(run_metadata, 'step%d' % (update * (self.n_batch + 1)))\n else:\n summary, policy_loss, value_loss, policy_entropy, _ = self.sess.run(\n [self.summary, self.pg_loss, self.vf_loss, self.entropy, self.apply_backprop], td_map)\n writer.add_summary(summary, update * (self.n_batch + 1))\n\n else:\n policy_loss, value_loss, policy_entropy, _ = self.sess.run(\n [self.pg_loss, self.vf_loss, self.entropy, self.apply_backprop], td_map)\n\n return policy_loss, value_loss, policy_entropy\n\n def learn(self, total_timesteps, callback=None, seed=None, log_interval=100, tb_log_name=\"A2C\",\n reset_num_timesteps=True):\n\n new_tb_log = self._init_num_timesteps(reset_num_timesteps)\n\n with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \\\n as writer:\n self._setup_learn(seed)\n\n self.learning_rate_schedule = Scheduler(initial_value=self.learning_rate, n_values=total_timesteps,\n schedule=self.lr_schedule)\n\n runner = A2CRunner(self.env, self, n_steps=self.n_steps, gamma=self.gamma)\n self.episode_reward = np.zeros((self.n_envs,))\n # Training stats (when using Monitor wrapper)\n ep_info_buf = deque(maxlen=100)\n\n t_start = time.time()\n for update in range(1, total_timesteps // self.n_batch + 1):\n # true_reward is the reward without discount\n obs, states, rewards, masks, 
actions, values, ep_infos, true_reward = runner.run()\n ep_info_buf.extend(ep_infos)\n _, value_loss, policy_entropy = self._train_step(obs, states, rewards, masks, actions, values,\n self.num_timesteps // (self.n_batch + 1), writer)\n n_seconds = time.time() - t_start\n fps = int((update * self.n_batch) / n_seconds)\n\n if writer is not None:\n self.episode_reward = total_episode_reward_logger(self.episode_reward,\n true_reward.reshape((self.n_envs, self.n_steps)),\n masks.reshape((self.n_envs, self.n_steps)),\n writer, self.num_timesteps)\n\n self.num_timesteps += self.n_batch + 1\n\n if callback is not None:\n # Only stop training if return value is False, not when it is None. This is for backwards\n # compatibility with callbacks that have no return statement.\n if callback(locals(), globals()) is False:\n break\n\n if self.verbose >= 1 and (update % log_interval == 0 or update == 1):\n explained_var = explained_variance(values, rewards)\n logger.record_tabular(\"nupdates\", update)\n logger.record_tabular(\"total_timesteps\", self.num_timesteps)\n logger.record_tabular(\"fps\", fps)\n logger.record_tabular(\"policy_entropy\", float(policy_entropy))\n logger.record_tabular(\"value_loss\", float(value_loss))\n logger.record_tabular(\"explained_variance\", float(explained_var))\n if len(ep_info_buf) > 0 and len(ep_info_buf[0]) > 0:\n logger.logkv('ep_reward_mean', safe_mean([ep_info['r'] for ep_info in ep_info_buf]))\n logger.logkv('ep_len_mean', safe_mean([ep_info['l'] for ep_info in ep_info_buf]))\n logger.dump_tabular()\n\n return self\n\n def save(self, save_path):\n data = {\n \"gamma\": self.gamma,\n \"n_steps\": self.n_steps,\n \"vf_coef\": self.vf_coef,\n \"ent_coef\": self.ent_coef,\n \"max_grad_norm\": self.max_grad_norm,\n \"learning_rate\": self.learning_rate,\n \"alpha\": self.alpha,\n \"epsilon\": self.epsilon,\n \"lr_schedule\": self.lr_schedule,\n \"verbose\": self.verbose,\n \"policy\": self.policy,\n \"observation_space\": self.observation_space,\n \"action_space\": self.action_space,\n \"n_envs\": self.n_envs,\n \"_vectorize_action\": self._vectorize_action,\n \"policy_kwargs\": self.policy_kwargs\n }\n\n params = self.sess.run(self.params)\n\n self._save_to_file(save_path, data=data, params=params)\n\n\nclass A2CRunner(AbstractEnvRunner):\n def __init__(self, env, model, n_steps=5, gamma=0.99):\n \"\"\"\n A runner to learn the policy of an environment for an a2c model\n\n :param env: (Gym environment) The environment to learn from\n :param model: (Model) The model to learn\n :param n_steps: (int) The number of steps to run for each environment\n :param gamma: (float) Discount factor\n \"\"\"\n super(A2CRunner, self).__init__(env=env, model=model, n_steps=n_steps)\n self.gamma = gamma\n\n def run(self):\n \"\"\"\n Run a learning step of the model\n\n :return: ([float], [float], [float], [bool], [float], [float])\n observations, states, rewards, masks, actions, values\n \"\"\"\n mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [], [], [], [], []\n mb_states = self.states\n ep_infos = []\n for _ in range(self.n_steps):\n actions, values, states, _ = self.model.step(self.obs, self.states, self.dones)\n mb_obs.append(np.copy(self.obs))\n mb_actions.append(actions)\n mb_values.append(values)\n mb_dones.append(self.dones)\n clipped_actions = actions\n # Clip the actions to avoid out of bound error\n if isinstance(self.env.action_space, gym.spaces.Box):\n clipped_actions = np.clip(actions, self.env.action_space.low, self.env.action_space.high)\n obs, rewards, dones, 
infos = self.env.step(clipped_actions)\n for info in infos:\n maybe_ep_info = info.get('episode')\n if maybe_ep_info is not None:\n ep_infos.append(maybe_ep_info)\n\n self.states = states\n self.dones = dones\n self.obs = obs\n mb_rewards.append(rewards)\n mb_dones.append(self.dones)\n # batch of steps to batch of rollouts\n mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype).swapaxes(1, 0).reshape(self.batch_ob_shape)\n mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(0, 1)\n mb_actions = np.asarray(mb_actions, dtype=self.env.action_space.dtype).swapaxes(0, 1)\n mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(0, 1)\n mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(0, 1)\n mb_masks = mb_dones[:, :-1]\n mb_dones = mb_dones[:, 1:]\n true_rewards = np.copy(mb_rewards)\n last_values = self.model.value(self.obs, self.states, self.dones).tolist()\n # discount/bootstrap off value fn\n for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)):\n rewards = rewards.tolist()\n dones = dones.tolist()\n if dones[-1] == 0:\n rewards = discount_with_dones(rewards + [value], dones + [0], self.gamma)[:-1]\n else:\n rewards = discount_with_dones(rewards, dones, self.gamma)\n mb_rewards[n] = rewards\n\n # convert from [n_env, n_steps, ...] to [n_steps * n_env, ...]\n mb_rewards = mb_rewards.reshape(-1, *mb_rewards.shape[2:])\n mb_actions = mb_actions.reshape(-1, *mb_actions.shape[2:])\n mb_values = mb_values.reshape(-1, *mb_values.shape[2:])\n mb_masks = mb_masks.reshape(-1, *mb_masks.shape[2:])\n true_rewards = true_rewards.reshape(-1, *true_rewards.shape[2:])\n return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values, ep_infos, true_rewards\n"
] | [
[
"numpy.asarray",
"tensorflow.RunMetadata",
"tensorflow.summary.scalar",
"tensorflow.Graph",
"numpy.clip",
"tensorflow.summary.image",
"tensorflow.gradients",
"tensorflow.squeeze",
"numpy.copy",
"numpy.zeros",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.RunOptions",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.summary.merge_all",
"tensorflow.summary.histogram",
"tensorflow.reduce_mean",
"tensorflow.clip_by_global_norm",
"tensorflow.variable_scope"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
lsst-sqre/qa-dashboard | [
"57d40a33f1d6fdc04fb8f5e6e7e4fcfaee25340c"
] | [
"squash/dashboard/viz/api_helper.py"
] | [
"import os\nimport pandas as pd\nimport requests\nfrom datetime import datetime\nfrom furl import furl\n\nSQUASH_API_URL = os.environ.get('SQUASH_API_URL',\n 'http://localhost:8000/dashboard/api/')\n\n\ndef get_endpoint_urls():\n \"\"\"\n Lookup API endpoint URLs\n \"\"\"\n\n r = requests.get(SQUASH_API_URL)\n r.raise_for_status()\n\n return r.json()\n\n\ndef get_data(endpoint, params=None):\n \"\"\"Return data as a dict from\n an API endpoint \"\"\"\n\n api = get_endpoint_urls()\n\n # e.g. http://localhost:8000/AMx?ci_id=1&ci_dataset=cfht&metric=AM1\n r = requests.get(api[endpoint],\n params=params)\n r.raise_for_status()\n\n return r.json()\n\n\ndef get_data_as_pandas_df(endpoint, params=None):\n \"\"\"\n Return data as a pandas dataframe from\n an API endpoint\n \"\"\"\n\n result = get_data(endpoint, params)\n\n data = pd.DataFrame.from_dict(result, orient='index').transpose()\n\n return data\n\n\ndef get_datasets(default=None):\n \"\"\"Get a list of datasets from the API\n and a default value\n Returns\n -------\n datasets : list\n list of dataset names\n default : str\n if a valid default value is provided, overwrite\n the default value obtained from the API\n \"\"\"\n\n datasets = get_data('datasets')\n default_dataset = get_data('defaults')['ci_dataset']\n\n if default:\n if default in datasets:\n default_dataset = default\n\n return {'datasets': datasets, 'default': default_dataset}\n\n\ndef get_metrics(default=None):\n \"\"\"Get the list of metrics from the API\n and a default value\n Returns\n -------\n metrics : list\n list of metric names\n default : str\n if a valid default value is provided, overwrite\n the default value returned from the API\n \"\"\"\n\n r = get_data('metrics')\n metrics = [m['metric'] for m in r['results']]\n\n default_metric = get_data('defaults')['metric']\n\n if default:\n if default in metrics:\n default_metric = default\n\n return {'metrics': metrics, 'default': default_metric}\n\n\ndef get_value(specs, name):\n \"\"\" Helper function to unpack metric specification\n values\n Parameters\n ----------\n specs: dict\n a dict with keys value and name\n name: str\n the spec name\n Return\n ------\n value: float or None\n value of the spec if exists, None otherwise\n \"\"\"\n\n value = None\n\n for s in specs:\n if s['name'] == name:\n value = s['value']\n break\n\n return value\n\n\ndef get_specs(name):\n \"\"\"Get metric specifications thresholds\n from its name\n Parameters\n ----------\n name: str\n a valid metric name\n Returns\n -------\n unit: str\n metric unit\n description:\n metric description\n minimum: float\n metric minimum specification\n design: float\n metric design specification\n stretch: float\n metric stretch goal\n \"\"\"\n\n r = get_data('metrics')\n\n unit = str()\n description = str()\n specs = []\n\n minimum = None\n design = None\n stretch = None\n\n for m in r['results']:\n if m['metric'] == name:\n unit = m['unit']\n description = m['description']\n specs = eval(str(m['specs']))\n break\n\n if specs:\n minimum = get_value(specs, 'minimum')\n design = get_value(specs, 'design')\n stretch = get_value(specs, 'stretch')\n\n return {'unit': unit, 'description': description,\n 'minimum': minimum, 'design': design, 'stretch': stretch}\n\n\ndef get_url_args(doc, defaults=None):\n \"\"\"Return url args recovered from django_full_path cookie in\n the bokeh request header.\n\n If defaults values are provided, overwrite the default values\n obtained from the API\n \"\"\"\n\n args = get_data('defaults')\n\n # overwrite api default values\n if 
defaults:\n for key in defaults:\n args[key] = defaults[key]\n\n r = doc().session_context.request\n if r:\n if 'django_full_path' in r.cookies:\n django_full_path = r.cookies['django_full_path'].value\n tmp = furl(django_full_path).args\n for key in tmp:\n # overwrite default values with those passed\n # as url args, make sure the url arg (key) is valid\n if key in args:\n args[key] = tmp[key]\n\n # the bokeh app name is the second segment of the url path\n args['bokeh_app'] = furl(django_full_path).path.segments[1]\n\n return args\n\n\n# TODO: these functions are used by the monitor app and need refactoring\ndef get_initial_page(page_size, num_pages, window):\n\n # Page size in hours assuming CI_TIME_INTERVAL\n\n CI_TIME_INTERVAL = 8\n\n page_window = page_size * CI_TIME_INTERVAL\n\n if window == 'weeks':\n initial_page = num_pages - int((24*7)/page_window)\n elif window == 'months':\n # maximum window of 3 months\n initial_page = num_pages - int((24*30*3)/page_window)\n elif window == 'years':\n # maximum window of 1 year\n initial_page = num_pages - int((24*365)/page_window)\n else:\n # everything\n initial_page = 1\n\n # Make sure we have enough pages for the input time window\n if initial_page < 1:\n initial_page = 1\n\n return initial_page\n\n\ndef get_meas_by_dataset_and_metric(selected_dataset, selected_metric, window):\n \"\"\" Get measurements for a given dataset and metric from the measurements\n api endpoint\n\n Parameters\n ----------\n selected_dataset : str\n the current selected dataset\n selected_metric : str\n the current selected metric\n\n Returns\n -------\n ci_id : list\n list of job ids from the CI system\n dates : list\n list of datetimes for each job measurement\n measurements : list\n flat list of dicts where the key is the metric and the value\n is its measurement\n ci_url : list\n list of URLs for the jobs in the CI system\n \"\"\"\n api = get_endpoint_urls()\n\n # http://localhost:8000/dashboard/api/measurements/?job__ci_dataset=cfht&metric=AM1\n\n r = requests.get(api['measurements'],\n params={'job__ci_dataset': selected_dataset,\n 'metric': selected_metric})\n r.raise_for_status()\n\n results = r.json()\n\n # results are paginated, walk through each page\n\n # TODO: figure out how to retrieve the number of pages in DRF\n count = results['count']\n page_size = len(results['results'])\n\n measurements = []\n if page_size > 0:\n # ceiling integer\n num_pages = int(count/page_size) + (count % page_size > 0)\n\n initial_page = get_initial_page(page_size, num_pages, window)\n\n for page in range(initial_page, num_pages + 1):\n r = requests.get(\n api['measurements'],\n params={'job__ci_dataset': selected_dataset,\n 'metric': selected_metric,\n 'page': page})\n r.raise_for_status()\n measurements.extend(r.json()['results'])\n\n ci_ids = [int(m['ci_id']) for m in measurements]\n\n # 2016-08-10T05:22:37.700146Z\n # after DM-7517 jobs return is sorted by date and the same is done for\n # the measurements\n dates = [datetime.strptime(m['date'], '%Y-%m-%dT%H:%M:%S.%fZ')\n for m in measurements]\n\n values = [m['value'] for m in measurements]\n\n ci_urls = [m['ci_url'] for m in measurements]\n\n packages = [m['changed_packages'] for m in measurements]\n\n # list of package names, name is the first element in the tuple\n names = []\n for i, sublist in enumerate(packages):\n names.append([])\n for package in sublist:\n names[i].append(package[0])\n\n # list of git urls, git package commit sha and base url are the second and\n # third elements in the tuple\n git_urls = 
[]\n    for i, sublist in enumerate(packages):\n        git_urls.append([])\n        for package in sublist:\n            # str.strip('.git') would trim any trailing '.', 'g', 'i', 't' characters,\n            # over-stripping URLs such as '.../obs_cfht.git'; drop the '.git' suffix explicitly instead\n            base_url = package[2][:-len('.git')] if package[2].endswith('.git') else package[2]\n            git_urls[i].append(\"{}/commit/{}\".format(base_url, package[1]))\n\n    return {'ci_ids': ci_ids, 'dates': dates, 'values': values,\n            'ci_urls': ci_urls, 'names': names, 'git_urls': git_urls}\n"
] | [
[
"pandas.DataFrame.from_dict"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
srubenacker/DeepDog | [
"ce6613e01c04a14f62a2d6f6cd1c60f97efa790a"
] | [
"src/ddog.py"
] | [
"import util\nimport json\nimport numpy as np\nimport random\nimport tensorflow as tf\n\nclass DeepDog:\n \"\"\"\n The DeepDog class loads the training and test set images from\n disk into RAM, and provides functions to get the test set\n and mini batches of the training set. \n \"\"\"\n\n def __init__(self, imageWidth, imageHeight, trainingInRAM=False, classStratify=False,\n randomMirroring=False, randomCropping=None, normalizeImage=False):\n \"\"\"\n The constructor loads the one hot encodings and the entire test set into RAM.\n The training examples are stored on disk, and read into memory when needed\n for each batch. \n\n input:\n imageWidth: int, width of each image\n\n imageHeight: int, height of each image\n\n trainingInRAM: bool, whether or not to load the entire training set\n into RAM on initialization. This would be beneficial for smaller\n image sizes and decreases the time to fetch each batch.\n\n classStratify: bool, whether or not each batch should be equally \n represented by each breed class i.e. in a batch size of 120,\n each breed would show up once in the batch\n (not implemented yet)\n\n randomMirroring: bool, whether or not to randomly mirror individual \n training images returned by getNextMiniBatch()\n\n randomCropping: tuple, (cropWidth, cropHeight), cropWidth and cropHeight\n are the dimensions of the cropped image returned by\n getNextMiniBatch()\n\n normalizeImage: bool, whether or not to scale the images returned\n by getNextMiniBatch() and getTestImagesAndLabesl() to \n have 0 mean and unit standard deviation\n \"\"\"\n self.MIRROR_PROBABILITY = 0.5\n self.randomMirroring = randomMirroring\n self.randomCropping = randomCropping\n if self.randomCropping is not None:\n self.cropWidth = self.randomCropping[0]\n self.cropHeight = self.randomCropping[1]\n self.normalizeImage = normalizeImage\n\n self.image_width = imageWidth\n self.image_height = imageHeight\n self.training_in_RAM = trainingInRAM\n\n # load the one hot encodings from file\n self.one_hot_encodings = {}\n self.loadOneHotEncodings()\n self.numberBreeds = float(len(self.one_hot_encodings.keys()))\n\n # load the test set from file\n self.test_set_images, self.test_set_labels = [], []\n self.loadTestSet()\n\n # load the training annotations from file and randomize the \n # order of the training examples\n # self.training_examples is a list of 2-tuples\n # (breed, index in breed list of training_annotations)\n # self.training_set_images is a dictionary which is created\n # if trainingInRAM is set to True on construction\n # it is of the form {breed: [list of images in rgb form]}\n self.training_annotations = {}\n self.training_set_images = {}\n self.training_examples = []\n self.training_set_size = 0\n self.loadTrainingSet()\n\n # keep track of our place in the training examples list\n # so we can get the next mini batch\n self.current_index = 0\n\n\n ####################################################\n ################ Private Methods ###################\n ####################################################\n\n\n def loadOneHotEncodings(self):\n \"\"\"\n loadOneHotEncodings reads the one hot encodings for each\n breed and saves them to a member dictionary.\n\n input: none\n\n output: (doesn't return, saves to member variable)\n self.one_hot_encodings: dictionary, {'breed': [1, 0, 0]}\n \"\"\"\n with open('one_hot_encodings.json', 'r') as data_file:\n self.one_hot_encodings = json.load(data_file)\n\n\n def loadTrainingSet(self):\n \"\"\"\n loadTrainingSet reads the training_annotations.json\n into a 
member dictionary, and initializes the random\n order of the training_examples member list.\n\n input: none\n\n output: (doesn't return, saves to member variables)\n self.training_annotations: dictionary, {'breed': [list of annotations]}\n\n self.training_examples: list of 2-tuples\n [(breed, index into list of self.training_annotations), ...]\n \"\"\"\n print(\"Initializing training set order...\\n\")\n\n # load the training_annotations\n with open('training_annotations.json', 'r') as data_file:\n self.training_annotations = json.load(data_file)\n\n # create the list of 2-tuples of training examples (breed, index)\n for j, breed in enumerate(self.training_annotations.keys()):\n if self.training_in_RAM:\n print(str(round(j / self.numberBreeds * 100, 2)) + \"%: Loading training images for \" + breed)\n for i, annotation in enumerate(self.training_annotations[breed]):\n self.training_examples.append((breed, i))\n # if training_in_RAM is True, load the image from disk\n if self.training_in_RAM:\n currentImage = util.getResizedImageData(annotation, self.image_width, self.image_height)\n if breed not in self.training_set_images:\n self.training_set_images[breed] = [currentImage]\n else:\n self.training_set_images[breed].append(currentImage)\n\n self.training_set_size = len(self.training_examples)\n\n # randomize the order of the training examples\n random.shuffle(self.training_examples)\n\n print(\"Finished initializing training set order...\\n\")\n\n\n def loadTestSet(self):\n \"\"\"\n loadTestSet reads the test set images and labels from file\n and saves them into two lists in RAM. \n\n input: none\n\n output: (saves to member lists, doesn't return)\n testImages: numpy array [testSetSize x [imageWidth x imageHeight x 3]]\n\n testLabels: numpy array [testSetSize x [numImageClasses]] \n \"\"\"\n print(\"Loading test set...\\n\")\n\n testing_breeds = {}\n with open('testing_annotations.json', 'r') as data_file:\n testing_breeds = json.load(data_file)\n\n for i, breed in enumerate(testing_breeds.keys()):\n print(str(round(i / self.numberBreeds * 100, 2)) + \"%: Loading test images for \" + breed)\n \n for annotation in testing_breeds[breed]:\n # append the image data to testImages\n if self.randomCropping is None:\n self.test_set_images.append(util.getResizedImageData(annotation, \n self.image_width, self.image_height))\n else:\n self.test_set_images.append(util.getResizedImageData(annotation, \n self.cropWidth, self.cropHeight))\n\n # append the image label's one hot encoding to testLabels\n self.test_set_labels.append(self.one_hot_encodings[annotation['breed']])\n\n # convert python lists to numpy arrays\n self.test_set_images = np.array(self.test_set_images)\n if self.normalizeImage:\n print(\"Normalizing test images...\")\n self.test_set_images = tf.map_fn(tf.image.per_image_standardization, self.test_set_images)\n self.test_set_labels = np.array(self.test_set_labels)\n\n print(\"Finished loading test set.....\\n\")\n\n\n ####################################################\n ################ Public Interface ##################\n ####################################################\n\n\n def getNextMiniBatch(self, batchSize):\n \"\"\"\n getNextMiniBatch returns a 2-tuple of (batchImages, batchLabels).\n batchImages and batchLabels are both arrays, where the image\n at index i in batchImages corresponds to the label at index \n i in batchLabels. 
The batch images and labels are from\n the training set.\n\n input: \n batchSize: int, number of images and labels to include\n in the mini batch returned by getNextMiniBatch\n\n output:\n batchImages: numpy array [batchSize x [imageWidth x imageHeight x 3]]\n\n batchLabels: numpy array [batchSize x [numImageClasses]]\n \"\"\"\n batchImages = []\n batchLabels = []\n\n # if we have reached the end of the training examples, \n # reshuffle the training examples and start from the \n # beginning of the list\n # in the event that the number of training examples\n # is not evenly divisable by the batchSize,\n # some training examples will be skipped during this reshuffling\n # i trade this off for decreased code complexity\n if self.current_index + batchSize > self.training_set_size:\n self.current_index = 0\n random.shuffle(self.training_examples)\n\n # for each training example annotation, load the resized image and\n # get the one hot encoding of the label\n for breed, index in self.training_examples[self.current_index:self.current_index+batchSize]:\n # placeholder image variable\n imageToAppend = None\n\n # if the training data is already in RAM, read it from self.training_set_images\n # otherwise, fetch the image from disk\n if self.training_in_RAM:\n imageToAppend = self.training_set_images[breed][index]\n else:\n annotation = self.training_annotations[breed][index]\n\n # get the image data for the training example\n imageToAppend = util.getResizedImageData(annotation, \n self.image_width, self.image_height)\n\n # mirror the image if the random number is less than the probability\n if self.randomMirroring and random.random() < self.MIRROR_PROBABILITY:\n imageToAppend = np.fliplr(imageToAppend)\n\n # randomly crop the image\n if self.randomCropping is not None:\n widthDiff = self.image_width - self.cropWidth\n heightDiff = self.image_height - self.cropHeight\n\n widthOffset = int(random.random() * widthDiff)\n heightOffset = int(random.random() * heightDiff)\n\n imageToAppend = imageToAppend[widthOffset:widthOffset+self.cropWidth, \n heightOffset:heightOffset+self.cropHeight, \n :]\n\n # # normalize the image to 0 mean and unit standard deviation\n # if self.normalizeImage:\n # imageToAppend = tf.image.per_image_standardization(imageToAppend)\n\n # finally append the image\n batchImages.append(imageToAppend)\n # get the one hot encoding of the label\n batchLabels.append(self.one_hot_encodings[breed])\n\n self.current_index += batchSize\n\n if self.normalizeImage:\n batchImages = tf.map_fn(tf.image.per_image_standardization, batchImages)\n return batchImages, np.array(batchLabels)\n return np.array(batchImages), np.array(batchLabels)\n\n\n def getTestImagesAndLabels(self):\n \"\"\"\n getTestImagesAndLabels returns a 2-tuple of (testImages, testLabels).\n testImages and testLabels are both numpy arrays, where the image \n at index i in testImages corresponds to the label at index i in \n testLabels. \n\n input: None\n\n output:\n testImages: numpy array [testSetSize x [imageWidth x imageHeight x 3]]\n\n testLabels: numpy array [testSetSize x [numImageClasses]] \n \"\"\"\n return self.test_set_images, self.test_set_labels\n\n\n def getTrainingSetSize(self):\n \"\"\"\n getTraininSetSize returns the size of the training set. 
This\n function is useful when computing the progress inside an epoch.\n\n input: none\n\n output:\n trainingSetSize: int, number of examples in the training set\n \"\"\"\n return self.training_set_size\n\n\ndef main():\n dd = DeepDog(64, 64)\n im, la = dd.getNextMiniBatch(100)\n print(im.shape, la.shape)\n print(im)\n print(la)\n\n\nif __name__ == \"__main__\":\n main()"
] | [
[
"numpy.fliplr",
"numpy.array",
"tensorflow.map_fn"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
decisionforce/HACO | [
"ebd1dc49598e6ae2704e58c053cc35f2d9e28429",
"ebd1dc49598e6ae2704e58c053cc35f2d9e28429",
"ebd1dc49598e6ae2704e58c053cc35f2d9e28429",
"ebd1dc49598e6ae2704e58c053cc35f2d9e28429",
"ebd1dc49598e6ae2704e58c053cc35f2d9e28429"
] | [
"haco/DIDrive_core/demo/cilrs/cilrs_collect_data.py",
"haco/DIDrive_core/utils/simulator_utils/sensor_utils.py",
"haco/DIDrive_core/simulators/srunner/scenarios/signalized_junction_left_turn.py",
"haco/DIDrive_core/demo/implicit/collect_data.py",
"haco/DIDrive_core/demo/lbc/lbc_bev_test.py"
] | [
"import os\nfrom functools import partial\n\nimport PIL\nimport lmdb\nimport numpy as np\nfrom ding.envs import SyncSubprocessEnvManager\nfrom ding.utils.default_helper import deep_merge_dicts\nfrom easydict import EasyDict\nfrom tqdm import tqdm\n\nfrom haco.DIDrive_core.data import CarlaBenchmarkCollector, BenchmarkDatasetSaver\nfrom haco.DIDrive_core.envs import SimpleCarlaEnv, CarlaEnvWrapper\nfrom haco.DIDrive_core.policy import AutoPIDPolicy\nfrom haco.DIDrive_core.utils.others.tcp_helper import parse_carla_tcp\n\nconfig = dict(\n env=dict(\n env_num=5,\n simulator=dict(\n disable_two_wheels=True,\n planner=dict(\n type='behavior',\n resolution=1,\n ),\n obs=(\n dict(\n name='rgb',\n type='rgb',\n size=[400, 300],\n position=[1.3, 0.0, 2.3],\n fov=100,\n ),\n ),\n verbose=True,\n ),\n col_is_failure=True,\n stuck_is_failure=True,\n ran_light_is_failure=True,\n manager=dict(\n auto_reset=False,\n shared_memory=False,\n context='spawn',\n max_retry=1,\n ),\n wrapper=dict(\n speed_factor=25.,\n scale=1,\n crop=256,\n ),\n ),\n server=[\n dict(carla_host='localhost', carla_ports=[9000, 9010, 2]),\n ],\n policy=dict(\n target_speed=25,\n tl_threshold=13,\n noise=True,\n noise_kwargs=dict(),\n collect=dict(\n n_episode=100,\n dir_path='./datasets_train/cilrs_datasets_train',\n preloads_name='cilrs_datasets_train.npy',\n collector=dict(\n suite='FullTown01-v1',\n nocrash=True,\n ),\n )\n ),\n)\n\nmain_config = EasyDict(config)\n\n\ndef cilrs_postprocess(observasion, scale=1, crop=256):\n rgb = observasion['rgb'].copy()\n im = PIL.Image.fromarray(rgb)\n (width, height) = (int(im.width // scale), int(im.height // scale))\n rgb = im.resize((width, height))\n rgb = np.asarray(rgb)\n start_x = height // 2 - crop // 2\n start_y = width // 2 - crop // 2\n rgb = rgb[start_x:start_x + crop, start_y:start_y + crop]\n sensor_data = {'rgb': rgb}\n others = {}\n return sensor_data, others\n\n\ndef wrapped_env(env_cfg, wrapper_cfg, host, port, tm_port=None):\n return CarlaEnvWrapper(SimpleCarlaEnv(env_cfg, host, port, tm_port), wrapper_cfg)\n\n\ndef post_process(config):\n epi_folder = [x for x in os.listdir(config.policy.collect.dir_path) if x.startswith('epi')]\n\n all_img_list = []\n all_mea_list = []\n\n for item in tqdm(epi_folder):\n lmdb_file = lmdb.open(os.path.join(config.policy.collect.dir_path, item, 'measurements.lmdb')).begin(write=False)\n png_files = [\n x for x in os.listdir(os.path.join(config.policy.collect.dir_path, item)) if (x.endswith('png') and x.startswith('rgb'))\n ]\n png_files.sort()\n for png_file in png_files:\n index = png_file.split('_')[1].split('.')[0]\n measurements = np.frombuffer(lmdb_file.get(('measurements_%05d' % int(index)).encode()), np.float32)\n data = {}\n data['control'] = np.array([measurements[15], measurements[16], measurements[17]]).astype(np.float32)\n data['speed'] = measurements[10] / config.env.wrapper.speed_factor\n data['command'] = float(measurements[11])\n new_dict = {}\n new_dict['brake'] = data['control'][2]\n new_dict['steer'] = (data['control'][0] + 1) / 2\n new_dict['throttle'] = data['control'][1]\n new_dict['speed'] = data['speed']\n new_dict['command'] = data['command']\n all_img_list.append(os.path.join(item, png_file))\n all_mea_list.append(new_dict)\n if not os.path.exists('_preloads'):\n os.mkdir('_preloads')\n np.save('_preloads/{}'.format(config.policy.collect.preloads_name), [all_img_list, all_mea_list])\n\n\ndef main(cfg, seed=0):\n cfg.env.manager = deep_merge_dicts(SyncSubprocessEnvManager.default_config(), cfg.env.manager)\n\n 
tcp_list = parse_carla_tcp(cfg.server)\n env_num = cfg.env.env_num\n assert len(tcp_list) >= env_num, \\\n \"Carla server not enough! Need {} servers but only found {}.\".format(env_num, len(tcp_list))\n\n collector_env = SyncSubprocessEnvManager(\n env_fn=[partial(wrapped_env, cfg.env, cfg.env.wrapper, *tcp_list[i]) for i in range(env_num)],\n cfg=cfg.env.manager,\n )\n\n policy = AutoPIDPolicy(cfg.policy)\n\n collector = CarlaBenchmarkCollector(cfg.policy.collect.collector, collector_env, policy.collect_mode)\n\n if not os.path.exists(cfg.policy.collect.dir_path):\n os.makedirs(cfg.policy.collect.dir_path)\n\n collected_episodes = 0\n data_postprocess = lambda x: cilrs_postprocess(x, scale=cfg.env.wrapper.scale, crop=cfg.env.wrapper.crop)\n saver = BenchmarkDatasetSaver(cfg.policy.collect.dir_path, cfg.env.simulator.obs, data_postprocess)\n print('[MAIN] Start collecting data')\n saver.make_dataset_path(cfg.policy.collect)\n while collected_episodes < cfg.policy.collect.n_episode:\n # Sampling data from environments\n n_episode = min(cfg.policy.collect.n_episode - collected_episodes, env_num * 2)\n new_data = collector.collect(n_episode=n_episode)\n saver.save_episodes_data(new_data, start_episode=collected_episodes)\n collected_episodes += n_episode\n print('[MAIN] Current collected: ', collected_episodes, '/', cfg.policy.collect.n_episode)\n\n collector_env.close()\n saver.make_index()\n print('[MAIN] Making preloads')\n post_process(cfg)\n\n\nif __name__ == '__main__':\n main(main_config)\n",
"import os\nimport copy\nimport logging\nimport time\nimport numpy as np\nimport carla\nimport math\nimport weakref\nimport shapely.geometry\nfrom enum import Enum\nfrom easydict import EasyDict\nfrom collections import deque\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nfrom haco.DIDrive_core.simulators.carla_data_provider import CarlaDataProvider\nfrom haco.DIDrive_core.utils.others.config_helper import deep_merge_dicts\n\nDEFAULT_CAMERA_CONFIG = {\n 'size': [384, 160],\n 'fov': 90,\n 'position': [2.0, 0.0, 1.4],\n 'rotation': [0, 0, 0],\n}\n\nDEFAULT_CAMERA_AUG_CONFIG = {'position_range': [0, 0, 0], 'rotation_range': [0, 0, 0]}\n\nDEFAULT_LIDAR_CONFIG = {\n 'channels': 1,\n 'range': 2000,\n 'points_per_second': 1000,\n 'rotation_frequency': 10,\n 'upper_fov': -3,\n 'lower_fov': -3,\n 'position': [0, 0.0, 1.4],\n 'rotation': [0, -90, 0],\n 'draw': False,\n}\n\nDEFAULT_GNSS_CONFIG = {\n 'position': [0.0, 0.0, 1.4],\n}\n\n\nclass TrafficLightState(Enum):\n RED = 0\n YELLOW = 1\n GREEN = 2\n OFF = 3\n\n\ndef get_random_sample(range_list):\n res = []\n for _range in range_list:\n num = np.random.random() * _range * 2 - _range\n res.append(num)\n return res\n\n\nclass SensorHelper(object):\n \"\"\"\n Interfaces for sensors required for vehicles and data buffer for all sensor data in Carla. The updating for Carla\n sensors are not synchronous. In each tick, the newest sensor data is obtained from sensor data buffer and returned\n to the simulator. This class provides an interface that can easily create, receive data and destroy all\n kinds of sensors in Carla according to config, and apply the same sensor augmantation to all camera sensors.\n\n :Arguments:\n - obs_cfg (Dict): Config dict for sensor\n - aug_cfg (Dict, optional): Config dict for sensor augmentation. 
Defaults to None.\n\n :Interfaces: setup_sensors, get_sensors_data, clear_up\n \"\"\"\n\n def __init__(\n self,\n obs_cfg: Dict,\n aug_cfg: Optional[Dict] = None,\n ) -> None:\n self._obs_cfg = obs_cfg\n self._aug_cfg = aug_cfg\n self._sensors_dict = {}\n self._data_buffers = {}\n self._timestamps = {}\n self._random_aug_pos = None\n self._random_aug_rot = None\n\n def setup_sensors(self, world: carla.World, vehicle: carla.Actor) -> None:\n \"\"\"\n Create the sensors defined in config and attach them to the hero vehicle\n\n :Arguments:\n - world (carla.World): Carla world\n - vehicle (carla.Actor): ego vehicle\n \"\"\"\n bp_library = world.get_blueprint_library()\n if self._aug_cfg:\n self._aug_cfg = EasyDict(deep_merge_dicts(DEFAULT_CAMERA_AUG_CONFIG, self._aug_cfg))\n if min(self._aug_cfg.position_range) < 0 or min(self._aug_cfg.rotation_range) < 0:\n raise ValueError('Augmentation parameters must greater than 0!')\n self._random_aug_pos = get_random_sample(self._aug_cfg.position_range)\n self._random_aug_rot = get_random_sample(self._aug_cfg.rotation_range)\n else:\n self._random_aug_pos = [0, 0, 0]\n self._random_aug_rot = [0, 0, 0]\n for obs_item in self._obs_cfg:\n if obs_item.type in ['rgb', 'depth', 'segmentation']:\n obs_item = EasyDict(deep_merge_dicts(DEFAULT_CAMERA_CONFIG, obs_item))\n bp_name = {\n 'rgb': 'sensor.camera.rgb',\n 'depth': 'sensor.camera.depth',\n 'segmentation': 'sensor.camera.semantic_segmentation',\n }[obs_item.type]\n sensor_bp = bp_library.find(bp_name)\n sensor_bp.set_attribute('image_size_x', str(obs_item.size[0]))\n sensor_bp.set_attribute('image_size_y', str(obs_item.size[1]))\n sensor_bp.set_attribute('fov', str(obs_item.fov))\n sensor_location = carla.Location(\n obs_item.position[0] + self._random_aug_pos[0], obs_item.position[1] + self._random_aug_pos[1],\n obs_item.position[2] + self._random_aug_pos[2]\n )\n sensor_rotation = carla.Rotation(\n obs_item.rotation[0] + self._random_aug_rot[0], obs_item.rotation[1] + self._random_aug_rot[1],\n obs_item.rotation[2] + self._random_aug_rot[2]\n )\n\n elif obs_item.type == 'lidar':\n obs_item = EasyDict(deep_merge_dicts(DEFAULT_LIDAR_CONFIG, obs_item))\n sensor_bp = bp_library.find('sensor.lidar.ray_cast')\n sensor_bp.set_attribute('range', str(obs_item.range))\n sensor_bp.set_attribute('rotation_frequency', str(obs_item.rotation_frequency))\n sensor_bp.set_attribute('channels', str(obs_item.channels))\n sensor_bp.set_attribute('upper_fov', str(obs_item.upper_fov))\n sensor_bp.set_attribute('lower_fov', str(obs_item.lower_fov))\n sensor_bp.set_attribute('points_per_second', str(obs_item.points_per_second))\n sensor_location = carla.Location(obs_item.position[0], obs_item.position[1], obs_item.position[2])\n sensor_rotation = carla.Rotation(obs_item.rotation[0], obs_item.rotation[1], obs_item.rotation[2])\n\n elif obs_item.type == 'gnss':\n obs_item = EasyDict(deep_merge_dicts(DEFAULT_GNSS_CONFIG, obs_item))\n obs_item.update(obs_item)\n sensor_bp = bp_library.find('sensor.other.gnss')\n sensor_location = carla.Location(obs_item.position[0], obs_item.position[1], obs_item.position[2])\n sensor_rotation = carla.Rotation()\n else:\n continue\n\n sensor_transform = carla.Transform(sensor_location, sensor_rotation)\n sensor = world.spawn_actor(sensor_bp, sensor_transform, attach_to=vehicle)\n sensor.listen(CallBack(obs_item.name, obs_item.type, self))\n self.register_sensor(obs_item.name, sensor)\n\n def clean_up(self) -> None:\n \"\"\"\n Remove and destroy all sensors\n \"\"\"\n for key in 
self._sensors_dict:\n if self._sensors_dict[key] is not None:\n if self._sensors_dict[key].is_alive:\n self._sensors_dict[key].stop()\n self._sensors_dict[key].destroy()\n self._sensors_dict[key] = None\n time.sleep(0.1)\n self._sensors_dict.clear()\n self._data_buffers.clear()\n self._timestamps.clear()\n\n def register_sensor(self, tag: str, sensor: Any) -> None:\n \"\"\"\n Registers the sensors\n \"\"\"\n if tag in self._sensors_dict:\n raise ValueError(\"Duplicated sensor tag [{}]\".format(tag))\n\n self._sensors_dict[tag] = sensor\n self._data_buffers[tag] = None\n self._timestamps[tag] = -1\n\n def update_sensor(self, tag: str, data: Any, timestamp: Any) -> None:\n \"\"\"\n Updates the sensor\n \"\"\"\n if tag not in self._sensors_dict:\n raise ValueError(\"The sensor with tag [{}] has not been created!\".format(tag))\n self._data_buffers[tag] = data\n self._timestamps[tag] = timestamp\n\n def all_sensors_ready(self) -> bool:\n \"\"\"\n Checks if all the sensors have sent data at least once\n \"\"\"\n for key in self._sensors_dict:\n if self._data_buffers[key] is None:\n return False\n return True\n\n def get_sensors_data(self) -> Dict:\n \"\"\"\n Get all registered sensor data from buffer\n\n :Returns:\n Dict: all newest sensor data\n \"\"\"\n sensor_data = {}\n for obs_item in self._obs_cfg:\n if obs_item.type in ['rgb', 'segmentation', 'lidar', 'gnss']:\n key = obs_item.name\n img = self._data_buffers[key]\n sensor_data[key] = img\n elif obs_item.type == 'depth':\n key = obs_item.name\n raw = self._data_buffers[key]\n img = raw.astype(np.float64)\n R = img[..., 0]\n G = img[..., 1]\n B = img[..., 2]\n depth = (R + G * 256 + B * 256 * 256) / (256 * 256 * 256 - 1)\n depth = 1000 * depth\n sensor_data[key] = depth\n if self._aug_cfg:\n sensor_data['aug'] = {\n 'aug_pos': np.array(self._random_aug_pos),\n 'aug_rot': np.array(self._random_aug_rot),\n }\n return sensor_data\n\n\nclass CallBack(object):\n \"\"\"\n Class the sensors listen to in order to receive their data each frame\n \"\"\"\n\n def __init__(self, tag: str, type: str, wrapper: Any) -> None:\n \"\"\"\n Initializes the call back\n \"\"\"\n self._tag = tag\n self._type = type\n self._data_wrapper = wrapper\n\n def __call__(self, data: Any) -> None:\n \"\"\"\n call function\n \"\"\"\n if isinstance(data, carla.Image):\n self._parse_image_cb(data, self._tag)\n elif isinstance(data, carla.LidarMeasurement):\n self._parse_lidar_cb(data, self._tag)\n elif isinstance(data, carla.GnssMeasurement):\n self._parse_gnss_cb(data, self._tag)\n else:\n logging.error('No callback method for this sensor.')\n\n # Parsing CARLA physical Sensors\n def _parse_image_cb(self, image: Any, tag: str) -> None:\n \"\"\"\n parses cameras\n \"\"\"\n if self._type == 'rgb':\n image.convert(carla.ColorConverter.Raw)\n if self._type == 'segmentation':\n image.convert(carla.ColorConverter.CityScapesPalette)\n img = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n img = np.reshape(img, (image.height, image.width, 4))\n img = img[:, :, :3]\n img = img[:, :, ::-1]\n img = copy.deepcopy(img)\n self._data_wrapper.update_sensor(tag, img, image.frame)\n\n def _parse_lidar_cb(self, lidar_data: Any, tag: str) -> None:\n \"\"\"\n parses lidar sensors\n \"\"\"\n points = np.frombuffer(lidar_data.raw_data, dtype=np.dtype('f4'))\n points = copy.deepcopy(points)\n points = np.reshape(points, (int(points.shape[0] / 3), 3))\n self._data_wrapper.update_sensor(tag, points, lidar_data.frame)\n\n def _parse_gnss_cb(self, gnss_data: Any, tag: str) -> None:\n \"\"\"\n 
parses gnss sensors\n \"\"\"\n array = np.array([gnss_data.latitude, gnss_data.longitude, gnss_data.altitude], dtype=np.float64)\n self._data_wrapper.update_sensor(tag, array, gnss_data.frame)\n\n\nclass CollisionSensor(object):\n \"\"\"\n Carla sensor interface used to detect collision info in simulator. Once created,\n it will automatically update every tick.\n\n :Arguments:\n - parent_actor (carla.Actor): Actor to detect collision\n - col_threshold (float): Threshold value of collided impulse\n\n :Interfaces: clear\n \"\"\"\n\n def __init__(self, parent_actor: carla.Actor, col_threshold: float) -> None:\n self.sensor = None\n self._history = deque(maxlen=500)\n self._parent = parent_actor\n self._threshold = col_threshold\n world = self._parent.get_world()\n bp = world.get_blueprint_library().find('sensor.other.collision')\n self.sensor = world.spawn_actor(bp, carla.Transform(), attach_to=self._parent)\n # We need to pass the lambda a weak reference to self to avoid circular\n # reference.\n weak_self = weakref.ref(self)\n self.sensor.listen(lambda event: CollisionSensor._on_collision(weak_self, event))\n\n self.collided = False\n self.collided_frame = -1\n\n @staticmethod\n def _on_collision(weak_self, event: Any) -> None:\n self = weak_self()\n if not self:\n return\n impulse = event.normal_impulse\n intensity = math.sqrt(impulse.x ** 2 + impulse.y ** 2 + impulse.z ** 2)\n self._history.append((event.frame, intensity))\n if intensity > self._threshold:\n self.collided = True\n self.collided_frame = event.frame\n\n def clear(self) -> None:\n \"\"\"\n Clear collision sensor in Carla world.\n \"\"\"\n self._history.clear()\n if self.sensor.is_alive:\n self.sensor.stop()\n self.sensor.destroy()\n\n\nclass TrafficLightHelper(object):\n \"\"\"\n Interface of traffic light detector and recorder. It detects next traffic light state,\n calculates distance from hero vehicle to the end of this road, and if hero vehicle crosses\n this line when correlated light is red, it will record running a red light\n\n :Arguments:\n - hero_vehicle (carla.Actor): Hero vehicle\n\n :Interfaces:\n - tick\n \"\"\"\n\n def __init__(self, hero_vehicle: carla.Actor, debug: bool = False) -> None:\n self._hero_vehicle = hero_vehicle\n self._world = CarlaDataProvider.get_world()\n self._map = CarlaDataProvider.get_map()\n\n self._light_dis_thresh = 20\n self._active_light = None\n self._last_light = None\n\n self.total_lights_ran = 0\n self.total_lights = 0\n self.ran_light = False\n self.active_light_state = TrafficLightState.OFF\n self.active_light_dis = 200\n\n self._debug = debug\n\n def tick(self) -> None:\n \"\"\"\n Tick one step. It will check the next traffic light and its state, update the count number\n of traffic light if needed. 
It will check the running light event by getting the last waypoints\n in current road and check if the vehicle has crossed them.\n \"\"\"\n self.ran_light = False\n vehicle_transform = CarlaDataProvider.get_transform(self._hero_vehicle)\n vehicle_location = vehicle_transform.location\n\n self._active_light, light_trigger_location = self._get_active_light()\n\n if self._active_light is not None:\n if self._debug:\n self._world.debug.draw_point(light_trigger_location + carla.Location(z=2), size=0.1)\n self.active_light_state = {\n carla.TrafficLightState.Green: TrafficLightState.GREEN,\n carla.TrafficLightState.Yellow: TrafficLightState.YELLOW,\n carla.TrafficLightState.Red: TrafficLightState.RED,\n carla.TrafficLightState.Off: TrafficLightState.OFF,\n }[self._active_light.state]\n delta = vehicle_location - light_trigger_location\n distance = np.sqrt(sum([delta.x ** 2, delta.y ** 2, delta.z ** 2]))\n self.active_light_dis = min(200, distance)\n if self.active_light_dis < self._light_dis_thresh:\n if self._last_light is None or self._active_light.id != self._last_light.id:\n self.total_lights += 1\n self._last_light = self._active_light\n\n else:\n self.active_light_state = TrafficLightState.OFF\n self.active_light_dis = 200\n\n if self._last_light is not None:\n if self._last_light.state != carla.TrafficLightState.Red:\n return\n\n veh_extent = self._hero_vehicle.bounding_box.extent.x\n\n tail_close_pt = self._rotate_point(\n carla.Vector3D(-0.8 * veh_extent, 0.0, vehicle_location.z), vehicle_transform.rotation.yaw\n )\n tail_close_pt = vehicle_location + carla.Location(tail_close_pt)\n\n tail_far_pt = self._rotate_point(\n carla.Vector3D(-veh_extent - 1, 0.0, vehicle_location.z), vehicle_transform.rotation.yaw\n )\n tail_far_pt = vehicle_location + carla.Location(tail_far_pt)\n\n trigger_waypoints = self._get_traffic_light_trigger_waypoints(self._last_light)\n\n if self._debug:\n z = 2.1\n if self._last_light.state == carla.TrafficLightState.Red:\n color = carla.Color(155, 0, 0)\n elif self._last_light.state == carla.TrafficLightState.Green:\n color = carla.Color(0, 155, 0)\n else:\n color = carla.Color(155, 155, 0)\n for wp in trigger_waypoints:\n text = \"{}.{}\".format(wp.road_id, wp.lane_id)\n self._world.debug.draw_string(wp.transform.location + carla.Location(x=1, z=z), text, color=color)\n self._world.debug.draw_point(wp.transform.location + carla.Location(z=z), size=0.1, color=color)\n\n for wp in trigger_waypoints:\n tail_wp = self._map.get_waypoint(tail_far_pt)\n\n # Calculate the dot product (Might be unscaled, as only its sign is important)\n ve_dir = vehicle_transform.get_forward_vector()\n wp_dir = wp.transform.get_forward_vector()\n dot_ve_wp = ve_dir.x * wp_dir.x + ve_dir.y * wp_dir.y + ve_dir.z * wp_dir.z\n\n # Check the lane until all the \"tail\" has passed\n if tail_wp.road_id == wp.road_id and tail_wp.lane_id == wp.lane_id and dot_ve_wp > 0:\n # This light is red and is affecting our lane\n yaw_wp = wp.transform.rotation.yaw\n lane_width = wp.lane_width\n location_wp = wp.transform.location\n\n lft_lane_wp = self._rotate_point(carla.Vector3D(0.4 * lane_width, 0.0, location_wp.z), yaw_wp + 90)\n lft_lane_wp = location_wp + carla.Location(lft_lane_wp)\n rgt_lane_wp = self._rotate_point(carla.Vector3D(0.4 * lane_width, 0.0, location_wp.z), yaw_wp - 90)\n rgt_lane_wp = location_wp + carla.Location(rgt_lane_wp)\n\n # Is the vehicle traversing the stop line?\n if self._is_vehicle_crossing_line((tail_close_pt, tail_far_pt), (lft_lane_wp, rgt_lane_wp)):\n self.ran_light = 
True\n self.total_lights_ran += 1\n self._last_light = None\n\n def _get_active_light(self) -> Tuple[Optional[carla.Actor], Optional[carla.Vector3D]]:\n lights_list = CarlaDataProvider.get_actor_list().filter(\"*traffic_light*\")\n\n vehicle_transform = CarlaDataProvider.get_transform(self._hero_vehicle)\n vehicle_location = vehicle_transform.location\n vehicle_waypoint = CarlaDataProvider._map.get_waypoint(vehicle_location)\n\n for traffic_light in lights_list:\n object_location = CarlaDataProvider.get_trafficlight_trigger_location(traffic_light)\n object_waypoint = CarlaDataProvider._map.get_waypoint(object_location)\n\n if object_waypoint.road_id != vehicle_waypoint.road_id:\n continue\n\n ve_dir = vehicle_waypoint.transform.get_forward_vector()\n wp_dir = object_waypoint.transform.get_forward_vector()\n dot_ve_wp = ve_dir.x * wp_dir.x + ve_dir.y * wp_dir.y + ve_dir.z * wp_dir.z\n\n if dot_ve_wp < 0:\n continue\n while not object_waypoint.is_intersection:\n next_waypoint = object_waypoint.next(0.5)[0]\n if next_waypoint and not next_waypoint.is_intersection:\n object_waypoint = next_waypoint\n else:\n break\n\n return traffic_light, object_waypoint.transform.location\n\n return None, None\n\n def _get_traffic_light_trigger_waypoints(self, traffic_light: carla.Actor) -> List[carla.Waypoint]:\n base_transform = traffic_light.get_transform()\n base_rot = base_transform.rotation.yaw\n area_loc = base_transform.transform(traffic_light.trigger_volume.location)\n\n # Discretize the trigger box into points\n area_ext = traffic_light.trigger_volume.extent\n x_values = np.arange(-0.9 * area_ext.x, 0.9 * area_ext.x, 1.0) # 0.9 to avoid crossing to adjacent lanes\n\n area = []\n for x in x_values:\n point = self._rotate_point(carla.Vector3D(x, 0, area_ext.z), base_rot)\n point_location = area_loc + carla.Location(x=point.x, y=point.y)\n area.append(point_location)\n\n # Get the waypoints of these points, removing duplicates\n ini_wps = []\n for pt in area:\n wpx = self._map.get_waypoint(pt)\n # As x_values are arranged in order, only the last one has to be checked\n if not ini_wps or ini_wps[-1].road_id != wpx.road_id or ini_wps[-1].lane_id != wpx.lane_id:\n ini_wps.append(wpx)\n\n # Advance them until the intersection\n wps = []\n for wpx in ini_wps:\n while not wpx.is_intersection:\n next_wp = wpx.next(0.5)[0]\n if next_wp and not next_wp.is_intersection:\n wpx = next_wp\n else:\n break\n wps.append(wpx)\n\n return wps\n\n def _is_vehicle_crossing_line(self, seg1: List, seg2: List) -> bool:\n \"\"\"\n check if vehicle crosses a line segment\n \"\"\"\n line1 = shapely.geometry.LineString([(seg1[0].x, seg1[0].y), (seg1[1].x, seg1[1].y)])\n line2 = shapely.geometry.LineString([(seg2[0].x, seg2[0].y), (seg2[1].x, seg2[1].y)])\n inter = line1.intersection(line2)\n\n return not inter.is_empty\n\n def _rotate_point(self, point: carla.Vector3D, angle: float) -> carla.Vector3D:\n \"\"\"\n rotate a given point by a given angle\n \"\"\"\n x_ = math.cos(math.radians(angle)) * point.x - math.sin(math.radians(angle)) * point.y\n y_ = math.sin(math.radians(angle)) * point.x + math.cos(math.radians(angle)) * point.y\n return carla.Vector3D(x_, y_, point.z)\n",
"#!/usr/bin/env python\n\n#\n# This work is licensed under the terms of the MIT license.\n# For a copy, see <https://opensource.org/licenses/MIT>.\n\"\"\"\nCollection of traffic scenarios where the ego vehicle (hero)\nis making a left turn\n\"\"\"\n\nimport numpy as np\nimport py_trees\nimport carla\nfrom haco.DIDrive_core.utils.planner import RoadOption\nfrom six.moves.queue import Queue # pylint: disable=relative-import\n\nfrom haco.DIDrive_core.simulators.carla_data_provider import CarlaDataProvider\nfrom haco.DIDrive_core.simulators.srunner.scenariomanager.scenarioatomics.atomic_behaviors import (\n ActorTransformSetter, ActorDestroy, ActorSource, ActorSink, TrafficLightStateSetter, WaypointFollower, StopVehicle\n)\nfrom haco.DIDrive_core.simulators.srunner.scenariomanager.scenarioatomics.atomic_criteria import CollisionTest\nfrom haco.DIDrive_core.simulators.srunner.scenariomanager.scenarioatomics.atomic_trigger_conditions import DriveDistance, \\\n InTriggerDistanceToLocation\nfrom haco.DIDrive_core.simulators.srunner.scenarios.basic_scenario import BasicScenario\nfrom haco.DIDrive_core.simulators.srunner.tools.scenario_helper import generate_target_waypoint\n\n\nclass SignalizedJunctionLeftTurn(BasicScenario):\n \"\"\"\n Implementation class for Hero\n Vehicle turning left at signalized junction scenario,\n Traffic Scenario 08.\n\n This is a single ego vehicle scenario\n \"\"\"\n\n def __init__(\n self, world, ego_vehicles, config, randomize=False, debug_mode=False, criteria_enable=True, timeout=60\n ):\n \"\"\"\n Setup all relevant parameters and create scenario\n \"\"\"\n self._world = world\n self.timeout = timeout\n self._map = CarlaDataProvider.get_map()\n self._target_vel = 6.9\n self._brake_value = 0.5\n self._ego_distance = 70\n self._traffic_light = None\n self._other_actor_transform = None\n self._blackboard_queue_name = 'SignalizedJunctionLeftTurn/actor_flow_queue'\n self._queue = py_trees.blackboard.Blackboard().set(self._blackboard_queue_name, Queue())\n self._initialized = True\n super(SignalizedJunctionLeftTurn, self).__init__(\n \"TurnLeftAtSignalizedJunction\", ego_vehicles, config, world, debug_mode, criteria_enable=criteria_enable\n )\n\n def _initialize_actors(self, config):\n \"\"\"\n Custom initialization\n \"\"\"\n self._other_actor_transform = config.other_actors[0].transform\n first_vehicle_transform = carla.Transform(\n carla.Location(\n config.other_actors[0].transform.location.x, config.other_actors[0].transform.location.y,\n config.other_actors[0].transform.location.z\n ), config.other_actors[0].transform.rotation\n )\n first_vehicle = CarlaDataProvider.request_new_actor(\n config.other_actors[0].model, self._other_actor_transform, disable_two_wheels=True\n )\n first_vehicle.set_transform(first_vehicle_transform)\n first_vehicle.set_simulate_physics(enabled=False)\n self.other_actors.append(first_vehicle)\n\n self._traffic_light = CarlaDataProvider.get_next_traffic_light(self.ego_vehicles[0], False)\n self._traffic_light_other = CarlaDataProvider.get_next_traffic_light(self.other_actors[0], False)\n\n if config.trigger_points is not None:\n trigger_waypoint = CarlaDataProvider.get_map().get_waypoint(config.trigger_points[0].location)\n self._traffic_light = CarlaDataProvider.get_next_traffic_light_from_waypoint(trigger_waypoint)\n\n # if self._traffic_light is None or self._traffic_light_other is None:\n # raise RuntimeError(\"No traffic light for the given location found\")\n\n def _create_behavior(self):\n \"\"\"\n Hero vehicle is turning left in an 
urban area,\n at a signalized intersection, while other actor coming straight\n .The hero actor may turn left either before other actor\n passes intersection or later, without any collision.\n After 80 seconds, a timeout stops the scenario.\n \"\"\"\n\n sequence = py_trees.composites.Sequence(\"Sequence Behavior\")\n\n set_traffic_light = py_trees.composites.Sequence(\"Traffic Light Setter\")\n if self._traffic_light is not None:\n set_light_green = TrafficLightStateSetter(self._traffic_light, carla.TrafficLightState.Green)\n set_traffic_light.add_child(set_light_green)\n if self._traffic_light_other is not None:\n set_other_light_green = TrafficLightStateSetter(self._traffic_light_other, carla.TrafficLightState.Green)\n set_traffic_light.add_child(set_other_light_green)\n # Selecting straight path at intersection\n straight_target_waypoint = generate_target_waypoint(\n CarlaDataProvider.get_map().get_waypoint(self.other_actors[0].get_location()), 0\n )\n target_waypoint = CarlaDataProvider.get_map().get_waypoint(self.other_actors[0].get_location())\n # Generating waypoint list till next intersection\n plan = []\n wp_choice = target_waypoint.next(1.0)\n wp_location1 = wp_choice[0].transform.location\n while not wp_choice[0].is_intersection:\n target_waypoint = wp_choice[0]\n wp_choice = target_waypoint.next(2.0)\n junction = wp_choice[0].get_junction()\n wp_location2 = wp_choice[0].transform.location\n init_vector = []\n x = wp_location2.x - wp_location1.x\n init_vector.append(x)\n y = wp_location2.y - wp_location1.y\n init_vector.append(y)\n init_vector = np.array(init_vector)\n\n for lane_waypoints in junction.get_waypoints(wp_choice[0].lane_type):\n wp_prev = lane_waypoints[0].previous(2.0)[0]\n if (wp_prev.road_id == target_waypoint.road_id and wp_prev.lane_id == target_waypoint.lane_id):\n # Get end\n wp_next = lane_waypoints[0].next_until_lane_end(1.0)[-1]\n wp_next0 = wp_next.next(1.0)[0]\n wp_next1 = wp_next.next(1.0)[0].next(1.0)[0]\n if (wp_next0.road_id != straight_target_waypoint.road_id):\n junc_vector = []\n x = wp_next1.transform.location.x - wp_next0.transform.location.x\n junc_vector.append(x)\n y = wp_next1.transform.location.y - wp_next0.transform.location.y\n junc_vector.append(y)\n junc_vector = np.array(junc_vector)\n if (np.cross(init_vector, junc_vector) < -0.5):\n wp_choice = lane_waypoints[0].next(1.0)\n break\n else:\n continue\n else:\n continue\n\n while wp_choice[0].is_intersection:\n target_waypoint = wp_choice[0]\n plan.append((target_waypoint, RoadOption.LANEFOLLOW))\n wp_choice = target_waypoint.next(1.0)\n while not wp_choice[0].is_intersection:\n target_waypoint = wp_choice[0]\n plan.append((target_waypoint, RoadOption.LANEFOLLOW))\n wp_choice = target_waypoint.next(1.0)\n\n move_actor = WaypointFollower(self.other_actors[0], self._target_vel, plan=plan)\n move_free = WaypointFollower(self.other_actors[0], self._target_vel)\n #stop = StopVehicle(self.other_actors[0], self._brake_value)\n\n # stop other actor\n move_actor_sequence = py_trees.composites.Sequence()\n move_actor_sequence.add_child(move_actor)\n move_actor_sequence.add_child(move_free)\n #move_actor_sequence.add_child(stop)\n #move_actor_sequence.add_child(ActorDestroy(self.other_actors[0]))\n\n # end condition\n #waypoint_follower_end = InTriggerDistanceToLocation(self.other_actors[0], plan[-1][0].transform.location, 10)\n drive = DriveDistance(self.ego_vehicles[0], self._ego_distance)\n end_condition = py_trees.composites.Parallel(\n name='End Condition', 
policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE\n )\n #end_condition.add_child(waypoint_follower_end)\n end_condition.add_child(drive)\n\n behavior = py_trees.composites.Parallel(policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)\n behavior.add_child(move_actor_sequence)\n behavior.add_child(end_condition)\n\n sequence = py_trees.composites.Sequence()\n sequence.add_child(ActorTransformSetter(self.other_actors[0], self._other_actor_transform))\n sequence.add_child(set_traffic_light)\n sequence.add_child(behavior)\n sequence.add_child(ActorDestroy(self.other_actors[0]))\n\n return sequence\n\n def _create_test_criteria(self):\n \"\"\"\n A list of all test criteria will be created that is later used\n in parallel behavior tree.\n \"\"\"\n criteria = []\n\n collison_criteria = CollisionTest(self.ego_vehicles[0])\n criteria.append(collison_criteria)\n\n return criteria\n\n def __del__(self):\n self._traffic_light = None\n self.remove_all_actors()\n",
"import os\nfrom easydict import EasyDict\nfrom pathlib import Path\nfrom functools import partial\n\nimport lmdb\nfrom PIL import Image\nimport numpy as np\n\nfrom haco.DIDrive_core.data import CarlaBenchmarkCollector\nfrom haco.DIDrive_core.envs import SimpleCarlaEnv, CarlaEnvWrapper\nfrom haco.DIDrive_core.policy import AutoPIDPolicy\nfrom haco.DIDrive_core.utils.others.tcp_helper import parse_carla_tcp\nfrom ding.envs import BaseEnvManager, SyncSubprocessEnvManager\nfrom ding.utils.default_helper import deep_merge_dicts\n\nconfig = dict(\n env=dict(\n env_num=5,\n simulator=dict(\n disable_two_wheels=True,\n waypoint_num=32,\n planner=dict(\n type='behavior',\n resolution=1,\n ),\n obs=(\n dict(\n name='rgb',\n type='rgb',\n size=[288, 288],\n fov=100,\n position=[1.5, 0.0, 2.4],\n rotation=[0.0, 0.0, 0.0],\n ),\n dict(\n name='segmentation',\n type='segmentation',\n size=[256, 256],\n fov=100,\n position=[1.5, 0.0, 2.4],\n rotation=[0.0, 0.0, 0.0],\n )\n ),\n aug=dict(\n position_range=[2.0, 0.0, 0.0],\n rotation_range=[0.0, 30.0, 0.0],\n ),\n verbose=True,\n ),\n col_is_failure=True,\n stuck_is_failure=True,\n manager=dict(\n auto_reset=False,\n shared_memory=False,\n context='spawn',\n max_retry=1,\n ),\n wrapper=dict(),\n ),\n server=[\n dict(carla_host='localhost', carla_ports=[9000, 9010, 2]),\n ],\n policy=dict(\n target_speed=25,\n noise=False,\n collect=dict(\n save_dir='dataset/',\n n_episode=50,\n collector=dict()\n ),\n ),\n)\n\nmain_config = EasyDict(config)\n\n\ndef write_episode_data(episode_path, episode_data):\n lmdb_store_keys = ['aug_rot', 'aug_pos', 'is_junction', 'tl_dis', 'tl_state']\n sensor_keys = ['segmentation', 'rgb']\n lmdb_env = lmdb.open(os.path.join(episode_path, \"measurements.lmdb\"), map_size=1e10)\n with lmdb_env.begin(write=True) as txn:\n txn.put('len'.encode(), str(len(episode_data)).encode())\n for i, x in enumerate(episode_data):\n data = episode_data[i]['obs']\n data['aug_rot'] = data['aug']['aug_rot']\n data['aug_pos'] = data['aug']['aug_pos']\n for key in lmdb_store_keys:\n txn.put(('%s_%05d' % (key, i)).encode(), np.ascontiguousarray(data[key]).astype(np.float32))\n for key in sensor_keys:\n image = Image.fromarray(data[key])\n image.save(os.path.join(episode_path, \"%s_%05d.png\" % (key, i)))\n\n\ndef wrapped_env(env_cfg, wrapper_cfg, host, port, tm_port=None):\n return CarlaEnvWrapper(SimpleCarlaEnv(env_cfg, host, port, tm_port), wrapper_cfg)\n\n\ndef main(cfg, seed=0):\n cfg.env.manager = deep_merge_dicts(SyncSubprocessEnvManager.default_config(), cfg.env.manager)\n\n tcp_list = parse_carla_tcp(cfg.server)\n env_num = cfg.env.env_num\n assert len(tcp_list) >= env_num, \\\n \"Carla server not enough! 
Need {} servers but only found {}.\".format(env_num, len(tcp_list))\n\n collector_env = SyncSubprocessEnvManager(\n env_fn=[partial(wrapped_env, cfg.env, cfg.env.wrapper, *tcp_list[i]) for i in range(env_num)],\n cfg=cfg.env.manager,\n )\n collector_env.seed(seed)\n\n policy = AutoPIDPolicy(cfg.policy)\n\n collector = CarlaBenchmarkCollector(cfg.policy.collect.collector, collector_env, policy.collect_mode)\n\n if not os.path.exists(cfg.policy.collect.save_dir):\n os.mkdir(cfg.policy.collect.save_dir)\n\n collected_episodes = 0\n\n while collected_episodes < cfg.policy.collect.n_episode:\n # Sampling data from environments\n print('start collect data')\n new_data = collector.collect(n_episode=env_num)\n for i in range(len(new_data)):\n collected_episodes += 1\n episode_path = Path(cfg.policy.collect.save_dir).joinpath('episode_%05d' % collected_episodes)\n if not os.path.exists(episode_path):\n os.mkdir(episode_path)\n write_episode_data(episode_path, new_data[i]['data'])\n if collected_episodes > cfg.policy.collect.n_episode:\n break\n\n collector_env.close()\n\n\nif __name__ == '__main__':\n main(main_config)\n",
"from easydict import EasyDict\nimport torch\n\nfrom haco.DIDrive_core.envs import SimpleCarlaEnv\nfrom haco.DIDrive_core.policy import LBCBirdviewPolicy\nfrom haco.DIDrive_core.eval import SingleCarlaEvaluator\nfrom haco.DIDrive_core.utils.others.tcp_helper import parse_carla_tcp\nfrom ding.utils import set_pkg_seed\nfrom haco.DIDrive_core.demo.lbc.lbc_env_wrapper import LBCEnvWrapper\n\n\nlbc_config = dict(\n env=dict(\n simulator=dict(\n town='Town01',\n disable_two_wheels=True,\n n_vehicles=10,\n n_pedestrians=10,\n verbose=False,\n planner=dict(\n type='lbc',\n resolution=2.5,\n threshold_before=9.0,\n threshold_after=1.5,\n ),\n obs=(\n dict(\n name='birdview',\n type='bev',\n size=[320, 320],\n pixels_per_meter=5,\n pixels_ahead_vehicle=100,\n ),\n ),\n ),\n visualize=dict(\n type='birdview',\n outputs=['show']\n ),\n wrapper=dict(),\n ),\n server=[dict(carla_host='localhost', carla_ports=[9000, 9002, 2])],\n policy=dict(\n ckpt_path='model-256.th',\n eval=dict(\n evaluator=dict(\n render=True,\n ),\n )\n ),\n)\n\nmain_config = EasyDict(lbc_config)\n\n\ndef wrapped_env(env_cfg, host, port, tm_port=None):\n return LBCEnvWrapper(SimpleCarlaEnv(env_cfg, host, port))\n\n\ndef main(cfg, seed=0):\n tcp_list = parse_carla_tcp(cfg.server)\n assert len(tcp_list) > 0, \"No Carla server found!\"\n\n carla_env = wrapped_env(cfg.env, *tcp_list[0])\n carla_env.seed(seed)\n set_pkg_seed(seed)\n lbc_policy = LBCBirdviewPolicy(cfg.policy).eval_mode\n state_dict = torch.load(cfg.policy.ckpt_path)\n lbc_policy.load_state_dict(state_dict)\n\n evaluator = SingleCarlaEvaluator(cfg.policy.eval.evaluator, carla_env, lbc_policy)\n evaluator.eval()\n\n evaluator.close()\n\n\nif __name__ == '__main__':\n main(main_config)\n"
] | [
[
"numpy.asarray",
"numpy.array"
],
[
"numpy.random.random",
"numpy.reshape",
"numpy.arange",
"numpy.dtype",
"numpy.array"
],
[
"numpy.array",
"numpy.cross"
],
[
"numpy.ascontiguousarray"
],
[
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yuguiyang/python_demo | [
"1be2406bfc920e22a0f92bf10d9a3665984067ba",
"1be2406bfc920e22a0f92bf10d9a3665984067ba",
"1be2406bfc920e22a0f92bf10d9a3665984067ba",
"1be2406bfc920e22a0f92bf10d9a3665984067ba"
] | [
"old_code/pandas_order.py",
"old_code/plt_bar.py",
"old_code/plt_histogram.py",
"code/pandas/pd_duplicated.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 28 13:42:30 2017\n\n@author: hexo\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\n# read the first sheet\ndf = pd.read_excel('D:\\Tableau_data\\示例 - 超市.xls',sheetname=0)\n\nprint(type(df))\n\n# data type of each column\nprint(df.dtypes)\n# count of each dtype\nprint(df.get_dtype_counts())\n\n# still not sure what ftype is for: sparse|dense - what do they actually mean?\nprint(df.ftypes)\nprint(df.get_ftype_counts())\n\ntop_10_data=df.head(10)\n\n#print(top_10_data)\n\nprint('----------------------------')\n# axis=0 is the vertical axis, axis=1 is the horizontal axis\n# this is per column: the mean of each column\nprint(top_10_data.mean(axis=0))\nprint('----------------------------')\n# this is per row: the mean of each row\nprint(top_10_data.mean(axis=1))\n\nprint('----------------------------')\n#sort_index\n# tricky - what exactly is this axis supposed to be? (ok)\n# but I still have not figured out what level does\n# sort by the first column in descending order\n#print(top_10_data.sort_index(axis=0,level=0,ascending=True))\n#print(top_10_data.sort_index(axis=0,level=1,ascending=True))\n\nprint(top_10_data)\nprint('----------------------------')\n# finally managed to sort descending by order date (订单日期)!!!\n# when sorting by several columns here, it seems only one sort direction can be applied - all descending\nprint(top_10_data.sort_values(by=['订单日期','行 ID'] , ascending=False).head(2))\n\n\n\n\n\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 10 14:01:31 2017\n\n@author: hexo\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties \n\nfont_set = FontProperties(fname=r'C:\\Windows\\Fonts\\simsun.ttc', size=13)\n\n'''\nmatplotlib.pyplot.bar(left, height, width=0.8, bottom=None, \n hold=None, data=None, **kwargs)\nMake a bar plot\nMake a bar plot with rectangles bounded by:\n\n left, left + width, bottom, bottom + height\n (left, right, bottom and top edges)\n\n\n'''\n\nplt.figure(u'Bar chart practice')\n\nlabel = ['one','two','three','four','five']\ndata = [10,15,30,20,10]\n\n\nplt.subplot(2,2,1)\nplt.title(u'The simplest bar chart',fontproperties=font_set)\nplt.bar(range(len(data)),data)\n\n\n\nplt.subplot(2,2,2)\nplt.title(u'Bars with different colors',fontproperties=font_set)\n#color:scalar or array-like, optional\n#the colors of the bar faces\nplt.bar(range(len(data)),data,color=['red','yellow','blue','green','black'],label='label2')\nplt.legend()\n\n\n\nplt.subplot(2,2,3)\nplt.title(u'Custom labels',fontproperties=font_set)\n\n#plt.bar(np.arange(len(data)),data,color='green',tick_label=label)\n#edgecolor: border color, facecolor: fill color\n#linewidth: line width,\n#alpha: transparency\n#tick_label: tick labels\nplt.bar(np.arange(len(data)),data,edgecolor='red',\n facecolor='green',linewidth=2,alpha=0.3\n ,tick_label=label,label='label3')\n\nplt.legend()\n# this approach does not work - the label positions come out wrong\n#ax = plt.gca()\n#ax.set_xticklabels(['one','two','three','four'])\n\nplt.subplot(2,2,4)\nplt.title(u'Setting the hatch fill',fontproperties=font_set)\n\nplt.bar(np.arange(len(data)),data,hatch='+',tick_label=label,label='label4')\n\n\n\nplt.show()",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 10 14:01:31 2017\n\n@author: hexo\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# fix the seed so the same random numbers are generated on every run\nnp.random.seed(0)\n\nmu = 200\nsigma = 25\nx = np.random.normal(mu, sigma, size=500)\n\nprint(x)\n\nax0 = plt.gca()\n\nax0.hist(x, 20, normed=1, histtype='stepfilled', facecolor='g', alpha=0.75)\nax0.set_title('stepfilled')\n\nplt.show()",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 15 16:03:44 2017\n\n@author: hexo\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\n\ndata = pd.DataFrame({'k1': ['one'] * 3 + ['two'] * 4,\n 'k2': [1, 1, 2, 3, 3, 4, 4]})\ndata\n\ndata.duplicated()\n\ndata.drop_duplicates()\n\ndata['v1'] = range(7)\ndata.drop_duplicates(['k1'])\n\ndata.drop_duplicates(['k1', 'k2'], take_last=True)\n\n\n\n\n# replacing values\ndata = pd.Series([1., -999., 2., -999., -1000., 3.])\ndata\n\ndata.replace(-999, np.nan)\n\ndata.replace([-999, -1000], np.nan)\n\ndata.replace([-999, -1000], [np.nan, 0])\n\ndata.replace({-999: np.nan, -1000: 0})\n\n"
] | [
[
"pandas.read_excel"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.font_manager.FontProperties",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.gca",
"numpy.random.normal",
"matplotlib.pyplot.show",
"numpy.random.seed"
],
[
"pandas.Series",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
tuanho27/torchstat | [
"46d653795a1262f0e58a2069276a69d6bd43078c"
] | [
"torchstat/compute_flops.py"
] | [
"import torch.nn as nn\nimport torch\nimport numpy as np\n\n\ndef compute_flops(module, inp, out):\n if isinstance(module, nn.Conv2d):\n return compute_Conv2d_flops(module, inp, out)\n elif isinstance(module, nn.BatchNorm2d):\n return compute_BatchNorm2d_flops(module, inp, out)\n elif isinstance(module, (nn.AvgPool2d, nn.MaxPool2d)):\n return compute_Pool2d_flops(module, inp, out)\n elif isinstance(module, (nn.ReLU, nn.ReLU6, nn.PReLU, nn.ELU, nn.LeakyReLU)):\n return compute_ReLU_flops(module, inp, out)\n elif isinstance(module, nn.Upsample):\n return compute_Upsample_flops(module, inp, out)\n elif isinstance(module, nn.Linear):\n return compute_Linear_flops(module, inp, out)\n # elif \"loss\" in module:\n # pass\n else:\n print(f\"[Flops]: {type(module).__name__} is not supported!\")\n return 0\n pass\n\n\ndef compute_Conv2d_flops(module, inp, out):\n # Can have multiple inputs, getting the first one\n assert isinstance(module, nn.Conv2d)\n assert len(inp.size()) == 4 and len(inp.size()) == len(out.size())\n\n batch_size = inp.size()[0]\n in_c = inp.size()[1]\n k_h, k_w = module.kernel_size\n out_c, out_h, out_w = out.size()[1:]\n groups = module.groups\n\n filters_per_channel = out_c // groups\n conv_per_position_flops = k_h * k_w * in_c * filters_per_channel\n active_elements_count = batch_size * out_h * out_w\n\n total_conv_flops = conv_per_position_flops * active_elements_count\n\n bias_flops = 0\n if module.bias is not None:\n bias_flops = out_c * active_elements_count\n\n total_flops = total_conv_flops + bias_flops\n return total_flops\n\n\ndef compute_BatchNorm2d_flops(module, inp, out):\n assert isinstance(module, nn.BatchNorm2d)\n assert len(inp.size()) == 4 and len(inp.size()) == len(out.size())\n in_c, in_h, in_w = inp.size()[1:]\n batch_flops = np.prod(inp.shape)\n if module.affine:\n batch_flops *= 2\n return batch_flops\n\n\ndef compute_ReLU_flops(module, inp, out):\n assert isinstance(module, (nn.ReLU, nn.ReLU6, nn.PReLU, nn.ELU, nn.LeakyReLU))\n batch_size = inp.size()[0]\n active_elements_count = batch_size\n\n for s in inp.size()[1:]:\n active_elements_count *= s\n\n return active_elements_count\n\n\ndef compute_Pool2d_flops(module, inp, out):\n assert isinstance(module, nn.MaxPool2d) or isinstance(module, nn.AvgPool2d)\n assert len(inp.size()) == 4 and len(inp.size()) == len(out.size())\n return np.prod(inp.shape)\n\n\ndef compute_Linear_flops(module, inp, out):\n assert isinstance(module, nn.Linear)\n assert len(inp.size()) == 2 and len(out.size()) == 2\n batch_size = inp.size()[0]\n return batch_size * inp.size()[1] * out.size()[1]\n\ndef compute_Upsample_flops(module, inp, out):\n assert isinstance(module, nn.Upsample)\n output_size = out[0]\n batch_size = inp.size()[0]\n output_elements_count = batch_size\n for s in output_size.shape[1:]:\n output_elements_count *= s\n\n return output_elements_count\n"
] | [
[
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pshustov/DateTimeTools | [
"e542fd3f0e3c5290faad09b7cf8a2751132d4dd3"
] | [
"build/lib/DateTimeTools/WithinTimeRange.py"
] | [
"import numpy as np\nfrom ._CFunctions import _CWithinTimeRange\nfrom ._CTConv import _CTConv\n\ndef WithinTimeRange(Timet,Time0,Time1,BoolOut=False):\n\t'''\n\tPerforms a simple check on a test time (Timet) to see if it exists\n\tbetween Time0 and time1.\n\t\n\tInputs\n\t======\n\tTimet : tuple | float \n\t\tTest time - either a single floating point (array or \n\t\tscalar) to denote hours of the day, or a tuple containing \n\t\t(Date,Time).\n\tTime0 :\ttuple | float\n\t\tStart time, same format as above.\n\tTime1 : tuple | float\n\t\tEnd time, same format as above.\n\tBoolOut : boolean\n\t\tTrue by default, returns a boolean array with the same size as \n\t\tTimet, where eath element in the range Time0 to Time1 is true.\n\t\tWhen False, returns a list of indices within the time range.\n\t\t\n\tOutput\n\t======\n\tout : bool | int\n\t\tIf BoolOut == True boolean (array or scalar), True if within \n\t\ttime range.\n\t\tWhen BoolOut == False, an integer array of indices is returned.\n\t'''\n\tsh = np.shape(Timet)\n\ts0 = np.size(Time0)\n\ts1 = np.size(Time1)\n\t\n\tif s0 == 2:\n\t\tD0 = Time0[0]\n\t\tT0 = Time0[1]\n\telse:\n\t\tT0 = Time0\n\t\tD0 = 20000101\n\t\t\n\tif s1 == 2:\n\t\tD1 = Time1[0]\n\t\tT1 = Time1[1]\n\telse:\n\t\tT1 = Time1\n\t\tD1 = 20000101\t\n\t\n\t\n\tif sh[0] == 2 and np.size(sh) == 2:\n\t\t#hopefully this is a list of date and time\n\t\tD = np.array([Timet[0]]).flatten()\n\t\tT = np.array([Timet[1]]).flatten()\n\telse: \n\t\tT = np.array(Timet)\n\t\tD = np.zeros(T.size,dtype='int32') + 20000101\n\t\t\n\t#convert the dtypes for compatibility with the C++ code\n\t_n = _CTConv(np.size(D),'c_int')\n\t_Date = _CTConv(D,'c_int_ptr')\n\t_ut = _CTConv(T,'c_float_ptr')\n\t_Date0 = _CTConv(D0,'c_int')\n\t_ut0 = _CTConv(T0,'c_float')\n\t_Date1 = _CTConv(D1,'c_int')\n\t_ut1 = _CTConv(T1,'c_float')\n\t_ni = np.zeros(1,dtype='int32')\n\t_ind = np.zeros(_n,dtype='int32')\n\t\t\n\t\t\n\t#call the C++ code\n\t_CWithinTimeRange(_n,_Date,_ut,_Date0,_ut0,_Date1,_ut1,_ni,_ind)\n\t\n\t#reduce the side of the index array\n\t_ind = _ind[:_ni[0]]\n\n\t#either return the indices or the boolean array\n\tif BoolOut:\n\t\tout = np.zeros(_n,dtype='bool8')\n\t\tout[_ind] = True\n\t\treturn out\n\telse:\n\t\treturn _ind\n"
] | [
[
"numpy.size",
"numpy.array",
"numpy.shape",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
liute62/NumCpp | [
"d6922b2b5e1f575021b0577aea1445e041ec7180"
] | [
"unitTests/testScripts/TestIntegrate.py"
] | [
"import numpy as np\nimport scipy.special as sp\nfrom termcolor import colored\nimport sys\nif sys.platform == 'linux':\n sys.path.append(r'../lib')\nelse:\n sys.path.append(r'../build/x64/Release')\nimport NumCpp\n\n\n####################################################################################\nNUM_DECIMALS_ROUND = 1\n\n\n####################################################################################\ndef doTest():\n print(colored('Testing Integration Module', 'magenta'))\n\n print(colored('Testing gauss_legendre', 'cyan'))\n numCoefficients = np.random.randint(2, 5, [1, ]).item()\n coefficients = np.random.randint(-20, 20, [numCoefficients, ])\n coefficientsC = NumCpp.NdArray(1, numCoefficients)\n coefficientsC.setArray(coefficients)\n poly = np.poly1d(np.flipud(coefficients), False)\n polyIntegral = poly.integ()\n polyC = NumCpp.Poly1d(coefficientsC, False)\n a, b = np.sort(np.random.rand(2) * 100 - 50)\n area = np.round(polyIntegral(b) - polyIntegral(a), NUM_DECIMALS_ROUND)\n areaC = np.round(NumCpp.integrate_gauss_legendre(polyC, a, b), NUM_DECIMALS_ROUND)\n if area == areaC:\n print(colored('\\tPASS', 'green'))\n else:\n print(area)\n print(areaC)\n print(colored('\\tFAIL', 'red'))\n\n print(colored('Testing romberg', 'cyan'))\n PERCENT_LEEWAY = 0.1\n numCoefficients = np.random.randint(2, 5, [1, ]).item()\n coefficients = np.random.randint(-20, 20, [numCoefficients, ])\n coefficientsC = NumCpp.NdArray(1, numCoefficients)\n coefficientsC.setArray(coefficients)\n poly = np.poly1d(np.flipud(coefficients), False)\n polyIntegral = poly.integ()\n polyC = NumCpp.Poly1d(coefficientsC, False)\n a, b = np.sort(np.random.rand(2) * 100 - 50)\n area = np.round(polyIntegral(b) - polyIntegral(a), NUM_DECIMALS_ROUND)\n areaC = np.round(NumCpp.integrate_romberg(polyC, a, b), NUM_DECIMALS_ROUND)\n # romberg is much less acurate so let's give it some leeway\n areaLow, areaHigh = np.sort([area * (1 - PERCENT_LEEWAY), area * (1 + PERCENT_LEEWAY)])\n if areaLow < areaC < areaHigh:\n print(colored('\\tPASS', 'green'))\n else:\n print(area)\n print(areaC)\n print(colored('\\tFAIL', 'red'))\n\n print(colored('Testing simpson', 'cyan'))\n numCoefficients = np.random.randint(2, 5, [1, ]).item()\n coefficients = np.random.randint(-20, 20, [numCoefficients, ])\n coefficientsC = NumCpp.NdArray(1, numCoefficients)\n coefficientsC.setArray(coefficients)\n poly = np.poly1d(np.flipud(coefficients), False)\n polyIntegral = poly.integ()\n polyC = NumCpp.Poly1d(coefficientsC, False)\n a, b = np.sort(np.random.rand(2) * 100 - 50)\n area = np.round(polyIntegral(b) - polyIntegral(a), NUM_DECIMALS_ROUND)\n areaC = np.round(NumCpp.integrate_simpson(polyC, a, b), NUM_DECIMALS_ROUND)\n if area == areaC:\n print(colored('\\tPASS', 'green'))\n else:\n print(area)\n print(areaC)\n print(colored('\\tFAIL', 'red'))\n\n print(colored('Testing trapazoidal', 'cyan'))\n numCoefficients = np.random.randint(2, 5, [1, ]).item()\n coefficients = np.random.randint(-20, 20, [numCoefficients, ])\n coefficientsC = NumCpp.NdArray(1, numCoefficients)\n coefficientsC.setArray(coefficients)\n poly = np.poly1d(np.flipud(coefficients), False)\n polyIntegral = poly.integ()\n polyC = NumCpp.Poly1d(coefficientsC, False)\n a, b = np.sort(np.random.rand(2) * 100 - 50)\n area = np.round(polyIntegral(b) - polyIntegral(a), NUM_DECIMALS_ROUND)\n areaC = np.round(NumCpp.integrate_trapazoidal(polyC, a, b), NUM_DECIMALS_ROUND)\n if area == areaC:\n print(colored('\\tPASS', 'green'))\n else:\n print(area)\n print(areaC)\n print(colored('\\tFAIL', 
'red'))\n\n\n####################################################################################\nif __name__ == '__main__':\n doTest()\n"
] | [
[
"numpy.flipud",
"numpy.random.rand",
"numpy.sort",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vsegurar/DeepMSPeptide | [
"ab73f125b2297a7be01da3fa19a1c0b35c29d493"
] | [
"DeepMSPeptide/DeepMSPeptide.py"
] | [
"import warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\nimport argparse\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\n\n\nparser = argparse.ArgumentParser(description='''Predicts the detectability of input peptides using a single dimension\n Convolutionar Neural Network, based on Tensorflow 1.13.1\n Requierements: Tensorflow 1.13.1''')\nparser.add_argument('infile', metavar='F', type=str, nargs='+',\n help='File containing the peptides to be predicted, one per line (max length= 81)')\nargs = parser.parse_args()\n\n\ndef load_pep_and_codify(file, max_len):\n aa_dict={'A':1,'R':2,'N':3,'D':4,'C':5,'Q':6,'E':7,'G':8,'H':9,'I':10,'L':11,'K':12,'M':13,'F':14,\n 'P':15,'O':16,'S':17,'U':18,'T':19,'W':20,'Y':21,'V':22}\n with open(file, 'r') as inf:\n lines = inf.read().splitlines()\n pep_codes=[]\n long_pep_counter = 0\n newLines = []\n for pep in lines:\n if not len(pep) > max_len:\n current_pep=[]\n for aa in pep:\n current_pep.append(aa_dict[aa])\n pep_codes.append(current_pep)\n newLines.extend([pep])\n else:\n long_pep_counter += 1\n predict_data = keras.preprocessing.sequence.pad_sequences(pep_codes, value=0, padding='post', maxlen=max_len)\n return predict_data, long_pep_counter, newLines\n\n\nprint('Loading model...')\nmodel_2_1D = keras.models.load_model('model_2_1D.h5')\n\nprint('Loading input peptides')\npredict_data, skipped, lines = load_pep_and_codify(args.infile[0], 81)\nprint('Succesfully loaded {0} peptides and skipped {1}'.format(len(lines), str(skipped)))\n\nprint('Making predictions')\nmodel_2_1D_pred = model_2_1D.predict(predict_data)\nmodel_2_1D_pred = np.hstack((np.array(lines).reshape(len(lines), 1),model_2_1D_pred)).tolist()\n\nPred_output = []\nfor pred in model_2_1D_pred:\n if float(pred[1]) > 0.5:\n # pred.extend('0')\n Pred_output.append([pred[0], str(1-float(pred[1])), '0'])\n else:\n Pred_output.append([pred[0], str(1-float(pred[1])), '1'])\n # pred.extend('1')\n\noutFile = '{0}_Predictions.txt'.format(args.infile[0].split('.')[0])\nprint('Saving predictions to file {}'.format(outFile))\nwith open(outFile, 'w') as outf:\n outf.write('Peptide\\tProb\\tDetectability\\n')\n outf.writelines('\\t'.join(i) + '\\n' for i in Pred_output)\n"
] | [
[
"tensorflow.keras.models.load_model",
"numpy.array",
"tensorflow.keras.preprocessing.sequence.pad_sequences"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
markhliu99/CAI | [
"b97f0831ee8ea6e2352fe4f25032fdd1019c1ba8"
] | [
"PythonControllerTemplates/src/py-ctrl.py"
] | [
"\"\"\"\npy-ctrl script\n1. generate problem PD file\n 1.1 save PD file in /inputfiles\n2. solve convex hull\n 2.1 save hull information in /output\n 2.2 show figure for 10 sec\n 2.3 save figure in /output\n\"\"\"\n\nimport os\nimport subprocess\nimport argparse\nimport matplotlib.pyplot as plt\n\n\nparser = argparse.ArgumentParser(description = 'Plot tradeoff')\n\n# parser.add_argument('-S', '--Solver', type = int, choices = [0, 1], default = 0, help = \"0: Cplex\\n1: Gurobi\")\n\nparser.add_argument('-P', '--Problem', type = int, choices = [1, 2, 3], default = 1, help=\" 1: Coded Caching\\n 2: Private Information Retrieval\\n 3: Symmetric Private Information Retrieval\")\n\nparser.add_argument('-N1', '--N1', type = int, choices = range(2, 10), default = 2, help = \"number of files in coded caching\")\nparser.add_argument('-K1', '--K1', type = int, choices = range(2, 10), default = 3, help = \"number of users in coded caching\")\n\nparser.add_argument('-N2', '--N2', type = int, choices = range(1, 10), default = 2, help = \"number of servers in private information retrieval\")\nparser.add_argument('-K2', '--K2', type = int, choices = range(1, 10), default = 2, help = \"number of files in private information retrieval\")\n\nparser.add_argument('-N3', '--N3', type = int, choices = range(1, 10), default = 2, help = \"number of servers in symmetric private information retrieval\")\nparser.add_argument('-K3', '--K3', type = int, choices = range(1, 10), default = 2, help = \"number of files in symmetric private information retrieval\")\n\n\nparser.add_argument('-IP', '--InPt', type = str, help = \"list of achievable points, e.g. \\\"(1,1);(1.25,0.85)\\\"\", default=None)\n\n\nif __name__ == \"__main__\":\n # directory of CAI repository\n cai = os.path.dirname(os.path.abspath(__file__)) + \"/../../\"\n\n #### HERE\n # You might need to change these lines:\n # 1. directory of solver\n SOLVER = cai + \"CplexCompute/cplexcompute.out\" \n # 2. 
duration of the convex hull figure pausing in sec\n PAUSE = 10\n\n # read args\n args = parser.parse_args()\n\n # generate PD file\n print(\"Genearte PD file\")\n if args.Problem == 1:\n from gen_pd_cache import gen_pd_cache\n fn = gen_pd_cache(args.N1, args.K1)\n title = \"Coded Caching with {} files and {} users\".format(args.N1, args.K1)\n xlabel = \"Storage\"\n ylabel = \"Download\"\n name = \"cache{}x{}\".format(args.N1, args.K1)\n \n elif args.Problem == 2:\n from gen_pd_pir import gen_pd_pir\n fn = gen_pd_pir(args.N2, args.K2)\n xlabel = \"Storage\"\n ylabel = \"Download\"\n title = \"Private Information Retrieval with {} servers and {} files\".format(args.N2, args.K2)\n name = \"PIR{}x{}\".format(args.N2, args.K2)\n\n elif args.Problem == 3:\n from gen_pd_spir import gen_pd_spir\n fn = gen_pd_spir(args.N3, args.K3)\n xlabel = \"Storage\"\n ylabel = \"Download\"\n title = \"Symmetric Private Information Retrieval with {} servers and {} files\".format(args.N3, args.K3)\n name = \"SPIR{}x{}\".format(args.N3, args.K3)\n \n # Solve PD\n print()\n print(\"Solve the convex hull\")\n if not os.path.exists(cai + 'PlotTradeoff/output'):\n os.makedirs(cai + 'PlotTradeoff/output')\n print('Open ' + cai + 'PlotTradeoff/output/Hull_' + name + '.txt for details')\n if os.path.exists(cai + 'PlotTradeoff/output/Hull_' + name + '.txt') and os.path.exists(cai + 'PlotTradeoff/output/Fig_' + name + '.eps'):\n print(\"file \" + cai + \"PlotTradeoff/output/Hull_\" + name + \".txt already exists\")\n print(\"Overwrite[y/n]:\", end=\"\")\n if input() == \"y\":\n pass\n else:\n with open(cai + 'PlotTradeoff/output/Hull_' + name + '.txt', 'w') as fout:\n subprocess.run([SOLVER, fn, \"hull\"], stdout=fout, text=True)\n \"\"\"\n if args.Solver == 0:\n subprocess.run([cai + \"CplexCompute/cplexcompute.out\", fn, \"hull\"], stdout=fout, text=True)\n else:\n subprocess.run([cai + \"GurobiCompute/gurobicompute.out\", fn, \"hull\"], stdout=fout, text=True)\n \"\"\"\n with open(cai + 'PlotTradeoff/output/Hull_' + name + '.txt', 'r') as fout:\n res = fout.read()\n\n # capture the points on the hull\n res = res[res.find(\"List of found points on the hull:\\n\"):-1].split(\"\\n\")[1: -1]\n points = []\n for p in res:\n points.append(tuple(map(float, p[1: -2].split(', '))))\n \n # plot region\n points = sorted(points, key=lambda x: x[0])\n width = points[0][1] - points[-1][1]\n plt.plot(*zip(*points), label = \"Outer Bounds\")\n if args.InPt != None:\n InPt = []\n for p in args.InPt.split(\";\"):\n InPt.append(tuple(map(float, p[1: -1].split(','))))\n plt.plot(*zip(*InPt), 'o', label = \"Achievable Points\")\n plt.ylim(points[-1][1]- 0.01 * width, points[0][1] + 0.01 * width)\n plt.ylabel(ylabel)\n plt.xlabel(xlabel)\n plt.title(title)\n plt.savefig(cai + 'PlotTradeoff/output/Fig_' + name + '.eps', format='eps')\n plt.legend()\n plt.show(block=False)\n plt.pause(PAUSE)\n plt.close()\n\n print(\"Figure \" + cai + \"PlotTradeoff/output/Fig_\" + name + '.eps')"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tcavazos2/hw2-Clustering | [
"60b536729ba03dcb28384be99e1575c3c3c0fe7e"
] | [
"hw2skeleton/cluster.py"
] | [
"from .utils import Atom, Residue, ActiveSite\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom .helpers import *\nfrom Bio import pairwise2\nimport rmsd\nfrom sklearn.decomposition import PCA\nimport networkx as nx\nfrom networkx.drawing.nx_agraph import graphviz_layout\n\ndef compute_similarity(site_a, site_b):\n \"\"\"\n Compute the similarity between two given ActiveSite instances.\n\n Input: two ActiveSite instances\n Output: the similarity between them (a floating point number)\n \"\"\"\n # Get strings of single letter aa residues\n s_a = output_aa_string(site_a.residues)\n s_b = output_aa_string(site_b.residues)\n \n # Align strings using local alignment algorithm which relies\n # on dynamic programming to compute all possible alignments and\n # returns the highest scoring alignment. \n \n # Local alignment aims to find the max alignment for substrings\n # of two larger strings.\n # Matches = +1\n # Mismatches, gaps = +0\n \n alignments = pairwise2.align.localxx(s_a, s_b) # perform alignment\n if len(alignments) == 0: return float(\"inf\") # return INF if no alignment found\n align_a, align_b, s = alignments[0][:3] # extract first alignment\n \n # Output indices where nucleotides in alignment match\n inds_a, inds_b = match(align_a, align_b)\n \n if len(inds_a) < 2: return float(\"inf\")\n \n # Create matrix of coordinates for atom CA\n V = create_coord_matrix(site_a, inds_a)\n W = create_coord_matrix(site_b, inds_b)\n \n # Center and rotate Ca matrices then calculate Root-Mean-Square-Deviation (RMSD)\n # It measures the average distance between backbone atoms of two\n # superimposed proteins.\n\n # The greater the RMSD, the less similar the proteins are.\n # A RMSD equal to 0 represents identical proteins.\n\n # Each protein is a matrix containing x, y, and z coordinates for each CA atom\n # The rows of the two matrices are matching residues obtained from the alignment\n\n # To minimize RMSD you must first center the coordinates on the origin so the\n # two vectors can be near each other.\n V -= rmsd.centroid(V)\n W -= rmsd.centroid(W)\n\n # Then find the optimal rotation for matrix W that aligns it best with V\n # This is the Kabasch algorithm which works by calculating a covariance matrix\n # and then finding the singular value decomposition (SVD) of the cov. matrix\n # Last, find the optimal rotation matrix which is the dot product of V and W\n # optimized by lowest RMSD\n return rmsd.kabsch_rmsd(V,W)\n\ndef output_similarity_matrix(active_sites):\n \"\"\"\n Calculate RMSD for all pairwise active sites. 
This distance measure\n is converted into a similarity metric by dividing by the max element and\n subtracting 1\n\n Input: list of active sites from PDB files\n Output: similarity matrix for active sites\n \"\"\"\n # Create empty pairwise matrix \n mat = np.empty([len(active_sites), len(active_sites)])\n # For every pair calculate the RMSD\n for (x,y), value in np.ndenumerate(mat):\n mat[x][y] = compute_similarity(active_sites[x], active_sites[y])\n # Infinite values means proteins had less than 3 similar amino acids, set to none\n mat[np.isinf(mat)] = None\n # Find max value in array for normalization\n max_val = np.nanmax(mat)\n # Make none values max value\n mat[np.isnan(mat)] = max_val\n # Get normalized dissimilarity matrix\n norm_mat = mat/max_val\n # Convert dissimilarity matrix to similarity by subtracting 1\n norm_mat_sim = 1 - norm_mat\n return norm_mat_sim\n\ndef cluster_by_partitioning(active_sites,k):\n \"\"\"\n Cluster a given set of ActiveSite instances using a partitioning method.\n\n Input: a list of ActiveSite instances\n Output: a clustering of ActiveSite instances\n (this is really a list of clusters, each of which is list of\n ActiveSite instances)\n \"\"\"\n cost_max = float(\"-inf\")\n mat = output_similarity_matrix(active_sites)\n \n # randomly choose k medoids\n centers = initialize_k_mediods(mat, k)\n # assign elements to cluster medoid with max similarity\n clusters = assign_k_clusters(mat, centers)\n # calculate cost of clustering (sum of similarity of points to cluster)\n cost = calculate_cost(mat, centers, clusters)\n # iterate until cost does not increase\n while cost_max < cost:\n cost_max = cost\n # Loop through medoids and all elements not in medoids\n for i in range(0, len(centers)):\n m = centers[i]\n for o in range(len(active_sites)):\n if o != m:\n # replace medoid with element and re-calculate clusters\n # and cost\n centers[i] = o\n clusters_temp = assign_k_clusters(mat, centers)\n cost_swap = calculate_cost(mat, centers, clusters_temp)\n # if cost increases then replace clusters\n if cost_swap > cost: \n cost = cost_swap\n clusters = clusters_temp\n # if cost decreases or stays the same leave center\n else: centers[i] = m\n return output_cluster_list(active_sites, clusters)\n\ndef cluster_hierarchically(active_sites,k):\n \"\"\"\n Cluster the given set of ActiveSite instances using a hierarchical algorithm. 
#\n\n Input: a list of ActiveSite instances\n Output: a list of clusterings\n (each clustering is a list of lists of Sequence objects)\n \"\"\"\n # Create similarity matrix\n mat_original = output_similarity_matrix(active_sites)\n mat = output_similarity_matrix(active_sites)\n # Fill diagonals with -infinity \n np.fill_diagonal(mat, float(\"-inf\"))\n \n # Create cluster array to keep track of number of clusters\n vals = [np.array([v]) for v in range(len(active_sites))]\n keys = np.arange(0,len(active_sites))\n clusters = dict(zip(keys, vals))\n all_clusters = []\n\n all_clusters.append(output_cluster_list(active_sites, clusters.values()))\n # Group the most similar elements until you only have one more cluster\n while len(clusters) > k:\n # Get most similar clusters\n i,j = np.unravel_index(mat.argmax(), mat.shape)\n # Get two clusters\n c_i = clusters.get(i)\n c_j = clusters.get(j)\n # Add new combined cluster\n c_new = list(clusters.keys())[-1]+1\n clusters[c_new] = np.append(c_i, c_j)\n \n # Add new row/column to similarity matrix\n new_dist = dist_HC(active_sites, clusters,c_new, mat_original)\n new_col = np.append(new_dist, float(\"-inf\"))\n mat = np.vstack([mat, new_dist])\n mat = np.column_stack([mat, new_col])\n # Replace row/column with negative infinitys that correspond to \n # most similar elements\n mat[i], mat[j] = float(\"-inf\"), float(\"-inf\")\n mat[:,j], mat[:,i] = float(\"-inf\"), float(\"-inf\")\n # Drop most similar elements from cluster\n clusters.pop(i)\n clusters.pop(j)\n all_clusters.append(output_cluster_list(active_sites, clusters.values()))\n return all_clusters\n"
] | [
[
"numpy.nanmax",
"numpy.isnan",
"numpy.append",
"numpy.column_stack",
"numpy.ndenumerate",
"numpy.array",
"numpy.isinf",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ferenctorok/potential_field_planner | [
"7a9f7ae70a91523cc6d42029f869f9020cc1ea35",
"7a9f7ae70a91523cc6d42029f869f9020cc1ea35"
] | [
"gradplanner/controller/low_level_controller.py",
"tests/attractor_field_tests.py"
] | [
"import numpy as np\n\n\nclass LowLevelController:\n \"\"\"Low level controller of a point mass robot with dynamics:\n\n x_{k+1} = x_k + v_k * Ts * cos(psi_k)\n y_{k+1} = y_k + v_k * Ts * sin(psi_k)\n v_{k+1} = v_k + Ts * a_k\n psi_{k+1} = psi_k + Ts * omega_k\n omega_{k+1} = omega_k + Ts * epsilon_k\n\n Where a_k and epsilon_k are the inputs and are the translational and rotational\n accelerations respectively.\n\n For now we assume, that it is a perfect controller which is able to produce\n the exact commanded outputs if they are reachable with the provided\n input constraints.\n \"\"\"\n\n def __init__(self,\n params):\n \"\"\"Initializes a LowLevelController.\"\"\"\n\n self._init_from_params(params)\n\n \n def get_inputs(self, state, cmd_vel):\n \"\"\"produces control inputs based on the actual state and the commanded\n velocities in cmd_vel = np.array([v_des, omega_des])\"\"\"\n\n v_des = cmd_vel[0]\n omega_des = cmd_vel[1]\n v_k = state[2]\n omega_k = state[4]\n\n # translational acceleration:\n a_k = (v_des - v_k) / self._Ts\n if a_k > self._acc_max:\n a_k = self._acc_max\n elif a_k < self._acc_min:\n a_k = self._acc_min\n\n # angular acceleration:\n epsilon_k = (omega_des - omega_k) / self._Ts\n if epsilon_k > self._epsilon_max:\n a_epsilon_kk = self._epsilon_max\n elif epsilon_k < self._epsilon_min:\n epsilon_k = self._epsilon_min\n\n return np.array([a_k, epsilon_k])\n\n\n def _init_from_params(self, params):\n \"\"\"Initializes some variables from the params.\"\"\"\n\n self._Ts = params[\"general\"][\"Ts\"]\n self._acc_min = params[\"LowLevelController\"][\"acc_min\"]\n self._acc_max = params[\"LowLevelController\"][\"acc_max\"]\n self._epsilon_min = params[\"LowLevelController\"][\"epsilon_min\"]\n self._epsilon_max = params[\"LowLevelController\"][\"epsilon_max\"]\n",
"import unittest2\nimport numpy as np\n\nfrom gradplanner.planner.attractor_field import AttractorField\nfrom gradplanner.planner.utils import array_is_in_list\nfrom gradplanner.planner.field_utils import get_values_from_field\n\n\nclass AttractorFieldTests(unittest2.TestCase):\n \"\"\"Tests of the AttractorField class.\"\"\"\n\n def setUp(self):\n \"\"\"Sets up the tests.\"\"\"\n self.N, self.M = 10, 12\n self.occupancy_grid = np.ones((self.N, self.M))\n self.occupancy_grid[1: -1, 1: -1] = 0\n self.goal = np.array([5, 5])\n\n \n def test_init(self):\n \"\"\"Tests the __init__() function of the AttractorField\"\"\"\n # testing without args:\n field = AttractorField()\n self.assertIsNone(field._occupancy_grid)\n self.assertIsNone(field._goal)\n\n # testing with provided occupancy_grid:\n field = AttractorField(occupancy_grid=self.occupancy_grid)\n self.assertTrue((field._occupancy_grid == self.occupancy_grid).all())\n self.assertIsNone(field._goal)\n self.assertEqual((self.N, self.M), field._grid_shape)\n self.assertTrue((np.array([self.N, self.M]) == field._grid_shape_arr).all())\n\n # testing with provided goal:\n field = AttractorField(goal=self.goal)\n self.assertIsNone(field._occupancy_grid)\n self.assertTrue((field._goal == self.goal).all())\n\n # testing with provided occupancy grid and goal:\n field = AttractorField(occupancy_grid=self.occupancy_grid, goal=self.goal)\n self.assertTrue((field._occupancy_grid == self.occupancy_grid).all())\n self.assertTrue((field._goal == self.goal).all())\n self.assertEqual((self.N, self.M), field._grid_shape)\n self.assertTrue((np.array([self.N, self.M]) == field._grid_shape_arr).all())\n self.assertIsNotNone(field._field)\n\n\n def test_init_field(self):\n \"\"\"Tests the _init_field method of the AttractorField\"\"\"\n # testing without args:\n field = AttractorField()\n with self.assertRaises(AssertionError):\n field._init_field()\n with self.assertRaises(AttributeError):\n field._field\n\n # testing with provided occupancy grid:\n field = AttractorField(occupancy_grid=self.occupancy_grid)\n with self.assertRaises(AssertionError):\n field._init_field()\n with self.assertRaises(AttributeError):\n field._field\n\n # testing with provided goal:\n field = AttractorField(goal=self.goal)\n with self.assertRaises(AssertionError):\n field._init_field()\n with self.assertRaises(AttributeError):\n field._field\n\n # testing with everything provided:\n field = AttractorField(occupancy_grid=self.occupancy_grid, goal=self.goal)\n field._init_field()\n self.assertIsNotNone(field._field)\n\n\n def test_set_new_goal(self):\n \"\"\"Tests the set_new_goal method of the AttractorField\"\"\"\n\n field = AttractorField()\n # checking assertion errors:\n with self.assertRaises(AssertionError):\n field.set_new_goal(np.array([1, 2, 3]))\n field.set_new_goal(np.array([[1, 1]]))\n \n # checking goal setting without occupancy_grid:\n field.set_new_goal(self.goal)\n self.assertTrue((field._goal == self.goal).all())\n self.assertIsNone(field._occupancy_grid)\n\n # checking goal setting with occupancy grid:\n field = AttractorField()\n field._occupancy_grid = self.occupancy_grid\n field.set_new_goal(self.goal)\n self.assertTrue((field._goal == self.goal).all())\n self.assertIsNotNone(field._field)\n\n\n def test_update_occupancy_grid(self):\n \"\"\"Tests the update_occupancy_grid method of the AttractorField\"\"\"\n # testing without original occupancy grid:\n field = AttractorField(goal=self.goal)\n field.update_field(self.occupancy_grid)\n 
self.assertTrue((field._occupancy_grid == self.occupancy_grid).all())\n self.assertEqual((self.N, self.M), field._grid_shape)\n self.assertTrue((np.array([self.N, self.M]) == field._grid_shape_arr).all())\n\n # testing with original occupancy grid:\n field = AttractorField(occupancy_grid=self.occupancy_grid, goal=self.goal)\n\n # test wrong shape assertion:\n with self.assertRaises(AssertionError):\n field.update_field(np.zeros((self.N - 1, self.M)))\n\n # check if nothing has changed:\n new_grid = self.occupancy_grid.copy()\n field.update_field(new_grid)\n self.assertTrue((field._occupancy_grid == new_grid).all())\n with self.assertRaises(AttributeError):\n a = field._changed_indices\n\n # check if something has changed:\n new_grid[5, 5] = 1\n new_grid[0, 3] = 0\n etalon_changes = np.sort(np.array([[5, 5], [0, 3]]), axis=0)\n field.update_field(new_grid)\n changes = np.sort(np.array(field._changed_indices), axis=0)\n self.assertTrue((field._occupancy_grid == new_grid).all())\n self.assertEqual(len(field._changed_indices), 2)\n i = 0\n for ind in changes:\n self.assertTrue((ind == etalon_changes[i]).all())\n i += 1\n\n\n def test_list_expandable_indices(self):\n \"\"\"Tests the _list_expandable_indices method of the AttractorField\"\"\"\n \n field = AttractorField(occupancy_grid=self.occupancy_grid, goal=self.goal)\n field._changed_indices = [np.array([0, 5]), np.array([5, 5])]\n # expected list:\n etalon_indices = [np.array([1, 5]), np.array([4, 5]), np.array([6, 5]), np.array([5, 4]), np.array([5, 6])]\n\n # run function:\n indices = field._list_expandable_indices()\n\n # check if the list members are the same:\n self.assertEqual(len(etalon_indices), len(indices))\n for index in etalon_indices:\n self.assertTrue(array_is_in_list(index, indices))\n\n # check the order of the list:\n for i, index in enumerate(indices[: -1]):\n next_index = indices[i + 1]\n value = field._field[index[0], index[1]].value\n next_value = field._field[next_index[0], next_index[1]].value\n self.assertTrue(value >= next_value)\n\n\n def test_update_pixel(self):\n \"\"\"Tests the _update_pixel method of the AttractorField\"\"\"\n\n field = AttractorField(occupancy_grid=self.occupancy_grid, goal=self.goal)\n # setting some pixels:\n for ind in [(6, 5), (6, 6), (5, 6), (4, 6), (4, 5), (4, 4), (6, 4)]:\n field._field[ind].value = -3\n field._field[5, 4].value = -2\n\n # the pixel value is already bigger:\n field._field[5, 5].value = -1\n new_pix = field._field[5, 5]\n new_pix = field._update_pixel(new_pix)\n self.assertEqual(new_pix.value, -1)\n self.assertTrue((new_pix.grad == np.array([0, 0])).all())\n\n # the pixel value has to be changed:\n field._field[5, 5].value = -4\n new_pix = field._field[5, 5]\n new_pix = field._update_pixel(new_pix)\n self.assertEqual(new_pix.value, -3)\n self.assertTrue((new_pix.grad == np.array([0, -1])).all())\n\n\n def test_expand_pixel(self):\n \"\"\"Tests the _expand_pixel method of the AttractorField\"\"\"\n\n # if expanding again any pixels without changing their values, nothing should change:\n # field1 is left as it was and field2 is modified.\n field1 = AttractorField(occupancy_grid=self.occupancy_grid, goal=self.goal)\n field2 = AttractorField(occupancy_grid=self.occupancy_grid, goal=self.goal)\n\n for ind in [[1, 1], [5, 5], [3, 7], [8, 1], [8, 4]]:\n field2._expand_pixel(np.array(ind))\n for i in range(self.N):\n for j in range(self.M):\n self.assertEqual(field1._field[i, j].value, field2._field[i, j].value)\n self.assertTrue((field1._field[i, j].grad == field2._field[i, 
j].grad).all())\n\n \n ### placing an obstacle and then removing it from field2: ###\n occ_grid = self.occupancy_grid.copy()\n occ_grid[7, 5] = 1\n field1 = AttractorField(occupancy_grid=occ_grid, goal=self.goal)\n field2 = AttractorField(occupancy_grid=occ_grid, goal=self.goal)\n\n field2._field[7, 5].value = 0\n field2._expand_pixel(np.array([6, 5]))\n\n # indices where the value did not change:\n for ind in [(5, 5), (6, 5), (6, 6), (7, 6), (8, 6), (9, 6), (6, 4), (7, 4), (8, 4), (9, 4), (9, 5)]:\n self.assertEqual(field1._field[ind].value, field2._field[ind].value)\n\n # indices where the gradient did not change:\n for ind in [(5, 5), (6, 5), (6, 6), (7, 7), (8, 7), (9, 7), (6, 4), (7, 3), (8, 3), (9, 3)]:\n self.assertEqual(field1._field[ind].value, field2._field[ind].value)\n self.assertTrue((field1._field[ind].grad == field2._field[ind].grad).all())\n\n # indices where the value has changed:\n self.assertEqual(field1._field[6, 5].value - 1, field2._field[7, 5].value)\n self.assertEqual(field1._field[8, 5].value + 2, field2._field[8, 5].value)\n\n # some indices with different gradients:\n self.assertTrue((field2._field[7, 5].grad == np.array([-1, 0])).all())\n self.assertTrue((field2._field[8, 5].grad == np.array([-1, 0])).all())\n self.assertTrue((field2._field[7, 6].grad == np.array([-1 / np.sqrt(2), -1 / np.sqrt(2)])).all())\n self.assertTrue((field2._field[8, 6].grad == np.array([-1 / np.sqrt(2), -1 / np.sqrt(2)])).all())\n self.assertTrue((field2._field[7, 4].grad == np.array([-1 / np.sqrt(2), 1 / np.sqrt(2)])).all())\n self.assertTrue((field2._field[8, 4].grad == np.array([-1 / np.sqrt(2), 1 / np.sqrt(2)])).all())\n\n ### insterting an extra obstacle: ###\n # TODO\n\n\n def test_update_occupancy_grid(self):\n \"\"\"Tests the update_occupancy_grid method of the AttractorField\"\"\"\n\n # testing without original occupancy grid:\n field = AttractorField(goal=self.goal)\n field.update_occupancy_grid(self.occupancy_grid)\n self.assertTrue((field._occupancy_grid == self.occupancy_grid).all())\n self.assertEqual((self.N, self.M), field._grid_shape)\n self.assertTrue((np.array([self.N, self.M]) == field._grid_shape_arr).all())\n\n # testing with original occupancy grid:\n field = AttractorField(occupancy_grid=self.occupancy_grid, goal=self.goal)\n\n # test wrong shape assertion:\n with self.assertRaises(AssertionError):\n field.update_occupancy_grid(np.zeros((self.N - 1, self.M)))\n\n # check if nothing has changed:\n new_grid = self.occupancy_grid.copy()\n field.update_occupancy_grid(new_grid)\n self.assertTrue((field._occupancy_grid == new_grid).all())\n with self.assertRaises(AttributeError):\n a = field._changed_indices\n\n # check if something has changed:\n new_grid[5, 5] = 1\n new_grid[0, 3] = 0\n etalon_changes = np.sort(np.array([[5, 5], [0, 3]]), axis=0)\n field.update_occupancy_grid(new_grid)\n changes = np.sort(np.array(field._changed_indices), axis=0)\n self.assertTrue((field._occupancy_grid == new_grid).all())\n self.assertEqual(len(field._changed_indices), 2)\n i = 0\n for ind in changes:\n self.assertTrue((ind == etalon_changes[i]).all())\n i += 1\n\n #########################################################\n # Test with changes in the occupancy grid\n\n goal = np.array([3, 3])\n occ_grid_no_obst = self.occupancy_grid.copy()\n # occupancy grid with an U shaped obstacle:\n occ_grid_with_obst = self.occupancy_grid.copy()\n occ_grid_with_obst[6, 4: 7] = 1\n occ_grid_with_obst[7, 4] = 1\n occ_grid_with_obst[7, 6] = 1\n\n # testing the insertion of new obstacle:\n field1 = 
AttractorField(occupancy_grid=occ_grid_with_obst, goal=goal)\n field2 = AttractorField(occupancy_grid=occ_grid_no_obst, goal=goal)\n field2.update_occupancy_grid(occ_grid_with_obst)\n\n # testing the values:\n result_vals1 = get_values_from_field(field1._field)\n result_vals2 = get_values_from_field(field2._field)\n self.assertTrue((result_vals1 == result_vals2).all())\n\n # testing the grads:\n for i in range(self.N):\n for j in range(self.M):\n self.assertTrue((field1._field[i, j].grad == field2._field[i, j].grad).all())\n\n # testing when the obstacle dissappears:\n field1 = AttractorField(occupancy_grid=occ_grid_no_obst, goal=goal)\n field2 = AttractorField(occupancy_grid=occ_grid_with_obst, goal=goal)\n field2.update_occupancy_grid(occ_grid_no_obst)\n\n # testing the values:\n result_vals1 = get_values_from_field(field1._field)\n result_vals2 = get_values_from_field(field2._field)\n self.assertTrue((result_vals1 == result_vals2).all())\n\n # testing the grads:\n for i in range(self.N):\n for j in range(self.M):\n self.assertTrue((field1._field[i, j].grad == field2._field[i, j].grad).all())\n\n\nif __name__ == \"__main__\":\n unittest2.main()"
] | [
[
"numpy.array"
],
[
"numpy.array",
"numpy.zeros",
"numpy.sqrt",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
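The saturation logic in LowLevelController.get_inputs above can be condensed with np.clip. The sketch below is a standalone approximation under the same point-mass assumptions; the function name and the toy limits are made up for illustration.

import numpy as np

def clamped_inputs(v_des, omega_des, v_k, omega_k, Ts,
                   acc_min, acc_max, epsilon_min, epsilon_max):
    """One controller step: desired velocities -> saturated accelerations."""
    a_k = np.clip((v_des - v_k) / Ts, acc_min, acc_max)
    epsilon_k = np.clip((omega_des - omega_k) / Ts, epsilon_min, epsilon_max)
    return np.array([a_k, epsilon_k])

# toy usage with made-up actuator limits
print(clamped_inputs(v_des=1.0, omega_des=0.5, v_k=0.0, omega_k=0.0, Ts=0.1,
                     acc_min=-2.0, acc_max=2.0, epsilon_min=-1.0, epsilon_max=1.0))
# -> [2. 1.]: both accelerations are saturated at their upper limits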
DPaletti/mida_acv | [
"9b492adaf75ce24c94dfa6993c5757e1bb96c700"
] | [
"src/mida_acv/utilities.py"
] | [
"from pathlib import Path\nfrom typing import Tuple, List, Dict\n\nimport pandas as pd\nimport numpy as np\nfrom tsfresh.utilities.dataframe_functions import roll_time_series\n\n\ndef get_path(df: pd.DataFrame) -> np.array:\n out = []\n for index, row in df.iterrows():\n out.append((row[\"Latitude\"], row[\"Longitude\"]))\n return np.array(out)\n\n\ndef write_to_csv(path: str, data: Dict[str, List[pd.DataFrame]]) -> None:\n full_path: Path\n for k, v in data.items():\n full_path = Path(path).joinpath(k[: k.find(\"-\")], k[k.find(\"-\") + 1 :])\n full_path.mkdir(parents=True, exist_ok=True)\n for index, df in enumerate(v):\n df.to_csv(full_path.joinpath(\"timeseries-\" + str(index) + \".csv\").open(\"w\"))\n\n\ndef to_tsfresh(data_path: str) -> Tuple[pd.DataFrame, pd.Series, pd.Series]:\n df = pd.DataFrame()\n weight_series = pd.Series()\n drivers_series = pd.Series()\n temp_df: pd.DataFrame\n # ident: str = \"\"\n i: int = 0\n for placement in {\"deck\", \"stem\"}:\n for driver_number in {\"single\", \"double\"}:\n for ds in Path(data_path).joinpath(placement, driver_number).iterdir():\n temp_df = pd.read_csv(str(ds))\n weight = temp_df[\"Weight\"][0]\n # ident = placement + \"_\" + driver_number + \"_\" + temp_df[\"Driver\"][0]\n temp_df = temp_df.assign(id=i)\n temp_df = temp_df.drop(\n [\"Unnamed: 0\", \"Driver\", \"Weight\", \"Placement\"], axis=1\n )\n df = df.append(temp_df)\n weight_series.loc[i] = weight\n drivers_series.loc[i] = 0 if driver_number == \"single\" else 1\n i += 1\n return df.fillna(0), weight_series, drivers_series\n\n\ndef window_df(df: pd.DataFrame):\n return roll_time_series(\n df, column_id=\"id\", column_sort=\"Timestamp\", column_kind=None\n )\n\n\ndef align(signal_1: np.array, signal_2: np.array):\n # Standardization\n signal_1 = (signal_1 - np.mean(signal_1)) / np.std(signal_1)\n signal_2 = (signal_2 - np.mean(signal_2)) / np.std(signal_2)\n\n # Cross-Correlation\n correlation = np.correlate(signal_1, signal_2, \"full\")\n center = len(correlation) - min(len(signal_1), len(signal_1))\n max_position = correlation.argmax()\n phase = np.abs(center - max_position)\n if phase == 0:\n reversed_correlation_signal = correlation[::-1]\n max_position_reversed = reversed_correlation_signal.argmax()\n phase_reversed = np.abs(center - max_position_reversed)\n phase = np.max([phase, phase_reversed])\n return signal_1, signal_2[phase:]\n"
] | [
[
"numpy.abs",
"pandas.Series",
"pandas.DataFrame",
"numpy.max",
"numpy.std",
"numpy.mean",
"numpy.array",
"numpy.correlate"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
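The align utility above standardizes two signals and locates the peak of their full cross-correlation. A simplified sketch of that lag-estimation idea follows; the function name and the synthetic signals are illustrative and not part of the repository.

import numpy as np

def estimate_lag(signal_1, signal_2):
    """Standardize both signals, cross-correlate in 'full' mode and return
    the offset of the correlation peak from the zero-lag index."""
    s1 = (signal_1 - np.mean(signal_1)) / np.std(signal_1)
    s2 = (signal_2 - np.mean(signal_2)) / np.std(signal_2)
    corr = np.correlate(s1, s2, "full")
    zero_lag = len(s2) - 1          # index of lag 0 in 'full' mode
    return corr.argmax() - zero_lag

# toy usage: a noise signal delayed by 5 samples
rng = np.random.default_rng(0)
base = rng.normal(size=200)
delayed = np.concatenate([np.zeros(5), base])[:200]
print(estimate_lag(delayed, base))  # expected: 5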
by-liu/calibration-framework | [
"7b306e4bbe6361d411b209759b7ba3d016bd0d17"
] | [
"netcal/scaling/LogisticCalibration.py"
] | [
"# Copyright (C) 2019-2021 Ruhr West University of Applied Sciences, Bottrop, Germany\n# AND Elektronische Fahrwerksysteme GmbH, Gaimersheim Germany\n#\n# This Source Code Form is subject to the terms of the Apache License 2.0\n# If a copy of the APL2 was not distributed with this\n# file, You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.txt.\n\nfrom collections import OrderedDict\nfrom typing import Union\n\nimport numpy as np\nimport torch\nimport torch.distributions.constraints as constraints\nimport pyro\nimport pyro.distributions as dist\n\nfrom netcal.scaling import AbstractLogisticRegression\n\n\nclass LogisticCalibration(AbstractLogisticRegression):\n \"\"\"\n On classification, apply the logistic calibration method aka Platt scaling to obtain a\n calibration mapping. This method is originally proposed by [1]_.\n For the multiclass case, we use the Vector scaling proposed in [2]_.\n On detection mode, this calibration method uses multiple independent normal distributions to obtain a\n calibration mapping by means of the confidence as well as additional features [3]_. This calibration scheme\n assumes independence between all variables.\n\n On detection, it is necessary to provide all data in input parameter ``X`` as an NumPy array\n of shape ``(n_samples, n_features)``,\n whereas the confidence must be the first feature given in the input array. The ground-truth samples ``y``\n must be an array of shape ``(n_samples,)`` consisting of binary labels :math:`y \\\\in \\\\{0, 1\\\\}`. Those\n labels indicate if the according sample has matched a ground truth box :math:`\\\\text{m}=1` or is a false\n prediction :math:`\\\\text{m}=0`.\n\n **Mathematical background:** For confidence calibration in classification tasks, a\n confidence mapping :math:`g` is applied on top of a miscalibrated scoring classifier :math:`\\\\hat{p} = h(x)` to\n deliver a calibrated confidence score :math:`\\\\hat{q} = g(h(x))`.\n\n For detection calibration, we can also use the additional box regression output which we denote as\n :math:`\\\\hat{r} \\\\in [0, 1]^J` with :math:`J` as the number of dimensions used for the box encoding (e.g.\n :math:`J=4` for x position, y position, width and height).\n Therefore, the calibration map is not only a function of the confidence score, but also of :math:`\\\\hat{r}`.\n To define a general calibration map for binary problems, we use the logistic function and the combined\n input :math:`s = (\\\\hat{p}, \\\\hat{r})` of size K by\n\n .. math::\n\n g(s) = \\\\frac{1}{1 + \\\\exp(-z(s))} ,\n\n According to [1]_, we can interpret the logit :math:`z` as the logarithm of the posterior odds\n\n .. math::\n\n z(s) = \\\\log \\\\frac{f(\\\\text{m}=1 | s)}{f(\\\\text{m}=0 | s)} \\\\approx\n \\\\log \\\\frac{f(s | \\\\text{m}=1)}{f(s | \\\\text{m}=1)} = \\\\ell r(s)\n\n If we assume independence of all variables given in :math:`s`, we can use multiple univariate probability\n density distributions with the same variance to obtain a calibration mapping. Using this formulation, we can\n simply extend the scaling factor (from classification logistic calibration) to a scaling\n vector :math:`w \\\\in \\\\mathbb{R}^K`.\n However, instead of using the uncalibrated confidence estimate :math:`\\\\hat{p}`, we use the logit of the\n network as part of :math:`s` to be conform with the original formulation in [1]_ and [2]_. Thus,\n the log-likelihood ratio can be expressed as\n\n .. 
math::\n \\\\ell r(s) = s^T w + c,\n\n with bias :math:`c \\\\in \\\\mathbb{R}`.\n We utilize standard optimization methods to determine the calibration mapping :math:`g(s)`.\n\n Parameters\n ----------\n temperature_only : bool, default: False\n If True, use Temperature Scaling instead of Platt/Vector Scaling.\n method : str, default: \"mle\"\n Method that is used to obtain a calibration mapping:\n - 'mle': Maximum likelihood estimate without uncertainty using a convex optimizer.\n - 'momentum': MLE estimate using Momentum optimizer for non-convex optimization.\n - 'variational': Variational Inference with uncertainty.\n - 'mcmc': Markov-Chain Monte-Carlo sampling with uncertainty.\n momentum_epochs : int, optional, default: 1000\n Number of epochs used by momentum optimizer.\n mcmc_steps : int, optional, default: 20\n Number of weight samples obtained by MCMC sampling.\n mcmc_chains : int, optional, default: 1\n Number of Markov-chains used in parallel for MCMC sampling (this will result\n in mcmc_steps * mcmc_chains samples).\n mcmc_warmup_steps : int, optional, default: 100\n Warmup steps used for MCMC sampling.\n vi_epochs : int, optional, default: 1000\n Number of epochs used for ELBO optimization.\n detection : bool, default: False\n If False, the input array 'X' is treated as multi-class confidence input (softmax)\n with shape (n_samples, [n_classes]).\n If True, the input array 'X' is treated as a box predictions with several box features (at least\n box confidence must be present) with shape (n_samples, [n_box_features]).\n independent_probabilities : bool, optional, default: False\n Boolean for multi class probabilities.\n If set to True, the probability estimates for each\n class are treated as independent of each other (sigmoid).\n use_cuda : str or bool, optional, default: False\n Specify if CUDA should be used. If str, you can also specify the device\n number like 'cuda:0', etc.\n\n References\n ----------\n .. [1] Platt, John:\n \"Probabilistic outputs for support vector machines and comparisons to regularized likelihood methods.\"\n Advances in large margin classifiers 10.3: 61-74, 1999\n `Get source online <https://www.researchgate.net/profile/John_Platt/publication/2594015_Probabilistic_Outputs_for_Support_Vector_Machines_and_Comparisons_to_Regularized_Likelihood_Methods/links/004635154cff5262d6000000.pdf>`_\n\n .. [2] Chuan Guo, Geoff Pleiss, Yu Sun and Kilian Q. Weinberger:\n \"On Calibration of Modern Neural Networks.\"\n Proceedings of the 34th International Conference on Machine Learning-Volume 70. JMLR. org, 2017.\n `Get source online <https://arxiv.org/abs/1706.04599>`_\n\n .. [3] Fabian Küppers, Jan Kronenberger, Amirhossein Shantia and Anselm Haselhoff:\n \"Multivariate Confidence Calibration for Object Detection.\"\n The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops.\n\n .. [4] Fabian Küppers, Jan Kronenberger, Jonas Schneider and Anselm Haselhoff:\n \"Bayesian Confidence Calibration for Epistemic Uncertainty Modelling.\"\n 2021 IEEE Intelligent Vehicles Symposium (IV), 2021\n \"\"\"\n\n def __init__(self, *args, temperature_only: bool = False, **kwargs):\n \"\"\" Create an instance of `LogisticCalibration`. Detailed parameter description given in class docs. \"\"\"\n\n super().__init__(*args, **kwargs)\n self.temperature_only = temperature_only\n\n # -------------------------------------------------\n\n @property\n def intercept(self) -> Union[np.ndarray, float]:\n \"\"\" Getter for intercept of logistic calibration. 
\"\"\"\n if self._sites is None:\n raise ValueError(\"Intercept is None. You have to call the method 'fit' first.\")\n\n if self.temperature_only:\n raise ValueError(\"There is no intercept for temperature scaling.\")\n\n return self._sites['bias']['values']\n\n @property\n def weights(self) -> Union[np.ndarray, float]:\n \"\"\" Getter for weights of logistic calibration. \"\"\"\n if self._sites is None:\n raise ValueError(\"Weights is None. You have to call the method 'fit' first.\")\n\n return self._sites['weights']['values']\n\n # -------------------------------------------------\n\n def prepare(self, X: np.ndarray) -> torch.Tensor:\n \"\"\"\n Preprocessing of input data before called at the beginning of the fit-function.\n\n Parameters\n ----------\n X : np.ndarray, shape=(n_samples, [n_classes]) or (n_samples, [n_box_features])\n NumPy array with confidence values for each prediction on classification with shapes\n 1-D for binary classification, 2-D for multi class (softmax).\n On detection, this array must have 2 dimensions with number of additional box features in last dim.\n\n Returns\n -------\n torch.Tensor\n Prepared data vector X as torch tensor.\n \"\"\"\n\n if len(X.shape) == 1:\n X = np.reshape(X, (-1, 1))\n\n # on detection mode, convert confidence to sigmoid and append the remaining features\n if self.detection:\n data_input = np.concatenate((self._inverse_sigmoid(X[:, 0]).reshape(-1, 1), X[:, 1:]), axis=1)\n\n # on binary classification, simply convert the confidences to logits\n elif self._is_binary_classification():\n data_input = self._inverse_sigmoid(X)\n\n # on multiclass classification, use inverse softmax instead\n else:\n data_input = self._inverse_softmax(X)\n\n return torch.Tensor(data_input)\n\n def prior(self):\n \"\"\"\n Prior definition of the weights used for log regression. 
This function has to set the\n variables 'self.weight_prior_dist', 'self.weight_mean_init' and 'self.weight_stddev_init'.\n \"\"\"\n\n self._sites = OrderedDict()\n\n # on temperature scaling, we only have one single weight for all classes\n if self.temperature_only:\n self._sites['weights'] = {\n 'values': None,\n 'constraint': constraints.real,\n 'init': {\n 'mean': torch.ones(1),\n 'scale': torch.ones(1)\n },\n 'prior': dist.Normal(torch.ones(1), 10 * torch.ones(1), validate_args=True)\n }\n\n else:\n\n # on detection mode or binary classification, we have a weight for each given feature (one for binary\n # classification) and bias\n if self.detection or self._is_binary_classification():\n num_bias = 1\n num_weights = self.num_features\n\n # on multiclass classification, we have one weight and one bias for each class separately\n else:\n num_bias = self.num_classes\n num_weights = self.num_classes\n\n # set properties for \"weights\"\n self._sites['weights'] = {\n 'values': None,\n 'constraint': constraints.real,\n 'init': {\n 'mean': torch.ones(num_weights),\n 'scale': torch.ones(num_weights)\n },\n 'prior': dist.Normal(torch.ones(num_weights), 10 * torch.ones(num_weights), validate_args=True),\n }\n\n # set properties for \"bias\"\n self._sites['bias'] = {\n 'values': None,\n 'constraint': constraints.real,\n 'init': {\n 'mean': torch.zeros(num_bias),\n 'scale': torch.ones(num_bias)\n },\n 'prior': dist.Normal(torch.zeros(num_bias), 10 * torch.ones(num_bias), validate_args=True),\n }\n\n def model(self, X: torch.Tensor = None, y: torch.Tensor = None) -> torch.Tensor:\n \"\"\"\n Definition of the log regression model.\n\n Parameters\n ----------\n X : torch.Tensor, shape=(n_samples, n_log_regression_features)\n Input data that has been prepared by \"self.prepare\" function call.\n y : torch.Tensor, shape=(n_samples, [n_classes])\n Torch tensor with ground truth labels.\n Either as label vector (1-D) or as one-hot encoded ground truth array (2-D) (for multiclass MLE only).\n\n Returns\n -------\n torch.Tensor, shape=(n_samples, [n_classes])\n Logit of the log regression model.\n \"\"\"\n\n # sample from prior - on MLE, this weight will be set as conditional\n weights = pyro.sample(\"weights\", self._sites[\"weights\"][\"prior\"])\n\n if self.temperature_only:\n bias = 0.\n else:\n bias = pyro.sample(\"bias\", self._sites[\"bias\"][\"prior\"])\n\n # on detection or binary classification, use dot product to sum up all given features to one logit\n if self.detection or self._is_binary_classification():\n\n # we need squeeze to remove last (unnecessary) dim to avoid site-effects\n # temperature scaling: sinlge scalar\n if self.temperature_only:\n def logit_op(x, w, b): return torch.squeeze(torch.sum(torch.mul(x, w), dim=1))\n\n # platt scaling: one weight for each feature given\n else:\n weights = torch.reshape(weights, (-1, 1))\n def logit_op(x, w, b): return torch.squeeze(torch.matmul(x, w) + b)\n\n # define as probabilistic output the sigmoid and a bernoulli distribution\n prob_op = torch.sigmoid\n dist_op = dist.Bernoulli\n\n else:\n\n # the op for calculating the logit is an element-wise multiplication\n # for vector scaling and to keep multinomial output\n def logit_op(x, w, b): return torch.mul(x, w) + b\n\n # define as probabilistic output the softmax and a categorical distribution\n def prob_op(logit): return torch.softmax(logit, dim=1)\n dist_op = dist.Categorical\n\n # the first dimension of the given input data is the \"independent\" sample dimension\n with pyro.plate(\"data\", 
X.shape[0]):\n\n # calculate logit\n logit = logit_op(X, weights, bias)\n\n # if MLE, (slow) sampling is not necessary. However, this is needed for 'variational' and 'mcmc'\n if self.method in ['variational', 'mcmc']:\n probs = prob_op(logit)\n pyro.sample(\"obs\", dist_op(probs=probs, validate_args=True), obs=y)\n\n return logit\n"
] | [
[
"torch.softmax",
"torch.ones",
"torch.Tensor",
"torch.zeros",
"numpy.reshape",
"torch.reshape",
"torch.matmul",
"torch.mul"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
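The docstring above defines the calibration map g(s) = sigmoid(w^T s + c). As a rough sketch of what fitting that map means in the binary case, the following uses plain gradient descent on the binary cross-entropy over toy data; the optimizer, data, and function names are assumptions for illustration and do not reflect the netcal implementation (which builds on pyro/torch and the methods listed in its docstring).

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def fit_platt(logits, labels, lr=0.05, epochs=3000):
    """Fit g(z) = sigmoid(w * z + b) by gradient descent on the binary
    cross-entropy (an illustrative optimizer, not the library's)."""
    w, b = 1.0, 0.0
    n = len(logits)
    for _ in range(epochs):
        residual = sigmoid(w * logits + b) - labels   # dBCE / d(w*z + b)
        w -= lr * np.dot(residual, logits) / n
        b -= lr * residual.mean()
    return w, b

# toy usage: logits inflated by a factor of 3 relative to the true log-odds
rng = np.random.default_rng(1)
true_logit = rng.normal(size=500)
labels = (rng.random(500) < sigmoid(true_logit)).astype(float)
w, b = fit_platt(3.0 * true_logit, labels)
print(w, b)   # w is expected to shrink toward roughly 1/3, b toward 0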
manzt/bioimage-latency-benchmark | [
"134a368f90cdf38532723f621e1766f31e2d3214"
] | [
"notebooks/chunks.py"
] | [
"#!/usr/bin/env python\nimport argparse\nimport math\n\nimport matplotlib.pyplot as plt\n\n\ndef file_count(shape, chunkXY, chunkZ=1, chunkT=1, chunkC=1):\n t, c, z, y, x = shape\n return (\n math.ceil(x / chunkXY)\n * math.ceil(y / chunkXY)\n * math.ceil(z / chunkZ)\n * math.ceil(t / chunkT)\n * math.ceil(c / chunkC)\n )\n\n\ndef plot(ax, twoD=True, font=16):\n if twoD:\n shape = (1, 8, 1, 2 ** 16, 2 ** 16)\n chunkSizesXY = [32, 1024]\n chunkSizesOther = (1, 2, 4, 8)\n else:\n shape = (100, 1, 1024, 1024, 1024)\n chunkSizesXY = (16, 32, 64, 128)\n chunkSizesOther = (1, 10, 100)\n\n ax.set_ylabel(\"Number of chunks\")\n ax.set_yscale(\"log\")\n ax.set_xscale(\"log\")\n ax.set(xlim=(10, 2 * 10 ** 3), ylim=(10, 10 ** 8))\n\n if twoD:\n ax.set_xlabel(\"Chunk size (X and Y)\")\n ax.set_title(\"XYZCT: (64k, 64k, 1, 8, 1)\")\n chunkDim = \"C\"\n annTitle = \"Chosen chunk size:\\n(256, 256, 1, 1, 1)\"\n xy = ((256), file_count(shape, 256))\n else:\n ax.set_xlabel(\"Chunk size (XYZ)\")\n ax.set_title(\"XYZCT: (1k, 1k, 1k, 1, 100)\")\n chunkDim = \"T\"\n annTitle = \"Chosen chunk size:\\n(32, 32, 32, 1, 1)\"\n xy = ((32), file_count(shape, 32, chunkZ=32))\n\n for item in (\n [ax.title, ax.xaxis.label, ax.yaxis.label]\n + ax.get_xticklabels()\n + ax.get_yticklabels()\n ):\n item.set_fontsize(font)\n\n styles = [\"solid\", \"dashed\", \"dashdot\", \"dotted\"]\n for whichChunk, chunkOther in enumerate(chunkSizesOther):\n numFiles = []\n fileSize = []\n for i in chunkSizesXY:\n if twoD:\n count = file_count(shape, i, **{f\"chunk{chunkDim}\": chunkOther})\n else:\n # Could be simpler\n count = file_count(\n shape, i, chunkZ=i, **{f\"chunk{chunkDim}\": chunkOther}\n )\n numFiles.append(count)\n fileSize.append(i)\n ax.plot(\n fileSize,\n numFiles,\n linewidth=0.5,\n label=f\"{chunkOther}\",\n linestyle=styles.pop(0),\n )\n\n ax.annotate(\n annTitle,\n xy=xy,\n xycoords=\"data\",\n xytext=(0, 40),\n textcoords=\"offset points\",\n arrowprops=dict(facecolor=\"black\", shrink=0.05),\n horizontalalignment=\"left\",\n verticalalignment=\"center\",\n fontsize=font - 4,\n )\n leg = ax.legend(\n loc=\"lower left\",\n title=f\"Chunk size ({chunkDim})\",\n frameon=False,\n prop={\"size\": font},\n )\n for legobj in leg.legendHandles:\n legobj.set_linewidth(0.5)\n\n for axis in [\"top\", \"bottom\", \"left\", \"right\"]:\n ax.spines[axis].set_linewidth(0.5)\n\n return fig\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"filename\")\n ns = parser.parse_args()\n # fig = plt.figure()\n # ax2D = fig.add_subplot(2, 1, 1)\n # ax3D = fig.add_subplot(2, 1, 2)\n\n fig, ax = plt.subplots(1, 2, figsize=(12, 5))\n plot(ax[1], False)\n plot(ax[0], True)\n\n plt.savefig(ns.filename)\n"
] | [
[
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
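The chunks.py entry above counts chunk files with a product of ceilings over each axis. A minimal restatement of that formula, using the plot's annotated 2D configuration as a usage check (variable names are illustrative):

import math

def chunk_count(shape, chunk_xy, chunk_z=1, chunk_t=1, chunk_c=1):
    """Number of chunk files for a (T, C, Z, Y, X) array, per the formula above."""
    t, c, z, y, x = shape
    return (math.ceil(x / chunk_xy) * math.ceil(y / chunk_xy)
            * math.ceil(z / chunk_z) * math.ceil(t / chunk_t)
            * math.ceil(c / chunk_c))

# the 2D case annotated in the plot: XYZCT (64k, 64k, 1, 8, 1) with 256x256 chunks
print(chunk_count((1, 8, 1, 2**16, 2**16), 256))   # 256 * 256 * 8 = 524288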
H43RO/examples | [
"54acd5f38d6368a29208b231e5028f16d18c954b"
] | [
"tensorflow_examples/lite/model_maker/core/task/model_spec/audio_spec.py"
] | [
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Audio model specification.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport collections\nimport csv\nimport io\nimport os\nimport tempfile\n\nimport tensorflow as tf\nfrom tensorflow_examples.lite.model_maker.core.api.api_util import mm_export\nfrom tensorflow_examples.lite.model_maker.core.task import model_util\nimport tensorflow_hub as hub\n\ntry:\n from tflite_support.metadata_writers import audio_classifier as md_writer # pylint: disable=g-import-not-at-top\n from tflite_support.metadata_writers import metadata_info as md_info # pylint: disable=g-import-not-at-top\n from tflite_support.metadata_writers import writer_utils # pylint: disable=g-import-not-at-top\n ENABLE_METADATA = True\nexcept ImportError:\n ENABLE_METADATA = False\n\n\nclass MetadataWriter:\n \"\"\"Helper class to populate Audio Metadata, to be used in `with` statement.\n\n Simple usage for model with two classification heads.\n\n with MetadataWriter(tflite_path) as writer:\n writer.add_input(sample_rate=16000, channels=1)\n writer.add_output(name='animal_sound', labels=['dog', 'cat'])\n writer.add_output(name='speech_command', labels=['yes', 'no'])\n writer.save(tflite_path, json_filepath)\n\n `add_output` can also take an ordered dict for multiple locales, example:\n\n writer.add_output(name='animal_sound', labels=collections.OrderedDict([\n ('en', ['bird', 'cat']),\n ('fr', ['oiseau', 'chat'])\n ]))\n \"\"\"\n\n def __init__(self, tflite_filepath, **kwargs):\n self._model = writer_utils.load_file(tflite_filepath)\n self._general_md = md_info.GeneralMd(**kwargs)\n self._inputs = []\n self._outputs = []\n\n def __enter__(self):\n self._temp_folder = tempfile.TemporaryDirectory()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self._temp_folder.cleanup()\n # Delete the attribute so that it errors out if not in `with` statement.\n delattr(self, '_temp_folder')\n\n def add_input(self, **kwargs):\n \"\"\"Add metadta for the input tensor.\"\"\"\n self._inputs.append(md_info.InputAudioTensorMd(**kwargs))\n\n def add_output(self, name, labels, **kwargs):\n \"\"\"Add metadata for output tensor in order.\"\"\"\n if isinstance(labels, list):\n default_locale = None\n labels = collections.OrderedDict([(default_locale, labels)])\n return self.add_output(name, labels, **kwargs)\n\n label_files = []\n if isinstance(labels, collections.OrderedDict):\n for locale, label_list in labels.items():\n full_path = os.path.join(\n self._temp_folder.name,\n '{}_labels_{}.txt'.format(name, locale or 'default'))\n model_util.export_labels(full_path, label_list)\n label_files.append(\n md_info.LabelFileMd(file_path=full_path, locale=locale))\n else:\n raise ValueError(\n '`labels` should be either a list of labels or an ordered dict mapping `locale` -> list of labels. 
got: {}'\n .format(labels))\n\n idx = len(self._outputs)\n self._outputs.append(\n md_info.ClassificationTensorMd(\n name=name,\n label_files=label_files,\n tensor_type=writer_utils.get_output_tensor_types(self._model)[idx],\n **kwargs))\n\n def save(self, tflite_filepath=None, json_filepath=None):\n \"\"\"Persist model with metadata.\"\"\"\n if len(self._inputs) > 1:\n raise ValueError('Only supports single input, got {}'.format(\n len(self._inputs)))\n input_md = self._inputs[0]\n\n writer = md_writer.MetadataWriter.create_from_metadata_info_for_multihead(\n model_buffer=self._model,\n general_md=self._general_md,\n input_md=input_md,\n output_md_list=self._outputs)\n if tflite_filepath:\n writer_utils.save_file(writer.populate(), tflite_filepath, mode='wb')\n if json_filepath:\n writer_utils.save_file(\n writer.get_metadata_json(), json_filepath, mode='wt')\n\n\ndef _ensure_tf25(version):\n if version < '2.5':\n raise RuntimeError(\n 'Audio Tasks requires TF2.5 or later. For example, you can run the '\n 'following command to install TF2.5.0rc2:\\n\\n'\n 'pip3 install tensorflow==2.5.0rc2\\n\\n')\n\n\ndef _get_tf_version():\n return tf.__version__\n\n\nclass BaseSpec(abc.ABC):\n \"\"\"Base model spec for audio classification.\"\"\"\n\n def __init__(self, model_dir=None, strategy=None):\n _ensure_tf25(_get_tf_version())\n self.model_dir = model_dir\n if not model_dir:\n self.model_dir = tempfile.mkdtemp()\n tf.compat.v1.logging.info('Checkpoints are stored in %s', self.model_dir)\n self.strategy = strategy or tf.distribute.get_strategy()\n\n @abc.abstractproperty\n def target_sample_rate(self):\n pass\n\n @abc.abstractmethod\n def create_model(self, num_classes, train_whole_model=False):\n pass\n\n @abc.abstractmethod\n def run_classifier(self, model, epochs, train_ds, validation_ds, **kwargs):\n pass\n\n def preprocess_ds(self, ds, is_training=False, cache_fn=None):\n \"\"\"Returns a preprocessed dataset.\"\"\"\n _ = is_training\n _ = cache_fn\n return ds\n\n def get_default_quantization_config(self):\n \"\"\"Gets the default quantization configuration.\"\"\"\n return None\n\n\ndef _remove_suffix_if_possible(text, suffix):\n return text.rsplit(suffix, 1)[0]\n\n\nTFJS_MODEL_ROOT = 'https://storage.googleapis.com/tfjs-models/tfjs'\n\n\ndef _load_browser_fft_preprocess_model():\n \"\"\"Load a model replicating WebAudio's AnalyzerNode.getFloatFrequencyData.\"\"\"\n model_name = 'sc_preproc_model'\n file_extension = '.tar.gz'\n filename = model_name + file_extension\n # Load the preprocessing model, which transforms audio waveform into\n # spectrograms (2D image-like representation of sound).\n # This model replicates WebAudio's AnalyzerNode.getFloatFrequencyData\n # (https://developer.mozilla.org/en-US/docs/Web/API/AnalyserNode/getFloatFrequencyData).\n # It performs short-time Fourier transform (STFT) using a length-2048 Blackman\n # window. 
It opeartes on mono audio at the 44100-Hz sample rate.\n filepath = tf.keras.utils.get_file(\n filename,\n f'{TFJS_MODEL_ROOT}/speech-commands/conversion/{filename}',\n cache_subdir='model_maker',\n extract=True)\n model_path = _remove_suffix_if_possible(filepath, file_extension)\n return tf.keras.models.load_model(model_path)\n\n\ndef _load_tfjs_speech_command_model():\n \"\"\"Download TFJS speech command model for fine-tune.\"\"\"\n origin_root = f'{TFJS_MODEL_ROOT}/speech-commands/v0.3/browser_fft/18w'\n files_to_download = [\n 'metadata.json', 'model.json', 'group1-shard1of2', 'group1-shard2of2'\n ]\n for filename in files_to_download:\n filepath = tf.keras.utils.get_file(\n filename,\n f'{origin_root}/{filename}',\n cache_subdir='model_maker/tfjs-sc-model')\n model_path = os.path.join(os.path.dirname(filepath), 'model.json')\n return model_util.load_tfjs_keras_model(model_path)\n\n\n@mm_export('audio_classifier.BrowserFftSpec')\nclass BrowserFFTSpec(BaseSpec):\n \"\"\"Model good at detecting speech commands, using Browser FFT spectrum.\"\"\"\n\n EXPECTED_WAVEFORM_LENGTH = 44032\n\n # Information used to populate TFLite metadata.\n _MODEL_NAME = 'AudioClassifier'\n _MODEL_DESCRIPTION = ('Identify the most prominent type in the audio clip '\n 'from a known set of categories.')\n\n _MODEL_VERSION = 'v1'\n _MODEL_AUTHOR = 'TensorFlow Lite Model Maker'\n _MODEL_LICENSES = ('Apache License. Version 2.0 '\n 'http://www.apache.org/licenses/LICENSE-2.0.')\n\n _SAMPLE_RATE = 44100\n _CHANNELS = 1\n\n _INPUT_NAME = 'audio_clip'\n _INPUT_DESCRIPTION = 'Input audio clip to be classified.'\n\n _OUTPUT_NAME = 'probability'\n _OUTPUT_DESCRIPTION = 'Scores of the labels respectively.'\n\n def __init__(self, model_dir=None, strategy=None):\n \"\"\"Initialize a new instance for BrowserFFT spec.\n\n Args:\n model_dir: The location to save the model checkpoint files.\n strategy: An instance of TF distribute strategy. 
If none, it will use the\n default strategy (either SingleDeviceStrategy or the current scoped\n strategy.\n \"\"\"\n super(BrowserFFTSpec, self).__init__(model_dir, strategy)\n self._preprocess_model = _load_browser_fft_preprocess_model()\n self._tfjs_sc_model = _load_tfjs_speech_command_model()\n\n @property\n def target_sample_rate(self):\n return 44100\n\n @tf.function(input_signature=[\n tf.TensorSpec(shape=[None], dtype=tf.float32),\n tf.TensorSpec([], dtype=tf.int32)\n ])\n def _ensure_length(self, wav, unused_label):\n return len(wav) >= self.EXPECTED_WAVEFORM_LENGTH\n\n @tf.function(input_signature=[\n tf.TensorSpec(shape=[None], dtype=tf.float32),\n tf.TensorSpec([], dtype=tf.int32)\n ])\n def _split(self, wav, label):\n \"\"\"Split the long audio samples into multiple trunks.\"\"\"\n # wav shape: (audio_samples, )\n chunks = tf.math.floordiv(len(wav), self.EXPECTED_WAVEFORM_LENGTH)\n unused = tf.math.floormod(len(wav), self.EXPECTED_WAVEFORM_LENGTH)\n # Drop unused data\n wav = wav[:len(wav) - unused]\n # Split the audio sample into multiple chunks\n wav = tf.reshape(wav, (chunks, 1, self.EXPECTED_WAVEFORM_LENGTH))\n\n return wav, tf.repeat(tf.expand_dims(label, 0), len(wav))\n\n @tf.function(input_signature=[\n tf.TensorSpec(shape=[1, EXPECTED_WAVEFORM_LENGTH], dtype=tf.float32),\n tf.TensorSpec([], dtype=tf.int32)\n ])\n def _preprocess(self, x, label):\n \"\"\"Preprocess the dataset to extract the spectrum.\"\"\"\n # x has shape (1, EXPECTED_WAVEFORM_LENGTH)\n spectrum = self._preprocess_model(x)\n # y has shape (1, embedding_len)\n spectrum = tf.squeeze(spectrum, axis=0)\n # y has shape (embedding_len,)\n return spectrum, label\n\n def preprocess_ds(self, ds, is_training=False, cache_fn=None):\n del is_training\n\n autotune = tf.data.AUTOTUNE\n ds = ds.filter(self._ensure_length)\n ds = ds.map(self._split, num_parallel_calls=autotune).unbatch()\n ds = ds.map(self._preprocess, num_parallel_calls=autotune)\n if cache_fn:\n ds = cache_fn(ds)\n return ds\n\n def create_model(self, num_classes, train_whole_model=False):\n if num_classes <= 1:\n raise ValueError(\n 'AudioClassifier expects `num_classes` to be greater than 1')\n model = tf.keras.Sequential()\n for layer in self._tfjs_sc_model.layers[:-1]:\n model.add(layer)\n model.add(\n tf.keras.layers.Dense(\n name='classification_head', units=num_classes,\n activation='softmax'))\n if not train_whole_model:\n # Freeze all but the last layer of the model. 
The last layer will be\n # fine-tuned during transfer learning.\n for layer in model.layers[:-1]:\n layer.trainable = False\n return model\n\n def run_classifier(self, model, epochs, train_ds, validation_ds, **kwargs):\n model.compile(\n optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])\n\n hist = model.fit(\n train_ds, validation_data=validation_ds, epochs=epochs, **kwargs)\n return hist\n\n def create_serving_model(self, training_model):\n \"\"\"Create a model for serving.\"\"\"\n combined = tf.keras.Sequential()\n combined.add(self._preprocess_model)\n combined.add(training_model)\n # Build the model.\n combined.build([None, self.EXPECTED_WAVEFORM_LENGTH])\n return combined\n\n def _export_metadata(self, tflite_filepath, index_to_label,\n export_metadata_json_file):\n \"\"\"Export TFLite metadata.\"\"\"\n with MetadataWriter(\n tflite_filepath,\n name=self._MODEL_NAME,\n description=self._MODEL_DESCRIPTION,\n version=self._MODEL_VERSION,\n author=self._MODEL_AUTHOR,\n licenses=self._MODEL_LICENSES) as writer:\n writer.add_input(\n name=self._INPUT_NAME,\n description=self._INPUT_DESCRIPTION,\n sample_rate=self._SAMPLE_RATE,\n channels=self._CHANNELS)\n\n writer.add_output(\n labels=index_to_label,\n name=self._OUTPUT_NAME,\n description=self._OUTPUT_DESCRIPTION)\n\n json_filepath = (os.path.splitext(tflite_filepath)[0] +\n '.json') if export_metadata_json_file else None\n writer.save(tflite_filepath, json_filepath)\n\n def export_tflite(self,\n model,\n tflite_filepath,\n with_metadata=True,\n export_metadata_json_file=True,\n index_to_label=None):\n \"\"\"Converts the retrained model to tflite format and saves it.\n\n This method overrides the default `CustomModel._export_tflite` method, and\n include the pre-processing in the exported TFLite library since support\n library can't handle audio tasks yet.\n\n Args:\n model: An instance of the keras classification model to be exported.\n tflite_filepath: File path to save tflite model.\n with_metadata: Whether the output tflite model contains metadata.\n export_metadata_json_file: Whether to export metadata in json file. If\n True, export the metadata in the same directory as tflite model.Used\n only if `with_metadata` is True.\n index_to_label: A list that map from index to label class name.\n \"\"\"\n combined = self.create_serving_model(model)\n\n # Sets batch size from None to 1 when converting to tflite.\n model_util.set_batch_size(model, batch_size=1)\n\n model_util.export_tflite(\n combined, tflite_filepath, quantization_config=None)\n\n # Sets batch size back to None to support retraining later.\n model_util.set_batch_size(model, batch_size=None)\n\n if with_metadata:\n if not ENABLE_METADATA:\n print('Writing Metadata is not support in the installed tflite-support '\n 'version. Please use tflite-support >= 0.2.*')\n else:\n self._export_metadata(tflite_filepath, index_to_label,\n export_metadata_json_file)\n\n\n@mm_export('audio_classifier.YamNetSpec')\nclass YAMNetSpec(BaseSpec):\n \"\"\"Model good at detecting environmental sounds, using YAMNet embedding.\"\"\"\n\n EXPECTED_WAVEFORM_LENGTH = 15600 # effectively 0.975s\n EMBEDDING_SIZE = 1024\n\n # Information used to populate TFLite metadata.\n _MODEL_NAME = 'yamnet/classification'\n _MODEL_DESCRIPTION = 'Recognizes sound events'\n _MODEL_VERSION = 'v1'\n _MODEL_AUTHOR = 'TensorFlow Lite Model Maker'\n _MODEL_LICENSES = ('Apache License. 
Version 2.0 '\n 'http://www.apache.org/licenses/LICENSE-2.0.')\n\n _SAMPLE_RATE = 16000\n _CHANNELS = 1\n\n _INPUT_NAME = 'audio_clip'\n _INPUT_DESCRIPTION = 'Input audio clip to be classified.'\n\n _YAMNET_OUTPUT_NAME = 'yamnet'\n _YAMNET_OUTPUT_DESCRIPTION = ('Scores in range 0..1.0 for each of the 521 '\n 'output classes.')\n\n _CUSTOM_OUTPUT_NAME = 'custom'\n _CUSTOM_OUTPUT_DESCRIPTION = (\n 'Scores in range 0..1.0 for each output classes.')\n\n def __init__(\n self,\n model_dir: None = None,\n strategy: None = None,\n yamnet_model_handle='https://tfhub.dev/google/yamnet/1',\n frame_length=EXPECTED_WAVEFORM_LENGTH, # Window size 0.975 s\n frame_step=EXPECTED_WAVEFORM_LENGTH // 2, # Hop of 0.975 /2 s\n keep_yamnet_and_custom_heads=True):\n \"\"\"Initialize a new instance for YAMNet spec.\n\n Args:\n model_dir: The location to save the model checkpoint files.\n strategy: An instance of TF distribute strategy. If none, it will use the\n default strategy (either SingleDeviceStrategy or the current scoped\n strategy.\n yamnet_model_handle: Path of the TFHub model for retrining.\n frame_length: The number of samples in each audio frame. If the audio file\n is shorter than `frame_length`, then the audio file will be ignored.\n frame_step: The number of samples between two audio frames. This value\n should be bigger than `frame_length`.\n keep_yamnet_and_custom_heads: Boolean, decides if the final TFLite model\n contains both YAMNet and custom trained classification heads. When set\n to False, only the trained custom head will be preserved.\n \"\"\"\n super(YAMNetSpec, self).__init__(model_dir, strategy)\n self._yamnet_model_handle = yamnet_model_handle\n self._yamnet_model = hub.load(yamnet_model_handle)\n self._frame_length = frame_length\n self._frame_step = frame_step\n self._keep_yamnet_and_custom_heads = keep_yamnet_and_custom_heads\n\n @property\n def target_sample_rate(self):\n return self._SAMPLE_RATE\n\n def create_model(self, num_classes, train_whole_model=False):\n model = tf.keras.Sequential([\n tf.keras.layers.InputLayer(\n input_shape=(YAMNetSpec.EMBEDDING_SIZE),\n dtype=tf.float32,\n name='embedding'),\n tf.keras.layers.Dense(\n num_classes, name='classification_head', activation='softmax')\n ])\n return model\n\n def run_classifier(self, model, epochs, train_ds, validation_ds, **kwargs):\n model.compile(\n optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])\n\n hist = model.fit(\n train_ds, validation_data=validation_ds, epochs=epochs, **kwargs)\n return hist\n\n # Annotate the TF function with input_signature to avoid re-tracing. 
Otherwise\n # the TF function gets retraced everytime the input shape is changed.\n # Check https://www.tensorflow.org/api_docs/python/tf/function#args_1 for more\n # information.\n @tf.function(input_signature=[\n tf.TensorSpec(shape=[None], dtype=tf.float32),\n tf.TensorSpec([], dtype=tf.int32)\n ])\n def _frame(self, wav, label):\n clips = tf.signal.frame(\n wav, frame_length=self._frame_length, frame_step=self._frame_step)\n batch_labels = tf.repeat(tf.expand_dims(label, 0), len(clips))\n\n return clips, batch_labels\n\n @tf.function(input_signature=[\n tf.TensorSpec(shape=[None], dtype=tf.float32),\n tf.TensorSpec([], dtype=tf.int32)\n ])\n def _extract_embedding(self, wav, label):\n _, embeddings, _ = self._yamnet_model(wav) # (chunks, EMBEDDING_SIZE)\n embedding = tf.reduce_mean(embeddings, axis=0)\n return embedding, label\n\n @tf.function(input_signature=[\n tf.TensorSpec(shape=[EMBEDDING_SIZE], dtype=tf.float32),\n tf.TensorSpec([], dtype=tf.int32)\n ])\n def _add_noise(self, embedding, label):\n noise = tf.random.normal(\n embedding.shape, mean=0.0, stddev=.2, dtype=tf.dtypes.float32)\n return noise + embedding, label\n\n def preprocess_ds(self, ds, is_training=False, cache_fn=None):\n autotune = tf.data.AUTOTUNE\n ds = ds.map(self._frame, num_parallel_calls=autotune).unbatch()\n ds = ds.map(self._extract_embedding, num_parallel_calls=autotune)\n\n # Cache intermediate results right before data augmentation.\n if cache_fn:\n ds = cache_fn(ds)\n\n if is_training:\n ds = ds.map(self._add_noise, num_parallel_calls=autotune)\n return ds\n\n def _yamnet_labels(self):\n class_map_path = self._yamnet_model.class_map_path().numpy()\n class_map_csv_text = tf.io.read_file(class_map_path).numpy().decode('utf-8')\n class_map_csv = io.StringIO(class_map_csv_text)\n class_names = [\n display_name for (class_index, mid,\n display_name) in csv.reader(class_map_csv)\n ]\n class_names = class_names[1:] # Skip CSV header\n return class_names\n\n def _export_metadata(self, tflite_filepath, index_to_label,\n export_metadata_json_file):\n \"\"\"Export TFLite metadata.\"\"\"\n with MetadataWriter(\n tflite_filepath,\n name=self._MODEL_NAME,\n description=self._MODEL_DESCRIPTION,\n version=self._MODEL_VERSION,\n author=self._MODEL_AUTHOR,\n licenses=self._MODEL_LICENSES) as writer:\n writer.add_input(\n name=self._INPUT_NAME,\n description=self._INPUT_DESCRIPTION,\n sample_rate=self._SAMPLE_RATE,\n channels=self._CHANNELS)\n\n if self._keep_yamnet_and_custom_heads:\n writer.add_output(\n labels=self._yamnet_labels(),\n name=self._YAMNET_OUTPUT_NAME,\n description=self._YAMNET_OUTPUT_DESCRIPTION)\n\n writer.add_output(\n labels=index_to_label,\n name=self._CUSTOM_OUTPUT_NAME,\n description=self._CUSTOM_OUTPUT_DESCRIPTION)\n\n json_filepath = (os.path.splitext(tflite_filepath)[0] +\n '.json') if export_metadata_json_file else None\n writer.save(tflite_filepath, json_filepath)\n\n def create_serving_model(self, training_model):\n \"\"\"Create a model for serving.\"\"\"\n embedding_extraction_layer = hub.KerasLayer(\n self._yamnet_model_handle, trainable=False)\n keras_input = tf.keras.Input(\n shape=(YAMNetSpec.EXPECTED_WAVEFORM_LENGTH,),\n dtype=tf.float32,\n name='audio') # (1, wav)\n reshaped_input = tf.reshape(keras_input,\n (YAMNetSpec.EXPECTED_WAVEFORM_LENGTH,)) # (wav)\n\n scores, embeddings, _ = embedding_extraction_layer(reshaped_input)\n serving_outputs = training_model(embeddings)\n\n if self._keep_yamnet_and_custom_heads:\n serving_model = tf.keras.Model(keras_input, [scores, 
serving_outputs])\n else:\n serving_model = tf.keras.Model(keras_input, serving_outputs)\n\n return serving_model\n\n def export_tflite(self,\n model,\n tflite_filepath,\n with_metadata=True,\n export_metadata_json_file=True,\n index_to_label=None):\n \"\"\"Converts the retrained model to tflite format and saves it.\n\n This method overrides the default `CustomModel._export_tflite` method, and\n include the spectrom extraction in the model.\n\n The exported model has input shape (1, number of wav samples)\n\n Args:\n model: An instance of the keras classification model to be exported.\n tflite_filepath: File path to save tflite model.\n with_metadata: Whether the output tflite model contains metadata.\n export_metadata_json_file: Whether to export metadata in json file. If\n True, export the metadata in the same directory as tflite model. Used\n only if `with_metadata` is True.\n index_to_label: A list that map from index to label class name.\n \"\"\"\n serving_model = self.create_serving_model(model)\n\n # TODO(b/164229433): Remove SELECT_TF_OPS once changes in the bug are\n # released.\n model_util.export_tflite(\n serving_model, tflite_filepath, quantization_config=None)\n\n if with_metadata:\n if not ENABLE_METADATA:\n print('Writing Metadata is not support in the current tflite-support '\n 'version. Please use tflite-support >= 0.2.*')\n else:\n self._export_metadata(tflite_filepath, index_to_label,\n export_metadata_json_file)\n"
] | [
[
"tensorflow.signal.frame",
"tensorflow.keras.models.load_model",
"tensorflow.keras.Input",
"tensorflow.reduce_mean",
"tensorflow.keras.layers.Dense",
"tensorflow.reshape",
"tensorflow.keras.Sequential",
"tensorflow.squeeze",
"tensorflow.expand_dims",
"tensorflow.keras.Model",
"tensorflow.keras.layers.InputLayer",
"tensorflow.compat.v1.logging.info",
"tensorflow.keras.utils.get_file",
"tensorflow.io.read_file",
"tensorflow.distribute.get_strategy",
"tensorflow.random.normal",
"tensorflow.TensorSpec"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
}
] |
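BrowserFFTSpec._split above reshapes a long waveform into fixed-length clips and repeats the label per clip inside a tf.function. The NumPy sketch below shows the same array manipulation outside the TensorFlow graph; the function name and the toy input are assumptions made for illustration.

import numpy as np

EXPECTED_WAVEFORM_LENGTH = 44032   # value used by BrowserFFTSpec above

def split_waveform(wav, label):
    """NumPy view of the _split step: drop the trailing remainder and reshape
    the waveform into (num_chunks, 1, EXPECTED_WAVEFORM_LENGTH) clips."""
    chunks = len(wav) // EXPECTED_WAVEFORM_LENGTH
    wav = wav[:chunks * EXPECTED_WAVEFORM_LENGTH]
    clips = wav.reshape(chunks, 1, EXPECTED_WAVEFORM_LENGTH)
    labels = np.repeat(label, chunks)
    return clips, labels

# toy usage: 2.5 chunks of silence collapse to 2 labelled clips
clips, labels = split_waveform(np.zeros(int(2.5 * EXPECTED_WAVEFORM_LENGTH)), label=3)
print(clips.shape, labels)   # (2, 1, 44032) [3 3]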
Jmion/SwisscomMIP | [
"d29b0de222be44f85a84bc7dc3f4521741fdeda1"
] | [
"dataFetcher.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# # Loading data\n\n\n\nimport pandas as pd\nimport plotly.express as px\nfrom tqdm import tqdm\nimport functools\nimport numpy as np\nfrom difflib import SequenceMatcher\nfrom oauthlib.oauth2 import BackendApplicationClient\nfrom requests_oauthlib import OAuth2Session\nfrom datetime import datetime, timedelta\nimport pprint\nimport requests\nimport os\nimport getpass\nimport json\n\nfrom queue import Queue\nfrom threading import Thread\nfrom time import time\nimport logging\nimport os\n\n\n\n#cashing in case of multiple calls.\[email protected]_cache(maxsize=128)\ndef get_tiles(municipalityId: int) -> pd.DataFrame:\n \"\"\"Fetches tile information for a municipality id.\n \n Args:\n municipalityId: id of the municipality as defined in by the federal office of statistics,\n https://www.bfs.admin.ch/bfs/fr/home/bases-statistiques/repertoire-officiel-communes-suisse.assetdetail.11467406.html\n \n Return:\n A dataframe containing the following columns:\n [tileId, ll_lon, ll_lat, urL-lon, ur_lat]\n \n tileID: corresponds to a unique ID as defined in the Swisscom FAQ page.\n ll_lon: longitude coordinate of the lower left corner of the tile.\n ll_lat: latitude coordinate of the lower left corner of the tile.\n ur_lon: longitude coordinate of the upper right corner of the tile.\n ur_lat: latitude coordinate of the upper right corner of the tile.\n \n If municipalityId is invalid will print an error message and return an empty DataFrame\n \"\"\"\n api_request = (\n BASE_URL\n + f'/grids/municipalities/{municipalityId}'\n )\n\n data = oauth.get(api_request, headers=headers).json()\n if(data.get('status') == None):\n tileID = [t['tileId'] for t in data['tiles']]\n ll_lon = [t['ll']['x'] for t in data['tiles']]\n ll_lat= [t['ll']['y'] for t in data['tiles']]\n ur_lon = [t['ur']['x'] for t in data['tiles']]\n ur_lat = [t['ur']['y'] for t in data['tiles']]\n else:\n print(f'get_tiles: failed with status code {data.get(\"status\")}. 
{data.get(\"message\")}')\n return pd.DataFrame(data={'tileID': [], 'll_lat': [], 'll_lon': [], 'ur_lat': [], 'ur_lon': []})\n \n return pd.DataFrame(data={'tileID': tileID, 'll_lat': ll_lat, 'll_lon': ll_lon, 'ur_lat': ur_lat, 'ur_lon': ur_lon})\n\n\n\ndef get_municipalityID(name: str) -> np.array(int):\n \"\"\"Converts a municipality name to ID\n \n Args:\n name of municipality.\n \n Returns:\n An array containing all the municipality ID's corresponding to the name.\n \n If the name invalid will return an empty array.\n \"\"\"\n return commune.loc[commune.GDENAME == name].GDENR.to_numpy()\n\n\n\ndef visualize_coordinates(df: pd.DataFrame, latitude: str, longitude: str) -> None :\n \"\"\"Visualizes coordinates in dataframe on map\n \n Retrieves columns with name latitude and logitude and visualizes it on a map.\n \n Args:\n df: A dataframe containing the coordinates.\n latitude: String key of the column in the dataframe containing the latitude.\n longitude: String key of the column in the dataframe containing the longitude.\n \"\"\"\n fig = px.scatter_mapbox(df, lat=latitude, lon=longitude,\n color_continuous_scale=px.colors.cyclical.IceFire, size_max=15, zoom=10,\n mapbox_style=\"carto-positron\")\n fig.show()\n\n\n\n\ndef get_all_tiles_switzerland() -> pd.DataFrame:\n \"\"\"Fetches the tile information for all the tiles in Switzerland.\n \n Returns:\n A Dataframe containg the tile information for every tile in switzerland.\n \n The format of the DataFrame is the same as the return of get_tiles()\n \n \"\"\"\n tiles = get_tiles(commune.GDENR.unique()[0])\n for c in tqdm(commune.GDENR.unique().tolist()):\n tiles = tiles.append(get_tiles(c))\n return tiles\n\n\n\n\n\ndef get_daily_demographics(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0) ):\n \"\"\"Fetches daily demographics\n \n Fetches the daily demographics, age distribution, of the tiles.\n \n Args:\n tiles: Array of tile id's, what will be used to querry demographic data.\n day: date of the data to be fetched.\n \n Returns:\n A dataframe containing as a key the tileID and as columns ageDistribution and the maleProportion\n \n +----------+-----------------------+---------------------+\n | | ageDistribution | maleProportion |\n +----------+-----------------------+---------------------+\n | 44554639 | NaN | 0.49828359484672546 |\n +----------+-----------------------+---------------------+\n | 44271906 | [0.21413850784301758, | 0.493218 |\n | | 0.27691012620925903, | |\n | | 0.37422287464141846, | |\n | | 0.13472850620746613] | |\n +----------+-----------------------+---------------------+\n In the example above tile 44554639 does not have any age distribution data.\n \n The data is k-anonymized. Therefor is some tiles are missing data it\n means that the data is not available. 
To find out more about demographics visit the Heatmap FAQ.\n \"\"\"\n dates = [(day + timedelta(hours=delta)) for delta in range(24)]\n date2score = dict()\n for tiles_subset in [tiles[i:i + MAX_NB_TILES_REQUEST] for i in range(0, len(tiles), MAX_NB_TILES_REQUEST)]:\n api_request = (\n BASE_URL\n + f'/heatmaps/dwell-demographics/daily/{day.isoformat().split(\"T\")[0]}'\n + \"?tiles=\"\n + \"&tiles=\".join(map(str, tiles_subset))\n )\n data = oauth.get(api_request, headers=headers).json()\n for t in data.get(\"tiles\", []):\n if date2score.get(t['tileId']) == None:\n date2score[t['tileId']] = dict()\n date2score[t['tileId']] = {\"ageDistribution\": t.get(\"ageDistribution\"),\"maleProportion\": t.get(\"maleProportion\")}\n \n \n return pd.DataFrame.from_dict(date2score).transpose()\n\n\n\n\n\ndef get_hourly_demographics_dataframe(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0)):\n \"\"\"Fetches hourly demographics of age categories for 24 hours\n \n Fetches the hourly demographics, age distribution, of the tiles.\n \n Age categories are the following 0 - 19, 20 - 39, 40 - 64, >64\n \n Args:\n tiles: Array of tile id's, what will be used to querry demographic data.\n day: date of the data to be fetched.\n \n Returns:\n DataFrame containing the demographics. The name\n of the collumns are:\n [age_cat, age_distribution, male_proportion]\n \n +----------+---------------------+---------+------------------+-----------------+\n | | | age_cat | age_distribution | male_proportion |\n +----------+---------------------+---------+------------------+-----------------+\n | tileID | time | | | |\n +----------+---------------------+---------+------------------+-----------------+\n | 44394309 | 2020-01-27T00:00:00 | NaN | NaN | 0.474876 |\n +----------+---------------------+---------+------------------+-----------------+\n | | 2020-01-27T01:00:00 | NaN | NaN | 0.483166 |\n +----------+---------------------+---------+------------------+-----------------+\n | | ... | | | |\n +----------+---------------------+---------+------------------+-----------------+\n | 44290729 | 2020-01-27T06:00:00 | 0.0 | 0.192352 | 0.497038 |\n +----------+---------------------+---------+------------------+-----------------+\n | | 2020-01-27T06:00:00 | 1.0 | 0.269984 | 0.497038 |\n +----------+---------------------+---------+------------------+-----------------+\n | | 2020-01-27T06:00:00 | 2.0 | 0.363481 | 0.497038 |\n +----------+---------------------+---------+------------------+-----------------+\n | | 2020-01-27T06:00:00 | 3.0 | 0.174183 | 0.497038 |\n +----------+---------------------+---------+------------------+-----------------+\n \n The data is k-anonymized. Therefor is some tiles are not present in the output dataframe it \n means that the data is not available. 
To find out more about demographics visit the Heatmap FAQ.\n \"\"\"\n def get_hourly_demographics(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0) ):\n \"\"\"Fetches hourly male proportion and age categories for 24 hours\n\n Args:\n tiles: Array of tile id's, what will be used to querry demographic data.\n day: date of the data to be fetched.\n\n Returns:\n Returns a dictionary with as a key the tileID, and as a value an object that is as follows:\n\n {tileID: {dateTime:{ \"ageDistribution\": [0-19, 20-39, 40-64, 64+], \"maleProportion\": value},\n {dateTime2: ...}}}\n\n\n\n 26994514: {'2020-01-27T00:00:00': {'ageDistribution': [0.1925136297941208,\n 0.2758632302284241,\n 0.362215131521225,\n 0.16940800845623016],\n 'maleProportion': 0.4727686941623688},\n '2020-01-27T01:00:00': {'ageDistribution': None,\n 'maleProportion': 0.4896690547466278},\n '2020-01-27T02:00:00': {'ageDistribution': None,\n 'maleProportion': 0.48882684111595154},\n\n The data is k-anonymized. Therefor is some values are None it means that no data was available \n To find out more about demographics visit the Heatmap FAQ.\n \"\"\"\n dates = [(day + timedelta(hours=delta)) for delta in range(24)]\n date2score = dict()\n for dt in tqdm(dates, desc=\"get_hourly_demographics: hours\", leave=True):\n for tiles_subset in [tiles[i:i + 100] for i in range(0, len(tiles), 100)]:\n api_request = (\n BASE_URL\n + f'/heatmaps/dwell-demographics/hourly/{dt.isoformat()}'\n + \"?tiles=\"\n + \"&tiles=\".join(map(str, tiles_subset))\n )\n data = oauth.get(api_request, headers=headers).json()\n for t in data.get(\"tiles\", []):\n if date2score.get(t['tileId']) == None:\n date2score[t['tileId']] = dict()\n date2score.get(t['tileId'])[dt.isoformat()] = {\"ageDistribution\": t.get(\"ageDistribution\"),\"maleProportion\": t.get(\"maleProportion\")}\n return date2score\n \n \n \n data = get_hourly_demographics(tiles, day)\n tile_id = []\n time_data = []\n age_distribution = []\n age_cat = []\n male_proportion = []\n for i in data:\n for time in data[i]:\n if data[i][time].get(\"ageDistribution\") != None:\n for (idx,a) in enumerate(data[i][time].get(\"ageDistribution\", [])):\n age_cat.append(idx)\n age_distribution.append(a)\n tile_id.append(i)\n time_data.append(time)\n male_proportion.append(data[i][time].get(\"maleProportion\"))\n else:\n tile_id.append(i)\n time_data.append(time)\n age_distribution.append(None)\n male_proportion.append(data[i][time].get(\"maleProportion\"))\n age_cat.append(None)\n return pd.DataFrame(data={'tileID': tile_id, \"age_cat\": age_cat, 'age_distribution':age_distribution, \"male_proportion\": male_proportion, 'time': time_data}).set_index(['tileID', 'time'])\n\n\n\n\ndef get_daily_density(tiles: np.array(int), day=datetime(year=2020, month=1, day=27)) -> pd.DataFrame:\n \"\"\"Fetches the daily density of tiles.\n \n Fetches the daily density of the tiles and creates a dataframe of the fetched data.\n \n Args:\n tiles: Array of tile id's that daily density data needs to be fetched.\n day: Day to fetch the density data for.\n \n Returns:\n DataFrame containg the tileId and the score. 
The name of the collumns are:\n [score]\n \n The identifier of the row is bassed on the tileID\n \n +----------+-------+\n | | score |\n +----------+-------+\n | tileID | |\n +----------+-------+\n | 44394309 | 1351 |\n +----------+-------+\n | 44394315 | 1103 |\n +----------+-------+\n | 44460297 | 875 |\n +----------+-------+\n | 44488589 | 1387 |\n +----------+-------+\n | 44498028 | 678 |\n +----------+-------+\n \n Tile with k-anonymized dwell density score. If tile not present Swisscom is\n unable to provide a value due to k-anonymization. To find out more on density\n scores read the Heatmap FAQ. \n \"\"\"\n tileID = []\n score = []\n for tiles_subset in [tiles[i:i + MAX_NB_TILES_REQUEST] for i in range(0, len(tiles), MAX_NB_TILES_REQUEST)]:\n api_request = (\n BASE_URL\n + f'/heatmaps/dwell-density/daily/{day.isoformat().split(\"T\")[0]}'\n + \"?tiles=\"\n + \"&tiles=\".join(map(str, tiles_subset))\n )\n data = oauth.get(api_request, headers=headers).json()\n if data.get(\"tiles\") != None:\n for t in data[\"tiles\"]:\n tileID.append(t['tileId'])\n score.append(t[\"score\"])\n return pd.DataFrame(data={'tileID': tileID, 'score':score}).set_index(\"tileID\")\n\n\n\n\ndef get_hourly_density_dataframe(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0)):\n \"\"\"Fetches the hourly density of tiles for 24 hours.\n\n Fetches the hourly density of the tiles and creates a dataframe of the fetched data.\n\n Args:\n tiles: Array of tile id's that daily density data needs to be fetched.\n day: Day to fetch the density data for.\n\n Returns:\n DataFrame containg the tileId and the score. The name of the collumns are:\n [score]\n The identifier of the row is bassed on the [tileID, time]\n \n +----------+---------------------+-------+\n | | | score |\n +----------+---------------------+-------+\n | tileID | time | |\n +----------+---------------------+-------+\n | 44394309 | 2020-01-27T00:00:00 | 52 |\n | +---------------------+-------+\n | | 2020-01-27T01:00:00 | 68 |\n | +---------------------+-------+\n | | 2020-01-27T02:00:00 | 69 |\n | +---------------------+-------+\n | | 2020-01-27T03:00:00 | 69 |\n | +---------------------+-------+\n | | 2020-01-27T04:00:00 | 69 |\n +----------+---------------------+-------+\n\n Tile with k-anonymized dwell density score. If tile not present Swisscom is\n unable to provide a value due to k-anonymization. To find out more on density\n scores read the Heatmap FAQ. 
\n \"\"\"\n \n def get_hourly_density(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0)):\n dates = [(day + timedelta(hours=delta)) for delta in range(24)]\n date2score = dict()\n print(\"getHourlyDensity\")\n for dt in tqdm(dates, desc=\"get_hourly_density: hours\", leave=True):\n for tiles_subset in [tiles[i:i + 100] for i in range(0, len(tiles), 100)]:\n api_request = (\n BASE_URL\n + f'/heatmaps/dwell-density/hourly/{dt.isoformat()}'\n + \"?tiles=\"\n + \"&tiles=\".join(map(str, tiles_subset))\n )\n for t in oauth.get(api_request, headers=headers).json().get(\"tiles\",[]):\n if date2score.get(t['tileId']) == None:\n date2score[t['tileId']] = dict()\n date2score.get(t['tileId'])[dt.isoformat()] = t['score']\n\n return date2score\n \n \n tiles_data = []\n time_data = []\n score = []\n data = get_hourly_density(tiles, day)\n for t in data:\n for time in data[t]:\n time_data.append(time)\n tiles_data.append(t)\n score.append(data[t][time])\n return pd.DataFrame(data={'tileID': tiles_data, 'score':score, 'time': time_data}).set_index(['tileID', 'time'])\n\n\n\n\ndef fetch_data_city(city: str) -> None:\n \"\"\"Fetches the data for a city if the data is not yet cashed on the computer.\n \"\"\"\n compression = \".xz\"\n folder = os.path.join(\".\",\"data\")\n def file_path(file_name: str) -> str:\n return os.path.join(folder, file_name)\n\n if not(os.path.exists(folder)):\n os.mkdir(folder)\n \n \n tiles_path = file_path(f'{city}Tiles.pkl{compression}')\n hourly_dem_path = file_path(f'{city}HourlyDemographics.pkl{compression}')\n hourly_density_path = file_path(f'{city}HourlyDensity.pkl{compression}')\n daily_density_path = file_path(f'{city}DensityDaily.pkl{compression}')\n daily_demographics_path = file_path(f'{city}DemographicsDaily.pkl{compression}')\n\n\n if not(os.path.isfile(tiles_path)):\n tiles = get_tiles(get_municipalityID(city)[0])\n tiles.to_pickle(tiles_path)\n else:\n tiles = pd.read_pickle(tiles_path)\n if not(os.path.isfile(hourly_dem_path)):\n hourly_dem = get_hourly_demographics_dataframe(tiles['tileID'].to_numpy())\n hourly_dem.to_pickle(hourly_dem_path)\n if not(os.path.isfile(hourly_density_path)):\n hourly_dens = get_hourly_density_dataframe(tiles['tileID'].to_numpy())\n hourly_dens.to_pickle(hourly_density_path)\n if not(os.path.isfile(daily_density_path)):\n get_daily_density(tiles['tileID'].to_numpy()).to_pickle(daily_density_path)\n if not(os.path.isfile(daily_demographics_path)):\n get_daily_demographics(tiles['tileID'].to_numpy()).to_pickle(daily_demographics_path)\n\n\ndef clean_cities_list(cities: [str]) -> [str]:\n \"\"\"Cleans the list of cities by removing all the cities that are not found in the \n official list of cities provided by the Federal Statisitics Office.\n \n Args:\n List of cities to check and clean.\n \n Return:\n List containing a subset of the input list such that all elements are valid.\n \"\"\"\n invalid_cities = []\n #validation that the cities names are valid\n for c in cities:\n if len(commune.loc[commune.GDENAME == c].GDENR.to_numpy()) == 0:\n city = []\n sim_value = []\n for f in commune.GDENAME:\n r = SequenceMatcher(None, c, f).ratio()\n if r > 0.5:\n city.append(f)\n sim_value.append(r)\n\n d = pd.DataFrame(data={\"city\": city, \"value\": sim_value})\n \n potential_cities = d.sort_values(\"value\", ascending=False).head(5).city.to_numpy()\n print(f\"City nammed: {c} cannot be found in official records. Did you mean: {potential_cities} ? 
{c} will be ignored.\")\n invalid_cities.append(c)\n return [c for c in cities if not(c in invalid_cities)]\n\n\n# Multithread fetch implementation\n\nclass DownloadWorker(Thread):\n\n def __init__(self, queue):\n Thread.__init__(self)\n self.queue = queue\n\n def run(self):\n while True:\n # Get the work from the queue and expand the tuple\n city = self.queue.get()\n if city == -1:\n self.queue.put(-1)\n break\n try:\n fetch_data_city(city)\n finally:\n self.queue.task_done()\n\n\ndef download_commune_excel() -> None:\n '''\n Downloads the excel spreadsheet from the Swiss Federal Statistical Office that maps the town name to unique ID\n '''\n \n print('Beginning commune file download with requests')\n\n folder = os.path.join(\".\",\"data\")\n if not(os.path.exists(folder)):\n os.mkdir(folder)\n \n url = 'https://www.bfs.admin.ch/bfsstatic/dam/assets/11467406/master'\n r = requests.get(url)\n\n with open(os.path.join(\".\", \"data\", 'commune.xlsx'), 'wb') as f:\n f.write(r.content)\n print(\"End of commune file download\")\n \n\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nBASE_URL = \"https://api.swisscom.com/layer/heatmaps/demo\"\nTOKEN_URL = \"https://consent.swisscom.com/o/oauth2/token\"\nMAX_NB_TILES_REQUEST = 100\nheaders = {\"scs-version\": \"2\"}\nclient_id = \"\" # customer key in the Swisscom digital market place\nclient_secret = \"\" # customer secret in the Swisscom digital market place\n\nif client_id == \"\":\n client_id = os.environ.get(\"CLIENT_ID\", \"\")\n if client_id == \"\":\n client_id = input(\"Enter MIP Client ID: \")\n os.environ[\"CLIENT_ID\"] = client_id\nif client_secret == \"\":\n client_secret = os.environ.get(\"CLIENT_SECRET\", \"\")\n if client_secret == \"\":\n client_secret = getpass.getpass('Enter MIP client secret:')\n os.environ[\"CLIENT_SECRET\"] = client_secret\n\n# Fetch an access token\nclient = BackendApplicationClient(client_id=client_id)\noauth = OAuth2Session(client=client)\noauth.fetch_token(token_url=TOKEN_URL, client_id=client_id,\n client_secret=client_secret)\n\n\ndef main():\n ts = time()\n\n if not(os.path.exists(os.path.join(\".\", \"data\", 'commune.xlsx'))):\n download_commune_excel()\n global commune\n commune = pd.read_excel(os.path.join(\".\", \"data\", 'commune.xlsx'), sheet_name='GDE')\n \n cities = [\"Saas-Fee\", \"Arosa\", \"Bulle\", \"Laax\",\"Belp\" ,\"Saanen\",\"Adelboden\", \"Andermatt\", \"Davos\", \"Bulle\", \"Bern\", \"Genève\", \"Lausanne\", \"Zürich\", \"Neuchâtel\", \"Sion\", \"St. 
Gallen\", \"Appenzell\", \"Solothurn\", \"Zug\", \"Fribourg\", \"Luzern\", \"Ecublens (VD)\", \"Kloten\", \"Le Grand-Saconnex\", \"Nyon\", \"Zermatt\", \"Lugano\"]\n cities = clean_cities_list(cities)\n queue = Queue()\n for x in range(2):\n worker = DownloadWorker(queue)\n worker.deamen = True\n worker.start()\n for c in cities:\n logger.info('Queueing {}'.format(c))\n queue.put(c)\n queue.join()\n\n queue.put(-1)\n logger.info('Took %s', time() - ts)\n\n\n list_of_cities_path = os.path.join(\".\", \"data\",\"CityList.json\")\n cityList=[]\n if os.path.isfile(list_of_cities_path):\n with open(list_of_cities_path, \"r\") as filehandle:\n cityList = json.load(filehandle)\n with open(list_of_cities_path, \"w\") as filehandle:\n for city in cities:\n if not(city in cityList):\n cityList.append(city)\n json.dump(cityList, filehandle)\n \n \nif __name__ == \"__main__\":\n main()\n\n\n \n# Other functions not currently used\n\ndef get_daily_demographics_male(tiles: np.array(int), day=datetime(year=2020, month=1, day=27)) -> pd.DataFrame:\n \"\"\"Fetches Daily demographics.\n \n Fetches the daily male proportion of the tiles and creates a dataframe of the fetched data.\n \n Args:\n tiles: Array of tile id's, what will be used to querry demographic data.\n day: date of the data to be fetched.\n \n Returns:\n DataFrame containing the tileId and the proportion of male. The name of the collumns are:\n [tileID, maleProportion]\n The data is k-anonymized. Therefor is some tiles are not present in the output dataframe it \n means that the data is not available. To find out more about demographics visit the Heatmap FAQ.\n \"\"\"\n \n tileID = []\n maleProportion = []\n\n for tiles_subset in [tiles[i:i + MAX_NB_TILES_REQUEST] for i in range(0, len(tiles), MAX_NB_TILES_REQUEST)]:\n api_request = (\n BASE_URL\n + f'/heatmaps/dwell-demographics/daily/{day.isoformat().split(\"T\")[0]}'\n + \"?tiles=\"\n + \"&tiles=\".join(map(str, tiles_subset))\n )\n data = oauth.get(api_request, headers=headers).json()\n if data.get(\"tiles\") != None:\n for t in data[\"tiles\"]:\n if t.get(\"maleProportion\") != None:\n tileID.append(t['tileId'])\n maleProportion.append(t[\"maleProportion\"])\n return pd.DataFrame(data={'tileID': tileID, 'maleProportion':maleProportion})\n\n\n\ndef get_daily_demographics_age(tiles: np.array(int), day=datetime(year=2020, month=1, day=27)) -> pd.DataFrame:\n \"\"\"Fetches daily demographics of age categories\n \n Fetches the daily demographics, age distribution, of the tiles and creates a dataframe of the fetched data.\n \n Args:\n tiles: Array of tile id's, what will be used to querry demographic data.\n day: date of the data to be fetched.\n \n Returns:\n DataFrame containing the tileId and a array of values corresponding to the age distribution. The name\n of the collumns are:\n [tileID, ageDistribution]\n The data is k-anonymized. Therefor is some tiles are not present in the output dataframe it \n means that the data is not available. 
To find out more about demographics visit the Heatmap FAQ.\n \"\"\"\n tileID = []\n ageDistribution = []\n \n for tiles_subset in [tiles[i:i + MAX_NB_TILES_REQUEST] for i in range(0, len(tiles), MAX_NB_TILES_REQUEST)]:\n api_request = (\n BASE_URL\n + f'/heatmaps/dwell-demographics/daily/{day.isoformat().split(\"T\")[0]}'\n + \"?tiles=\"\n + \"&tiles=\".join(map(str, tiles_subset))\n )\n data = oauth.get(api_request, headers=headers).json()\n for t in data.get(\"tiles\", []):\n if t.get(\"ageDistribution\") != None:\n tileID.append(t['tileId'])\n ageDistribution.append(t[\"ageDistribution\"])\n return pd.DataFrame(data={'tileID': tileID, 'ageDistribution':ageDistribution})\n\n"
] | [
[
"numpy.array",
"pandas.read_pickle",
"pandas.DataFrame",
"pandas.DataFrame.from_dict"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
acmlia/ann_training | [
"8cb39123203445cf79c4bd65350fa4063705a518",
"8cb39123203445cf79c4bd65350fa4063705a518"
] | [
"security/training_ann_3.py",
"src/training_ann_6.py"
] | [
"from __future__ import absolute_import, division, print_function\n\nimport os\nimport time\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nfrom collections import Counter\nimport matplotlib.pyplot as plt\nfrom sklearn.externals import joblib\nfrom sklearn.preprocessing import Normalizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import QuantileTransformer\nfrom sklearn.decomposition import PCA\nfrom src.meteoro_skills import CategoricalScores\nfrom src.meteoro_skills import ContinuousScores\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom keras import backend\nfrom tensorflow.keras import layers\nfrom keras.layers import GaussianNoise\nfrom keras.layers import GaussianDropout\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.wrappers.scikit_learn import KerasClassifier\n#from keras.models import model_from_yaml\nfrom keras.models import load_model\n\nprint('TF version '+tf.__version__)\n\n# ------------------------------------------------------------------------------\n\ndef tic():\n global _start_time\n _start_time = time.time()\n\n\ndef tac():\n t_sec = round(time.time() - _start_time)\n (t_min, t_sec) = divmod(t_sec, 60)\n (t_hour, t_min) = divmod(t_min, 60)\n print('Time passed: {}hour:{}min:{}sec'.format(t_hour, t_min, t_sec))\n\ndef mean_squared_error(y_test, y_pred):\n return K.mean(K.square(y_pred - y_test), axis=-1)\n# ------------------------------------------------------------------------------\n\n\nclass Training:\n \"\"\"\n This module is intended to automate the TensorFlow Neural Network training.\n \"\"\"\n PCA = PCA()\n seed = 0\n run_prefix = ''\n version = ''\n vernick = ''\n file = ''\n path = ''\n fig_title = ''\n path_fig = ''\n mod_out_pth = ''\n mod_out_name = ''\n\n def __init__(self, random_seed=0,\n run_prefix='',\n version='',\n version_nickname='',\n csv_entry='',\n csv_path='',\n figure_path='',\n model_out_path='',\n model_out_name=''):\n\n self.run_prefix = run_prefix\n self.seed = random_seed\n self.ver = version\n self.vernick = version_nickname\n self.file = csv_entry\n self.path = csv_path\n self.path_fig = figure_path\n self.fig_title = run_prefix + version + version_nickname\n self.mod_out_pth = model_out_path\n self.mod_out_name = model_out_name\n # -------------------------------------------------------------------------\n # DROP DATA OUTSIDE INTERVAL\n # -------------------------------------------------------------------------\n \n @staticmethod\n def keep_interval(keepfrom: 0.0, keepto: 1.0, dataframe, target_col: str):\n keepinterval = np.where((dataframe[target_col] >= keepfrom) &\n (dataframe[target_col] <= keepto))\n result = dataframe.iloc[keepinterval]\n return result\n\n # -------------------------------------------------------------------------\n # BUILD MODELS DEFINITIONS : CLAS = CLASSIFICATION and REG = REGRESSION\n # -------------------------------------------------------------------------\n\n @staticmethod\n def build_class_model():\n '''\n Fucntion to create the instance and configuration of the keras\n model(Sequential and Dense).\n '''\n # Create the Keras model:\n model = Sequential()\n model.add(Dense(8, input_dim=4, kernel_initializer='uniform', activation='relu'))\n model.add(Dense(2, kernel_initializer='uniform', activation='relu'))\n model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))\n # Compile model\n model.compile(loss='binary_crossentropy', 
optimizer='SGD', metrics=['accuracy'],)\n return model\n\n @staticmethod\n def build_reg_model(input_size):\n '''\n Fucntion to create the instance and configuration of the keras\n model(Sequential and Dense).\n '''\n model = Sequential()\n model.add(GaussianNoise(0.01, input_shape=(input_size,)))\n model.add(Dense(33, activation='relu'))\n model.add(Dense(12, activation='relu'))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error',\n optimizer='adam',\n metrics=['mean_absolute_error', 'mean_squared_error'])\n return model\n\n # -------------------------------------------------------------------------\n # EXECUTION OF READING INPUT ATTRIBUTES, SCALING, PCA, SPLIT AND RUN MODEL!\n # -------------------------------------------------------------------------\n\n def autoExecClass(self):\n\n # Fix random seed for reproducibility:\n np.random.seed(self.seed)\n\n # Load dataset:\n df = pd.read_csv(os.path.join(self.path, self.file), sep=',', decimal='.')\n x, y= df.loc[:,['36V', '89V', '166V', '190V']], df.loc[:,['TagRain']]\n \n x_arr = np.asanyarray(x)\n y_arr = np.asanyarray(y)\n y_arr = np.ravel(y_arr)\n\n # Scaling the input paramaters:\n# scaler_min_max = MinMaxScaler()\n norm_sc = Normalizer()\n x_normalized= norm_sc.fit_transform(x_arr)\n\n # Split the dataset in test and train samples:\n x_train, x_test, y_train, y_test = train_test_split(x_normalized,\n y_arr, test_size=0.10,\n random_state=101)\n\n # Create the instance for KerasRegressor:\n model=self.build_class_model()\n tic()\n#------------------------------------------------------------------------------\n # Display training progress by printing a single dot for each completed epoch\n\n class PrintDot(keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs):\n if epoch % 100 == 0: print('')\n print('.', end='')\n\n EPOCHS = 1000\n\n history = model.fit(x_train, y_train,\n epochs=EPOCHS, validation_split=0.2, batch_size=10,\n verbose=0, callbacks=[PrintDot()])\n print(history.history.keys())\n\n# ------------------------------------------------------------------------------\n # Visualize the model's training progress using the stats\n # stored in the history object.\n\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n hist.tail()\n# ------------------------------------------------------------------------------\n # Saving model to YAML:\n\n# model_yaml = model.to_yaml()\n# with open(self.mod_out_pth + self.mod_out_name + '.yaml', 'w') as yaml_file:\n# yaml_file.write(model_yaml)\n#\n# # serialize weights to HDF5\n# model.save_weights(self.mod_out_pth + self.mod_out_name + '.h5')\n# print(\"Saved model to disk\")\n# tac()\n\n # Saving the complete model in HDF5:\n model.save(self.mod_out_pth + self.mod_out_name + '.h5')\n\n # ------------------------------------------------------------------------------\n #\n # ------------------------------------------------------------------------------\n\n def autoExecReg(self):\n\n # Fix random seed for reproducibility:\n np.random.seed(self.seed)\n# ------------------------------------------------------------------------------\n\n df_orig = pd.read_csv(os.path.join(self.path, self.file), sep=',', decimal='.')\n\n df_input = df_orig.loc[:, ['10V', '10H', '18V', '18H', '36V', '36H', '89V', '89H',\n '166V', '166H', '183VH', 'sfccode', 'T2m', 'tcwv', 'PCT36', 'PCT89', '89VH',\n 'lat']]\n\n colunas = ['10V', '10H', '18V', '18H', '36V', '36H', '89V', '89H',\n '166V', '166H', '183VH', 'sfccode', 'T2m', 'tcwv', 'PCT36', 'PCT89', '89VH',\n 'lat']\n\n scaler = 
StandardScaler()\n\n normed_input = scaler.fit_transform(df_input)\n df_normed_input = pd.DataFrame(normed_input[:],\n columns=colunas)\n ancillary = df_normed_input.loc[:, ['183VH', 'sfccode', 'T2m', 'tcwv', 'PCT36', 'PCT89', '89VH',\n 'lat']]\n # regions=df_orig.loc[:,['R1','R2','R3','R4','R5']]\n # ------------------------------------------------------------------------------\n # Choosing the number of components:\n\n TB1 = df_normed_input.loc[:, ['10V', '10H', '18V', '18H']]\n TB2 = df_normed_input.loc[:, ['36V', '36H', '89V', '89H', '166V', '166H']]\n\n # ------------------------------------------------------------------------------\n # Verifying the number of components that most contribute:\n pca = self.PCA\n pca1 = pca.fit(TB1)\n plt.plot(np.cumsum(pca1.explained_variance_ratio_))\n plt.xlabel('Number of components for TB1')\n plt.ylabel('Cumulative explained variance');\n plt.savefig(self.path_fig + self.version + 'PCA_TB1.png')\n # ---\n pca_trans1 = PCA(n_components=2)\n pca1 = pca_trans1.fit(TB1)\n TB1_transformed = pca_trans1.transform(TB1)\n print(\"original shape: \", TB1.shape)\n print(\"transformed shape:\", TB1_transformed.shape)\n # ------------------------------------------------------------------------------\n pca = PCA()\n pca2 = pca.fit(TB2)\n plt.plot(np.cumsum(pca2.explained_variance_ratio_))\n plt.xlabel('Number of components for TB2')\n plt.ylabel('Cumulative explained variance');\n plt.savefig(self.path_fig + self.version + 'PCA_TB2.png')\n # ---\n pca_trans2 = PCA(n_components=2)\n pca2 = pca_trans2.fit(TB2)\n TB2_transformed = pca_trans2.transform(TB2)\n print(\"original shape: \", TB2.shape)\n print(\"transformed shape:\", TB2_transformed.shape)\n # ------------------------------------------------------------------------------\n # JOIN THE TREATED VARIABLES IN ONE SINGLE DATASET AGAIN:\n\n PCA1 = pd.DataFrame(TB1_transformed[:],\n columns=['pca1_1', 'pca_2'])\n PCA2 = pd.DataFrame(TB2_transformed[:],\n columns=['pca2_1', 'pca2_2'])\n\n dataset = PCA1.join(PCA2, how='right')\n dataset = dataset.join(ancillary, how='right')\n dataset = dataset.join(df_orig.loc[:, ['sfcprcp']], how='right')\n # ------------------------------------------------------------------------------\n\n dataset = self.keep_interval(0.2, 110.0, dataset, 'sfcprcp')\n\n # ----------------------------------------\n # SUBSET BY SPECIFIC CLASS (UNDERSAMPLING)\n# n = 0.98\n# to_remove = np.random.choice(\n# dataset.index,\n# size=int(dataset.shape[0] * n),\n# replace=False)\n# dataset = dataset.drop(to_remove)\n\n # ------------------------------------------------------------------------------\n # Split the data into train and test\n # Now split the dataset into a training set and a test set.\n # We will use the test set in the final evaluation of our model.\n\n train_dataset = dataset.sample(frac=0.8, random_state=0)\n test_dataset = dataset.drop(train_dataset.index)\n\n # ------------------------------------------------------------------------------\n # Inspect the data:\n # Have a quick look at the joint distribution of a few pairs of columns from the training set.\n\n colunas = list(dataset.columns.values)\n\n # ------------------------------------------------------------------------------\n # Also look at the overall statistics:\n train_stats = train_dataset.describe()\n train_stats.pop(\"sfcprcp\")\n train_stats = train_stats.transpose()\n\n # ------------------------------------------------------------------------------\n # Split features from labels:\n # Separate the target value, or 
\"label\", from the features.\n # This label is the value that you will train the model to predict.\n\n y_train = train_dataset.pop('sfcprcp')\n y_test = test_dataset.pop('sfcprcp')\n\n # ------------------------------------------------------------------------------\n # Normalize the data:\n\n scaler = StandardScaler()\n normed_train_data = scaler.fit_transform(train_dataset)\n normed_test_data = scaler.fit_transform(test_dataset)\n\n # ------------------------------------------------------------------------------\n # Build the model:\n\n model = self.build_reg_model(len(train_dataset.keys()))\n # ------------------------------------------------------------------------------\n # Inspect the model:\n # Use the .summary method to print a simple description of the model\n\n model.summary()\n\n # ------------------------------------------------------------------------------\n # It seems to be working, and it produces a result\n # of the expected shape and type.\n\n # Train the model:\n # Train the model for 1000 epochs, and record the training\n # and validation accuracy in the history object.\n\n # ------------------------------------------------------------------------------\n # Display training progress by printing a single dot for each completed epoch\n\n class PrintDot(keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs):\n if epoch % 100 == 0: print('')\n print('.', end='')\n\n EPOCHS = 1000\n\n history = model.fit(\n normed_train_data, y_train,\n epochs=EPOCHS, validation_split=0.2, verbose=0,\n callbacks=[PrintDot()])\n print(history.history.keys())\n\n # ------------------------------------------------------------------------------\n # Visualize the model's training progress using the stats\n # stored in the history object.\n\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n hist.tail()\n\n self.plot_history(history)\n # ------------------------------------------------------------------------------\n\n model = self.build_reg_model(len(train_dataset.keys()))\n\n # The patience parameter is the amount of epochs to check for improvement\n early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)\n\n history = model.fit(normed_train_data, y_train, epochs=EPOCHS,\n validation_split=0.2, verbose=0, callbacks=[early_stop, PrintDot()])\n\n # ------------------------------------------------------------------------------\n # Ploting again, but with the EarlyStopping apllied:\n\n self.plot_history_EarlyStopping(history)\n\n # The graph shows that on the validation set, the average error\n # is usually around +/- 2 MPG. 
Is this good?\n # We'll leave that decision up to you.\n # ------------------------------------------------------------------------------\n # Let's see how well the model generalizes by using\n # the test set, which we did not use when training the model.\n # This tells us how well we can expect the model to predict\n # when we use it in the real world.\n\n loss, mae, mse = model.evaluate(normed_test_data, y_test, verbose=0)\n\n print(\"Testing set Mean Abs Error: {:5.2f} sfcprcp\".format(mae))\n #------------------------------------------------------------------------------\n # -----------------------------------------------------------------------------\n # Make predictions\n # Finally, predict SFCPRCP values using data in the testing set:\n\n test_predictions = model.predict(normed_test_data).flatten()\n\n # Appplying meteorological skills to verify the performance of the TRAIN/TESTE model, in this case, continous scores:\n\n skills = ContinuousScores()\n val_y_pred_mean, val_y_test_mean, val_mae, val_rmse, val_std, val_fseperc, val_fse, val_corr, val_num_pixels = skills.metrics(y_test, test_predictions)\n \n #converting to text file\n print(\"converting arrays to text files\")\n my_scores = {'val_y_pred_mean': val_y_pred_mean,\n 'val_y_test_mean': val_y_test_mean,\n 'val_mae': val_mae,\n 'val_rmse': val_rmse,\n 'val_std': val_std,\n 'val_fseperc': val_fseperc,\n 'val_fse': val_fse,\n 'val_corr': val_corr,\n 'val_num_pixels': val_num_pixels}\n\n with open(self.path_fig+'continuous_scores_TEST_TRAIN_'+self.version+'.txt', 'w') as myfile:\n myfile.write(str(my_scores))\n print(\"Text file saved!\")\n\n plt.figure()\n plt.scatter(y_test, test_predictions)\n plt.xlabel('True Values [sfcprcp]')\n plt.ylabel('Predictions [sfcprcp]')\n plt.axis('equal')\n plt.axis('square')\n plt.xlim([0, plt.xlim()[1]])\n plt.ylim([0, plt.ylim()[1]])\n plt.plot([-100, 100], [-100, 100])\n fig_name = self.fig_title + \"_plot_scatter_y_test_vs_y_pred.png\"\n plt.savefig(self.path_fig + fig_name)\n plt.clf()\n\n #------------------------------------------------------------------------------\n ax = plt.gca()\n ax.plot(y_test,test_predictions, 'o', c='blue', alpha=0.07, markeredgecolor='none')\n ax.set_yscale('log')\n ax.set_xscale('log')\n ax.set_xlabel('True Values [sfcprcp]')\n ax.set_ylabel('Predictions [sfcprcp]')\n plt.plot([-100, 100], [-100, 100])\n fig_name = self.fig_title + \"_plot_scatter_LOG_y_test_vs_y_pred.png\"\n plt.savefig(self.path_fig+fig_name)\n plt.clf()\n #------------------------------------------------------------------------------\n # ------------------------------------------------------------------------------\n # It looks like our model predicts reasonably well.\n # Let's take a look at the error distribution.\n\n error = test_predictions - y_test\n plt.hist(error, bins=25)\n plt.xlabel(\"Prediction Error [sfcprcp]\")\n plt.ylabel(\"Count\")\n fig_name = self.fig_title + \"_prediction_error.png\"\n plt.savefig(self.path_fig + fig_name)\n plt.clf()\n \n # ------------------------------------------------------------------------------\n # HISTROGRAM 2D\n\n plt.hist2d(y_test, test_predictions, cmin=1, bins=(50, 50), cmap=plt.cm.jet, range=np.array([(0.2, 110), (0.2, 110)]))\n plt.axis('equal')\n plt.axis('square')\n plt.plot([0, 100], [0, 100], ls=\"--\", c=\".3\")\n plt.xlim([0, max(y_test)])\n plt.ylim([0, max(y_test)])\n plt.colorbar()\n plt.xlabel(\"Observed rain rate (mm/h) - Training\")\n plt.ylabel(\"Predicted rain rate (mm/h) - Training\")\n fig_name = self.fig_title + 
\"_hist2D.png\"\n plt.savefig(self.path_fig + fig_name)\n plt.clf()\n\n # ------------------------------------------------------------------------------\n # Saving model to YAML:\n\n model_yaml = model.to_yaml()\n with open(self.mod_out_pth + self.mod_out_name + '.yaml', 'w') as yaml_file:\n yaml_file.write(model_yaml)\n\n # serialize weights to HDF5\n model.save_weights(self.mod_out_pth + self.mod_out_name + '.h5')\n print(\"Saved model to disk\")\n\n # Saving the complete model in HDF5:\n model.save(self.mod_out_pth + self.mod_out_name + '_tf.h5')\n\n # -------------------------------------------------------------------------\n # FUNCTIONS TO MAKE PLOTS ABOUT TRAINING:\n # -------------------------------------------------------------------------\n def plot_history(self, history):\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n\n plt.xlabel('Epoch')\n plt.ylabel('Mean Abs Error [sfcprcp]')\n plt.plot(hist['epoch'], hist['mean_absolute_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_absolute_error'],\n label='Val Error')\n ylim_max = hist.val_mean_absolute_error.max() + 10\n plt.ylim([0, ylim_max])\n plt.legend()\n\n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Square Error [$scfprcp^2$]')\n plt.plot(hist['epoch'], hist['mean_squared_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_squared_error'],\n label='Val Error')\n ylim_max = hist.val_mean_squared_error.max() + 10\n plt.ylim([0, ylim_max])\n plt.legend()\n # plt.show()\n fig_name = self.fig_title + \"_error_per_epochs_history.png\"\n plt.savefig(self.path_fig + fig_name)\n\n def plot_history_EarlyStopping(self, history):\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n\n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Abs Error [sfcprcp]')\n plt.plot(hist['epoch'], hist['mean_absolute_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_absolute_error'],\n label='Val Error')\n ylim_max = hist.val_mean_absolute_error.max() + 10\n plt.ylim([0, ylim_max])\n\n plt.legend()\n\n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Square Error [$sfcprcp^2$]')\n plt.plot(hist['epoch'], hist['mean_squared_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_squared_error'],\n label='Val Error')\n ylim_max = hist.val_mean_squared_error.max() + 10\n plt.ylim([0, ylim_max])\n\n plt.legend()\n\n fig_name = self.fig_title + \"_error_per_epochs_EarlyStopping.png\"\n plt.savefig(self.path_fig + fig_name)\n",
"from __future__ import absolute_import, division, print_function\n\nimport os\nimport time\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nfrom collections import Counter\nimport matplotlib.pyplot as plt\nfrom sklearn.externals import joblib\nfrom sklearn.preprocessing import Normalizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import QuantileTransformer\nfrom sklearn.decomposition import PCA\nfrom src.meteoro_skills import CategoricalScores\nfrom src.meteoro_skills import ContinuousScores\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom keras import backend\nfrom tensorflow.keras import layers\nfrom keras.layers import GaussianNoise\nfrom keras.layers import GaussianDropout\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.wrappers.scikit_learn import KerasClassifier\n#from keras.models import model_from_yaml\nfrom keras.models import load_model\n\nprint('TF version '+tf.__version__)\n\n# ------------------------------------------------------------------------------\n\ndef tic():\n global _start_time\n _start_time = time.time()\n\n\ndef tac():\n t_sec = round(time.time() - _start_time)\n (t_min, t_sec) = divmod(t_sec, 60)\n (t_hour, t_min) = divmod(t_min, 60)\n print('Time passed: {}hour:{}min:{}sec'.format(t_hour, t_min, t_sec))\n\ndef mean_squared_error(y_test, y_pred):\n return K.mean(K.square(y_pred - y_test), axis=-1)\n# ------------------------------------------------------------------------------\n\n\nclass Training:\n \"\"\"\n This module is intended to automate the TensorFlow Neural Network training.\n \"\"\"\n PCA = PCA()\n seed = 0\n run_prefix = ''\n version = ''\n vernick = ''\n file = ''\n path = ''\n fig_title = ''\n path_fig = ''\n mod_out_pth = ''\n mod_out_name = ''\n\n def __init__(self, random_seed=0,\n run_prefix='',\n version='',\n version_nickname='',\n csv_entry='',\n csv_path='',\n figure_path='',\n model_out_path='',\n model_out_name=''):\n\n self.run_prefix = run_prefix\n self.seed = random_seed\n self.version = version\n self.vernick = version_nickname\n self.file = csv_entry\n self.path = csv_path\n self.path_fig = figure_path\n self.fig_title = run_prefix + version + version_nickname\n self.mod_out_pth = model_out_path\n self.mod_out_name = model_out_name\n # -------------------------------------------------------------------------\n # DROP DATA OUTSIDE INTERVAL\n # -------------------------------------------------------------------------\n \n @staticmethod\n def keep_interval(keepfrom: 0.0, keepto: 1.0, dataframe, target_col: str):\n keepinterval = np.where((dataframe[target_col] >= keepfrom) &\n (dataframe[target_col] <= keepto))\n result = dataframe.iloc[keepinterval]\n return result\n\n # -------------------------------------------------------------------------\n # BUILD MODELS DEFINITIONS : CLAS = CLASSIFICATION and REG = REGRESSION\n # -------------------------------------------------------------------------\n\n @staticmethod\n def build_class_model():\n '''\n Fucntion to create the instance and configuration of the keras\n model(Sequential and Dense).\n '''\n # Create the Keras model:\n model = Sequential()\n model.add(Dense(8, input_dim=4, kernel_initializer='uniform', activation='relu'))\n model.add(Dense(2, kernel_initializer='uniform', activation='relu'))\n model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))\n # Compile model\n model.compile(loss='binary_crossentropy', 
optimizer='SGD', metrics=['accuracy'],)\n return model\n\n @staticmethod\n def build_reg_model(input_size):\n '''\n Fucntion to create the instance and configuration of the keras\n model(Sequential and Dense).\n '''\n model = Sequential()\n model.add(GaussianNoise(0.01, input_shape=(input_size,)))\n model.add(Dense(33, activation='relu'))\n model.add(Dense(12, activation='relu'))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error',\n optimizer='adam',\n metrics=['mean_absolute_error', 'mean_squared_error'])\n return model\n\n # -------------------------------------------------------------------------\n # EXECUTION OF READING INPUT ATTRIBUTES, SCALING, PCA, SPLIT AND RUN MODEL!\n # -------------------------------------------------------------------------\n\n def autoExecClass(self):\n\n # Fix random seed for reproducibility:\n np.random.seed(self.seed)\n\n # Load dataset:\n df = pd.read_csv(os.path.join(self.path, self.file), sep=',', decimal='.')\n x, y= df.loc[:,['36V', '89V', '166V', '190V']], df.loc[:,['TagRain']]\n \n x_arr = np.asanyarray(x)\n y_arr = np.asanyarray(y)\n y_arr = np.ravel(y_arr)\n\n # Scaling the input paramaters:\n# scaler_min_max = MinMaxScaler()\n norm_sc = Normalizer()\n x_normalized= norm_sc.fit_transform(x_arr)\n\n # Split the dataset in test and train samples:\n x_train, x_test, y_train, y_test = train_test_split(x_normalized,\n y_arr, test_size=0.10,\n random_state=101)\n\n # Create the instance for KerasRegressor:\n model=self.build_class_model()\n tic()\n#------------------------------------------------------------------------------\n # Display training progress by printing a single dot for each completed epoch\n\n class PrintDot(keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs):\n if epoch % 100 == 0: print('')\n print('.', end='')\n\n EPOCHS = 1000\n\n history = model.fit(x_train, y_train,\n epochs=EPOCHS, validation_split=0.2, batch_size=10,\n verbose=0, callbacks=[PrintDot()])\n print(history.history.keys())\n\n# ------------------------------------------------------------------------------\n # Visualize the model's training progress using the stats\n # stored in the history object.\n\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n hist.tail()\n# ------------------------------------------------------------------------------\n # Saving model to YAML:\n\n# model_yaml = model.to_yaml()\n# with open(self.mod_out_pth + self.mod_out_name + '.yaml', 'w') as yaml_file:\n# yaml_file.write(model_yaml)\n#\n# # serialize weights to HDF5\n# model.save_weights(self.mod_out_pth + self.mod_out_name + '.h5')\n# print(\"Saved model to disk\")\n# tac()\n\n # Saving the complete model in HDF5:\n model.save(self.mod_out_pth + self.mod_out_name + '.h5')\n\n # ------------------------------------------------------------------------------\n #\n # ------------------------------------------------------------------------------\n\n def autoExecReg(self):\n\n # Fix random seed for reproducibility:\n np.random.seed(self.seed)\n# ------------------------------------------------------------------------------\n\n df_orig = pd.read_csv(os.path.join(self.path, self.file), sep=',', decimal='.')\n\n df_input = df_orig.loc[:, ['10V', '10H', '18V', '18H', '36V', '36H', '89V', '89H',\n '166V', '166H', '183VH', 'sfccode', 'T2m', 'tcwv', 'PCT36', 'PCT89', '89VH',\n 'lat']]\n\n colunas = ['10V', '10H', '18V', '18H', '36V', '36H', '89V', '89H',\n '166V', '166H', '183VH', 'sfccode', 'T2m', 'tcwv', 'PCT36', 'PCT89', '89VH',\n 'lat']\n\n scaler = 
StandardScaler()\n\n normed_input = scaler.fit_transform(df_input)\n df_normed_input = pd.DataFrame(normed_input[:],\n columns=colunas)\n ancillary = df_normed_input.loc[:, ['183VH', 'sfccode', 'T2m', 'tcwv', 'PCT36', 'PCT89', '89VH',\n 'lat']]\n # regions=df_orig.loc[:,['R1','R2','R3','R4','R5']]\n # ------------------------------------------------------------------------------\n # Choosing the number of components:\n\n TB1 = df_normed_input.loc[:, ['10V', '10H', '18V', '18H']]\n TB2 = df_normed_input.loc[:, ['36V', '36H', '89V', '89H', '166V', '166H']]\n\n # ------------------------------------------------------------------------------\n # Verifying the number of components that most contribute:\n pca = self.PCA\n pca1 = pca.fit(TB1)\n plt.plot(np.cumsum(pca1.explained_variance_ratio_))\n plt.xlabel('Number of components for TB1')\n plt.ylabel('Cumulative explained variance');\n plt.savefig(self.path_fig + self.version + 'PCA_TB1.png')\n # ---\n pca_trans1 = PCA(n_components=2)\n pca1 = pca_trans1.fit(TB1)\n TB1_transformed = pca_trans1.transform(TB1)\n print(\"original shape: \", TB1.shape)\n print(\"transformed shape:\", TB1_transformed.shape)\n # ------------------------------------------------------------------------------\n pca = PCA()\n pca2 = pca.fit(TB2)\n plt.plot(np.cumsum(pca2.explained_variance_ratio_))\n plt.xlabel('Number of components for TB2')\n plt.ylabel('Cumulative explained variance');\n plt.savefig(self.path_fig + self.version + 'PCA_TB2.png')\n # ---\n pca_trans2 = PCA(n_components=2)\n pca2 = pca_trans2.fit(TB2)\n TB2_transformed = pca_trans2.transform(TB2)\n print(\"original shape: \", TB2.shape)\n print(\"transformed shape:\", TB2_transformed.shape)\n # ------------------------------------------------------------------------------\n # JOIN THE TREATED VARIABLES IN ONE SINGLE DATASET AGAIN:\n\n PCA1 = pd.DataFrame(TB1_transformed[:],\n columns=['pca1_1', 'pca_2'])\n PCA2 = pd.DataFrame(TB2_transformed[:],\n columns=['pca2_1', 'pca2_2'])\n\n dataset = PCA1.join(PCA2, how='right')\n dataset = dataset.join(ancillary, how='right')\n dataset = dataset.join(df_orig.loc[:, ['sfcprcp']], how='right')\n # ------------------------------------------------------------------------------\n\n dataset = self.keep_interval(0.2, 75, dataset, 'sfcprcp')\n\n # ----------------------------------------\n# SUBSET BY SPECIFIC CLASS (UNDERSAMPLING)\n# n = 0.98\n# to_remove = np.random.choice(\n# dataset.index,\n# size=int(dataset.shape[0] * n),\n# replace=False)\n# dataset = dataset.drop(to_remove)\n\n # ------------------------------------------------------------------------------\n # Split the data into train and test\n # Now split the dataset into a training set and a test set.\n # We will use the test set in the final evaluation of our model.\n\n train_dataset = dataset.sample(frac=0.8, random_state=0)\n test_dataset = dataset.drop(train_dataset.index)\n\n # ------------------------------------------------------------------------------\n # Inspect the data:\n # Have a quick look at the joint distribution of a few pairs of columns from the training set.\n\n colunas = list(dataset.columns.values)\n\n # ------------------------------------------------------------------------------\n # Also look at the overall statistics:\n train_stats = train_dataset.describe()\n train_stats.pop(\"sfcprcp\")\n train_stats = train_stats.transpose()\n\n # ------------------------------------------------------------------------------\n # Split features from labels:\n # Separate the target value, or 
\"label\", from the features.\n # This label is the value that you will train the model to predict.\n\n y_train = train_dataset.pop('sfcprcp')\n y_test = test_dataset.pop('sfcprcp')\n\n # ------------------------------------------------------------------------------\n # Normalize the data:\n\n scaler = StandardScaler()\n normed_train_data = scaler.fit_transform(train_dataset)\n normed_test_data = scaler.fit_transform(test_dataset)\n\n # ------------------------------------------------------------------------------\n # Build the model:\n\n model = self.build_reg_model(len(train_dataset.keys()))\n # ------------------------------------------------------------------------------\n # Inspect the model:\n # Use the .summary method to print a simple description of the model\n\n model.summary()\n\n # ------------------------------------------------------------------------------\n # It seems to be working, and it produces a result\n # of the expected shape and type.\n\n # Train the model:\n # Train the model for 1000 epochs, and record the training\n # and validation accuracy in the history object.\n\n # ------------------------------------------------------------------------------\n # Display training progress by printing a single dot for each completed epoch\n\n class PrintDot(keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs):\n if epoch % 100 == 0: print('')\n print('.', end='')\n\n EPOCHS = 1000\n\n history = model.fit(\n normed_train_data, y_train,\n epochs=EPOCHS, validation_split=0.2, verbose=0,\n callbacks=[PrintDot()])\n print(history.history.keys())\n\n # ------------------------------------------------------------------------------\n # Visualize the model's training progress using the stats\n # stored in the history object.\n\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n hist.tail()\n\n self.plot_history(history)\n # ------------------------------------------------------------------------------\n\n model = self.build_reg_model(len(train_dataset.keys()))\n\n # The patience parameter is the amount of epochs to check for improvement\n early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)\n\n history = model.fit(normed_train_data, y_train, epochs=EPOCHS,\n validation_split=0.2, verbose=0, callbacks=[early_stop, PrintDot()])\n\n # ------------------------------------------------------------------------------\n # Ploting again, but with the EarlyStopping apllied:\n\n self.plot_history_EarlyStopping(history)\n\n # The graph shows that on the validation set, the average error\n # is usually around +/- 2 MPG. 
Is this good?\n # We'll leave that decision up to you.\n # ------------------------------------------------------------------------------\n # Let's see how well the model generalizes by using\n # the test set, which we did not use when training the model.\n # This tells us how well we can expect the model to predict\n # when we use it in the real world.\n\n loss, mae, mse = model.evaluate(normed_test_data, y_test, verbose=0)\n\n print(\"Testing set Mean Abs Error: {:5.2f} sfcprcp\".format(mae))\n #------------------------------------------------------------------------------\n # -----------------------------------------------------------------------------\n # Make predictions\n # Finally, predict SFCPRCP values using data in the testing set:\n\n test_predictions = model.predict(normed_test_data).flatten()\n\n # Appplying meteorological skills to verify the performance of the TRAIN/TESTE model, in this case, continous scores:\n\n skills = ContinuousScores()\n val_y_pred_mean, val_y_test_mean, val_mae, val_rmse, val_std, val_fseperc, val_fse, val_corr, val_num_pixels = skills.metrics(y_test, test_predictions)\n \n #converting to text file\n print(\"converting arrays to text files\")\n my_scores = {'val_y_pred_mean': val_y_pred_mean,\n 'val_y_test_mean': val_y_test_mean,\n 'val_mae': val_mae,\n 'val_rmse': val_rmse,\n 'val_std': val_std,\n 'val_fseperc': val_fseperc,\n 'val_fse': val_fse,\n 'val_corr': val_corr,\n 'val_num_pixels': val_num_pixels}\n\n with open(self.path_fig+'continuous_scores_TEST_TRAIN_'+self.version+'.txt', 'w') as myfile:\n myfile.write(str(my_scores))\n print(\"Text file saved!\")\n\n plt.figure()\n plt.scatter(y_test, test_predictions)\n plt.xlabel('True Values [sfcprcp]')\n plt.ylabel('Predictions [sfcprcp]')\n plt.axis('equal')\n plt.axis('square')\n plt.xlim([0, plt.xlim()[1]])\n plt.ylim([0, plt.ylim()[1]])\n plt.plot([-100, 100], [-100, 100])\n fig_name = self.fig_title + \"_plot_scatter_y_test_vs_y_pred.png\"\n plt.savefig(self.path_fig + fig_name)\n plt.clf()\n\n #------------------------------------------------------------------------------\n ax = plt.gca()\n ax.plot(y_test,test_predictions, 'o', c='blue', alpha=0.07, markeredgecolor='none')\n ax.set_yscale('log')\n ax.set_xscale('log')\n ax.set_xlabel('True Values [sfcprcp]')\n ax.set_ylabel('Predictions [sfcprcp]')\n plt.plot([-100, 100], [-100, 100])\n fig_name = self.fig_title + \"_plot_scatter_LOG_y_test_vs_y_pred.png\"\n plt.savefig(self.path_fig+fig_name)\n plt.clf()\n #------------------------------------------------------------------------------\n # ------------------------------------------------------------------------------\n # It looks like our model predicts reasonably well.\n # Let's take a look at the error distribution.\n\n error = test_predictions - y_test\n plt.hist(error, bins=25)\n plt.xlabel(\"Prediction Error [sfcprcp]\")\n plt.ylabel(\"Count\")\n fig_name = self.fig_title + \"_prediction_error.png\"\n plt.savefig(self.path_fig + fig_name)\n plt.clf()\n \n # ------------------------------------------------------------------------------\n # HISTROGRAM 2D\n\n plt.hist2d(y_test, test_predictions, cmin=1, bins=(50, 50), cmap=plt.cm.jet, range=np.array([(0.2, 60), (0.2, 60)]))\n plt.axis('equal')\n plt.axis('square')\n plt.plot([0, 100], [0, 100], ls=\"--\", c=\".3\")\n plt.xlim([0, max(y_test)])\n plt.ylim([0, max(y_test)])\n plt.colorbar()\n plt.xlabel(\"Observed rain rate (mm/h) - Training\")\n plt.ylabel(\"Predicted rain rate (mm/h) - Training\")\n fig_name = self.fig_title + 
\"_hist2D.png\"\n plt.savefig(self.path_fig + fig_name)\n plt.clf()\n # ------------------------------------------------------------------------------\n # Saving model to YAML:\n\n model_yaml = model.to_yaml()\n with open(self.mod_out_pth + self.mod_out_name + '.yaml', 'w') as yaml_file:\n yaml_file.write(model_yaml)\n\n # serialize weights to HDF5\n model.save_weights(self.mod_out_pth + self.mod_out_name + '.h5')\n print(\"Saved model to disk\")\n\n # Saving the complete model in HDF5:\n model.save(self.mod_out_pth + self.mod_out_name + '_tf.h5')\n\n # -------------------------------------------------------------------------\n # FUNCTIONS TO MAKE PLOTS ABOUT TRAINING:\n # -------------------------------------------------------------------------\n def plot_history(self, history):\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n\n plt.xlabel('Epoch')\n plt.ylabel('Mean Abs Error [sfcprcp]')\n plt.plot(hist['epoch'], hist['mean_absolute_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_absolute_error'],\n label='Val Error')\n ylim_max = hist.val_mean_absolute_error.max() + 10\n plt.ylim([0, ylim_max])\n plt.legend()\n\n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Square Error [$scfprcp^2$]')\n plt.plot(hist['epoch'], hist['mean_squared_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_squared_error'],\n label='Val Error')\n ylim_max = hist.val_mean_squared_error.max() + 10\n plt.ylim([0, ylim_max])\n plt.legend()\n # plt.show()\n fig_name = self.fig_title + \"_error_per_epochs_history.png\"\n plt.savefig(self.path_fig + fig_name)\n\n def plot_history_EarlyStopping(self, history):\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n\n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Abs Error [sfcprcp]')\n plt.plot(hist['epoch'], hist['mean_absolute_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_absolute_error'],\n label='Val Error')\n ylim_max = hist.val_mean_absolute_error.max() + 10\n plt.ylim([0, ylim_max])\n\n plt.legend()\n\n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Square Error [$sfcprcp^2$]')\n plt.plot(hist['epoch'], hist['mean_squared_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_squared_error'],\n label='Val Error')\n ylim_max = hist.val_mean_squared_error.max() + 10\n plt.ylim([0, ylim_max])\n\n plt.legend()\n\n fig_name = self.fig_title + \"_error_per_epochs_EarlyStopping.png\"\n plt.savefig(self.path_fig + fig_name)\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.cumsum",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"numpy.where",
"matplotlib.pyplot.gca",
"numpy.asanyarray",
"matplotlib.pyplot.axis",
"numpy.ravel",
"tensorflow.keras.callbacks.EarlyStopping",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylim",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.savefig",
"numpy.array",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.clf",
"sklearn.preprocessing.Normalizer",
"matplotlib.pyplot.xlabel",
"sklearn.preprocessing.StandardScaler"
],
[
"matplotlib.pyplot.legend",
"numpy.cumsum",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"numpy.where",
"matplotlib.pyplot.gca",
"numpy.asanyarray",
"matplotlib.pyplot.axis",
"numpy.ravel",
"tensorflow.keras.callbacks.EarlyStopping",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylim",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.savefig",
"numpy.array",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.clf",
"sklearn.preprocessing.Normalizer",
"matplotlib.pyplot.xlabel",
"sklearn.preprocessing.StandardScaler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
vballoli/flower | [
"e8c58c09a8fd4d29186b2f590b0cbb44bb022e9a",
"e8c58c09a8fd4d29186b2f590b0cbb44bb022e9a"
] | [
"src/py/flwr_experimental/baseline/tf_fashion_mnist/gen_plots.py",
"src/py/flwr_example/pytorch/server.py"
] | [
"# Copyright 2020 Adap GmbH. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Generate plots for Fashion-MNIST results.\"\"\"\n\n\nfrom typing import List, Tuple\n\nimport numpy as np\n\nfrom flwr_experimental.baseline.plot import bar_chart, line_chart\n\nRESULTS = {\n \"fedavg-t10\": [\n (0, 0.03759999945759773),\n (1, 0.03759999945759773),\n (2, 0.03759999945759773),\n (3, 0.03759999945759773),\n (4, 0.03759999945759773),\n (5, 0.03759999945759773),\n (6, 0.03759999945759773),\n (7, 0.03759999945759773),\n (8, 0.03759999945759773),\n (9, 0.03759999945759773),\n (10, 0.03759999945759773),\n (11, 0.03759999945759773),\n (12, 0.03759999945759773),\n (13, 0.03759999945759773),\n (14, 0.03759999945759773),\n (15, 0.03759999945759773),\n (16, 0.03759999945759773),\n (17, 0.03759999945759773),\n (18, 0.03759999945759773),\n (19, 0.03759999945759773),\n (20, 0.03759999945759773),\n ],\n \"fedavg-t12\": [\n (0, 0.03759999945759773),\n (1, 0.03759999945759773),\n (2, 0.03759999945759773),\n (3, 0.03759999945759773),\n (4, 0.03759999945759773),\n (5, 0.03759999945759773),\n (6, 0.03759999945759773),\n (7, 0.03759999945759773),\n (8, 0.03759999945759773),\n (9, 0.03759999945759773),\n (10, 0.03759999945759773),\n (11, 0.03759999945759773),\n (12, 0.03759999945759773),\n (13, 0.03759999945759773),\n (14, 0.03759999945759773),\n (15, 0.03759999945759773),\n (16, 0.03759999945759773),\n (17, 0.03759999945759773),\n (18, 0.03759999945759773),\n (19, 0.03759999945759773),\n (20, 0.03759999945759773),\n ],\n \"fedavg-t14\": [\n (0, 0.03759999945759773),\n (1, 0.03759999945759773),\n (2, 0.6743999719619751),\n (3, 0.6802999973297119),\n (4, 0.6802999973297119),\n (5, 0.6802999973297119),\n (6, 0.6802999973297119),\n (7, 0.7853999733924866),\n (8, 0.7853999733924866),\n (9, 0.7876999974250793),\n (10, 0.7642999887466431),\n (11, 0.8054999709129333),\n (12, 0.8181999921798706),\n (13, 0.8108999729156494),\n (14, 0.7907000184059143),\n (15, 0.763700008392334),\n (16, 0.8091999888420105),\n (17, 0.8296999931335449),\n (18, 0.8123999834060669),\n (19, 0.8123999834060669),\n (20, 0.8101999759674072),\n ],\n \"fedavg-t16\": [\n (0, 0.03759999945759773),\n (1, 0.7197999954223633),\n (2, 0.7720999717712402),\n (3, 0.7900999784469604),\n (4, 0.7811999917030334),\n (5, 0.7724000215530396),\n (6, 0.8023999929428101),\n (7, 0.8043000102043152),\n (8, 0.8230999708175659),\n (9, 0.8327999711036682),\n (10, 0.8299000263214111),\n (11, 0.8402000069618225),\n (12, 0.853600025177002),\n (13, 0.8370000123977661),\n (14, 0.83160001039505),\n (15, 0.8424000144004822),\n (16, 0.830299973487854),\n (17, 0.8476999998092651),\n (18, 0.8632000088691711),\n (19, 0.8636999726295471),\n (20, 0.8657000064849854),\n ],\n \"fedfs-t10\": [\n (0, 0.03759999945759773),\n (1, 0.7343000173568726),\n (2, 0.7664999961853027),\n (3, 0.7900000214576721),\n (4, 0.805899977684021),\n (5, 0.8237000107765198),\n (6, 0.8406999707221985),\n (7, 
0.8263000249862671),\n (8, 0.8442999720573425),\n (9, 0.8564000129699707),\n (10, 0.8651999831199646),\n (11, 0.8375999927520752),\n (12, 0.8646000027656555),\n (13, 0.8669999837875366),\n (14, 0.861299991607666),\n (15, 0.8773999810218811),\n (16, 0.800599992275238),\n (17, 0.8676999807357788),\n (18, 0.8763999938964844),\n (19, 0.8695999979972839),\n (20, 0.873199999332428),\n ],\n \"fedfs-t12\": [\n (0, 0.03759999945759773),\n (1, 0.7153000235557556),\n (2, 0.7835999727249146),\n (3, 0.8083999752998352),\n (4, 0.816100001335144),\n (5, 0.8215000033378601),\n (6, 0.8429999947547913),\n (7, 0.8464000225067139),\n (8, 0.8603000044822693),\n (9, 0.8482999801635742),\n (10, 0.8450000286102295),\n (11, 0.866599977016449),\n (12, 0.863099992275238),\n (13, 0.8709999918937683),\n (14, 0.873199999332428),\n (15, 0.8701000213623047),\n (16, 0.8600000143051147),\n (17, 0.8766999840736389),\n (18, 0.8697999715805054),\n (19, 0.8795999884605408),\n (20, 0.8830999732017517),\n ],\n \"fedfs-t14\": [\n (0, 0.03759999945759773),\n (1, 0.7245000004768372),\n (2, 0.7972000241279602),\n (3, 0.8059999942779541),\n (4, 0.8252999782562256),\n (5, 0.8334000110626221),\n (6, 0.8560000061988831),\n (7, 0.8510000109672546),\n (8, 0.8650000095367432),\n (9, 0.8621000051498413),\n (10, 0.866599977016449),\n (11, 0.8615999817848206),\n (12, 0.8636999726295471),\n (13, 0.8740000128746033),\n (14, 0.866100013256073),\n (15, 0.867900013923645),\n (16, 0.83160001039505),\n (17, 0.8741999864578247),\n (18, 0.8736000061035156),\n (19, 0.8810999989509583),\n (20, 0.8762000203132629),\n ],\n \"fedfs-t16\": [\n (0, 0.03759999945759773),\n (1, 0.7476999759674072),\n (2, 0.7982000112533569),\n (3, 0.8276000022888184),\n (4, 0.8256999850273132),\n (5, 0.8312000036239624),\n (6, 0.8536999821662903),\n (7, 0.8483999967575073),\n (8, 0.85589998960495),\n (9, 0.8687000274658203),\n (10, 0.8664000034332275),\n (11, 0.8586999773979187),\n (12, 0.8662999868392944),\n (13, 0.8754000067710876),\n (14, 0.878600001335144),\n (15, 0.8763999938964844),\n (16, 0.748199999332428),\n (17, 0.8806999921798706),\n (18, 0.8794000148773193),\n (19, 0.8813999891281128),\n (20, 0.8708000183105469),\n ],\n}\n\nRESULTS_WALL_CLOCK_TIME = {\n \"fedavg-14\": 218.49,\n \"fedfs-14\": 61.16,\n \"fedavg-16\": 153.56,\n \"fedfs-16\": 66.84,\n}\n\n\ndef accuracy_t10() -> None:\n \"\"\"Generate plots.\"\"\"\n lines = [\n (\"FedAvg, t=10\", RESULTS[\"fedavg-t10\"]),\n (\"FedFS, t=10\", RESULTS[\"fedfs-t10\"]),\n ]\n plot(lines, \"fmnist-progress-t10\")\n\n\ndef accuracy_t12() -> None:\n \"\"\"Generate plots.\"\"\"\n lines = [\n (\"FedAvg, t=12\", RESULTS[\"fedavg-t12\"]),\n (\"FedFS, t=12\", RESULTS[\"fedfs-t12\"]),\n ]\n plot(lines, \"fmnist-progress-t12\")\n\n\ndef accuracy_t14() -> None:\n \"\"\"Generate plots.\"\"\"\n lines = [\n (\"FedAvg, t=14\", RESULTS[\"fedavg-t14\"]),\n (\"FedFS, t=14\", RESULTS[\"fedfs-t14\"]),\n ]\n plot(lines, \"fmnist-progress-t14\")\n\n\ndef accuracy_t16() -> None:\n \"\"\"Generate plots.\"\"\"\n lines = [\n (\"FedAvg, t=16\", RESULTS[\"fedavg-t16\"]),\n (\"FedFS, t=16\", RESULTS[\"fedfs-t16\"]),\n ]\n plot(lines, \"fmnist-progress-t16\")\n\n\ndef accuracy_fedavg_vs_fedfs() -> None:\n \"\"\"Comparision of FedAvg vs FedFS.\"\"\"\n fedavg = [\n RESULTS[\"fedavg-t10\"][-1][1],\n RESULTS[\"fedavg-t12\"][-1][1],\n RESULTS[\"fedavg-t14\"][-1][1],\n RESULTS[\"fedavg-t16\"][-1][1],\n ]\n fedfs = [\n RESULTS[\"fedfs-t10\"][-1][1],\n RESULTS[\"fedfs-t12\"][-1][1],\n RESULTS[\"fedfs-t14\"][-1][1],\n RESULTS[\"fedfs-t16\"][-1][1],\n ]\n 
bar_chart(\n y_values=[\n np.array([x * 100 for x in fedavg]),\n np.array([x * 100 for x in fedfs]),\n ],\n bar_labels=[\"FedAvg\", \"FedFS\"],\n x_label=\"Timeout\",\n x_tick_labels=[\"T=10\", \"T=12\", \"T=14\", \"T=16\"],\n y_label=\"Accuracy\",\n filename=\"fmnist-accuracy_fedavg_vs_fedfs\",\n )\n\n\ndef wall_clock_time_fedavg_vs_fedfs() -> None:\n \"\"\"Comparision of FedAvg vs FedFS.\"\"\"\n\n bar_chart(\n y_values=[\n np.array(\n [\n RESULTS_WALL_CLOCK_TIME[\"fedavg-14\"],\n RESULTS_WALL_CLOCK_TIME[\"fedavg-16\"],\n ]\n ),\n np.array(\n [\n RESULTS_WALL_CLOCK_TIME[\"fedfs-t14\"],\n RESULTS_WALL_CLOCK_TIME[\"fedfs-16\"],\n ]\n ),\n ],\n bar_labels=[\"FedAvg\", \"FedFS\"],\n x_label=\"Timeout\",\n x_tick_labels=[\"T=14\", \"T=16\"],\n y_label=\"Completion time\",\n filename=\"fmnist-time_fedavg_vs_fedfs\",\n )\n\n\ndef plot(lines: List[Tuple[str, List[Tuple[int, float]]]], filename: str) -> None:\n \"\"\"Plot a single line chart.\"\"\"\n values = [np.array([x * 100 for _, x in val]) for _, val in lines]\n labels = [label for label, _ in lines]\n line_chart(\n values, labels, \"Round\", \"Accuracy\", filename=filename, y_floor=0, y_ceil=100,\n )\n\n\ndef main() -> None:\n \"\"\"Call all plot functions.\"\"\"\n accuracy_t10()\n accuracy_t12()\n accuracy_t14()\n accuracy_t16()\n accuracy_fedavg_vs_fedfs()\n wall_clock_time_fedavg_vs_fedfs()\n\n\nif __name__ == \"__main__\":\n main()\n",
"# Copyright 2020 Adap GmbH. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Minimal example on how to start a simple Flower server.\"\"\"\n\n\nimport argparse\nfrom typing import Callable, Dict, Optional, Tuple\n\nimport torch\nimport torchvision\n\nimport flwr as fl\n\nfrom . import DEFAULT_SERVER_ADDRESS, cifar\n\n# pylint: disable=no-member\nDEVICE = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n# pylint: enable=no-member\n\n\ndef main() -> None:\n \"\"\"Start server and train five rounds.\"\"\"\n parser = argparse.ArgumentParser(description=\"Flower\")\n parser.add_argument(\n \"--server_address\",\n type=str,\n default=DEFAULT_SERVER_ADDRESS,\n help=f\"gRPC server address (default: {DEFAULT_SERVER_ADDRESS})\",\n )\n parser.add_argument(\n \"--rounds\",\n type=int,\n default=1,\n help=\"Number of rounds of federated learning (default: 1)\",\n )\n parser.add_argument(\n \"--sample_fraction\",\n type=float,\n default=1.0,\n help=\"Fraction of available clients used for fit/evaluate (default: 1.0)\",\n )\n parser.add_argument(\n \"--min_sample_size\",\n type=int,\n default=2,\n help=\"Minimum number of clients used for fit/evaluate (default: 2)\",\n )\n parser.add_argument(\n \"--min_num_clients\",\n type=int,\n default=2,\n help=\"Minimum number of available clients required for sampling (default: 2)\",\n )\n parser.add_argument(\n \"--log_host\", type=str, help=\"Logserver address (no default)\",\n )\n args = parser.parse_args()\n\n # Configure logger\n fl.common.logger.configure(\"server\", host=args.log_host)\n\n # Load evaluation data\n _, testset = cifar.load_data()\n\n # Create client_manager, strategy, and server\n client_manager = fl.server.SimpleClientManager()\n strategy = fl.server.strategy.DefaultStrategy(\n fraction_fit=args.sample_fraction,\n min_fit_clients=args.min_sample_size,\n min_available_clients=args.min_num_clients,\n eval_fn=get_eval_fn(testset),\n on_fit_config_fn=fit_config,\n )\n server = fl.server.Server(client_manager=client_manager, strategy=strategy)\n\n # Run server\n fl.server.start_server(\n args.server_address, server, config={\"num_rounds\": args.rounds},\n )\n\n\ndef fit_config(rnd: int) -> Dict[str, str]:\n \"\"\"Return a configuration with static batch size and (local) epochs.\"\"\"\n config = {\n \"epoch_global\": str(rnd),\n \"epochs\": str(1),\n \"batch_size\": str(32),\n }\n return config\n\n\ndef get_eval_fn(\n testset: torchvision.datasets.CIFAR10,\n) -> Callable[[fl.common.Weights], Optional[Tuple[float, float]]]:\n \"\"\"Return an evaluation function for centralized evaluation.\"\"\"\n\n def evaluate(weights: fl.common.Weights) -> Optional[Tuple[float, float]]:\n \"\"\"Use the entire CIFAR-10 test set for evaluation.\"\"\"\n model = cifar.load_model()\n model.set_weights(weights)\n model.to(DEVICE)\n testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False)\n return cifar.test(model, 
testloader, device=DEVICE)\n\n return evaluate\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.array"
],
[
"torch.utils.data.DataLoader",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
strawlab/flyvr | [
"335892cae740e53e82e07b526e1ba53fbd34b0ce",
"335892cae740e53e82e07b526e1ba53fbd34b0ce",
"335892cae740e53e82e07b526e1ba53fbd34b0ce"
] | [
"src/freemovr_engine/cvnumpy.py",
"src/freemovr_engine/plot_utils.py",
"src/freemovr_engine/simple_geom.py"
] | [
"import numpy as np\nimport cv2\n\ndef rodrigues2matrix_cv(params):\n rvec = np.array(params,dtype=np.float64)\n rvec.shape = (1,3)\n Rmat, jacobian = cv2.Rodrigues(rvec)\n return Rmat\n\ndef rodrigues2matrix(params):\n # Written after the docs at\n # http://opencv.itseez.com/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#rodrigues\n\n try:\n rvec = np.array(params,dtype=np.float)\n rvec.shape = (1,3)\n except:\n print('bad rvec',rvec)\n raise\n\n theta = np.sqrt(np.sum(rvec**2))\n if theta==0:\n rvec = rvec\n else:\n rvec = rvec/theta\n r = rvec[0] # drop dim\n\n s = np.sin(theta)\n c = np.cos(theta)\n R = c*np.eye(3) + (1-c)*rvec*rvec.T + s*np.array([[0, -r[2], r[1]],\n [r[2], 0, -r[0]],\n [-r[1], r[0], 0]])\n\n # -R.T might also be considered a valid rotation matrix, but it\n # -does not have an eigenvector of 1.\n\n return R\n\ndef matrix2rodrigues(R):\n Rmat = np.array(R,dtype=np.float64)\n assert Rmat.shape == (3,3)\n rvec, jacobian = cv2.Rodrigues(Rmat)\n return rvec\n\ndef rodrigues2angle_axis(params):\n rvec = np.array(params)\n rvec.shape = (1,3)\n\n theta = np.sqrt(np.sum(rvec**2))\n if theta==0:\n rvec = rvec\n else:\n rvec = rvec/theta\n r = rvec[0] # drop dim\n return theta, r\n",
"import numpy as np\n\nimport matplotlib.pyplot as plt\n\nfrom pymvg.plot_utils import plot_camera\n\ndef get_3d_verts(geom):\n allw = []\n res_u = 32\n res_v = 5\n for tc1 in np.linspace(0,1,res_v):\n tc = np.vstack( (\n np.linspace(0,1.,res_u),\n tc1*np.ones( (res_u,) ),\n )).T\n world = geom.model.texcoord2worldcoord(tc)\n allw.append(world)\n\n allw = np.concatenate(allw)\n return allw\n\ndef plot_camera( ax, display, scale=0.2):\n C = display.get_camcenter()\n C.shape=(3,)\n ax.plot( [C[0]], [C[1]], [C[2]], 'ko', ms=5 )\n\n world_coords = display.project_camera_frame_to_3d( [[scale,0,0],\n [0,scale,0],\n [0,0,scale],\n [0,0,-scale],\n [0,0,0],\n [0,scale,0],\n [0,0,scale]] )\n\n for i in range(3):\n c = 'rgb'[i]\n vv = world_coords[i]\n v = np.vstack( ([C],[vv]) )\n ax.plot( v[:,0], v[:,1], v[:,2], c+'-' )\n\n uv_raw = np.array([[0,0],\n [0,display.height],\n [display.width, display.height],\n [display.width, 0],\n [0,0]])\n pts3d_near = display.project_pixel_to_3d_ray( uv_raw, distorted=True, distance=0.1*scale)\n pts3d_far = display.project_pixel_to_3d_ray( uv_raw, distorted=True, distance=scale)\n # ring at near depth\n ax.plot( pts3d_near[:,0], pts3d_near[:,1], pts3d_near[:,2], 'k-' )\n # ring at far depth\n ax.plot( pts3d_far[:,0], pts3d_far[:,1], pts3d_far[:,2], 'k-' )\n # connectors\n for i in range(len(pts3d_near)-1):\n pts3d = np.vstack((pts3d_near[i,:],pts3d_far[i,:]))\n ax.plot( pts3d[:,0], pts3d[:,1], pts3d[:,2], 'k-' )\n\n ax.text( C[0], C[1], C[2], display.name )\n ax.text( pts3d_far[0,0], pts3d_far[0,1], pts3d_far[0,2], 'UL' )\n",
"# -*- Mode: python; tab-width: 4; indent-tabs-mode: nil -*-\n\n# ROS imports\nimport roslib; roslib.load_manifest('freemovr_engine')\nimport rosbag\n\n# standard Python stuff\nimport json\nimport numpy as np\nimport freemovr_engine.fixup_path as fixup_path\n\nclass Vec3:\n def __init__(self,x=0, y=0, z=0):\n self.x=x\n self.y=y\n self.z=z\n\n def to_dict(self):\n #this dict is usually used for serializing, and some libraries have trouble serializing\n #numpy types (im looking at you ROS parameter server API)\n return dict(x=float(self.x),y=float(self.y),z=float(self.z))\n\ndef point_dict_to_vec(d):\n return Vec3(**d)\n\ndef range_0_2pi(angle):\n \"\"\"put angle in range [0,2*pi]\"\"\"\n # Given: np.fmod( -1, 3) == -1\n pi2 = 2*np.pi\n return np.fmod((np.fmod(angle,pi2) + pi2),pi2)\n\nclass ModelBase(object):\n def get_relative_distance_to_first_surface(self, a, b):\n \"\"\"return relative distance to surface from point a in direction of point b.\n\n a is Nx3 array of points\n b is Nx3 array of points\n\n Given point A and point B, the vector S is B-A. Find t such\n that tS + A is on the surface of the model.\n\n return length N vector of relative distances\n \"\"\"\n raise NotImplementedError(\n 'derived class must provide implementation in %r'%self)\n\n def get_first_surface(self, a, b):\n \"\"\"return point on surface closest to point a in direction of point b.\n\n a is Nx3 array of points\n b is Nx3 array of points\n\n return Nx3 array of points\n \"\"\"\n raise NotImplementedError(\n 'derived class must provide implementation in %r'%self)\n\n def to_geom_dict(self):\n raise NotImplementedError(\n 'derived class must provide implementation in %r'%self)\n\n def get_center(self):\n return self.center_arr\n\nclass Cylinder(ModelBase):\n def __init__(self, base=None, axis=None, radius=None):\n self.base = point_dict_to_vec(base)\n self.axis = point_dict_to_vec(axis)\n self.radius = radius\n if self.base.x != 0 or self.base.y != 0 or self.base.z != 0:\n raise NotImplementedError(\"not tested when cylinder not at origin\")\n if self.axis.x != 0 or self.axis.y != 0:\n raise NotImplementedError(\"only right cylinder currently supported\")\n if self.axis.z <= 0:\n raise NotImplementedError(\"only cylinder z>0 currently supported\")\n\n\n # keep in sync with DisplaySurfaceGeometry.cpp\n self._radius = radius\n self._matrix = np.eye(3) # currently we're forcing vertical cylinder, so this is OK\n self._height = self.axis.z - self.base.z\n self._base = np.expand_dims(np.array( (self.base.x, self.base.y, self.base.z) ),1)\n self.center_arr = self._base[:,0] + np.array((0,0,self._height*0.5))\n super(Cylinder,self).__init__()\n\n def __repr__(self):\n return 'Cylinder( base=%r, axis=%r, radius=%r )'%(self._base[:,0].tolist(), self.axis, self.radius )\n\n def to_geom_dict(self):\n return dict(\n axis=self.axis.to_dict(),\n base=self.base.to_dict(),\n radius=float(self.radius),\n model=\"cylinder\")\n\n def texcoord2worldcoord(self,tc):\n # Parse inputs\n tc = np.array(tc,copy=False)\n assert tc.ndim==2\n assert tc.shape[1]==2\n tc = tc.T\n\n # keep in sync with DisplaySurfaceGeometry.cpp\n frac_theta = tc[0]\n frac_height = tc[1]\n\n angle = frac_theta * 2.0*np.pi + np.pi\n c = np.cos(angle)\n s = np.sin(angle)\n r = self._radius\n\n vec = np.vstack((c*r, s*r, frac_height*self._height))\n result = np.dot( self._matrix, vec ) + self._base\n return result.T\n\n def worldcoord2texcoord(self,wc):\n # Parse inputs\n wc = np.array(wc,copy=False)\n assert wc.ndim==2\n assert wc.shape[1]==3\n wc = wc.T\n\n 
x,y,z = wc\n x0 = x-self.base.x\n y0 = y-self.base.y\n z0 = z-self.base.z\n\n angle = np.arctan2( y0, x0 )\n height = z0\n\n tc0 = range_0_2pi(angle-np.pi)/(2*np.pi)\n tc1 = z0/self._height\n result = np.vstack((tc0,tc1))\n return result.T\n\n def worldcoord2normal(self,wc):\n wc = np.array(wc,copy=False)\n assert wc.ndim==2\n assert wc.shape[1]==3\n wc = wc.T\n\n x,y,z = wc\n x0 = x-self.base.x\n y0 = y-self.base.y\n r = np.sqrt( x0**2 + y0**2 )\n\n result = np.vstack( (x0/r, y0/r, z*0) )\n return result.T\n\n def get_relative_distance_to_first_surface(self, a, b):\n # See ModelBase.get_relative_distance_to_first_surface for docstring\n a = np.array(a,copy=False)\n assert a.ndim==2\n assert a.shape[1]==3\n inshape = a.shape\n\n b = np.array(b,copy=False)\n assert b.ndim==2\n assert b.shape[1]==3\n assert b.shape==inshape\n\n # Since our cylinder is upright, we project our line into 2D,\n # solve for the intersection with the circle.\n\n a = a.T\n b = b.T\n\n # Move so that cylinder base is at (0,0).\n ax = a[0] - self.base.x\n ay = a[1] - self.base.y\n az = a[2] - self.base.z\n\n bx = b[0] - self.base.x\n by = b[1] - self.base.y\n bz = b[2] - self.base.z\n\n del a, b\n\n # Now create vector between points a and b\n sx = bx-ax\n sy = by-ay\n sz = bz-az\n r = self.radius\n\n old_settings = np.seterr(invalid='ignore') # we expect some nans below\n # Solve for the intersections between line and circle (see sympy_line_circle.py for math)\n t0 = (-ax*sx - ay*sy + (-ax**2*sy**2 + 2*ax*ay*sx*sy - ay**2*sx**2 + r**2*sx**2 + r**2*sy**2)**(0.5))/(sx**2 + sy**2)\n t1 = (ax*sx + ay*sy + (-ax**2*sy**2 + 2*ax*ay*sx*sy - ay**2*sx**2 + r**2*sx**2 + r**2*sy**2)**(0.5))/(-sx**2 - sy**2)\n tt = np.vstack((t0,t1))\n np.seterr(**old_settings)\n\n # We want t to be > 0 (in direction from camera center to\n # point) but the closest one, so the smallest value meeting\n # this criterion.\n\n tt[tt <= 0] = np.nan # behind camera - invalid\n\n # find Z coordinate of each intersection\n zz = az+sz*tt\n\n # intersections not on cylinder are invalid\n tt[zz < 0] = np.nan\n tt[zz > self.axis.z] = np.nan\n\n tmin = np.nanmin(tt, axis=0) # find closest to camera\n return tmin\n get_relative_distance_to_first_surface.__doc__ = ModelBase.get_relative_distance_to_first_surface.__doc__ # inherit docstring\n\n def get_first_surface(self,a,b):\n # See ModelBase.get_first_surface for docstring\n tmin = self.get_relative_distance_to_first_surface(a,b)\n\n a = np.array(a,copy=False)\n b = np.array(b,copy=False)\n inshape = a.shape\n\n a = a.T\n b = b.T\n\n # Move so that cylinder base is at (0,0).\n ax = a[0] - self.base.x\n ay = a[1] - self.base.y\n az = a[2] - self.base.z\n\n bx = b[0] - self.base.x\n by = b[1] - self.base.y\n bz = b[2] - self.base.z\n\n del a, b\n\n # Now create vector between points a and b\n sx = bx-ax\n sy = by-ay\n sz = bz-az\n\n x = ax+sx*tmin\n y = ay+sy*tmin\n z = az+sz*tmin\n\n result = np.vstack((x,y,z)).T\n assert result.shape==inshape\n return result\n get_first_surface.__doc__ = ModelBase.get_first_surface.__doc__ # inherit docstring\n\nclass Sphere(ModelBase):\n def __init__(self, center=None, radius=None):\n self.center = point_dict_to_vec(center)\n self.radius = radius\n\n # keep in sync with DisplaySurfaceGeometry.cpp\n self._radius = radius\n self._center = np.expand_dims(np.array( (self.center.x, self.center.y, self.center.z) ),1)\n self.center_arr = self._center[:,0]\n super(Sphere,self).__init__()\n\n def __repr__(self):\n return 'Sphere( center=%r, radius=%r )'%(self._center[:,0].tolist(), 
self.radius )\n\n def to_geom_dict(self):\n return dict(\n center=self.center.to_dict(),\n radius=float(self.radius),\n model=\"sphere\")\n\n def texcoord2worldcoord(self,tc):\n # Parse inputs\n tc = np.array(tc,copy=False)\n assert tc.ndim==2\n assert tc.shape[1]==2\n tc = tc.T\n\n # keep in sync with DisplaySurfaceGeometry.cpp\n frac_az = tc[0]\n frac_el = tc[1]\n\n az = frac_az * 2.0*np.pi # 0 - 2pi\n el = frac_el*np.pi - np.pi/2 # -pi/2 - pi/2\n\n ca = np.cos(az)\n sa = np.sin(az)\n\n ce = np.cos(el)\n se = np.sin(el)\n\n r = self._radius\n\n vec = np.vstack((r*ca*ce, r*sa*ce, r*se))\n result = vec + self._center\n return result.T\n\n def worldcoord2texcoord(self,wc):\n # Parse inputs\n wc = np.array(wc,copy=False)\n assert wc.ndim==2\n assert wc.shape[1]==3\n wc = wc.T\n\n x,y,z = wc\n x0 = x-self.center.x\n y0 = y-self.center.y\n z0 = z-self.center.z\n r = np.sqrt( x0**2 + y0**2 + z0**2 )\n\n az = np.arctan2( y0, x0 )\n el_rad = np.arcsin( z0/r )\n el = el_rad / np.pi + 0.5\n\n tc0 = range_0_2pi(az)/(2*np.pi)\n tc1 = el\n result = np.vstack((tc0,tc1))\n return result.T\n\n def worldcoord2normal(self,wc):\n wc = np.array(wc,copy=False)\n assert wc.ndim==2\n assert wc.shape[1]==3\n wc = wc.T\n\n x,y,z = wc\n x0 = x-self.center.x\n y0 = y-self.center.y\n z0 = z-self.center.z\n r = np.sqrt( x0**2 + y0**2 + z0**2 )\n\n result = np.vstack( (x0/r, y0/r, z0/r) )\n return result.T\n\n def get_relative_distance_to_first_surface(self, a, b):\n # See ModelBase.get_relative_distance_to_first_surface for docstring\n a = np.array(a,copy=False)\n assert a.ndim==2\n assert a.shape[1]==3\n inshape = a.shape\n\n b = np.array(b,copy=False)\n assert b.ndim==2\n assert b.shape[1]==3\n assert b.shape==inshape\n\n a = a.T\n b = b.T\n\n # Move so that sphere center is at (0,0).\n ax = a[0] - self.center.x\n ay = a[1] - self.center.y\n az = a[2] - self.center.z\n\n bx = b[0] - self.center.x\n by = b[1] - self.center.y\n bz = b[2] - self.center.z\n\n del a, b\n\n # Now create vector between points a and b\n sx = bx-ax\n sy = by-ay\n sz = bz-az\n r = self.radius\n\n\n old_settings = np.seterr(invalid='ignore') # we expect some nans below\n # Solve for the intersections between line and sphere (see sympy_line_sphere.py for math)\n t0,t1 = [(ax*sx + ay*sy + az*sz + (-ax**2*sy**2 - ax**2*sz**2 + 2*ax*ay*sx*sy + 2*ax*az*sx*sz - ay**2*sx**2 - ay**2*sz**2 + 2*ay*az*sy*sz - az**2*sx**2 - az**2*sy**2 + r**2*sx**2 + r**2*sy**2 + r**2*sz**2)**(0.5))/(-sx**2 - sy**2 - sz**2), (-ax*sx - ay*sy - az*sz + (-ax**2*sy**2 - ax**2*sz**2 + 2*ax*ay*sx*sy + 2*ax*az*sx*sz - ay**2*sx**2 - ay**2*sz**2 + 2*ay*az*sy*sz - az**2*sx**2 - az**2*sy**2 + r**2*sx**2 + r**2*sy**2 + r**2*sz**2)**(0.5))/(sx**2 + sy**2 + sz**2)]\n np.seterr(**old_settings)\n\n tt = np.vstack((t0,t1))\n\n # We want t to be > 0 (in direction from camera center to\n # point) but the closest one, so the smallest value meeting\n # this criterion.\n\n tt[tt <= 0] = np.nan # behind camera - invalid\n\n tmin = np.nanmin(tt, axis=0) # find closest to camera\n return tmin\n get_relative_distance_to_first_surface.__doc__ = ModelBase.get_relative_distance_to_first_surface.__doc__ # inherit docstring\n\n def get_first_surface(self,a,b):\n # See ModelBase.get_first_surface for docstring\n tmin = self.get_relative_distance_to_first_surface(a,b)\n\n a = np.array(a,copy=False)\n inshape = a.shape\n b = np.array(b,copy=False)\n\n a = a.T\n b = b.T\n\n # Move so that sphere center is at (0,0).\n ax = a[0] - self.center.x\n ay = a[1] - self.center.y\n az = a[2] - self.center.z\n\n bx 
= b[0] - self.center.x\n by = b[1] - self.center.y\n bz = b[2] - self.center.z\n\n del a, b\n\n # Now create vector between points a and b\n sx = bx-ax\n sy = by-ay\n sz = bz-az\n\n x = ax+sx*tmin\n y = ay+sy*tmin\n z = az+sz*tmin\n\n result = np.vstack((x+self.center.x,y+self.center.y,z+self.center.z)).T\n assert result.shape==inshape\n return result\n get_first_surface.__doc__ = ModelBase.get_first_surface.__doc__ # inherit docstring\n\nclass PlanarRectangle(ModelBase):\n def __init__(self, lowerleft=None, upperleft=None, lowerright=None):\n self.left_lower_corner = point_dict_to_vec(lowerleft)\n self.left_upper_corner = point_dict_to_vec(upperleft)\n self.right_lower_corner = point_dict_to_vec(lowerright)\n\n # keep in sync with DisplaySurfaceGeometry.cpp\n self._left_lower_corner = np.array( (self.left_lower_corner.x,\n self.left_lower_corner.y,\n self.left_lower_corner.z),\n dtype=np.float )\n self._left_upper_corner = np.array( (self.left_upper_corner.x,\n self.left_upper_corner.y,\n self.left_upper_corner.z),\n dtype=np.float )\n self._right_lower_corner = np.array( (self.right_lower_corner.x,\n self.right_lower_corner.y,\n self.right_lower_corner.z),\n dtype=np.float )\n\n self._dir_u = self._right_lower_corner - self._left_lower_corner\n self._dir_v = self._left_upper_corner - self._left_lower_corner\n self.center_arr = self._left_lower_corner + 0.5*self._dir_u + 0.5*self._dir_v\n self._normal = np.cross( self._dir_u, self._dir_v )\n super(PlanarRectangle,self).__init__()\n\n def __repr__(self):\n return 'PlanarRectangle( lowerleft=%r, upperleft=%r, lowerright=%r )'%(\n self._left_lower_corner[:,0].tolist(),\n self._left_upper_corner[:,0].tolist(),\n self._right_lower_corner[:,0].tolist(),\n )\n\n def to_geom_dict(self):\n return dict(\n lowerleft=self.left_lower_corner.to_dict(),\n upperleft=self.left_upper_corner.to_dict(),\n lowerright=self.right_lower_corner.to_dict(),\n model=\"planar_rectangle\")\n\n def texcoord2worldcoord(self,tc):\n # Parse inputs\n tc = np.array(tc,copy=False)\n assert tc.ndim==2\n assert tc.shape[1]==2\n tex_u,tex_v = tc.T\n\n # keep in sync with DisplaySurfaceGeometry.cpp\n self._dir_u[:,np.newaxis] * tex_u[np.newaxis]\n self._dir_v[:,np.newaxis] * tex_v[np.newaxis]\n\n result = self._left_lower_corner[:,np.newaxis] \\\n + self._dir_u[:,np.newaxis] * tex_u[np.newaxis] \\\n + self._dir_v[:,np.newaxis] * tex_v[np.newaxis]\n return result.T\n\n def worldcoord2texcoord(self,wc):\n # Parse inputs\n wc = np.array(wc,copy=False)\n assert wc.ndim==2\n assert wc.shape[1]==3\n wc = wc.T\n\n x,y,z = wc\n x0 = x-self.left_lower_corner.x\n y0 = y-self.left_lower_corner.y\n z0 = z-self.left_lower_corner.z\n wc = np.vstack((x0,y0,z0))\n u = np.dot( self._dir_u, wc )\n v = np.dot( self._dir_v, wc )\n result = np.vstack((u,v))\n return result.T\n\n def worldcoord2normal(self,wc):\n wc = np.array(wc,copy=False)\n assert wc.ndim==2\n assert wc.shape[1]==3\n N = wc.shape[0]\n\n one_sz = np.ones((1,N))\n bad = np.isnan(wc[:,0])\n result = (self._normal[:,np.newaxis]*one_sz).T\n result[bad] = np.nan\n return result\n\n def get_relative_distance_to_first_surface(self, a, b):\n # See ModelBase.get_relative_distance_to_first_surface for docstring\n a = np.array(a,copy=False)\n assert a.ndim==2\n assert a.shape[1]==3\n inshape = a.shape\n\n b = np.array(b,copy=False)\n assert b.ndim==2\n assert b.shape[1]==3\n assert b.shape==inshape\n\n # See http://en.wikipedia.org/wiki/Line-plane_intersection\n # especially the \"Algebraic form\" section.\n\n # Create variables according to 
wikipedia notation linked above\n l = b-a\n l0 = a\n n = self._normal\n p0 = np.array( [self.left_lower_corner.x,\n self.left_lower_corner.y,\n self.left_lower_corner.z],\n dtype=np.float)\n\n # Now, do the math...\n\n old_settings = np.seterr(invalid='ignore',divide='ignore') # we expect some nans below\n d = np.dot((p0-l0),n)/np.dot(l,n)\n d[np.isinf(d)] = np.nan # don't let infinity in\n d[d<0] = np.nan # don't look backwards, either\n np.seterr(**old_settings)\n return d\n get_relative_distance_to_first_surface.__doc__ = ModelBase.get_relative_distance_to_first_surface.__doc__ # inherit docstring\n\n def get_first_surface(self,a,b):\n # See ModelBase.get_first_surface for docstring\n d = self.get_relative_distance_to_first_surface(a,b)\n\n a = np.array(a,copy=False)\n assert a.ndim==2\n assert a.shape[1]==3\n inshape = a.shape\n\n b = np.array(b,copy=False)\n assert b.ndim==2\n assert b.shape[1]==3\n assert b.shape==inshape\n\n l = b-a\n l0 = a\n d = d[:,np.newaxis]\n pt = d*l+l0\n return pt\n get_first_surface.__doc__ = ModelBase.get_first_surface.__doc__ # inherit docstring\n\ndef get_distance_between_point_and_ray( c, a, b ):\n \"\"\"return distance between point c and ray from a in direction of point b.\n\n c is Nx3 array of points\n a is Nx3 array of points\n b is Nx3 array of points\n\n return Nx3 array of points\n \"\"\"\n c = np.array(c,copy=False)\n assert c.ndim==2\n assert c.shape[1]==3\n inshape = c.shape\n\n a = np.array(a,copy=False)\n assert a.ndim==2\n assert a.shape[1]==3\n assert a.shape==inshape\n\n b = np.array(b,copy=False)\n assert b.ndim==2\n assert b.shape[1]==3\n assert b.shape==inshape\n\n c = c.T\n a = a.T\n b = b.T\n\n # Move so that sphere center is at (0,0).\n ax = a[0] - c[0]\n ay = a[1] - c[1]\n az = a[2] - c[2]\n\n bx = b[0] - c[0]\n by = b[1] - c[1]\n bz = b[2] - c[2]\n\n del a, b\n\n # Now create vector between points a and b\n sx = bx-ax\n sy = by-ay\n sz = bz-az\n\n # See sympy_line_point.py\n t = -(ax*sx + ay*sy + az*sz)/(sx**2 + sy**2 + sz**2)\n t = np.max(t,0) # get point a if t opposite from b\n\n # find the point\n x = ax+sx*t\n y = ay+sy*t\n z = az+sz*t\n verts = np.vstack((x,y,z))\n\n # now find the distance\n dist = np.sqrt(np.sum((verts-c)**2,axis=0))\n return dist\n\nclass Geometry:\n def __init__(self, filename=None, geom_dict=None):\n if filename and not geom_dict:\n geom_dict = json.loads( open(filename).read() )\n elif geom_dict and not filename:\n pass\n else:\n raise Exception(\"must supply filename OR geometry dict (but not both)\")\n\n if geom_dict['model']=='cylinder':\n self.model = Cylinder(base=geom_dict['base'],\n axis=geom_dict['axis'],\n radius=geom_dict['radius'])\n elif geom_dict['model']=='sphere':\n self.model = Sphere(center=geom_dict['center'],\n radius=geom_dict['radius'])\n elif geom_dict['model']=='planar_rectangle':\n kwargs = geom_dict.copy()\n del kwargs['model']\n self.model = PlanarRectangle(**kwargs)\n elif geom_dict['model']=='from_file':\n import PyDisplaySurfaceArbitraryGeometry as pdsag\n self.model = pdsag.ArbitraryGeometry(\n filename=fixup_path.fixup_path( geom_dict['filename'] ),\n precision=geom_dict.get('precision',1e-6))\n else:\n raise ValueError(\"unknown model type: %s\"%geom_dict['model'])\n\n def compute_for_camera_view(self, camera, what='world_coords'):\n shape = (camera.height, camera.width)\n y = np.expand_dims(np.arange(camera.height),1)\n x = np.expand_dims(np.arange(camera.width),0)\n\n XX, YY = np.broadcast_arrays(x, y)\n assert XX.shape == shape\n assert YY.shape == shape\n\n XX = 
XX.flatten()\n YY = YY.flatten()\n\n distorted = np.vstack((XX,YY)).T\n\n ray = camera.project_pixel_to_3d_ray(distorted,\n distorted=True,\n distance=1.0 )\n\n camcenter = camera.camcenter_like(ray)\n\n if what=='world_coords':\n world_coords = self.model.get_first_surface(camcenter,ray)\n rx, ry, rz = world_coords.T\n # reshape into image-sized arrays\n rx.shape = (camera.height, camera.width, 1)\n ry.shape = (camera.height, camera.width, 1)\n rz.shape = (camera.height, camera.width, 1)\n output = np.concatenate((rx,ry,rz),axis=2)\n assert output.shape == (camera.height, camera.width, 3)\n\n elif what == 'texture_coords':\n world_coords = self.model.get_first_surface(camcenter,ray)\n texcoords = self.model.worldcoord2texcoord(world_coords)\n tc0, tc1 = texcoords.T\n # reshape into image-sized arrays\n tc0.shape = (camera.height, camera.width, 1)\n tc1.shape = (camera.height, camera.width, 1)\n output = np.concatenate((tc0,tc1),axis=2)\n assert output.shape == (camera.height, camera.width, 2)\n\n elif what == 'distance':\n distance = self.model.get_relative_distance_to_first_surface(camcenter,ray)\n distance.shape = (camera.height, camera.width)\n output = distance\n\n elif what == 'incidence_angle':\n world_coords = self.model.get_first_surface(camcenter,ray)\n surface_normal = self.model.worldcoord2normal(world_coords)\n projector_dir = -ray\n dot_product = np.sum(projector_dir*surface_normal,axis=1)\n angle = np.arccos(dot_product)\n angle.shape = (camera.height, camera.width)\n output = angle\n\n return output\n\ndef angle_between_vectors(v1, v2):\n dot = np.dot(v1, v2)\n len_a = np.sqrt(np.dot(v1, v1))\n len_b = np.sqrt(np.dot(v2, v2))\n if len_a == 0 or len_b == 0:\n return 0\n return np.arccos(dot / (len_a * len_b))\n\ndef tcs_to_beachball(farr):\n assert farr.ndim == 3\n assert farr.shape[2] == 2\n u = farr[:,:,0]\n v = farr[:,:,1]\n\n good = ~np.isnan( u )\n assert np.allclose( good, ~np.isnan(v) )\n\n assert np.all( u[good] >= 0 )\n assert np.all( u[good] <= 1 )\n assert np.all( v[good] >= 0 )\n assert np.all( v[good] <= 1 )\n\n hf = u*4 # horiz float\n vf = v*2 # vert float\n\n hi = np.floor( hf ) # horiz int (0,1,2,3)\n hi[hi==4.0] = 3.0\n vi = np.floor( vf ) # vert int (0,1)\n vi[vi==2.0] = 1.0\n\n iif = hi + 4*vi + 1 # (1,2,3,4,5,6,7,8)\n ii = iif.astype( np.uint8 ) # nan -> 0\n\n colors = np.array( [ (0,0,0), # black\n\n (255,0,0), # red\n (0,255,0), # green\n (0, 0, 255), # blue\n (255, 0, 255),\n\n (255, 0, 255),\n (255,0,0), # red\n (0,255,0), # green\n (0, 0, 255), # blue\n ])\n bbim = colors[ ii ]\n return bbim\n"
] | [
[
"numpy.eye",
"numpy.cos",
"numpy.sin",
"numpy.array",
"numpy.sum"
],
[
"numpy.linspace",
"numpy.ones",
"numpy.concatenate",
"numpy.array",
"numpy.vstack"
],
[
"numpy.dot",
"numpy.sqrt",
"numpy.nanmin",
"numpy.all",
"numpy.max",
"numpy.arctan2",
"numpy.seterr",
"numpy.concatenate",
"numpy.cross",
"numpy.fmod",
"numpy.arcsin",
"numpy.arange",
"numpy.eye",
"numpy.sin",
"numpy.isnan",
"numpy.arccos",
"numpy.floor",
"numpy.broadcast_arrays",
"numpy.array",
"numpy.sum",
"numpy.cos",
"numpy.ones",
"numpy.isinf",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kjordahl/pandas | [
"e660c058a662426afc4d8855aabf4677f01b4a4c"
] | [
"pandas/tests/test_common.py"
] | [
"# -*- coding: utf-8 -*-\nimport collections\nfrom datetime import datetime\nimport re\n\nimport nose\nfrom nose.tools import assert_equal\nimport numpy as np\nfrom pandas.tslib import iNaT, NaT\nfrom pandas import Series, DataFrame, date_range, DatetimeIndex, Timestamp, Float64Index\nfrom pandas import compat\nfrom pandas.compat import range, long, lrange, lmap, u\nfrom pandas.core.common import notnull, isnull, array_equivalent\nimport pandas.core.common as com\nimport pandas.util.testing as tm\nimport pandas.core.config as cf\n\n_multiprocess_can_split_ = True\n\n\ndef test_mut_exclusive():\n msg = \"mutually exclusive arguments: '[ab]' and '[ab]'\"\n with tm.assertRaisesRegexp(TypeError, msg):\n com._mut_exclusive(a=1, b=2)\n assert com._mut_exclusive(a=1, b=None) == 1\n assert com._mut_exclusive(major=None, major_axis=None) is None\n\n\ndef test_is_sequence():\n is_seq = com.is_sequence\n assert(is_seq((1, 2)))\n assert(is_seq([1, 2]))\n assert(not is_seq(\"abcd\"))\n assert(not is_seq(u(\"abcd\")))\n assert(not is_seq(np.int64))\n\n class A(object):\n def __getitem__(self):\n return 1\n\n assert(not is_seq(A()))\n\ndef test_get_callable_name():\n from functools import partial\n getname = com._get_callable_name\n\n def fn(x):\n return x\n lambda_ = lambda x: x\n part1 = partial(fn)\n part2 = partial(part1)\n class somecall(object):\n def __call__(self):\n return x\n\n assert getname(fn) == 'fn'\n assert getname(lambda_)\n assert getname(part1) == 'fn'\n assert getname(part2) == 'fn'\n assert getname(somecall()) == 'somecall'\n assert getname(1) is None\n\n\ndef test_notnull():\n assert notnull(1.)\n assert not notnull(None)\n assert not notnull(np.NaN)\n\n with cf.option_context(\"mode.use_inf_as_null\", False):\n assert notnull(np.inf)\n assert notnull(-np.inf)\n\n arr = np.array([1.5, np.inf, 3.5, -np.inf])\n result = notnull(arr)\n assert result.all()\n\n with cf.option_context(\"mode.use_inf_as_null\", True):\n assert not notnull(np.inf)\n assert not notnull(-np.inf)\n\n arr = np.array([1.5, np.inf, 3.5, -np.inf])\n result = notnull(arr)\n assert result.sum() == 2\n\n with cf.option_context(\"mode.use_inf_as_null\", False):\n for s in [tm.makeFloatSeries(),tm.makeStringSeries(),\n tm.makeObjectSeries(),tm.makeTimeSeries(),tm.makePeriodSeries()]:\n assert(isinstance(isnull(s), Series))\n\ndef test_isnull():\n assert not isnull(1.)\n assert isnull(None)\n assert isnull(np.NaN)\n assert not isnull(np.inf)\n assert not isnull(-np.inf)\n\n # series\n for s in [tm.makeFloatSeries(),tm.makeStringSeries(),\n tm.makeObjectSeries(),tm.makeTimeSeries(),tm.makePeriodSeries()]:\n assert(isinstance(isnull(s), Series))\n\n # frame\n for df in [tm.makeTimeDataFrame(),tm.makePeriodFrame(),tm.makeMixedDataFrame()]:\n result = isnull(df)\n expected = df.apply(isnull)\n tm.assert_frame_equal(result, expected)\n\n # panel\n for p in [ tm.makePanel(), tm.makePeriodPanel(), tm.add_nans(tm.makePanel()) ]:\n result = isnull(p)\n expected = p.apply(isnull)\n tm.assert_panel_equal(result, expected)\n\n # panel 4d\n for p in [ tm.makePanel4D(), tm.add_nans_panel4d(tm.makePanel4D()) ]:\n result = isnull(p)\n expected = p.apply(isnull)\n tm.assert_panel4d_equal(result, expected)\n\ndef test_isnull_lists():\n result = isnull([[False]])\n exp = np.array([[False]])\n assert(np.array_equal(result, exp))\n\n result = isnull([[1], [2]])\n exp = np.array([[False], [False]])\n assert(np.array_equal(result, exp))\n\n # list of strings / unicode\n result = isnull(['foo', 'bar'])\n assert(not result.any())\n\n result = 
isnull([u('foo'), u('bar')])\n assert(not result.any())\n\ndef test_isnull_nat():\n result = isnull([NaT])\n exp = np.array([True])\n assert(np.array_equal(result, exp))\n\n result = isnull(np.array([NaT], dtype=object))\n exp = np.array([True])\n assert(np.array_equal(result, exp))\n\ndef test_isnull_datetime():\n assert (not isnull(datetime.now()))\n assert notnull(datetime.now())\n\n idx = date_range('1/1/1990', periods=20)\n assert(notnull(idx).all())\n\n idx = np.asarray(idx)\n idx[0] = iNaT\n idx = DatetimeIndex(idx)\n mask = isnull(idx)\n assert(mask[0])\n assert(not mask[1:].any())\n\n # GH 9129\n pidx = idx.to_period(freq='M')\n mask = isnull(pidx)\n assert(mask[0])\n assert(not mask[1:].any())\n\n mask = isnull(pidx[1:])\n assert(not mask.any())\n\n\nclass TestIsNull(tm.TestCase):\n def test_0d_array(self):\n self.assertTrue(isnull(np.array(np.nan)))\n self.assertFalse(isnull(np.array(0.0)))\n self.assertFalse(isnull(np.array(0)))\n # test object dtype\n self.assertTrue(isnull(np.array(np.nan, dtype=object)))\n self.assertFalse(isnull(np.array(0.0, dtype=object)))\n self.assertFalse(isnull(np.array(0, dtype=object)))\n\n\ndef test_downcast_conv():\n # test downcasting\n\n arr = np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995])\n result = com._possibly_downcast_to_dtype(arr, 'infer')\n assert (np.array_equal(result, arr))\n\n arr = np.array([8., 8., 8., 8., 8.9999999999995])\n result = com._possibly_downcast_to_dtype(arr, 'infer')\n expected = np.array([8, 8, 8, 8, 9])\n assert (np.array_equal(result, expected))\n\n arr = np.array([8., 8., 8., 8., 9.0000000000005])\n result = com._possibly_downcast_to_dtype(arr, 'infer')\n expected = np.array([8, 8, 8, 8, 9])\n assert (np.array_equal(result, expected))\n\n # conversions\n\n expected = np.array([1,2])\n for dtype in [np.float64,object,np.int64]:\n arr = np.array([1.0,2.0],dtype=dtype)\n result = com._possibly_downcast_to_dtype(arr,'infer')\n tm.assert_almost_equal(result, expected)\n\n expected = np.array([1.0,2.0,np.nan])\n for dtype in [np.float64,object]:\n arr = np.array([1.0,2.0,np.nan],dtype=dtype)\n result = com._possibly_downcast_to_dtype(arr,'infer')\n tm.assert_almost_equal(result, expected)\n\n # empties\n for dtype in [np.int32,np.float64,np.float32,np.bool_,np.int64,object]:\n arr = np.array([],dtype=dtype)\n result = com._possibly_downcast_to_dtype(arr,'int64')\n tm.assert_almost_equal(result, np.array([],dtype=np.int64))\n assert result.dtype == np.int64\n\ndef test_array_equivalent():\n assert array_equivalent(np.array([np.nan, np.nan]),\n np.array([np.nan, np.nan]))\n assert array_equivalent(np.array([np.nan, 1, np.nan]),\n np.array([np.nan, 1, np.nan]))\n assert array_equivalent(np.array([np.nan, None], dtype='object'),\n np.array([np.nan, None], dtype='object'))\n assert array_equivalent(np.array([np.nan, 1+1j], dtype='complex'),\n np.array([np.nan, 1+1j], dtype='complex'))\n assert not array_equivalent(np.array([np.nan, 1+1j], dtype='complex'),\n np.array([np.nan, 1+2j], dtype='complex'))\n assert not array_equivalent(np.array([np.nan, 1, np.nan]),\n np.array([np.nan, 2, np.nan]))\n assert not array_equivalent(np.array(['a', 'b', 'c', 'd']), np.array(['e', 'e']))\n assert array_equivalent(Float64Index([0, np.nan]), Float64Index([0, np.nan]))\n assert not array_equivalent(Float64Index([0, np.nan]), Float64Index([1, np.nan]))\n assert array_equivalent(DatetimeIndex([0, np.nan]), DatetimeIndex([0, np.nan]))\n assert not array_equivalent(DatetimeIndex([0, np.nan]), DatetimeIndex([1, np.nan]))\n\ndef 
test_datetimeindex_from_empty_datetime64_array():\n for unit in [ 'ms', 'us', 'ns' ]:\n idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit))\n assert(len(idx) == 0)\n\n\ndef test_nan_to_nat_conversions():\n\n df = DataFrame(dict({\n 'A' : np.asarray(lrange(10),dtype='float64'),\n 'B' : Timestamp('20010101') }))\n df.iloc[3:6,:] = np.nan\n result = df.loc[4,'B'].value\n assert(result == iNaT)\n\n s = df['B'].copy()\n s._data = s._data.setitem(indexer=tuple([slice(8,9)]),value=np.nan)\n assert(isnull(s[8]))\n\n # numpy < 1.7.0 is wrong\n from distutils.version import LooseVersion\n if LooseVersion(np.__version__) >= '1.7.0':\n assert(s[8].value == np.datetime64('NaT').astype(np.int64))\n\n\ndef test_any_none():\n assert(com._any_none(1, 2, 3, None))\n assert(not com._any_none(1, 2, 3, 4))\n\n\ndef test_all_not_none():\n assert(com._all_not_none(1, 2, 3, 4))\n assert(not com._all_not_none(1, 2, 3, None))\n assert(not com._all_not_none(None, None, None, None))\n\n\ndef test_repr_binary_type():\n import string\n letters = string.ascii_letters\n btype = compat.binary_type\n try:\n raw = btype(letters, encoding=cf.get_option('display.encoding'))\n except TypeError:\n raw = btype(letters)\n b = compat.text_type(compat.bytes_to_str(raw))\n res = com.pprint_thing(b, quote_strings=True)\n assert_equal(res, repr(b))\n res = com.pprint_thing(b, quote_strings=False)\n assert_equal(res, b)\n\n\ndef test_adjoin():\n data = [['a', 'b', 'c'],\n ['dd', 'ee', 'ff'],\n ['ggg', 'hhh', 'iii']]\n expected = 'a dd ggg\\nb ee hhh\\nc ff iii'\n\n adjoined = com.adjoin(2, *data)\n\n assert(adjoined == expected)\n\n\ndef test_iterpairs():\n data = [1, 2, 3, 4]\n expected = [(1, 2),\n (2, 3),\n (3, 4)]\n\n result = list(com.iterpairs(data))\n\n assert(result == expected)\n\n\ndef test_split_ranges():\n def _bin(x, width):\n \"return int(x) as a base2 string of given width\"\n return ''.join(str((x >> i) & 1) for i in range(width - 1, -1, -1))\n\n def test_locs(mask):\n nfalse = sum(np.array(mask) == 0)\n\n remaining = 0\n for s, e in com.split_ranges(mask):\n remaining += e - s\n\n assert 0 not in mask[s:e]\n\n # make sure the total items covered by the ranges are a complete cover\n assert remaining + nfalse == len(mask)\n\n # exhaustively test all possible mask sequences of length 8\n ncols = 8\n for i in range(2 ** ncols):\n cols = lmap(int, list(_bin(i, ncols))) # count up in base2\n mask = [cols[i] == 1 for i in range(len(cols))]\n test_locs(mask)\n\n # base cases\n test_locs([])\n test_locs([0])\n test_locs([1])\n\n\ndef test_indent():\n s = 'a b c\\nd e f'\n result = com.indent(s, spaces=6)\n\n assert(result == ' a b c\\n d e f')\n\n\ndef test_banner():\n ban = com.banner('hi')\n assert(ban == ('%s\\nhi\\n%s' % ('=' * 80, '=' * 80)))\n\n\ndef test_map_indices_py():\n data = [4, 3, 2, 1]\n expected = {4: 0, 3: 1, 2: 2, 1: 3}\n\n result = com.map_indices_py(data)\n\n assert(result == expected)\n\n\ndef test_union():\n a = [1, 2, 3]\n b = [4, 5, 6]\n\n union = sorted(com.union(a, b))\n\n assert((a + b) == union)\n\n\ndef test_difference():\n a = [1, 2, 3]\n b = [1, 2, 3, 4, 5, 6]\n\n inter = sorted(com.difference(b, a))\n\n assert([4, 5, 6] == inter)\n\n\ndef test_intersection():\n a = [1, 2, 3]\n b = [1, 2, 3, 4, 5, 6]\n\n inter = sorted(com.intersection(a, b))\n\n assert(a == inter)\n\n\ndef test_groupby():\n values = ['foo', 'bar', 'baz', 'baz2', 'qux', 'foo3']\n expected = {'f': ['foo', 'foo3'],\n 'b': ['bar', 'baz', 'baz2'],\n 'q': ['qux']}\n\n grouped = com.groupby(values, lambda x: x[0])\n\n for 
k, v in grouped:\n assert v == expected[k]\n\n\ndef test_is_list_like():\n passes = ([], [1], (1,), (1, 2), {'a': 1}, set([1, 'a']), Series([1]),\n Series([]), Series(['a']).str)\n fails = (1, '2', object())\n\n for p in passes:\n assert com.is_list_like(p)\n\n for f in fails:\n assert not com.is_list_like(f)\n\n\ndef test_is_hashable():\n\n # all new-style classes are hashable by default\n class HashableClass(object):\n pass\n\n class UnhashableClass1(object):\n __hash__ = None\n\n class UnhashableClass2(object):\n def __hash__(self):\n raise TypeError(\"Not hashable\")\n\n hashable = (\n 1, 3.14, np.float64(3.14), 'a', tuple(), (1,), HashableClass(),\n )\n not_hashable = (\n [], UnhashableClass1(),\n )\n abc_hashable_not_really_hashable = (\n ([],), UnhashableClass2(),\n )\n\n for i in hashable:\n assert com.is_hashable(i)\n for i in not_hashable:\n assert not com.is_hashable(i)\n for i in abc_hashable_not_really_hashable:\n assert not com.is_hashable(i)\n\n # numpy.array is no longer collections.Hashable as of\n # https://github.com/numpy/numpy/pull/5326, just test\n # pandas.common.is_hashable()\n assert not com.is_hashable(np.array([]))\n\n # old-style classes in Python 2 don't appear hashable to\n # collections.Hashable but also seem to support hash() by default\n if compat.PY2:\n class OldStyleClass():\n pass\n c = OldStyleClass()\n assert not isinstance(c, collections.Hashable)\n assert com.is_hashable(c)\n hash(c) # this will not raise\n\n\ndef test_ensure_int32():\n values = np.arange(10, dtype=np.int32)\n result = com._ensure_int32(values)\n assert(result.dtype == np.int32)\n\n values = np.arange(10, dtype=np.int64)\n result = com._ensure_int32(values)\n assert(result.dtype == np.int32)\n\n\ndef test_ensure_platform_int():\n\n # verify that when we create certain types of indices\n # they remain the correct type under platform conversions\n from pandas.core.index import Int64Index\n\n # int64\n x = Int64Index([1, 2, 3], dtype='int64')\n assert(x.dtype == np.int64)\n\n pi = com._ensure_platform_int(x)\n assert(pi.dtype == np.int_)\n\n # int32\n x = Int64Index([1, 2, 3], dtype='int32')\n assert(x.dtype == np.int32)\n\n pi = com._ensure_platform_int(x)\n assert(pi.dtype == np.int_)\n\n# TODO: fix this broken test\n\n# def test_console_encode():\n# \"\"\"\n# On Python 2, if sys.stdin.encoding is None (IPython with zmq frontend)\n# common.console_encode should encode things as utf-8.\n# \"\"\"\n# if compat.PY3:\n# raise nose.SkipTest\n\n# with tm.stdin_encoding(encoding=None):\n# result = com.console_encode(u\"\\u05d0\")\n# expected = u\"\\u05d0\".encode('utf-8')\n# assert (result == expected)\n\n\ndef test_is_re():\n passes = re.compile('ad'),\n fails = 'x', 2, 3, object()\n\n for p in passes:\n assert com.is_re(p)\n\n for f in fails:\n assert not com.is_re(f)\n\n\ndef test_is_recompilable():\n passes = (r'a', u('x'), r'asdf', re.compile('adsf'),\n u(r'\\u2233\\s*'), re.compile(r''))\n fails = 1, [], object()\n\n for p in passes:\n assert com.is_re_compilable(p)\n\n for f in fails:\n assert not com.is_re_compilable(f)\n\ndef test_random_state():\n import numpy.random as npr\n # Check with seed\n state = com._random_state(5)\n assert_equal(state.uniform(), npr.RandomState(5).uniform())\n\n # Check with random state object\n state2 = npr.RandomState(10)\n assert_equal(com._random_state(state2).uniform(), npr.RandomState(10).uniform())\n\n # check with no arg random state\n assert isinstance(com._random_state(), npr.RandomState)\n\n # Error for floats or strings\n with 
tm.assertRaises(ValueError):\n com._random_state('test')\n\n with tm.assertRaises(ValueError):\n com._random_state(5.5)\n\n\ndef test_maybe_match_name():\n\n matched = com._maybe_match_name(Series([1], name='x'), Series([2], name='x'))\n assert(matched == 'x')\n\n matched = com._maybe_match_name(Series([1], name='x'), Series([2], name='y'))\n assert(matched is None)\n\n matched = com._maybe_match_name(Series([1]), Series([2], name='x'))\n assert(matched is None)\n\n matched = com._maybe_match_name(Series([1], name='x'), Series([2]))\n assert(matched is None)\n\n matched = com._maybe_match_name(Series([1], name='x'), [2])\n assert(matched == 'x')\n\n matched = com._maybe_match_name([1], Series([2], name='y'))\n assert(matched == 'y')\n\n\nclass TestTake(tm.TestCase):\n # standard incompatible fill error\n fill_error = re.compile(\"Incompatible type for fill_value\")\n\n _multiprocess_can_split_ = True\n\n def test_1d_with_out(self):\n def _test_dtype(dtype, can_hold_na):\n data = np.random.randint(0, 2, 4).astype(dtype)\n\n indexer = [2, 1, 0, 1]\n out = np.empty(4, dtype=dtype)\n com.take_1d(data, indexer, out=out)\n expected = data.take(indexer)\n tm.assert_almost_equal(out, expected)\n\n indexer = [2, 1, 0, -1]\n out = np.empty(4, dtype=dtype)\n if can_hold_na:\n com.take_1d(data, indexer, out=out)\n expected = data.take(indexer)\n expected[3] = np.nan\n tm.assert_almost_equal(out, expected)\n else:\n with tm.assertRaisesRegexp(TypeError, self.fill_error):\n com.take_1d(data, indexer, out=out)\n # no exception o/w\n data.take(indexer, out=out)\n\n _test_dtype(np.float64, True)\n _test_dtype(np.float32, True)\n _test_dtype(np.uint64, False)\n _test_dtype(np.uint32, False)\n _test_dtype(np.uint16, False)\n _test_dtype(np.uint8, False)\n _test_dtype(np.int64, False)\n _test_dtype(np.int32, False)\n _test_dtype(np.int16, False)\n _test_dtype(np.int8, False)\n _test_dtype(np.object_, True)\n _test_dtype(np.bool, False)\n\n def test_1d_fill_nonna(self):\n def _test_dtype(dtype, fill_value, out_dtype):\n data = np.random.randint(0, 2, 4).astype(dtype)\n\n indexer = [2, 1, 0, -1]\n\n result = com.take_1d(data, indexer, fill_value=fill_value)\n assert((result[[0, 1, 2]] == data[[2, 1, 0]]).all())\n assert(result[3] == fill_value)\n assert(result.dtype == out_dtype)\n\n indexer = [2, 1, 0, 1]\n\n result = com.take_1d(data, indexer, fill_value=fill_value)\n assert((result[[0, 1, 2, 3]] == data[indexer]).all())\n assert(result.dtype == dtype)\n\n _test_dtype(np.int8, np.int16(127), np.int8)\n _test_dtype(np.int8, np.int16(128), np.int16)\n _test_dtype(np.int32, 1, np.int32)\n _test_dtype(np.int32, 2.0, np.float64)\n _test_dtype(np.int32, 3.0 + 4.0j, np.complex128)\n _test_dtype(np.int32, True, np.object_)\n _test_dtype(np.int32, '', np.object_)\n _test_dtype(np.float64, 1, np.float64)\n _test_dtype(np.float64, 2.0, np.float64)\n _test_dtype(np.float64, 3.0 + 4.0j, np.complex128)\n _test_dtype(np.float64, True, np.object_)\n _test_dtype(np.float64, '', np.object_)\n _test_dtype(np.complex128, 1, np.complex128)\n _test_dtype(np.complex128, 2.0, np.complex128)\n _test_dtype(np.complex128, 3.0 + 4.0j, np.complex128)\n _test_dtype(np.complex128, True, np.object_)\n _test_dtype(np.complex128, '', np.object_)\n _test_dtype(np.bool_, 1, np.object_)\n _test_dtype(np.bool_, 2.0, np.object_)\n _test_dtype(np.bool_, 3.0 + 4.0j, np.object_)\n _test_dtype(np.bool_, True, np.bool_)\n _test_dtype(np.bool_, '', np.object_)\n\n def test_2d_with_out(self):\n def _test_dtype(dtype, can_hold_na, writeable=True):\n data 
= np.random.randint(0, 2, (5, 3)).astype(dtype)\n data.flags.writeable = writeable\n\n indexer = [2, 1, 0, 1]\n out0 = np.empty((4, 3), dtype=dtype)\n out1 = np.empty((5, 4), dtype=dtype)\n com.take_nd(data, indexer, out=out0, axis=0)\n com.take_nd(data, indexer, out=out1, axis=1)\n expected0 = data.take(indexer, axis=0)\n expected1 = data.take(indexer, axis=1)\n tm.assert_almost_equal(out0, expected0)\n tm.assert_almost_equal(out1, expected1)\n\n indexer = [2, 1, 0, -1]\n out0 = np.empty((4, 3), dtype=dtype)\n out1 = np.empty((5, 4), dtype=dtype)\n if can_hold_na:\n com.take_nd(data, indexer, out=out0, axis=0)\n com.take_nd(data, indexer, out=out1, axis=1)\n expected0 = data.take(indexer, axis=0)\n expected1 = data.take(indexer, axis=1)\n expected0[3, :] = np.nan\n expected1[:, 3] = np.nan\n tm.assert_almost_equal(out0, expected0)\n tm.assert_almost_equal(out1, expected1)\n else:\n for i, out in enumerate([out0, out1]):\n with tm.assertRaisesRegexp(TypeError, self.fill_error):\n com.take_nd(data, indexer, out=out, axis=i)\n # no exception o/w\n data.take(indexer, out=out, axis=i)\n\n for writeable in [True, False]:\n # Check that take_nd works both with writeable arrays (in which\n # case fast typed memoryviews implementation) and read-only\n # arrays alike.\n _test_dtype(np.float64, True, writeable=writeable)\n _test_dtype(np.float32, True, writeable=writeable)\n _test_dtype(np.uint64, False, writeable=writeable)\n _test_dtype(np.uint32, False, writeable=writeable)\n _test_dtype(np.uint16, False, writeable=writeable)\n _test_dtype(np.uint8, False, writeable=writeable)\n _test_dtype(np.int64, False, writeable=writeable)\n _test_dtype(np.int32, False, writeable=writeable)\n _test_dtype(np.int16, False, writeable=writeable)\n _test_dtype(np.int8, False, writeable=writeable)\n _test_dtype(np.object_, True, writeable=writeable)\n _test_dtype(np.bool, False, writeable=writeable)\n\n def test_2d_fill_nonna(self):\n def _test_dtype(dtype, fill_value, out_dtype):\n data = np.random.randint(0, 2, (5, 3)).astype(dtype)\n\n indexer = [2, 1, 0, -1]\n\n result = com.take_nd(data, indexer, axis=0, fill_value=fill_value)\n assert((result[[0, 1, 2], :] == data[[2, 1, 0], :]).all())\n assert((result[3, :] == fill_value).all())\n assert(result.dtype == out_dtype)\n\n result = com.take_nd(data, indexer, axis=1, fill_value=fill_value)\n assert((result[:, [0, 1, 2]] == data[:, [2, 1, 0]]).all())\n assert((result[:, 3] == fill_value).all())\n assert(result.dtype == out_dtype)\n\n indexer = [2, 1, 0, 1]\n\n result = com.take_nd(data, indexer, axis=0, fill_value=fill_value)\n assert((result[[0, 1, 2, 3], :] == data[indexer, :]).all())\n assert(result.dtype == dtype)\n\n result = com.take_nd(data, indexer, axis=1, fill_value=fill_value)\n assert((result[:, [0, 1, 2, 3]] == data[:, indexer]).all())\n assert(result.dtype == dtype)\n\n _test_dtype(np.int8, np.int16(127), np.int8)\n _test_dtype(np.int8, np.int16(128), np.int16)\n _test_dtype(np.int32, 1, np.int32)\n _test_dtype(np.int32, 2.0, np.float64)\n _test_dtype(np.int32, 3.0 + 4.0j, np.complex128)\n _test_dtype(np.int32, True, np.object_)\n _test_dtype(np.int32, '', np.object_)\n _test_dtype(np.float64, 1, np.float64)\n _test_dtype(np.float64, 2.0, np.float64)\n _test_dtype(np.float64, 3.0 + 4.0j, np.complex128)\n _test_dtype(np.float64, True, np.object_)\n _test_dtype(np.float64, '', np.object_)\n _test_dtype(np.complex128, 1, np.complex128)\n _test_dtype(np.complex128, 2.0, np.complex128)\n _test_dtype(np.complex128, 3.0 + 4.0j, np.complex128)\n 
_test_dtype(np.complex128, True, np.object_)\n _test_dtype(np.complex128, '', np.object_)\n _test_dtype(np.bool_, 1, np.object_)\n _test_dtype(np.bool_, 2.0, np.object_)\n _test_dtype(np.bool_, 3.0 + 4.0j, np.object_)\n _test_dtype(np.bool_, True, np.bool_)\n _test_dtype(np.bool_, '', np.object_)\n\n def test_3d_with_out(self):\n def _test_dtype(dtype, can_hold_na):\n data = np.random.randint(0, 2, (5, 4, 3)).astype(dtype)\n\n indexer = [2, 1, 0, 1]\n out0 = np.empty((4, 4, 3), dtype=dtype)\n out1 = np.empty((5, 4, 3), dtype=dtype)\n out2 = np.empty((5, 4, 4), dtype=dtype)\n com.take_nd(data, indexer, out=out0, axis=0)\n com.take_nd(data, indexer, out=out1, axis=1)\n com.take_nd(data, indexer, out=out2, axis=2)\n expected0 = data.take(indexer, axis=0)\n expected1 = data.take(indexer, axis=1)\n expected2 = data.take(indexer, axis=2)\n tm.assert_almost_equal(out0, expected0)\n tm.assert_almost_equal(out1, expected1)\n tm.assert_almost_equal(out2, expected2)\n\n indexer = [2, 1, 0, -1]\n out0 = np.empty((4, 4, 3), dtype=dtype)\n out1 = np.empty((5, 4, 3), dtype=dtype)\n out2 = np.empty((5, 4, 4), dtype=dtype)\n if can_hold_na:\n com.take_nd(data, indexer, out=out0, axis=0)\n com.take_nd(data, indexer, out=out1, axis=1)\n com.take_nd(data, indexer, out=out2, axis=2)\n expected0 = data.take(indexer, axis=0)\n expected1 = data.take(indexer, axis=1)\n expected2 = data.take(indexer, axis=2)\n expected0[3, :, :] = np.nan\n expected1[:, 3, :] = np.nan\n expected2[:, :, 3] = np.nan\n tm.assert_almost_equal(out0, expected0)\n tm.assert_almost_equal(out1, expected1)\n tm.assert_almost_equal(out2, expected2)\n else:\n for i, out in enumerate([out0, out1, out2]):\n with tm.assertRaisesRegexp(TypeError, self.fill_error):\n com.take_nd(data, indexer, out=out, axis=i)\n # no exception o/w\n data.take(indexer, out=out, axis=i)\n\n _test_dtype(np.float64, True)\n _test_dtype(np.float32, True)\n _test_dtype(np.uint64, False)\n _test_dtype(np.uint32, False)\n _test_dtype(np.uint16, False)\n _test_dtype(np.uint8, False)\n _test_dtype(np.int64, False)\n _test_dtype(np.int32, False)\n _test_dtype(np.int16, False)\n _test_dtype(np.int8, False)\n _test_dtype(np.object_, True)\n _test_dtype(np.bool, False)\n\n def test_3d_fill_nonna(self):\n def _test_dtype(dtype, fill_value, out_dtype):\n data = np.random.randint(0, 2, (5, 4, 3)).astype(dtype)\n\n indexer = [2, 1, 0, -1]\n\n result = com.take_nd(data, indexer, axis=0, fill_value=fill_value)\n assert((result[[0, 1, 2], :, :] == data[[2, 1, 0], :, :]).all())\n assert((result[3, :, :] == fill_value).all())\n assert(result.dtype == out_dtype)\n\n result = com.take_nd(data, indexer, axis=1, fill_value=fill_value)\n assert((result[:, [0, 1, 2], :] == data[:, [2, 1, 0], :]).all())\n assert((result[:, 3, :] == fill_value).all())\n assert(result.dtype == out_dtype)\n\n result = com.take_nd(data, indexer, axis=2, fill_value=fill_value)\n assert((result[:, :, [0, 1, 2]] == data[:, :, [2, 1, 0]]).all())\n assert((result[:, :, 3] == fill_value).all())\n assert(result.dtype == out_dtype)\n\n indexer = [2, 1, 0, 1]\n\n result = com.take_nd(data, indexer, axis=0, fill_value=fill_value)\n assert((result[[0, 1, 2, 3], :, :] == data[indexer, :, :]).all())\n assert(result.dtype == dtype)\n\n result = com.take_nd(data, indexer, axis=1, fill_value=fill_value)\n assert((result[:, [0, 1, 2, 3], :] == data[:, indexer, :]).all())\n assert(result.dtype == dtype)\n\n result = com.take_nd(data, indexer, axis=2, fill_value=fill_value)\n assert((result[:, :, [0, 1, 2, 3]] == data[:, :, 
indexer]).all())\n assert(result.dtype == dtype)\n\n _test_dtype(np.int8, np.int16(127), np.int8)\n _test_dtype(np.int8, np.int16(128), np.int16)\n _test_dtype(np.int32, 1, np.int32)\n _test_dtype(np.int32, 2.0, np.float64)\n _test_dtype(np.int32, 3.0 + 4.0j, np.complex128)\n _test_dtype(np.int32, True, np.object_)\n _test_dtype(np.int32, '', np.object_)\n _test_dtype(np.float64, 1, np.float64)\n _test_dtype(np.float64, 2.0, np.float64)\n _test_dtype(np.float64, 3.0 + 4.0j, np.complex128)\n _test_dtype(np.float64, True, np.object_)\n _test_dtype(np.float64, '', np.object_)\n _test_dtype(np.complex128, 1, np.complex128)\n _test_dtype(np.complex128, 2.0, np.complex128)\n _test_dtype(np.complex128, 3.0 + 4.0j, np.complex128)\n _test_dtype(np.complex128, True, np.object_)\n _test_dtype(np.complex128, '', np.object_)\n _test_dtype(np.bool_, 1, np.object_)\n _test_dtype(np.bool_, 2.0, np.object_)\n _test_dtype(np.bool_, 3.0 + 4.0j, np.object_)\n _test_dtype(np.bool_, True, np.bool_)\n _test_dtype(np.bool_, '', np.object_)\n\n def test_1d_other_dtypes(self):\n arr = np.random.randn(10).astype(np.float32)\n\n indexer = [1, 2, 3, -1]\n result = com.take_1d(arr, indexer)\n expected = arr.take(indexer)\n expected[-1] = np.nan\n tm.assert_almost_equal(result, expected)\n\n def test_2d_other_dtypes(self):\n arr = np.random.randn(10, 5).astype(np.float32)\n\n indexer = [1, 2, 3, -1]\n\n # axis=0\n result = com.take_nd(arr, indexer, axis=0)\n expected = arr.take(indexer, axis=0)\n expected[-1] = np.nan\n tm.assert_almost_equal(result, expected)\n\n # axis=1\n result = com.take_nd(arr, indexer, axis=1)\n expected = arr.take(indexer, axis=1)\n expected[:, -1] = np.nan\n tm.assert_almost_equal(result, expected)\n\n def test_1d_bool(self):\n arr = np.array([0, 1, 0], dtype=bool)\n\n result = com.take_1d(arr, [0, 2, 2, 1])\n expected = arr.take([0, 2, 2, 1])\n self.assert_numpy_array_equal(result, expected)\n\n result = com.take_1d(arr, [0, 2, -1])\n self.assertEqual(result.dtype, np.object_)\n\n def test_2d_bool(self):\n arr = np.array([[0, 1, 0],\n [1, 0, 1],\n [0, 1, 1]], dtype=bool)\n\n result = com.take_nd(arr, [0, 2, 2, 1])\n expected = arr.take([0, 2, 2, 1], axis=0)\n self.assert_numpy_array_equal(result, expected)\n\n result = com.take_nd(arr, [0, 2, 2, 1], axis=1)\n expected = arr.take([0, 2, 2, 1], axis=1)\n self.assert_numpy_array_equal(result, expected)\n\n result = com.take_nd(arr, [0, 2, -1])\n self.assertEqual(result.dtype, np.object_)\n\n def test_2d_float32(self):\n arr = np.random.randn(4, 3).astype(np.float32)\n indexer = [0, 2, -1, 1, -1]\n\n # axis=0\n result = com.take_nd(arr, indexer, axis=0)\n result2 = np.empty_like(result)\n com.take_nd(arr, indexer, axis=0, out=result2)\n tm.assert_almost_equal(result, result2)\n\n expected = arr.take(indexer, axis=0)\n expected[[2, 4], :] = np.nan\n tm.assert_almost_equal(result, expected)\n\n #### this now accepts a float32! 
# test with float64 out buffer\n out = np.empty((len(indexer), arr.shape[1]), dtype='float32')\n com.take_nd(arr, indexer, out=out) # it works!\n\n # axis=1\n result = com.take_nd(arr, indexer, axis=1)\n result2 = np.empty_like(result)\n com.take_nd(arr, indexer, axis=1, out=result2)\n tm.assert_almost_equal(result, result2)\n\n expected = arr.take(indexer, axis=1)\n expected[:, [2, 4]] = np.nan\n tm.assert_almost_equal(result, expected)\n\n def test_2d_datetime64(self):\n # 2005/01/01 - 2006/01/01\n arr = np.random.randint(long(11045376), long(11360736), (5,3))*100000000000\n arr = arr.view(dtype='datetime64[ns]')\n indexer = [0, 2, -1, 1, -1]\n\n # axis=0\n result = com.take_nd(arr, indexer, axis=0)\n result2 = np.empty_like(result)\n com.take_nd(arr, indexer, axis=0, out=result2)\n tm.assert_almost_equal(result, result2)\n\n expected = arr.take(indexer, axis=0)\n expected.view(np.int64)[[2, 4], :] = iNaT\n tm.assert_almost_equal(result, expected)\n\n result = com.take_nd(arr, indexer, axis=0,\n fill_value=datetime(2007, 1, 1))\n result2 = np.empty_like(result)\n com.take_nd(arr, indexer, out=result2, axis=0,\n fill_value=datetime(2007, 1, 1))\n tm.assert_almost_equal(result, result2)\n\n expected = arr.take(indexer, axis=0)\n expected[[2, 4], :] = datetime(2007, 1, 1)\n tm.assert_almost_equal(result, expected)\n\n # axis=1\n result = com.take_nd(arr, indexer, axis=1)\n result2 = np.empty_like(result)\n com.take_nd(arr, indexer, axis=1, out=result2)\n tm.assert_almost_equal(result, result2)\n\n expected = arr.take(indexer, axis=1)\n expected.view(np.int64)[:, [2, 4]] = iNaT\n tm.assert_almost_equal(result, expected)\n\n result = com.take_nd(arr, indexer, axis=1,\n fill_value=datetime(2007, 1, 1))\n result2 = np.empty_like(result)\n com.take_nd(arr, indexer, out=result2, axis=1,\n fill_value=datetime(2007, 1, 1))\n tm.assert_almost_equal(result, result2)\n\n expected = arr.take(indexer, axis=1)\n expected[:, [2, 4]] = datetime(2007, 1, 1)\n tm.assert_almost_equal(result, expected)\n\n\nclass TestMaybe(tm.TestCase):\n\n def test_maybe_convert_string_to_array(self):\n result = com._maybe_convert_string_to_object('x')\n tm.assert_numpy_array_equal(result, np.array(['x'], dtype=object))\n self.assertTrue(result.dtype == object)\n\n result = com._maybe_convert_string_to_object(1)\n self.assertEqual(result, 1)\n\n arr = np.array(['x', 'y'], dtype=str)\n result = com._maybe_convert_string_to_object(arr)\n tm.assert_numpy_array_equal(result, np.array(['x', 'y'], dtype=object))\n self.assertTrue(result.dtype == object)\n\n # unicode\n arr = np.array(['x', 'y']).astype('U')\n result = com._maybe_convert_string_to_object(arr)\n tm.assert_numpy_array_equal(result, np.array(['x', 'y'], dtype=object))\n self.assertTrue(result.dtype == object)\n\n # object\n arr = np.array(['x', 2], dtype=object)\n result = com._maybe_convert_string_to_object(arr)\n tm.assert_numpy_array_equal(result, np.array(['x', 2], dtype=object))\n self.assertTrue(result.dtype == object)\n\n\ndef test_dict_compat():\n data_datetime64 = {np.datetime64('1990-03-15'): 1,\n np.datetime64('2015-03-15'): 2}\n data_unchanged = {1: 2, 3: 4, 5: 6}\n expected = {Timestamp('1990-3-15'): 1, Timestamp('2015-03-15'): 2}\n assert(com._dict_compat(data_datetime64) == expected)\n assert(com._dict_compat(expected) == expected)\n assert(com._dict_compat(data_unchanged) == data_unchanged)\n\n\nif __name__ == '__main__':\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n"
] | [
[
"pandas.core.common.is_list_like",
"pandas.core.common._all_not_none",
"pandas.core.common.adjoin",
"pandas.Series",
"numpy.asarray",
"pandas.util.testing.makeObjectSeries",
"pandas.core.common.intersection",
"pandas.core.index.Int64Index",
"pandas.core.common.indent",
"pandas.util.testing.assert_frame_equal",
"pandas.util.testing.makePanel",
"pandas.core.common._ensure_platform_int",
"numpy.random.randn",
"pandas.core.common.notnull",
"pandas.util.testing.makeTimeDataFrame",
"pandas.util.testing.makePanel4D",
"pandas.core.common.is_hashable",
"pandas.core.common._maybe_convert_string_to_object",
"pandas.core.config.get_option",
"pandas.core.common._ensure_int32",
"pandas.core.common.take_nd",
"numpy.random.randint",
"numpy.arange",
"numpy.empty_like",
"pandas.DatetimeIndex",
"pandas.util.testing.assert_panel4d_equal",
"pandas.core.config.option_context",
"pandas.util.testing.makePeriodSeries",
"pandas.util.testing.assert_panel_equal",
"pandas.core.common.take_1d",
"pandas.core.common._dict_compat",
"pandas.compat.long",
"pandas.core.common.groupby",
"pandas.core.common.banner",
"pandas.compat.u",
"pandas.util.testing.assert_almost_equal",
"pandas.Float64Index",
"pandas.core.common._possibly_downcast_to_dtype",
"pandas.core.common._any_none",
"pandas.date_range",
"pandas.core.common.union",
"pandas.core.common.difference",
"numpy.array",
"numpy.random.RandomState",
"pandas.core.common._mut_exclusive",
"pandas.util.testing.makePeriodPanel",
"pandas.core.common._random_state",
"pandas.core.common.iterpairs",
"pandas.util.testing.makeTimeSeries",
"numpy.array_equal",
"pandas.core.common.map_indices_py",
"pandas.util.testing.assertRaisesRegexp",
"pandas.util.testing.makeFloatSeries",
"pandas.util.testing.makeMixedDataFrame",
"pandas.compat.bytes_to_str",
"pandas.core.common.split_ranges",
"pandas.util.testing.assertRaises",
"pandas.util.testing.makeStringSeries",
"pandas.core.common.is_re",
"numpy.datetime64",
"numpy.int16",
"pandas.util.testing.makePeriodFrame",
"pandas.core.common.isnull",
"pandas.core.common.pprint_thing",
"numpy.float64",
"pandas.core.common.is_re_compilable",
"pandas.compat.lrange",
"pandas.Timestamp",
"numpy.empty",
"pandas.compat.range"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ieliz/openvino | [
"403339f8f470c90dee6f6d94ed58644b2787f66b",
"403339f8f470c90dee6f6d94ed58644b2787f66b"
] | [
"tools/mo/openvino/tools/mo/ops/Reverse.py",
"tools/mo/openvino/tools/mo/ops/slice_like.py"
] | [
"# Copyright (C) 2018-2022 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\n\nfrom openvino.tools.mo.front.common.partial_infer.utils import int64_array\nfrom openvino.tools.mo.graph.graph import Graph\nfrom openvino.tools.mo.ops.op import Op\n\n\nclass Reverse(Op):\n op = 'Reverse'\n\n def __init__(self, graph: Graph, attrs: dict):\n mandatory_props = {\n 'type': None,\n 'axis': None,\n 'op': self.op,\n 'in_ports_count': 2,\n 'out_ports_count': 1,\n 'infer': self.infer,\n }\n super().__init__(graph, mandatory_props, attrs)\n\n @staticmethod\n def infer(node):\n input_shape = node.in_port(0).data.get_shape()\n input_value = node.in_port(0).data.get_value()\n assert input_shape is not None\n if not node.has_valid('axis'):\n assert 1 in node.in_nodes()\n assert node.in_node(1).has_valid('value')\n assert node.in_node(1).value.size == 1\n\n node['axis'] = node.in_node(1).value.item()\n node.in_port(1).disconnect()\n\n assert node.has_valid('axis')\n\n assert len(node.out_nodes()) == 1\n if input_value is not None:\n node.out_port(0).data.set_value(np.flip(input_value, node.axis))\n else:\n node.out_port(0).data.set_shape(input_shape)\n",
"# Copyright (C) 2018-2022 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\n\nfrom openvino.tools.mo.front.caffe.extractors.utils import get_canonical_axis_index\nfrom openvino.tools.mo.front.common.partial_infer.utils import int64_array\nfrom openvino.tools.mo.graph.graph import Graph\nfrom openvino.tools.mo.ops.op import Op\n\n\nclass SliceLike(Op):\n op = 'slice_like'\n enabled = True\n\n def __init__(self, graph: Graph, attrs: dict):\n assert 'axes' in attrs, 'Please set mandatory `axes` attribute for `slice_like` operation'\n super().__init__(graph, {\n 'type': None,\n 'op': self.op,\n 'in_ports_count': 2,\n 'out_ports_count': 1,\n 'infer': self.infer,\n }, attrs)\n\n @staticmethod\n def infer(node):\n input_shape = node.in_port(0).data.get_shape()\n input_value = node.in_port(0).data.get_value()\n shape_like = node.in_port(1).data.get_shape()\n\n new_shape = np.copy(input_shape)\n if node.axes is not None:\n node.axes = sorted([get_canonical_axis_index(input_shape, i) for i in node.axes])\n for i in node.axes:\n new_shape[i] = shape_like[i]\n else:\n assert input_shape.size == shape_like.size,\\\n 'Input shape ranks are inconsistent: {} and {}'.format(input_shape.size, shape_like.size)\n node.axes = int64_array(range(shape_like.size))\n new_shape = np.copy(shape_like)\n node.out_port(0).data.set_shape(new_shape)\n\n if input_value is not None:\n out_value = np.copy(input_value)\n\n slice_indexes = []\n for s in out_value.shape:\n slice_indexes.append(slice(0, s))\n\n for axis in node.axes:\n slice_indexes[axis] = slice(0, new_shape[axis])\n out_value = out_value[tuple(slice_indexes)]\n node.out_port(0).data.set_value(out_value)\n"
] | [
[
"numpy.flip"
],
[
"numpy.copy"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
intel/neural-compressor | [
"16a4a12045fcb468da4d33769aff2c1a5e2ba6ba"
] | [
"examples/baremetal/nlp/sst2/bert_mini/bert_mini_export.py"
] | [
"import argparse\n\nimport torch\nfrom transformers import BertForSequenceClassification\n\ndef export_onnx_model(args, model, onnx_model_path):\n with torch.no_grad():\n inputs = {'input_ids': torch.ones(1,args.max_len, dtype=torch.int32),\n 'attention_mask': torch.ones(1,args.max_len, dtype=torch.int32),\n 'token_type_ids': torch.ones(1,args.max_len, dtype=torch.int32)}\n outputs = model(**inputs)\n\n symbolic_names = {0: 'batch_size', 1: 'max_seq_len'}\n torch.onnx.export(model, # model being run\n (inputs['input_ids'], \n inputs['attention_mask'],\n inputs['token_type_ids']), # model input (or a tuple for\n # multiple inputs)\n onnx_model_path, # where to save the model (can be a file\n # or file-like object)\n opset_version=11, # the ONNX version to export the model\n do_constant_folding=True, # whether to execute constant folding\n input_names=['input_ids', # the model's input names\n 'input_mask',\n 'segment_ids'],\n output_names=['output'], # the model's output names\n dynamic_axes={'input_ids': symbolic_names, # variable length axes\n 'input_mask' : symbolic_names,\n 'segment_ids' : symbolic_names})\n print(\"ONNX Model exported to {0}\".format(onnx_model_path))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='Export bert onnx model',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\n '--input_dir',\n type=str,\n help='input_dir of bert model, must contain config.json')\n parser.add_argument(\n '--task_name',\n type=str,\n choices=[\"MRPC\", \"MNLI\", \"SST-2\"],\n help='tasks names of bert model')\n parser.add_argument(\n '--max_len',\n type=int,\n default=128,\n help='Maximum length of the sentence pairs')\n parser.add_argument(\n '--do_lower_case',\n type=bool,\n default=True,\n help='whether lower the tokenizer')\n parser.add_argument(\n '--output_model',\n type=str,\n default='bert_mini_sst2.onnx',\n help='path to exported model file')\n args = parser.parse_args()\n\n model = BertForSequenceClassification.from_pretrained(args.input_dir)\n export_onnx_model(args, model, args.output_model)"
] | [
[
"torch.onnx.export",
"torch.no_grad",
"torch.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
knightvishal/tensorflow | [
"5d3dd19b7146d954fc1b4e9e44e9881e75d363c1",
"5d3dd19b7146d954fc1b4e9e44e9881e75d363c1",
"5d3dd19b7146d954fc1b4e9e44e9881e75d363c1",
"5d3dd19b7146d954fc1b4e9e44e9881e75d363c1",
"5d3dd19b7146d954fc1b4e9e44e9881e75d363c1",
"07da23bfa2a9ca10cd7c1dd6bea0f85d981c013e"
] | [
"tensorflow/python/estimator/canned/dnn_linear_combined.py",
"tensorflow/python/kernel_tests/manip_ops_test.py",
"tensorflow/python/data/experimental/kernel_tests/prefetch_to_device_test.py",
"tensorflow/python/training/proximal_gradient_descent_test.py",
"tensorflow/contrib/hadoop/python/kernel_tests/hadoop_test.py",
"tensorflow/python/grappler/memory_optimizer_test.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorFlow estimators for Linear and DNN joined training models.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\n\nimport six\n\nfrom tensorflow.python.estimator import estimator\nfrom tensorflow.python.estimator.canned import dnn\nfrom tensorflow.python.estimator.canned import head as head_lib\nfrom tensorflow.python.estimator.canned import linear\nfrom tensorflow.python.estimator.canned import optimizers\nfrom tensorflow.python.feature_column import feature_column_v2\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import partitioned_variables\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops.losses import losses\nfrom tensorflow.python.summary import summary\nfrom tensorflow.python.training import sync_replicas_optimizer\nfrom tensorflow.python.training import training_util\nfrom tensorflow.python.util.tf_export import estimator_export\n\n# The default learning rates are a historical artifact of the initial\n# implementation.\n_DNN_LEARNING_RATE = 0.001\n_LINEAR_LEARNING_RATE = 0.005\n\n\ndef _check_no_sync_replicas_optimizer(optimizer):\n if isinstance(optimizer, sync_replicas_optimizer.SyncReplicasOptimizer):\n raise ValueError(\n 'SyncReplicasOptimizer does not support multi optimizers case. '\n 'Therefore, it is not supported in DNNLinearCombined model. '\n 'If you want to use this optimizer, please use either DNN or Linear '\n 'model.')\n\n\ndef _linear_learning_rate(num_linear_feature_columns):\n \"\"\"Returns the default learning rate of the linear model.\n\n The calculation is a historical artifact of this initial implementation, but\n has proven a reasonable choice.\n\n Args:\n num_linear_feature_columns: The number of feature columns of the linear\n model.\n\n Returns:\n A float.\n \"\"\"\n default_learning_rate = 1. 
/ math.sqrt(num_linear_feature_columns)\n return min(_LINEAR_LEARNING_RATE, default_learning_rate)\n\n\ndef _add_layer_summary(value, tag):\n summary.scalar('%s/fraction_of_zero_values' % tag, nn.zero_fraction(value))\n summary.histogram('%s/activation' % tag, value)\n\n\ndef _dnn_linear_combined_model_fn(features,\n labels,\n mode,\n head,\n linear_feature_columns=None,\n linear_optimizer='Ftrl',\n dnn_feature_columns=None,\n dnn_optimizer='Adagrad',\n dnn_hidden_units=None,\n dnn_activation_fn=nn.relu,\n dnn_dropout=None,\n input_layer_partitioner=None,\n config=None,\n batch_norm=False,\n linear_sparse_combiner='sum'):\n \"\"\"Deep Neural Net and Linear combined model_fn.\n\n Args:\n features: dict of `Tensor`.\n labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of dtype\n `int32` or `int64` in the range `[0, n_classes)`.\n mode: Defines whether this is training, evaluation or prediction.\n See `ModeKeys`.\n head: A `Head` instance.\n linear_feature_columns: An iterable containing all the feature columns used\n by the Linear model.\n linear_optimizer: string, `Optimizer` object, or callable that defines the\n optimizer to use for training the Linear model. Defaults to the Ftrl\n optimizer.\n dnn_feature_columns: An iterable containing all the feature columns used by\n the DNN model.\n dnn_optimizer: string, `Optimizer` object, or callable that defines the\n optimizer to use for training the DNN model. Defaults to the Adagrad\n optimizer.\n dnn_hidden_units: List of hidden units per DNN layer.\n dnn_activation_fn: Activation function applied to each DNN layer. If `None`,\n will use `tf.nn.relu`.\n dnn_dropout: When not `None`, the probability we will drop out a given DNN\n coordinate.\n input_layer_partitioner: Partitioner for input layer.\n config: `RunConfig` object to configure the runtime settings.\n batch_norm: Whether to use batch normalization after each hidden layer.\n linear_sparse_combiner: A string specifying how to reduce the linear model\n if a categorical column is multivalent. One of \"mean\", \"sqrtn\", and\n \"sum\".\n Returns:\n An `EstimatorSpec` instance.\n\n Raises:\n ValueError: If both `linear_feature_columns` and `dnn_features_columns`\n are empty at the same time, or `input_layer_partitioner` is missing,\n or features has the wrong type.\n \"\"\"\n if not isinstance(features, dict):\n raise ValueError('features should be a dictionary of `Tensor`s. 
'\n 'Given type: {}'.format(type(features)))\n if not linear_feature_columns and not dnn_feature_columns:\n raise ValueError(\n 'Either linear_feature_columns or dnn_feature_columns must be defined.')\n\n num_ps_replicas = config.num_ps_replicas if config else 0\n input_layer_partitioner = input_layer_partitioner or (\n partitioned_variables.min_max_variable_partitioner(\n max_partitions=num_ps_replicas,\n min_slice_size=64 << 20))\n\n shared_state_manager = feature_column_v2.maybe_create_shared_state_manager(\n list(linear_feature_columns) + list(dnn_feature_columns))\n\n # Build DNN Logits.\n dnn_parent_scope = 'dnn'\n\n if not dnn_feature_columns:\n dnn_logits = None\n else:\n dnn_optimizer = optimizers.get_optimizer_instance(\n dnn_optimizer, learning_rate=_DNN_LEARNING_RATE)\n _check_no_sync_replicas_optimizer(dnn_optimizer)\n if not dnn_hidden_units:\n raise ValueError(\n 'dnn_hidden_units must be defined when dnn_feature_columns is '\n 'specified.')\n dnn_partitioner = (\n partitioned_variables.min_max_variable_partitioner(\n max_partitions=num_ps_replicas))\n with variable_scope.variable_scope(\n dnn_parent_scope,\n values=tuple(six.itervalues(features)),\n partitioner=dnn_partitioner) as scope:\n dnn_absolute_scope = scope.name\n dnn_logit_fn = dnn._dnn_logit_fn_builder( # pylint: disable=protected-access\n units=head.logits_dimension,\n hidden_units=dnn_hidden_units,\n feature_columns=dnn_feature_columns,\n activation_fn=dnn_activation_fn,\n dropout=dnn_dropout,\n batch_norm=batch_norm,\n input_layer_partitioner=input_layer_partitioner,\n shared_state_manager=shared_state_manager)\n dnn_logits = dnn_logit_fn(features=features, mode=mode)\n\n linear_parent_scope = 'linear'\n\n if not linear_feature_columns:\n linear_logits = None\n else:\n linear_optimizer = optimizers.get_optimizer_instance(\n linear_optimizer,\n learning_rate=_linear_learning_rate(len(linear_feature_columns)))\n _check_no_sync_replicas_optimizer(linear_optimizer)\n with variable_scope.variable_scope(\n linear_parent_scope,\n values=tuple(six.itervalues(features)),\n partitioner=input_layer_partitioner) as scope:\n linear_absolute_scope = scope.name\n logit_fn = linear._linear_logit_fn_builder( # pylint: disable=protected-access\n units=head.logits_dimension,\n feature_columns=linear_feature_columns,\n sparse_combiner=linear_sparse_combiner)\n linear_logits = logit_fn(features=features)\n _add_layer_summary(linear_logits, scope.name)\n\n # Combine logits and build full model.\n if dnn_logits is not None and linear_logits is not None:\n logits = dnn_logits + linear_logits\n elif dnn_logits is not None:\n logits = dnn_logits\n else:\n logits = linear_logits\n\n def _train_op_fn(loss):\n \"\"\"Returns the op to optimize the loss.\"\"\"\n train_ops = []\n global_step = training_util.get_global_step()\n if dnn_logits is not None:\n train_ops.append(\n dnn_optimizer.minimize(\n loss,\n var_list=ops.get_collection(\n ops.GraphKeys.TRAINABLE_VARIABLES,\n scope=dnn_absolute_scope)))\n if linear_logits is not None:\n train_ops.append(\n linear_optimizer.minimize(\n loss,\n var_list=ops.get_collection(\n ops.GraphKeys.TRAINABLE_VARIABLES,\n scope=linear_absolute_scope)))\n\n train_op = control_flow_ops.group(*train_ops)\n with ops.control_dependencies([train_op]):\n return state_ops.assign_add(global_step, 1).op\n\n return head.create_estimator_spec(\n features=features,\n mode=mode,\n labels=labels,\n train_op_fn=_train_op_fn,\n logits=logits)\n\n\n@estimator_export('estimator.DNNLinearCombinedClassifier')\nclass 
DNNLinearCombinedClassifier(estimator.Estimator):\n \"\"\"An estimator for TensorFlow Linear and DNN joined classification models.\n\n Note: This estimator is also known as wide-n-deep.\n\n Example:\n\n ```python\n numeric_feature = numeric_column(...)\n categorical_column_a = categorical_column_with_hash_bucket(...)\n categorical_column_b = categorical_column_with_hash_bucket(...)\n\n categorical_feature_a_x_categorical_feature_b = crossed_column(...)\n categorical_feature_a_emb = embedding_column(\n categorical_column=categorical_feature_a, ...)\n categorical_feature_b_emb = embedding_column(\n categorical_id_column=categorical_feature_b, ...)\n\n estimator = DNNLinearCombinedClassifier(\n # wide settings\n linear_feature_columns=[categorical_feature_a_x_categorical_feature_b],\n linear_optimizer=tf.train.FtrlOptimizer(...),\n # deep settings\n dnn_feature_columns=[\n categorical_feature_a_emb, categorical_feature_b_emb,\n numeric_feature],\n dnn_hidden_units=[1000, 500, 100],\n dnn_optimizer=tf.train.ProximalAdagradOptimizer(...),\n # warm-start settings\n warm_start_from=\"/path/to/checkpoint/dir\")\n\n # To apply L1 and L2 regularization, you can set dnn_optimizer to:\n tf.train.ProximalAdagradOptimizer(\n learning_rate=0.1,\n l1_regularization_strength=0.001,\n l2_regularization_strength=0.001)\n # To apply learning rate decay, you can set dnn_optimizer to a callable:\n lambda: tf.AdamOptimizer(\n learning_rate=tf.exponential_decay(\n learning_rate=0.1,\n global_step=tf.get_global_step(),\n decay_steps=10000,\n decay_rate=0.96)\n # It is the same for linear_optimizer.\n\n # Input builders\n def input_fn_train: # returns x, y\n pass\n estimator.train(input_fn=input_fn_train, steps=100)\n\n def input_fn_eval: # returns x, y\n pass\n metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)\n def input_fn_predict: # returns x, None\n pass\n predictions = estimator.predict(input_fn=input_fn_predict)\n ```\n\n Input of `train` and `evaluate` should have following features,\n otherwise there will be a `KeyError`:\n\n * for each `column` in `dnn_feature_columns` + `linear_feature_columns`:\n - if `column` is a `_CategoricalColumn`, a feature with `key=column.name`\n whose `value` is a `SparseTensor`.\n - if `column` is a `_WeightedCategoricalColumn`, two features: the first\n with `key` the id column name, the second with `key` the weight column\n name. Both features' `value` must be a `SparseTensor`.\n - if `column` is a `_DenseColumn`, a feature with `key=column.name`\n whose `value` is a `Tensor`.\n\n Loss is calculated by using softmax cross entropy.\n\n @compatibility(eager)\n Estimators can be used while eager execution is enabled. Note that `input_fn`\n and all hooks are executed inside a graph context, so they have to be written\n to be compatible with graph mode. Note that `input_fn` code using `tf.data`\n generally works in both graph and eager modes.\n @end_compatibility\n \"\"\"\n\n def __init__(self,\n model_dir=None,\n linear_feature_columns=None,\n linear_optimizer='Ftrl',\n dnn_feature_columns=None,\n dnn_optimizer='Adagrad',\n dnn_hidden_units=None,\n dnn_activation_fn=nn.relu,\n dnn_dropout=None,\n n_classes=2,\n weight_column=None,\n label_vocabulary=None,\n input_layer_partitioner=None,\n config=None,\n warm_start_from=None,\n loss_reduction=losses.Reduction.SUM,\n batch_norm=False,\n linear_sparse_combiner='sum'):\n \"\"\"Initializes a DNNLinearCombinedClassifier instance.\n\n Args:\n model_dir: Directory to save model parameters, graph and etc. 
This can\n also be used to load checkpoints from the directory into a estimator\n to continue training a previously saved model.\n linear_feature_columns: An iterable containing all the feature columns\n used by linear part of the model. All items in the set must be\n instances of classes derived from `FeatureColumn`.\n linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to\n the linear part of the model. Can also be a string (one of 'Adagrad',\n 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or callable. Defaults to FTRL\n optimizer.\n dnn_feature_columns: An iterable containing all the feature columns used\n by deep part of the model. All items in the set must be instances of\n classes derived from `FeatureColumn`.\n dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to\n the deep part of the model. Can also be a string (one of 'Adagrad',\n 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or callable. Defaults to Adagrad\n optimizer.\n dnn_hidden_units: List of hidden units per layer. All layers are fully\n connected.\n dnn_activation_fn: Activation function applied to each layer. If None,\n will use `tf.nn.relu`.\n dnn_dropout: When not None, the probability we will drop out\n a given coordinate.\n n_classes: Number of label classes. Defaults to 2, namely binary\n classification. Must be > 1.\n weight_column: A string or a `_NumericColumn` created by\n `tf.feature_column.numeric_column` defining feature column representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example. If it is a string, it is\n used as a key to fetch weight tensor from the `features`. If it is a\n `_NumericColumn`, raw tensor is fetched by key `weight_column.key`,\n then weight_column.normalizer_fn is applied on it to get weight tensor.\n label_vocabulary: A list of strings represents possible label values. If\n given, labels must be string type and have any value in\n `label_vocabulary`. If it is not given, that means labels are\n already encoded as integer or float within [0, 1] for `n_classes=2` and\n encoded as integer values in {0, 1,..., n_classes-1} for `n_classes`>2 .\n Also there will be errors if vocabulary is not provided and labels are\n string.\n input_layer_partitioner: Partitioner for input layer. Defaults to\n `min_max_variable_partitioner` with `min_slice_size` 64 << 20.\n config: RunConfig object to configure the runtime settings.\n warm_start_from: A string filepath to a checkpoint to warm-start from, or\n a `WarmStartSettings` object to fully configure warm-starting. If the\n string filepath is provided instead of a `WarmStartSettings`, then all\n weights are warm-started, and it is assumed that vocabularies and Tensor\n names are unchanged.\n loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how\n to reduce training loss over batch. Defaults to `SUM`.\n batch_norm: Whether to use batch normalization after each hidden layer.\n linear_sparse_combiner: A string specifying how to reduce the linear model\n if a categorical column is multivalent. One of \"mean\", \"sqrtn\", and\n \"sum\" -- these are effectively different ways to do example-level\n normalization, which can be useful for bag-of-words features. 
For more\n details, see `tf.feature_column.linear_model`.\n\n Raises:\n ValueError: If both linear_feature_columns and dnn_features_columns are\n empty at the same time.\n \"\"\"\n linear_feature_columns = linear_feature_columns or []\n dnn_feature_columns = dnn_feature_columns or []\n self._feature_columns = (\n list(linear_feature_columns) + list(dnn_feature_columns))\n if not self._feature_columns:\n raise ValueError('Either linear_feature_columns or dnn_feature_columns '\n 'must be defined.')\n if n_classes == 2:\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss( # pylint: disable=protected-access\n weight_column=weight_column,\n label_vocabulary=label_vocabulary,\n loss_reduction=loss_reduction)\n else:\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss( # pylint: disable=protected-access\n n_classes,\n weight_column=weight_column,\n label_vocabulary=label_vocabulary,\n loss_reduction=loss_reduction)\n\n def _model_fn(features, labels, mode, config):\n \"\"\"Call the _dnn_linear_combined_model_fn.\"\"\"\n return _dnn_linear_combined_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n head=head,\n linear_feature_columns=linear_feature_columns,\n linear_optimizer=linear_optimizer,\n dnn_feature_columns=dnn_feature_columns,\n dnn_optimizer=dnn_optimizer,\n dnn_hidden_units=dnn_hidden_units,\n dnn_activation_fn=dnn_activation_fn,\n dnn_dropout=dnn_dropout,\n input_layer_partitioner=input_layer_partitioner,\n config=config,\n batch_norm=batch_norm,\n linear_sparse_combiner=linear_sparse_combiner)\n\n super(DNNLinearCombinedClassifier, self).__init__(\n model_fn=_model_fn, model_dir=model_dir, config=config,\n warm_start_from=warm_start_from)\n\n\n@estimator_export('estimator.DNNLinearCombinedRegressor')\nclass DNNLinearCombinedRegressor(estimator.Estimator):\n \"\"\"An estimator for TensorFlow Linear and DNN joined models for regression.\n\n Note: This estimator is also known as wide-n-deep.\n\n Example:\n\n ```python\n numeric_feature = numeric_column(...)\n categorical_column_a = categorical_column_with_hash_bucket(...)\n categorical_column_b = categorical_column_with_hash_bucket(...)\n\n categorical_feature_a_x_categorical_feature_b = crossed_column(...)\n categorical_feature_a_emb = embedding_column(\n categorical_column=categorical_feature_a, ...)\n categorical_feature_b_emb = embedding_column(\n categorical_column=categorical_feature_b, ...)\n\n estimator = DNNLinearCombinedRegressor(\n # wide settings\n linear_feature_columns=[categorical_feature_a_x_categorical_feature_b],\n linear_optimizer=tf.train.FtrlOptimizer(...),\n # deep settings\n dnn_feature_columns=[\n categorical_feature_a_emb, categorical_feature_b_emb,\n numeric_feature],\n dnn_hidden_units=[1000, 500, 100],\n dnn_optimizer=tf.train.ProximalAdagradOptimizer(...),\n # warm-start settings\n warm_start_from=\"/path/to/checkpoint/dir\")\n\n # To apply L1 and L2 regularization, you can set dnn_optimizer to:\n tf.train.ProximalAdagradOptimizer(\n learning_rate=0.1,\n l1_regularization_strength=0.001,\n l2_regularization_strength=0.001)\n # To apply learning rate decay, you can set dnn_optimizer to a callable:\n lambda: tf.AdamOptimizer(\n learning_rate=tf.exponential_decay(\n learning_rate=0.1,\n global_step=tf.get_global_step(),\n decay_steps=10000,\n decay_rate=0.96)\n # It is the same for linear_optimizer.\n\n # Input builders\n def input_fn_train: # returns x, y\n pass\n estimator.train(input_fn=input_fn_train, steps=100)\n\n def input_fn_eval: # returns x, y\n pass\n 
metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)\n def input_fn_predict: # returns x, None\n pass\n predictions = estimator.predict(input_fn=input_fn_predict)\n ```\n\n Input of `train` and `evaluate` should have following features,\n otherwise there will be a `KeyError`:\n\n * for each `column` in `dnn_feature_columns` + `linear_feature_columns`:\n - if `column` is a `_CategoricalColumn`, a feature with `key=column.name`\n whose `value` is a `SparseTensor`.\n - if `column` is a `_WeightedCategoricalColumn`, two features: the first\n with `key` the id column name, the second with `key` the weight column\n name. Both features' `value` must be a `SparseTensor`.\n - if `column` is a `_DenseColumn`, a feature with `key=column.name`\n whose `value` is a `Tensor`.\n\n Loss is calculated by using mean squared error.\n\n @compatibility(eager)\n Estimators can be used while eager execution is enabled. Note that `input_fn`\n and all hooks are executed inside a graph context, so they have to be written\n to be compatible with graph mode. Note that `input_fn` code using `tf.data`\n generally works in both graph and eager modes.\n @end_compatibility\n \"\"\"\n\n def __init__(self,\n model_dir=None,\n linear_feature_columns=None,\n linear_optimizer='Ftrl',\n dnn_feature_columns=None,\n dnn_optimizer='Adagrad',\n dnn_hidden_units=None,\n dnn_activation_fn=nn.relu,\n dnn_dropout=None,\n label_dimension=1,\n weight_column=None,\n input_layer_partitioner=None,\n config=None,\n warm_start_from=None,\n loss_reduction=losses.Reduction.SUM,\n batch_norm=False,\n linear_sparse_combiner='sum'):\n \"\"\"Initializes a DNNLinearCombinedRegressor instance.\n\n Args:\n model_dir: Directory to save model parameters, graph and etc. This can\n also be used to load checkpoints from the directory into a estimator\n to continue training a previously saved model.\n linear_feature_columns: An iterable containing all the feature columns\n used by linear part of the model. All items in the set must be\n instances of classes derived from `FeatureColumn`.\n linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to\n the linear part of the model. Can also be a string (one of 'Adagrad',\n 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or callable. Defaults to FTRL\n optimizer.\n dnn_feature_columns: An iterable containing all the feature columns used\n by deep part of the model. All items in the set must be instances of\n classes derived from `FeatureColumn`.\n dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to\n the deep part of the model. Can also be a string (one of 'Adagrad',\n 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or callable. Defaults to Adagrad\n optimizer.\n dnn_hidden_units: List of hidden units per layer. All layers are fully\n connected.\n dnn_activation_fn: Activation function applied to each layer. If None,\n will use `tf.nn.relu`.\n dnn_dropout: When not None, the probability we will drop out\n a given coordinate.\n label_dimension: Number of regression targets per example. This is the\n size of the last dimension of the labels and logits `Tensor` objects\n (typically, these have shape `[batch_size, label_dimension]`).\n weight_column: A string or a `_NumericColumn` created by\n `tf.feature_column.numeric_column` defining feature column representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example. If it is a string, it is\n used as a key to fetch weight tensor from the `features`. 
If it is a\n `_NumericColumn`, raw tensor is fetched by key `weight_column.key`,\n then weight_column.normalizer_fn is applied on it to get weight tensor.\n input_layer_partitioner: Partitioner for input layer. Defaults to\n `min_max_variable_partitioner` with `min_slice_size` 64 << 20.\n config: RunConfig object to configure the runtime settings.\n warm_start_from: A string filepath to a checkpoint to warm-start from, or\n a `WarmStartSettings` object to fully configure warm-starting. If the\n string filepath is provided instead of a `WarmStartSettings`, then all\n weights are warm-started, and it is assumed that vocabularies and Tensor\n names are unchanged.\n loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how\n to reduce training loss over batch. Defaults to `SUM`.\n batch_norm: Whether to use batch normalization after each hidden layer.\n linear_sparse_combiner: A string specifying how to reduce the linear model\n if a categorical column is multivalent. One of \"mean\", \"sqrtn\", and\n \"sum\" -- these are effectively different ways to do example-level\n normalization, which can be useful for bag-of-words features. For more\n details, see `tf.feature_column.linear_model`.\n\n Raises:\n ValueError: If both linear_feature_columns and dnn_features_columns are\n empty at the same time.\n \"\"\"\n linear_feature_columns = linear_feature_columns or []\n dnn_feature_columns = dnn_feature_columns or []\n self._feature_columns = (\n list(linear_feature_columns) + list(dnn_feature_columns))\n if not self._feature_columns:\n raise ValueError('Either linear_feature_columns or dnn_feature_columns '\n 'must be defined.')\n\n def _model_fn(features, labels, mode, config):\n \"\"\"Call the _dnn_linear_combined_model_fn.\"\"\"\n return _dnn_linear_combined_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n head=head_lib._regression_head( # pylint: disable=protected-access\n label_dimension=label_dimension, weight_column=weight_column,\n loss_reduction=loss_reduction),\n linear_feature_columns=linear_feature_columns,\n linear_optimizer=linear_optimizer,\n dnn_feature_columns=dnn_feature_columns,\n dnn_optimizer=dnn_optimizer,\n dnn_hidden_units=dnn_hidden_units,\n dnn_activation_fn=dnn_activation_fn,\n dnn_dropout=dnn_dropout,\n input_layer_partitioner=input_layer_partitioner,\n config=config,\n batch_norm=batch_norm,\n linear_sparse_combiner=linear_sparse_combiner)\n\n super(DNNLinearCombinedRegressor, self).__init__(\n model_fn=_model_fn, model_dir=model_dir, config=config,\n warm_start_from=warm_start_from)\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for manip_ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.ops import manip_ops\nfrom tensorflow.python.platform import test as test_lib\n\n# pylint: disable=g-import-not-at-top\ntry:\n from distutils.version import StrictVersion as Version\n # numpy.roll for multiple shifts was introduced in numpy version 1.12.0\n NP_ROLL_CAN_MULTISHIFT = Version(np.version.version) >= Version(\"1.12.0\")\nexcept ImportError:\n NP_ROLL_CAN_MULTISHIFT = False\n# pylint: enable=g-import-not-at-top\n\n\nclass RollTest(test_util.TensorFlowTestCase):\n\n def _testRoll(self, np_input, shift, axis):\n expected_roll = np.roll(np_input, shift, axis)\n with self.cached_session():\n roll = manip_ops.roll(np_input, shift, axis)\n self.assertAllEqual(roll.eval(), expected_roll)\n\n def _testGradient(self, np_input, shift, axis):\n with self.cached_session():\n inx = constant_op.constant(np_input.tolist())\n xs = list(np_input.shape)\n y = manip_ops.roll(inx, shift, axis)\n # Expected y's shape to be the same\n ys = xs\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n inx, xs, y, ys, x_init_value=np_input)\n self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)\n\n def _testAll(self, np_input, shift, axis):\n self._testRoll(np_input, shift, axis)\n if np_input.dtype == np.float32:\n self._testGradient(np_input, shift, axis)\n\n def testIntTypes(self):\n for t in [np.int32, np.int64]:\n self._testAll(np.random.randint(-100, 100, (5)).astype(t), 3, 0)\n if NP_ROLL_CAN_MULTISHIFT:\n self._testAll(\n np.random.randint(-100, 100, (4, 4, 3)).astype(t), [1, -2, 3],\n [0, 1, 2])\n self._testAll(\n np.random.randint(-100, 100, (4, 2, 1, 3)).astype(t), [0, 1, -2],\n [1, 2, 3])\n\n def testFloatTypes(self):\n for t in [np.float32, np.float64]:\n self._testAll(np.random.rand(5).astype(t), 2, 0)\n if NP_ROLL_CAN_MULTISHIFT:\n self._testAll(np.random.rand(3, 4).astype(t), [1, 2], [1, 0])\n self._testAll(np.random.rand(1, 3, 4).astype(t), [1, 0, -3], [0, 1, 2])\n\n def testComplexTypes(self):\n for t in [np.complex64, np.complex128]:\n x = np.random.rand(4, 4).astype(t)\n self._testAll(x + 1j * x, 2, 0)\n if NP_ROLL_CAN_MULTISHIFT:\n x = np.random.rand(2, 5).astype(t)\n self._testAll(x + 1j * x, [1, 2], [1, 0])\n x = np.random.rand(3, 2, 1, 1).astype(t)\n self._testAll(x + 1j * x, [2, 1, 1, 0], [0, 3, 1, 2])\n\n def testNegativeAxis(self):\n self._testAll(np.random.randint(-100, 100, 
(5)).astype(np.int32), 3, -1)\n self._testAll(np.random.randint(-100, 100, (4, 4)).astype(np.int32), 3, -2)\n # Make sure negative axis should be 0 <= axis + dims < dims\n with self.cached_session():\n with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,\n \"is out of range\"):\n manip_ops.roll(np.random.randint(-100, 100, (4, 4)).astype(np.int32),\n 3, -10).eval()\n\n def testInvalidInputShape(self):\n # The input should be 1-D or higher, checked in shape function.\n with self.assertRaisesRegexp(\n ValueError, \"Shape must be at least rank 1 but is rank 0\"):\n manip_ops.roll(7, 1, 0)\n\n def testRollInputMustVectorHigherRaises(self):\n # The input should be 1-D or higher, checked in kernel.\n tensor = array_ops.placeholder(dtype=dtypes.int32)\n shift = 1\n axis = 0\n with self.cached_session():\n with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,\n \"input must be 1-D or higher\"):\n manip_ops.roll(tensor, shift, axis).eval(feed_dict={tensor: 7})\n\n def testInvalidAxisShape(self):\n # The axis should be a scalar or 1-D, checked in shape function.\n with self.assertRaisesRegexp(\n ValueError, \"Shape must be at most rank 1 but is rank 2\"):\n manip_ops.roll([[1, 2], [3, 4]], 1, [[0, 1]])\n\n def testRollAxisMustBeScalarOrVectorRaises(self):\n # The axis should be a scalar or 1-D, checked in kernel.\n tensor = [[1, 2], [3, 4]]\n shift = 1\n axis = array_ops.placeholder(dtype=dtypes.int32)\n with self.cached_session():\n with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,\n \"axis must be a scalar or a 1-D vector\"):\n manip_ops.roll(tensor, shift, axis).eval(feed_dict={axis: [[0, 1]]})\n\n def testInvalidShiftShape(self):\n # The shift should be a scalar or 1-D, checked in shape function.\n with self.assertRaisesRegexp(\n ValueError, \"Shape must be at most rank 1 but is rank 2\"):\n manip_ops.roll([[1, 2], [3, 4]], [[0, 1]], 1)\n\n def testRollShiftMustBeScalarOrVectorRaises(self):\n # The shift should be a scalar or 1-D, checked in kernel.\n tensor = [[1, 2], [3, 4]]\n shift = array_ops.placeholder(dtype=dtypes.int32)\n axis = 1\n with self.cached_session():\n with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,\n \"shift must be a scalar or a 1-D vector\"):\n manip_ops.roll(tensor, shift, axis).eval(feed_dict={shift: [[0, 1]]})\n\n def testInvalidShiftAndAxisNotEqualShape(self):\n # The shift and axis must be same size, checked in shape function.\n with self.assertRaisesRegexp(ValueError, \"both shapes must be equal\"):\n manip_ops.roll([[1, 2], [3, 4]], [1], [0, 1])\n\n def testRollShiftAndAxisMustBeSameSizeRaises(self):\n # The shift and axis must be same size, checked in kernel.\n tensor = [[1, 2], [3, 4]]\n shift = array_ops.placeholder(dtype=dtypes.int32)\n axis = [0, 1]\n with self.cached_session():\n with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,\n \"shift and axis must have the same size\"):\n manip_ops.roll(tensor, shift, axis).eval(feed_dict={shift: [1]})\n\n def testRollAxisOutOfRangeRaises(self):\n tensor = [1, 2]\n shift = 1\n axis = 1\n with self.cached_session():\n with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,\n \"is out of range\"):\n manip_ops.roll(tensor, shift, axis).eval()\n\n\nif __name__ == \"__main__\":\n test_lib.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for `tf.data.experimental.prefetch_to_device()`.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.data.experimental.ops import prefetching_ops\nfrom tensorflow.python.data.kernel_tests import test_base\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.platform import test\n\n\nclass PrefetchToDeviceTest(test_base.DatasetTestBase):\n\n def testPrefetchToDevice(self):\n host_dataset = dataset_ops.Dataset.range(10)\n device_dataset = host_dataset.apply(\n prefetching_ops.prefetch_to_device(\"/cpu:1\"))\n\n # NOTE(mrry): This device block creates the \"host\" dataset and iterator on\n # /cpu:0, and ensures that the prefetching is across devices. In typical use\n # this would not be necessary, because the GPU device would not support any\n # of the dataset-related ops.\n with ops.device(\"/cpu:0\"):\n iterator = device_dataset.make_one_shot_iterator()\n\n self.assertEqual(host_dataset.output_types, device_dataset.output_types)\n self.assertEqual(host_dataset.output_types, iterator.output_types)\n self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)\n self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)\n self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)\n self.assertEqual(host_dataset.output_classes, iterator.output_classes)\n\n next_element = iterator.get_next()\n self.assertEqual(dtypes.int64, next_element.dtype)\n self.assertEqual([], next_element.shape)\n\n worker_config = config_pb2.ConfigProto(device_count={\"CPU\": 2})\n with self.test_session(config=worker_config) as sess:\n for i in range(10):\n self.assertEqual(i, sess.run(next_element))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(next_element)\n\n def testPrefetchToSameDevice(self):\n host_dataset = dataset_ops.Dataset.range(10)\n device_dataset = host_dataset.apply(\n prefetching_ops.prefetch_to_device(\n \"/job:localhost/replica:0/task:0/device:CPU:0\"))\n\n # NOTE(mrry): This device block creates the \"host\" dataset and iterator on\n # /cpu:0, and ensures that the prefetching is across devices. 
In typical use\n # this would not be necessary, because the GPU device would not support any\n # of the dataset-related ops.\n with ops.device(\"/cpu:0\"):\n iterator = device_dataset.make_one_shot_iterator()\n\n self.assertEqual(host_dataset.output_types, device_dataset.output_types)\n self.assertEqual(host_dataset.output_types, iterator.output_types)\n self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)\n self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)\n self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)\n self.assertEqual(host_dataset.output_classes, iterator.output_classes)\n\n next_element = iterator.get_next()\n self.assertEqual(dtypes.int64, next_element.dtype)\n self.assertEqual([], next_element.shape)\n\n with self.cached_session() as sess:\n for i in range(10):\n self.assertEqual(i, sess.run(next_element))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(next_element)\n\n def testPrefetchDictToDevice(self):\n host_dataset = dataset_ops.Dataset.range(10).map(lambda x: {\"a\": x})\n device_dataset = host_dataset.apply(\n prefetching_ops.prefetch_to_device(\"/cpu:1\"))\n\n # NOTE(mrry): This device block creates the \"host\" dataset and iterator on\n # /cpu:0, and ensures that the prefetching is across devices. In typical use\n # this would not be necessary, because the GPU device would not support any\n # of the dataset-related ops.\n with ops.device(\"/cpu:0\"):\n iterator = device_dataset.make_one_shot_iterator()\n\n self.assertEqual(host_dataset.output_types, device_dataset.output_types)\n self.assertEqual(host_dataset.output_types, iterator.output_types)\n self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)\n self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)\n self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)\n self.assertEqual(host_dataset.output_classes, iterator.output_classes)\n\n next_element = iterator.get_next()\n self.assertEqual(dtypes.int64, next_element[\"a\"].dtype)\n self.assertEqual([], next_element[\"a\"].shape)\n\n worker_config = config_pb2.ConfigProto(device_count={\"CPU\": 2})\n with self.test_session(config=worker_config) as sess:\n for i in range(10):\n self.assertEqual({\"a\": i}, sess.run(next_element))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(next_element)\n\n def testPrefetchSparseTensorsToDevice(self):\n def make_tensor(i):\n return sparse_tensor.SparseTensorValue(\n indices=[[0, 0]], values=(i*[1]), dense_shape=[2, 2])\n host_dataset = dataset_ops.Dataset.range(10).map(make_tensor)\n\n device_dataset = host_dataset.apply(\n prefetching_ops.prefetch_to_device(\"/cpu:1\"))\n\n # NOTE(mrry): This device block creates the \"host\" dataset and iterator on\n # /cpu:0, and ensures that the prefetching is across devices. 
In typical use\n # this would not be necessary, because the GPU device would not support any\n # of the dataset-related ops.\n with ops.device(\"/cpu:0\"):\n iterator = device_dataset.make_one_shot_iterator()\n\n self.assertEqual(host_dataset.output_types, device_dataset.output_types)\n self.assertEqual(host_dataset.output_types, iterator.output_types)\n self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)\n self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)\n self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)\n self.assertEqual(host_dataset.output_classes, iterator.output_classes)\n\n next_element = iterator.get_next()\n self.assertEqual(dtypes.int64, next_element.dtype)\n\n worker_config = config_pb2.ConfigProto(device_count={\"CPU\": 2})\n with self.test_session(config=worker_config) as sess:\n for i in range(10):\n actual = sess.run(next_element)\n self.assertAllEqual([i], actual.values)\n self.assertAllEqual([[0, 0]], actual.indices)\n self.assertAllEqual([2, 2], actual.dense_shape)\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(next_element)\n\n def testPrefetchToDeviceGpu(self):\n if not test_util.is_gpu_available():\n self.skipTest(\"No GPU available\")\n\n host_dataset = dataset_ops.Dataset.range(10)\n device_dataset = host_dataset.apply(\n prefetching_ops.prefetch_to_device(\"/gpu:0\"))\n\n iterator = device_dataset.make_one_shot_iterator()\n next_element = iterator.get_next()\n\n with self.cached_session() as sess:\n for i in range(10):\n self.assertEqual(i, sess.run(next_element))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(next_element)\n\n def testPrefetchToDeviceWithReInit(self):\n host_dataset = dataset_ops.Dataset.range(10)\n device_dataset = host_dataset.apply(\n prefetching_ops.prefetch_to_device(\"/cpu:1\"))\n\n # NOTE(mrry): This device block creates the \"host\" dataset and iterator on\n # /cpu:0, and ensures that the prefetching is across devices. 
In typical use\n # this would not be necessary, because the GPU device would not support any\n # of the dataset-related ops.\n with ops.device(\"/cpu:0\"):\n iterator = device_dataset.make_initializable_iterator()\n\n self.assertEqual(host_dataset.output_types, device_dataset.output_types)\n self.assertEqual(host_dataset.output_types, iterator.output_types)\n self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)\n self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)\n self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)\n self.assertEqual(host_dataset.output_classes, iterator.output_classes)\n\n next_element = iterator.get_next()\n self.assertEqual(dtypes.int64, next_element.dtype)\n self.assertEqual([], next_element.shape)\n\n worker_config = config_pb2.ConfigProto(device_count={\"CPU\": 2})\n with self.test_session(config=worker_config) as sess:\n sess.run(iterator.initializer)\n for i in range(5):\n self.assertEqual(i, sess.run(next_element))\n sess.run(iterator.initializer)\n for i in range(10):\n self.assertEqual(i, sess.run(next_element))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(next_element)\n\n def testPrefetchToDeviceGpuWithReInit(self):\n if not test_util.is_gpu_available():\n self.skipTest(\"No GPU available\")\n\n host_dataset = dataset_ops.Dataset.range(10)\n device_dataset = host_dataset.apply(\n prefetching_ops.prefetch_to_device(\"/gpu:0\"))\n\n iterator = device_dataset.make_initializable_iterator()\n next_element = iterator.get_next()\n\n with self.cached_session() as sess:\n sess.run(iterator.initializer)\n for i in range(5):\n self.assertEqual(i, sess.run(next_element))\n sess.run(iterator.initializer)\n for i in range(10):\n self.assertEqual(i, sess.run(next_element))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(next_element)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functional tests for Proximal Gradient Descent operations.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import embedding_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import gradient_descent\nfrom tensorflow.python.training import proximal_gradient_descent\n\n\nclass ProximalGradientDescentOptimizerTest(test.TestCase):\n\n def doTestProximalGradientDescentwithoutRegularization(\n self, use_resource=False):\n with self.cached_session() as sess:\n if use_resource:\n var0 = resource_variable_ops.ResourceVariable([0.0, 0.0])\n var1 = resource_variable_ops.ResourceVariable([0.0, 0.0])\n else:\n var0 = variables.Variable([0.0, 0.0])\n var1 = variables.Variable([0.0, 0.0])\n grads0 = constant_op.constant([0.1, 0.2])\n grads1 = constant_op.constant([0.01, 0.02])\n opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(\n 3.0, l1_regularization_strength=0.0, l2_regularization_strength=0.0)\n update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n variables.global_variables_initializer().run()\n\n v0_val, v1_val = sess.run([var0, var1])\n self.assertAllClose([0.0, 0.0], v0_val)\n self.assertAllClose([0.0, 0.0], v1_val)\n\n # Run 3 steps Proximal Gradient Descent.\n for _ in range(3):\n update.run()\n\n v0_val, v1_val = sess.run([var0, var1])\n self.assertAllClose(np.array([-0.9, -1.8]), v0_val)\n self.assertAllClose(np.array([-0.09, -0.18]), v1_val)\n\n def testProximalGradientDescentwithoutRegularization(self):\n self.doTestProximalGradientDescentwithoutRegularization(use_resource=False)\n\n def testResourceProximalGradientDescentwithoutRegularization(self):\n self.doTestProximalGradientDescentwithoutRegularization(use_resource=True)\n\n def testProximalGradientDescentwithoutRegularization2(self):\n with self.cached_session() as sess:\n var0 = variables.Variable([1.0, 2.0])\n var1 = variables.Variable([4.0, 3.0])\n grads0 = constant_op.constant([0.1, 0.2])\n grads1 = constant_op.constant([0.01, 0.02])\n\n opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(\n 3.0, l1_regularization_strength=0.0, l2_regularization_strength=0.0)\n update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n variables.global_variables_initializer().run()\n\n v0_val, v1_val = sess.run([var0, var1])\n self.assertAllClose([1.0, 2.0], v0_val)\n self.assertAllClose([4.0, 3.0], v1_val)\n\n # Run 3 steps Proximal Gradient Descent\n for _ in 
range(3):\n update.run()\n\n v0_val, v1_val = sess.run([var0, var1])\n self.assertAllClose(np.array([0.1, 0.2]), v0_val)\n self.assertAllClose(np.array([3.91, 2.82]), v1_val)\n\n def testMinimizeSparseResourceVariable(self):\n for dtype in [dtypes.float32, dtypes.float64]:\n with self.cached_session():\n var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)\n x = constant_op.constant([[4.0], [5.0]], dtype=dtype)\n pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)\n loss = pred * pred\n sgd_op = proximal_gradient_descent.ProximalGradientDescentOptimizer(\n 1.0).minimize(loss)\n variables.global_variables_initializer().run()\n # Fetch params to validate initial values\n self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())\n # Run 1 step of sgd\n sgd_op.run()\n # Validate updated params\n self.assertAllCloseAccordingToType(\n [[-111, -138]], var0.eval(), atol=0.01)\n\n def testProximalGradientDescentWithL1_L2(self):\n with self.cached_session() as sess:\n var0 = variables.Variable([1.0, 2.0])\n var1 = variables.Variable([4.0, 3.0])\n grads0 = constant_op.constant([0.1, 0.2])\n grads1 = constant_op.constant([0.01, 0.02])\n\n opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(\n 3.0, l1_regularization_strength=0.001, l2_regularization_strength=2.0)\n update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n variables.global_variables_initializer().run()\n\n v0_val, v1_val = sess.run([var0, var1])\n self.assertAllClose([1.0, 2.0], v0_val)\n self.assertAllClose([4.0, 3.0], v1_val)\n\n # Run 10 steps Proximal Gradient Descent\n for _ in range(10):\n update.run()\n\n v0_val, v1_val = sess.run([var0, var1])\n self.assertAllClose(np.array([-0.0495, -0.0995]), v0_val)\n self.assertAllClose(np.array([-0.0045, -0.0095]), v1_val)\n\n def applyOptimizer(self, opt, steps=5, is_sparse=False):\n if is_sparse:\n var0 = variables.Variable([[1.0], [2.0]])\n var1 = variables.Variable([[3.0], [4.0]])\n grads0 = ops.IndexedSlices(\n constant_op.constant(\n [0.1], shape=[1, 1]),\n constant_op.constant([0]),\n constant_op.constant([2, 1]))\n grads1 = ops.IndexedSlices(\n constant_op.constant(\n [0.02], shape=[1, 1]),\n constant_op.constant([1]),\n constant_op.constant([2, 1]))\n else:\n var0 = variables.Variable([1.0, 2.0])\n var1 = variables.Variable([3.0, 4.0])\n grads0 = constant_op.constant([0.1, 0.2])\n grads1 = constant_op.constant([0.01, 0.02])\n\n update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n variables.global_variables_initializer().run()\n\n sess = ops.get_default_session()\n v0_val, v1_val = sess.run([var0, var1])\n if is_sparse:\n self.assertAllClose([[1.0], [2.0]], v0_val)\n self.assertAllClose([[3.0], [4.0]], v1_val)\n else:\n self.assertAllClose([1.0, 2.0], v0_val)\n self.assertAllClose([3.0, 4.0], v1_val)\n\n # Run ProximalAdagrad for a few steps\n for _ in range(steps):\n update.run()\n\n v0_val, v1_val = sess.run([var0, var1])\n return v0_val, v1_val\n\n def testEquivSparseGradientDescentwithoutRegularization(self):\n with self.cached_session():\n val0, val1 = self.applyOptimizer(\n proximal_gradient_descent.ProximalGradientDescentOptimizer(\n 3.0,\n l1_regularization_strength=0.0,\n l2_regularization_strength=0.0),\n is_sparse=True)\n\n with self.cached_session():\n val2, val3 = self.applyOptimizer(\n gradient_descent.GradientDescentOptimizer(3.0), is_sparse=True)\n\n self.assertAllClose(val0, val2)\n self.assertAllClose(val1, val3)\n\n def testEquivGradientDescentwithoutRegularization(self):\n with 
self.cached_session():\n val0, val1 = self.applyOptimizer(\n proximal_gradient_descent.ProximalGradientDescentOptimizer(\n 3.0,\n l1_regularization_strength=0.0,\n l2_regularization_strength=0.0))\n\n with self.cached_session():\n val2, val3 = self.applyOptimizer(\n gradient_descent.GradientDescentOptimizer(3.0))\n\n self.assertAllClose(val0, val2)\n self.assertAllClose(val1, val3)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n# ==============================================================================\n\"\"\"Tests for SequenceFileDataset.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom tensorflow.contrib.hadoop.python.ops import hadoop_dataset_ops\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.platform import resource_loader\nfrom tensorflow.python.platform import test\n\n\nclass SequenceFileDatasetTest(test.TestCase):\n\n def test_sequence_file_dataset(self):\n \"\"\"Test case for SequenceFileDataset.\n\n The file is generated with `org.apache.hadoop.io.Text` for key/value.\n There are 25 records in the file with the format of:\n key = XXX\n value = VALUEXXX\n where XXX is replaced as the line number (starts with 001).\n \"\"\"\n filename = os.path.join(resource_loader.get_data_files_path(),\n \"testdata\", \"string.seq\")\n\n filenames = constant_op.constant([filename], dtypes.string)\n num_repeats = 2\n\n dataset = hadoop_dataset_ops.SequenceFileDataset(filenames).repeat(\n num_repeats)\n iterator = dataset.make_initializable_iterator()\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n with self.cached_session() as sess:\n sess.run(init_op)\n for _ in range(num_repeats): # Dataset is repeated.\n for i in range(25): # 25 records.\n v0 = b\"%03d\" % (i + 1)\n v1 = b\"VALUE%03d\" % (i + 1)\n self.assertEqual((v0, v1), sess.run(get_next))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for the swig wrapper tf_optimizer.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import meta_graph\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import random_seed\nfrom tensorflow.python.grappler import tf_optimizer\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import training as train\n\n\nclass MemoryOptimizerSwapTest(test.TestCase):\n \"\"\"Tests the Grappler memory optimizer.\"\"\"\n\n def testNoSwapping(self):\n \"\"\"Make sure the graph is preserved when there is nothing to swap.\"\"\"\n a = variables.VariableV1(10, name='a')\n b = variables.VariableV1(20, name='b')\n c = math_ops.add_n([a, b], name='c')\n d = math_ops.add_n([b, c], name='d')\n train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)\n train_op.append(d)\n mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())\n graph_size = len(mg.graph_def.node)\n nodes = [node.name for node in mg.graph_def.node]\n\n rewriter_config = rewriter_config_pb2.RewriterConfig(\n disable_model_pruning=True,\n constant_folding=rewriter_config_pb2.RewriterConfig.OFF,\n memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL)\n graph = tf_optimizer.OptimizeGraph(rewriter_config, mg)\n\n self.assertEqual(len(graph.node), graph_size)\n self.assertItemsEqual([node.name for node in graph.node], nodes)\n\n def testSimpleSwap(self):\n \"\"\"Check that the swap annotations are followed.\"\"\"\n a = variables.VariableV1(10, name='a')\n b = variables.VariableV1(20, name='b')\n c = math_ops.add_n([a, b], name='c')\n d = math_ops.add_n([b, c], name='d')\n train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)\n train_op.append(d)\n\n d.op._set_attr('_swap_to_host', attr_value_pb2.AttrValue(i=0))\n\n mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())\n graph_size = len(mg.graph_def.node)\n\n rewriter_config = rewriter_config_pb2.RewriterConfig(\n disable_model_pruning=True,\n meta_optimizer_iterations=rewriter_config_pb2.RewriterConfig.ONE,\n constant_folding=rewriter_config_pb2.RewriterConfig.OFF,\n memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL,\n min_graph_nodes=-1)\n graph = tf_optimizer.OptimizeGraph(rewriter_config, mg)\n\n self.assertEqual(len(graph.node), graph_size + 2)\n self.assertTrue(\n set([node.name for node in graph.node]) > set(\n 
['a', 'b', 'c', 'd', 'swap_in_d_0', 'swap_out_d_0']))\n for node in graph.node:\n if node.name == 'swap_in_d_0':\n self.assertEqual('swap_out_d_0', node.input[0])\n self.assertEqual('^b/read', node.input[1])\n elif node.name == 'swap_out_d_0':\n self.assertEqual('b/read', node.input[0])\n elif node.name == 'd':\n self.assertEqual('swap_in_d_0', node.input[0])\n self.assertEqual('c', node.input[1])\n\n\nclass MemoryOptimizerRecomputeTest(test.TestCase):\n \"\"\"Tests the Python interface to recomputation rewrites.\n\n See core/grappler/optimizers/memory_optimizer_test.cc for functional tests.\n \"\"\"\n\n def _GetMetaGraph(self, batch_size=14, image_dim=12, optimizer_scope_name=''):\n \"\"\"A simple layered graph with conv, an intermediate op, and a ReLU.\"\"\"\n graph = ops.Graph()\n with graph.as_default():\n random_seed.set_random_seed(1)\n current_activation = variable_scope.get_variable(\n name='start', shape=[batch_size, image_dim, image_dim, 5])\n conv_filter = variable_scope.get_variable(\n name='filter', shape=[5, 5, 5, 5])\n for layer_number in range(10):\n with variable_scope.variable_scope('layer_{}'.format(layer_number)):\n after_conv = nn.conv2d(current_activation, conv_filter, [1, 1, 1, 1],\n 'SAME')\n current_activation = 2. * after_conv\n current_activation = nn.relu(current_activation)\n loss = math_ops.reduce_mean(current_activation)\n with ops.name_scope(optimizer_scope_name):\n optimizer = train.AdamOptimizer(0.001)\n train_op = optimizer.minimize(loss)\n init_op = variables.global_variables_initializer()\n metagraph = train.export_meta_graph()\n return (metagraph, init_op.name, train_op.name, loss.name)\n\n def testRewritingDefaultGradientNames(self):\n \"\"\"Tests that rewriting occurs with default gradient names.\"\"\"\n (original_metagraph, _, _, _) = self._GetMetaGraph()\n rewritten_graph_def = tf_optimizer.OptimizeGraph(\n rewriter_config_pb2.RewriterConfig(\n disable_model_pruning=True,\n constant_folding=rewriter_config_pb2.RewriterConfig.OFF,\n dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF,\n layout_optimizer=rewriter_config_pb2.RewriterConfig.OFF,\n arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,\n min_graph_nodes=-1,\n memory_optimization=rewriter_config_pb2.RewriterConfig.\n RECOMPUTATION_HEURISTICS), original_metagraph)\n self.assertGreater(\n len(rewritten_graph_def.node),\n len(original_metagraph.graph_def.node))\n self.assertEqual(\n 0,\n len([node for node in original_metagraph.graph_def.node\n if 'Recomputed/' in node.name]))\n self.assertEqual(\n 20, # Two per layer\n len([node for node in rewritten_graph_def.node\n if 'Recomputed/' in node.name]))\n\n def testRewritingNameScopedGradientNames(self):\n \"\"\"Tests that rewriting occurs with non-standard gradient names.\"\"\"\n (original_metagraph, _, _, _) = self._GetMetaGraph(\n optimizer_scope_name='optimizer')\n rewritten_graph_def = tf_optimizer.OptimizeGraph(\n rewriter_config_pb2.RewriterConfig(\n disable_model_pruning=True,\n constant_folding=rewriter_config_pb2.RewriterConfig.OFF,\n dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF,\n layout_optimizer=rewriter_config_pb2.RewriterConfig.OFF,\n arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,\n min_graph_nodes=-1,\n memory_optimization=rewriter_config_pb2.RewriterConfig.\n RECOMPUTATION_HEURISTICS,\n # Checks that name scope \"gradients/\" also match sub-scope.\n memory_optimizer_target_node_name_scope='gradients/'),\n original_metagraph)\n self.assertGreater(\n 
len(rewritten_graph_def.node),\n len(original_metagraph.graph_def.node))\n self.assertEqual(\n 0,\n len([node for node in original_metagraph.graph_def.node\n if 'Recomputed/' in node.name]))\n self.assertEqual(\n 20, # Two per layer\n len([node for node in rewritten_graph_def.node\n if 'Recomputed/' in node.name]))\n\n def testRewritingNameScopedGradientNamesScope(self):\n \"\"\"Tests that rewriting occurs with non-standard gradient names.\"\"\"\n (original_metagraph, _, _,\n _) = self._GetMetaGraph(optimizer_scope_name='foo/bar')\n rewritten_graph_def = tf_optimizer.OptimizeGraph(\n rewriter_config_pb2.RewriterConfig(\n disable_model_pruning=True,\n constant_folding=rewriter_config_pb2.RewriterConfig.OFF,\n dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF,\n layout_optimizer=rewriter_config_pb2.RewriterConfig.OFF,\n arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,\n memory_optimization=rewriter_config_pb2.RewriterConfig.\n RECOMPUTATION_HEURISTICS,\n # This should not match anything.\n memory_optimizer_target_node_name_scope='r/gradients/'),\n original_metagraph)\n self.assertEqual(\n len(rewritten_graph_def.node), len(original_metagraph.graph_def.node))\n self.assertEqual(0,\n len([\n node for node in original_metagraph.graph_def.node\n if 'Recomputed/' in node.name\n ]))\n self.assertEqual(0,\n len([\n node for node in rewritten_graph_def.node\n if 'Recomputed/' in node.name\n ]))\n\n def _GetMemoryOptimizerSessionConfig(self):\n rewrite_options = rewriter_config_pb2.RewriterConfig(\n disable_model_pruning=True,\n memory_optimization=rewriter_config_pb2.RewriterConfig.HEURISTICS)\n graph_options = config_pb2.GraphOptions(rewrite_options=rewrite_options)\n return config_pb2.ConfigProto(graph_options=graph_options)\n\n def _RunMetaGraphWithConfig(\n self, config, metagraph, init_op_name, train_op_name, loss_op_name):\n graph = ops.Graph()\n with graph.as_default():\n train.import_meta_graph(metagraph)\n init_op = graph.get_operation_by_name(init_op_name)\n train_op = graph.get_operation_by_name(train_op_name)\n loss_op = graph.get_tensor_by_name(loss_op_name)\n with session.Session(config=config, graph=graph) as sess:\n sess.run(init_op)\n sess.run(train_op)\n sess.run(train_op)\n return sess.run(loss_op)\n\n def testRecomputationRewritingNoErrors(self):\n \"\"\"Tests that graph output is not significantly different with rewriting.\"\"\"\n (original_metagraph, init_op_name, train_op_name, loss_op_name\n ) = self._GetMetaGraph()\n original_loss = self._RunMetaGraphWithConfig(\n config=config_pb2.ConfigProto(),\n metagraph=original_metagraph,\n init_op_name=init_op_name,\n train_op_name=train_op_name,\n loss_op_name=loss_op_name)\n memory_optimized_loss = self._RunMetaGraphWithConfig(\n config=self._GetMemoryOptimizerSessionConfig(),\n metagraph=original_metagraph,\n init_op_name=init_op_name,\n train_op_name=train_op_name,\n loss_op_name=loss_op_name)\n self.assertAllClose(original_loss, memory_optimized_loss, rtol=1e-2)\n\n def _annotated_graph(self):\n graph = ops.Graph()\n with graph.as_default():\n random_seed.set_random_seed(2)\n current_activation = variable_scope.get_variable(\n name='start', shape=[1, 2, 2, 5])\n conv_filter = variable_scope.get_variable(\n name='filter', shape=[5, 5, 5, 5])\n for layer_number in range(3):\n with variable_scope.variable_scope('layer_{}'.format(layer_number)):\n after_conv = nn.conv2d(current_activation, conv_filter, [1, 1, 1, 1],\n 'SAME')\n current_activation = 2. 
* after_conv\n current_activation.op._set_attr(\n '_recompute_hint',\n # The value of the attribute does not matter; just that the key\n # exists in the op's attributes.\n attr_value_pb2.AttrValue(i=1))\n current_activation += 5.\n current_activation.op._set_attr(\n '_recompute_hint', attr_value_pb2.AttrValue(i=0))\n current_activation = nn.relu(current_activation)\n current_activation.op._set_attr(\n '_recompute_hint', attr_value_pb2.AttrValue(i=1))\n loss = math_ops.reduce_mean(current_activation)\n optimizer = train.AdamOptimizer(0.001)\n train_op = optimizer.minimize(loss)\n init_op = variables.global_variables_initializer()\n return graph, init_op, train_op\n\n def testHintNoMetaGraph(self):\n # Closer to expected usage, but does not check that a re-write actually\n # happens; see testHintDoesRewrite.\n graph, init_op, train_op = self._annotated_graph()\n with graph.as_default():\n manual_memory_config = rewriter_config_pb2.RewriterConfig(\n memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL)\n graph_options = config_pb2.GraphOptions(\n rewrite_options=manual_memory_config)\n session_config = config_pb2.ConfigProto(graph_options=graph_options)\n with session.Session(config=session_config) as sess:\n sess.run(init_op)\n sess.run(train_op)\n\n def testHintDoesRewrite(self):\n graph = self._annotated_graph()[0]\n with graph.as_default():\n metagraph = train.export_meta_graph()\n self.assertEqual(\n 0,\n len([node for node in metagraph.graph_def.node\n if 'Recomputed/' in node.name]))\n rewritten_graph_def = tf_optimizer.OptimizeGraph(\n rewriter_config_pb2.RewriterConfig(\n min_graph_nodes=-1,\n memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL),\n metagraph)\n self.assertEqual(\n 9,\n len([node for node in rewritten_graph_def.node\n if 'Recomputed/' in node.name]))\n\nif __name__ == '__main__':\n test.main()\n"
] | [
[
"tensorflow.python.estimator.canned.head._binary_logistic_head_with_sigmoid_cross_entropy_loss",
"tensorflow.python.util.tf_export.estimator_export",
"tensorflow.python.training.training_util.get_global_step",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.ops.state_ops.assign_add",
"tensorflow.python.framework.ops.get_collection",
"tensorflow.python.estimator.canned.head._multi_class_head_with_softmax_cross_entropy_loss",
"tensorflow.python.summary.summary.histogram",
"tensorflow.python.estimator.canned.dnn._dnn_logit_fn_builder",
"tensorflow.python.ops.partitioned_variables.min_max_variable_partitioner",
"tensorflow.python.estimator.canned.head._regression_head",
"tensorflow.python.estimator.canned.optimizers.get_optimizer_instance",
"tensorflow.python.ops.nn.zero_fraction",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.estimator.canned.linear._linear_logit_fn_builder"
],
[
"tensorflow.python.ops.gradient_checker.compute_gradient",
"tensorflow.python.ops.manip_ops.roll",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.platform.test.main",
"numpy.random.rand",
"numpy.roll",
"numpy.random.randint"
],
[
"tensorflow.python.framework.sparse_tensor.SparseTensorValue",
"tensorflow.python.data.experimental.ops.prefetching_ops.prefetch_to_device",
"tensorflow.python.data.ops.dataset_ops.Dataset.range",
"tensorflow.python.platform.test.main",
"tensorflow.python.framework.ops.device",
"tensorflow.core.protobuf.config_pb2.ConfigProto",
"tensorflow.python.framework.test_util.is_gpu_available"
],
[
"tensorflow.python.ops.resource_variable_ops.ResourceVariable",
"tensorflow.python.framework.ops.get_default_session",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.embedding_ops.embedding_lookup",
"tensorflow.python.platform.test.main",
"tensorflow.python.training.proximal_gradient_descent.ProximalGradientDescentOptimizer",
"tensorflow.python.ops.variables.global_variables_initializer",
"numpy.array",
"tensorflow.python.training.gradient_descent.GradientDescentOptimizer",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.contrib.hadoop.python.ops.hadoop_dataset_ops.SequenceFileDataset",
"tensorflow.python.platform.resource_loader.get_data_files_path",
"tensorflow.python.platform.test.main",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.grappler.tf_optimizer.OptimizeGraph",
"tensorflow.core.protobuf.config_pb2.GraphOptions",
"tensorflow.python.training.training.import_meta_graph",
"tensorflow.python.ops.nn.conv2d",
"tensorflow.python.framework.ops.get_collection_ref",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.nn.relu",
"tensorflow.python.ops.math_ops.reduce_mean",
"tensorflow.python.ops.variables.VariableV1",
"tensorflow.python.client.session.Session",
"tensorflow.python.training.training.export_meta_graph",
"tensorflow.core.framework.attr_value_pb2.AttrValue",
"tensorflow.python.framework.random_seed.set_random_seed",
"tensorflow.core.protobuf.rewriter_config_pb2.RewriterConfig",
"tensorflow.python.ops.math_ops.add_n",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.ops.variable_scope.get_variable",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.training.training.AdamOptimizer",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.core.protobuf.config_pb2.ConfigProto"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"1.7",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13",
"1.12"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
imperial-genomics-facility/data-management-python | [
"7b867d8d4562a49173d0b823bdc4bf374a3688f0",
"7b867d8d4562a49173d0b823bdc4bf374a3688f0"
] | [
"igf_data/igfdb/useradaptor.py",
"test/igf_airflow/calculate_seqrun_file_size_test.py"
] | [
"import pandas as pd\r\nimport json, hashlib, os, codecs, base64\r\nfrom igf_data.igfdb.baseadaptor import BaseAdaptor\r\nfrom igf_data.igfdb.igfTables import User\r\n\r\nclass UserAdaptor(BaseAdaptor):\r\n '''\r\n An adaptor class for table User\r\n '''\r\n def _email_check(self, email):\r\n '''\r\n An internal function to check if email_id has '@' or not\r\n \r\n :param email: a string containing the email id\r\n '''\r\n if '@' not in email:\r\n raise ValueError('Email id {0} is not correctly formatted'.format(email))\r\n\r\n\r\n def _encrypt_password(self, series, password_column='password', \r\n salt_column='encryption_salt', \r\n ht_pass_column='ht_password'):\r\n '''\r\n An internal function for encrypting password\r\n\r\n :param series: A pandas data series\r\n :param password_column: Name of the password column, default password\r\n :param salt_column: Name of the salt column, default encryption_salt\r\n :param ht_pass_column: Name of the ht_password column, default ht_password\r\n :returns: A pandas series\r\n '''\r\n try:\r\n if not isinstance(series, pd.Series):\r\n series=pd.DataFrame(series)\r\n\r\n if password_column in series.index and \\\r\n not pd.isnull(series[password_column]): # password is optional\r\n salt=codecs.encode(os.urandom(32),\"hex\").decode(\"utf-8\") # calculate salt value\r\n password=series[password_column] # fetch password\r\n if not isinstance(password, str):\r\n password=str(series.password_column).encode('utf-8') # encode password if its not a string\r\n\r\n if password: # always encrypt password\r\n ht_pass=\\\r\n '{0}{1}'.format(\\\r\n '{SHA}',\r\n base64.b64encode(\\\r\n hashlib.sha1(password.encode('utf-8')).\\\r\n digest()).decode()) # calculate sha1 for htaccess password\r\n series[ht_pass_column]=ht_pass # set htaccess password\r\n key=salt+password # construct key using salt and password\r\n password=hashlib.sha512(str(key).encode('utf-8')).hexdigest() # create password hash\r\n series[password_column]=password # set hash to data series\r\n series[salt_column]=salt # set salt to data series\r\n return series\r\n except:\r\n raise\r\n\r\n\r\n def _map_missing_user_status(self,data_series,categoty_column,hpc_user_column,\r\n hpc_user,non_hpc_user):\r\n '''\r\n An internal function for assigning user status\r\n\r\n :param data_series: A pandas data series\r\n :param categoty_column: Name of the category column ## FIX TYPO\r\n :param hpc_user_column: Name of the hpc username column\r\n :param hpc_user: HPC user tag\r\n :param non_hpc_user: Non HPC user tag\r\n :returns: A pandas data series\r\n '''\r\n try:\r\n if not isinstance(data_series, pd.Series):\r\n data_series=pd.DataFrame(data_series)\r\n\r\n if categoty_column not in data_series or \\\r\n pd.isnull(data_series[categoty_column]):\r\n if hpc_user_column in data_series and \\\r\n not pd.isnull(data_series[hpc_user_column]) and \\\r\n data_series[hpc_user_column]!='':\r\n data_series[categoty_column]=hpc_user # assign hpc user\r\n else:\r\n data_series[categoty_column]=non_hpc_user # non hpc user\r\n\r\n return data_series\r\n except:\r\n raise\r\n\r\n\r\n def _preprocess_data(self,data, password_column='password', categoty_column='category',\r\n email_column='email_id', hpc_user_column='hpc_username',\r\n hpc_user='HPC_USER', non_hpc_user='NON_HPC_USER',\r\n user_igf_id_column='user_igf_id', username_column='username',\r\n salt_column='encryption_salt'):\r\n '''\r\n An internal function for preprocess data before loading\r\n\r\n :param data: A pamdas dataframe or a list of dictionaries\r\n 
:param password_column: Name of the password column, default password\r\n :param categoty_column: Name of the user category column, default category\r\n :param email_column: Name of the email id column, default email_id\r\n :param hpc_user_column: Name of the hpc username column, default hpc_username\r\n :param hpc_user: Tag name for HPC user, default HPC_USER\r\n :param non_hpc_user: Tag name for non HPC user, default NON_HPC_USER\r\n :param user_igf_id_column: Name of the user id column, default user_igf_id\r\n :param username_column: Name of the igf username column, default username\r\n :param salt_column: Name of the salt column, default encryption_salt\r\n :returns: A pandas dataframe\r\n '''\r\n try:\r\n if not isinstance(data, pd.DataFrame):\r\n data=pd.DataFrame(data)\r\n\r\n new_data=data.apply(lambda x: self._encrypt_password(series=x),1) # encrypt password\r\n new_data[email_column].map(lambda x: self._email_check(email=x)) # check email id, it should contail '@'\r\n new_data=new_data.fillna('')\r\n if categoty_column not in new_data.columns:\r\n new_data[categoty_column]=None # add category column if it doesn't exists\r\n\r\n new_data.apply(\\\r\n lambda x: self._map_missing_user_status(\\\r\n data_series=x,\r\n categoty_column=categoty_column,\r\n hpc_user_column=hpc_user_column,\r\n hpc_user=hpc_user,\r\n non_hpc_user=non_hpc_user),\r\n axis=1) # assign categoty, if user has hpc_username, then its 'HPC_USER'\r\n return new_data\r\n except:\r\n raise\r\n\r\n\r\n def store_user_data(self, data, autosave=True):\r\n '''\r\n Load data to user table\r\n\r\n :param data: A pandas dataframe\r\n :param autosave: A toggle for autocommit, default True\r\n :returns: None\r\n '''\r\n try:\r\n if not isinstance(data, pd.DataFrame):\r\n data=pd.DataFrame(data)\r\n\r\n data=self._preprocess_data(data=data)\r\n self.store_records(table=User, data=data, mode='serial' )\r\n if autosave:\r\n self.commit_session()\r\n except:\r\n if autosave:\r\n self.rollback_session()\r\n raise\r\n\r\n\r\n def fetch_user_records_igf_id(self, user_igf_id):\r\n '''\r\n A method for fetching data for User table\r\n \r\n :param user_igf_id: an igf id\r\n :returns: user object\r\n '''\r\n try:\r\n user=\\\r\n self.fetch_records_by_column(\\\r\n table=User,\r\n column_name=User.user_igf_id,\r\n column_id=user_igf_id,\r\n output_mode='one' )\r\n return user\r\n except:\r\n raise\r\n\r\n\r\n def fetch_user_records_email_id(self, user_email_id):\r\n '''\r\n A method for fetching data for User table\r\n \r\n :param user_email_id: an email id\r\n :returns: user object\r\n '''\r\n try:\r\n user=\\\r\n self.fetch_records_by_column(\\\r\n table=User,\r\n column_name=User.email_id,\r\n column_id=user_email_id,\r\n output_mode='one' )\r\n return user\r\n except:\r\n raise\r\n\r\n\r\n def check_user_records_email_id(self,email_id):\r\n '''\r\n A method for checking existing user data in db\r\n \r\n :param email_id: An email id\r\n :returns: True if the file is present in db or False if its not\r\n '''\r\n try:\r\n user_check=False\r\n user_obj=\\\r\n self.fetch_records_by_column(\\\r\n table=User,\r\n column_name=User.email_id,\r\n column_id=email_id,\r\n output_mode='one_or_none' )\r\n if user_obj is not None:\r\n user_check=True\r\n return user_check\r\n except:\r\n raise\r\n",
"import unittest,os\nimport pandas as pd\nfrom igf_data.utils.fileutils import get_temp_dir,remove_dir\nfrom igf_airflow.seqrun.calculate_seqrun_file_size import calculate_seqrun_file_list\n\nclass Calculate_seqrun_file_list_testA(unittest.TestCase):\n def setUp(self):\n self.workdir = get_temp_dir()\n self.seqrun_id = 'seqrun1'\n os.mkdir(os.path.join(self.workdir,self.seqrun_id))\n file1 = os.path.join(self.workdir,self.seqrun_id,'Data')\n os.mkdir(file1)\n file1 = os.path.join(file1,'f1')\n self.file1 = os.path.relpath(file1,os.path.join(self.workdir,self.seqrun_id))\n with open(file1,'w') as fp:\n fp.write('ATGC')\n self.file1_size = os.path.getsize(file1)\n file2 = os.path.join(self.workdir,self.seqrun_id,'Thumbnail_Images')\n os.mkdir(file2)\n file2 = os.path.join(file2,'f2')\n self.file2 = os.path.relpath(file2,os.path.join(self.workdir,self.seqrun_id))\n with open(file2,'w') as fp:\n fp.write('CGTA')\n\n def tearDown(self):\n remove_dir(self.workdir)\n\n def test_calculate_seqrun_file_list(self):\n output_dir = get_temp_dir()\n output_json = \\\n calculate_seqrun_file_list(\n seqrun_id=self.seqrun_id,\n seqrun_base_dir=self.workdir,\n output_path=output_dir)\n df = pd.read_json(output_json)\n self.assertTrue('file_path' in df.columns)\n file1_entry = df[df['file_path']==self.file1]\n self.assertEqual(len(file1_entry.index),1)\n self.assertEqual(file1_entry['file_size'].values[0],self.file1_size)\n file2_entry = df[df['file_path']==self.file2]\n self.assertEqual(len(file2_entry.index),0)\n\nif __name__=='__main__':\n unittest.main()"
] | [
[
"pandas.isnull",
"pandas.DataFrame"
],
[
"pandas.read_json"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
TheGupta2012/qiskit-terra | [
"5ea6e9557655b144228c29d7099375f5d2c91120",
"5ea6e9557655b144228c29d7099375f5d2c91120",
"5ea6e9557655b144228c29d7099375f5d2c91120",
"5ea6e9557655b144228c29d7099375f5d2c91120",
"5ea6e9557655b144228c29d7099375f5d2c91120",
"5ea6e9557655b144228c29d7099375f5d2c91120"
] | [
"qiskit/pulse/library/continuous.py",
"test/python/quantum_info/states/test_statevector.py",
"qiskit/quantum_info/synthesis/xx_decompose/circuits.py",
"test/python/quantum_info/operators/test_operator.py",
"test/python/quantum_info/operators/channel/test_choi.py",
"test/python/opflow/test_gradients.py"
] | [
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n# pylint: disable=missing-return-doc, invalid-unary-operand-type\n\n\"\"\"Module for builtin continuous pulse functions.\"\"\"\n\nimport functools\nfrom typing import Union, Tuple, Optional\n\nimport numpy as np\nfrom qiskit.pulse.exceptions import PulseError\n\n\ndef constant(times: np.ndarray, amp: complex) -> np.ndarray:\n \"\"\"Continuous constant pulse.\n\n Args:\n times: Times to output pulse for.\n amp: Complex pulse amplitude.\n \"\"\"\n return np.full(len(times), amp, dtype=np.complex_)\n\n\ndef zero(times: np.ndarray) -> np.ndarray:\n \"\"\"Continuous zero pulse.\n\n Args:\n times: Times to output pulse for.\n \"\"\"\n return constant(times, 0)\n\n\ndef square(times: np.ndarray, amp: complex, freq: float, phase: float = 0) -> np.ndarray:\n \"\"\"Continuous square wave.\n\n Args:\n times: Times to output wave for.\n amp: Pulse amplitude. Wave range is [-amp, amp].\n freq: Pulse frequency. units of 1/dt.\n phase: Pulse phase.\n \"\"\"\n x = times * freq + phase / np.pi\n return amp * (2 * (2 * np.floor(x) - np.floor(2 * x)) + 1).astype(np.complex_)\n\n\ndef sawtooth(times: np.ndarray, amp: complex, freq: float, phase: float = 0) -> np.ndarray:\n \"\"\"Continuous sawtooth wave.\n\n Args:\n times: Times to output wave for.\n amp: Pulse amplitude. Wave range is [-amp, amp].\n freq: Pulse frequency. units of 1/dt.\n phase: Pulse phase.\n \"\"\"\n x = times * freq + phase / np.pi\n return amp * 2 * (x - np.floor(1 / 2 + x)).astype(np.complex_)\n\n\ndef triangle(times: np.ndarray, amp: complex, freq: float, phase: float = 0) -> np.ndarray:\n \"\"\"Continuous triangle wave.\n\n Args:\n times: Times to output wave for.\n amp: Pulse amplitude. Wave range is [-amp, amp].\n freq: Pulse frequency. 
units of 1/dt.\n phase: Pulse phase.\n \"\"\"\n return amp * (-2 * np.abs(sawtooth(times, 1, freq, phase=(phase - np.pi / 2) / 2)) + 1).astype(\n np.complex_\n )\n\n\ndef cos(times: np.ndarray, amp: complex, freq: float, phase: float = 0) -> np.ndarray:\n \"\"\"Continuous cosine wave.\n\n Args:\n times: Times to output wave for.\n amp: Pulse amplitude.\n freq: Pulse frequency, units of 1/dt.\n phase: Pulse phase.\n \"\"\"\n return amp * np.cos(2 * np.pi * freq * times + phase).astype(np.complex_)\n\n\ndef sin(times: np.ndarray, amp: complex, freq: float, phase: float = 0) -> np.ndarray:\n \"\"\"Continuous cosine wave.\n\n Args:\n times: Times to output wave for.\n amp: Pulse amplitude.\n freq: Pulse frequency, units of 1/dt.\n phase: Pulse phase.\n \"\"\"\n return amp * np.sin(2 * np.pi * freq * times + phase).astype(np.complex_)\n\n\ndef _fix_gaussian_width(\n gaussian_samples,\n amp: float,\n center: float,\n sigma: float,\n zeroed_width: Optional[float] = None,\n rescale_amp: bool = False,\n ret_scale_factor: bool = False,\n) -> np.ndarray:\n r\"\"\"Enforce that the supplied gaussian pulse is zeroed at a specific width.\n\n This is achieved by subtracting $\\Omega_g(center \\pm zeroed_width/2)$ from all samples.\n\n amp: Pulse amplitude at `center`.\n center: Center (mean) of pulse.\n sigma: Standard deviation of pulse.\n zeroed_width: Subtract baseline from gaussian pulses to make sure\n $\\Omega_g(center \\pm zeroed_width/2)=0$ is satisfied. This is used to avoid\n large discontinuities at the start of a gaussian pulse. If unsupplied,\n defaults to $2*(center + 1)$ such that $\\Omega_g(-1)=0$ and $\\Omega_g(2*(center + 1))=0$.\n rescale_amp: If True the pulse will be rescaled so that $\\Omega_g(center)=amp$.\n ret_scale_factor: Return amplitude scale factor.\n \"\"\"\n if zeroed_width is None:\n zeroed_width = 2 * (center + 1)\n\n zero_offset = gaussian(np.array([zeroed_width / 2]), amp, 0, sigma)\n gaussian_samples -= zero_offset\n amp_scale_factor = 1.0\n if rescale_amp:\n amp_scale_factor = amp / (amp - zero_offset) if amp - zero_offset != 0 else 1.0\n gaussian_samples *= amp_scale_factor\n\n if ret_scale_factor:\n return gaussian_samples, amp_scale_factor\n return gaussian_samples\n\n\ndef gaussian(\n times: np.ndarray,\n amp: complex,\n center: float,\n sigma: float,\n zeroed_width: Optional[float] = None,\n rescale_amp: bool = False,\n ret_x: bool = False,\n) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:\n r\"\"\"Continuous unnormalized gaussian pulse.\n\n Integrated area under curve is $\\Omega_g(amp, sigma) = amp \\times np.sqrt(2\\pi \\sigma^2)$\n\n Args:\n times: Times to output pulse for.\n amp: Pulse amplitude at `center`. If `zeroed_width` is set pulse amplitude at center\n will be $amp-\\Omega_g(center \\pm zeroed_width/2)$ unless `rescale_amp` is set,\n in which case all samples will be rescaled such that the center\n amplitude will be `amp`.\n center: Center (mean) of pulse.\n sigma: Width (standard deviation) of pulse.\n zeroed_width: Subtract baseline from gaussian pulses to make sure\n $\\Omega_g(center \\pm zeroed_width/2)=0$ is satisfied. 
This is used to avoid\n large discontinuities at the start of a gaussian pulse.\n rescale_amp: If `zeroed_width` is not `None` and `rescale_amp=True` the pulse will\n be rescaled so that $\\Omega_g(center)=amp$.\n ret_x: Return centered and standard deviation normalized pulse location.\n $x=(times-center)/sigma.\n \"\"\"\n times = np.asarray(times, dtype=np.complex_)\n x = (times - center) / sigma\n gauss = amp * np.exp(-(x ** 2) / 2).astype(np.complex_)\n\n if zeroed_width is not None:\n gauss = _fix_gaussian_width(\n gauss,\n amp=amp,\n center=center,\n sigma=sigma,\n zeroed_width=zeroed_width,\n rescale_amp=rescale_amp,\n )\n\n if ret_x:\n return gauss, x\n return gauss\n\n\ndef gaussian_deriv(\n times: np.ndarray,\n amp: complex,\n center: float,\n sigma: float,\n ret_gaussian: bool = False,\n zeroed_width: Optional[float] = None,\n rescale_amp: bool = False,\n) -> np.ndarray:\n r\"\"\"Continuous unnormalized gaussian derivative pulse.\n\n Args:\n times: Times to output pulse for.\n amp: Pulse amplitude at `center`.\n center: Center (mean) of pulse.\n sigma: Width (standard deviation) of pulse.\n ret_gaussian: Return gaussian with which derivative was taken with.\n zeroed_width: Subtract baseline of pulse to make sure\n $\\Omega_g(center \\pm zeroed_width/2)=0$ is satisfied. This is used to avoid\n large discontinuities at the start of a pulse.\n rescale_amp: If `zeroed_width` is not `None` and `rescale_amp=True` the pulse will\n be rescaled so that $\\Omega_g(center)=amp$.\n \"\"\"\n gauss, x = gaussian(\n times,\n amp=amp,\n center=center,\n sigma=sigma,\n zeroed_width=zeroed_width,\n rescale_amp=rescale_amp,\n ret_x=True,\n )\n gauss_deriv = -x / sigma * gauss\n if ret_gaussian:\n return gauss_deriv, gauss\n return gauss_deriv\n\n\ndef _fix_sech_width(\n sech_samples,\n amp: float,\n center: float,\n sigma: float,\n zeroed_width: Optional[float] = None,\n rescale_amp: bool = False,\n ret_scale_factor: bool = False,\n) -> np.ndarray:\n r\"\"\"Enforce that the supplied sech pulse is zeroed at a specific width.\n\n This is achieved by subtracting $\\Omega_g(center \\pm zeroed_width/2)$ from all samples.\n\n amp: Pulse amplitude at `center`.\n center: Center (mean) of pulse.\n sigma: Standard deviation of pulse.\n zeroed_width: Subtract baseline from sech pulses to make sure\n $\\Omega_g(center \\pm zeroed_width/2)=0$ is satisfied. This is used to avoid\n large discontinuities at the start of a sech pulse. 
If unsupplied,\n defaults to $2*(center + 1)$ such that $\\Omega_g(-1)=0$ and $\\Omega_g(2*(center + 1))=0$.\n rescale_amp: If True the pulse will be rescaled so that $\\Omega_g(center)=amp$.\n ret_scale_factor: Return amplitude scale factor.\n \"\"\"\n if zeroed_width is None:\n zeroed_width = 2 * (center + 1)\n\n zero_offset = sech(np.array([zeroed_width / 2]), amp, 0, sigma)\n sech_samples -= zero_offset\n amp_scale_factor = 1.0\n if rescale_amp:\n amp_scale_factor = amp / (amp - zero_offset) if amp - zero_offset != 0 else 1.0\n sech_samples *= amp_scale_factor\n\n if ret_scale_factor:\n return sech_samples, amp_scale_factor\n return sech_samples\n\n\ndef sech_fn(x, *args, **kwargs):\n r\"\"\"Hyperbolic secant function\"\"\"\n return 1.0 / np.cosh(x, *args, **kwargs)\n\n\ndef sech(\n times: np.ndarray,\n amp: complex,\n center: float,\n sigma: float,\n zeroed_width: Optional[float] = None,\n rescale_amp: bool = False,\n ret_x: bool = False,\n) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:\n r\"\"\"Continuous unnormalized sech pulse.\n\n Args:\n times: Times to output pulse for.\n amp: Pulse amplitude at `center`.\n center: Center (mean) of pulse.\n sigma: Width (standard deviation) of pulse.\n zeroed_width: Subtract baseline from pulse to make sure\n $\\Omega_g(center \\pm zeroed_width/2)=0$ is satisfied. This is used to avoid\n large discontinuities at the start and end of the pulse.\n rescale_amp: If `zeroed_width` is not `None` and `rescale_amp=True` the pulse will\n be rescaled so that $\\Omega_g(center)=amp$.\n ret_x: Return centered and standard deviation normalized pulse location.\n $x=(times-center)/sigma$.\n \"\"\"\n times = np.asarray(times, dtype=np.complex_)\n x = (times - center) / sigma\n sech_out = amp * sech_fn(x).astype(np.complex_)\n\n if zeroed_width is not None:\n sech_out = _fix_sech_width(\n sech_out,\n amp=amp,\n center=center,\n sigma=sigma,\n zeroed_width=zeroed_width,\n rescale_amp=rescale_amp,\n )\n\n if ret_x:\n return sech_out, x\n return sech_out\n\n\ndef sech_deriv(\n times: np.ndarray, amp: complex, center: float, sigma: float, ret_sech: bool = False\n) -> np.ndarray:\n \"\"\"Continuous unnormalized sech derivative pulse.\n\n Args:\n times: Times to output pulse for.\n amp: Pulse amplitude at `center`.\n center: Center (mean) of pulse.\n sigma: Width (standard deviation) of pulse.\n ret_sech: Return sech with which derivative was taken with.\n \"\"\"\n sech_out, x = sech(times, amp=amp, center=center, sigma=sigma, ret_x=True)\n sech_out_deriv = -sech_out * np.tanh(x) / sigma\n if ret_sech:\n return sech_out_deriv, sech_out\n return sech_out_deriv\n\n\ndef gaussian_square(\n times: np.ndarray,\n amp: complex,\n center: float,\n square_width: float,\n sigma: float,\n zeroed_width: Optional[float] = None,\n) -> np.ndarray:\n r\"\"\"Continuous gaussian square pulse.\n\n Args:\n times: Times to output pulse for.\n amp: Pulse amplitude.\n center: Center of the square pulse component.\n square_width: Width of the square pulse component.\n sigma: Standard deviation of Gaussian rise/fall portion of the pulse.\n zeroed_width: Subtract baseline of gaussian square pulse\n to enforce $\\OmegaSquare(center \\pm zeroed_width/2)=0$.\n\n Raises:\n PulseError: if zeroed_width is not compatible with square_width.\n \"\"\"\n square_start = center - square_width / 2\n square_stop = center + square_width / 2\n if zeroed_width:\n if zeroed_width < square_width:\n raise PulseError(\"zeroed_width cannot be smaller than square_width.\")\n gaussian_zeroed_width = 
zeroed_width - square_width\n else:\n gaussian_zeroed_width = None\n\n funclist = [\n functools.partial(\n gaussian,\n amp=amp,\n center=square_start,\n sigma=sigma,\n zeroed_width=gaussian_zeroed_width,\n rescale_amp=True,\n ),\n functools.partial(\n gaussian,\n amp=amp,\n center=square_stop,\n sigma=sigma,\n zeroed_width=gaussian_zeroed_width,\n rescale_amp=True,\n ),\n functools.partial(constant, amp=amp),\n ]\n condlist = [times <= square_start, times >= square_stop]\n return np.piecewise(times.astype(np.complex_), condlist, funclist)\n\n\ndef drag(\n times: np.ndarray,\n amp: complex,\n center: float,\n sigma: float,\n beta: float,\n zeroed_width: Optional[float] = None,\n rescale_amp: bool = False,\n) -> np.ndarray:\n r\"\"\"Continuous Y-only correction DRAG pulse for standard nonlinear oscillator (SNO) [1].\n\n [1] Gambetta, J. M., Motzoi, F., Merkel, S. T. & Wilhelm, F. K.\n Analytic control methods for high-fidelity unitary operations\n in a weakly nonlinear oscillator. Phys. Rev. A 83, 012308 (2011).\n\n Args:\n times: Times to output pulse for.\n amp: Pulse amplitude at `center`.\n center: Center (mean) of pulse.\n sigma: Width (standard deviation) of pulse.\n beta: Y correction amplitude. For the SNO this is $\\beta=-\\frac{\\lambda_1^2}{4\\Delta_2}$.\n Where $\\lambds_1$ is the relative coupling strength between the first excited and second\n excited states and $\\Delta_2$ is the detuning between the respective excited states.\n zeroed_width: Subtract baseline of drag pulse to make sure\n $\\Omega_g(center \\pm zeroed_width/2)=0$ is satisfied. This is used to avoid\n large discontinuities at the start of a drag pulse.\n rescale_amp: If `zeroed_width` is not `None` and `rescale_amp=True` the pulse will\n be rescaled so that $\\Omega_g(center)=amp$.\n\n \"\"\"\n gauss_deriv, gauss = gaussian_deriv(\n times,\n amp=amp,\n center=center,\n sigma=sigma,\n ret_gaussian=True,\n zeroed_width=zeroed_width,\n rescale_amp=rescale_amp,\n )\n\n return gauss + 1j * beta * gauss_deriv\n",
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\n\"\"\"Tests for Statevector quantum state class.\"\"\"\n\nimport unittest\nimport logging\nfrom itertools import permutations\nfrom ddt import ddt, data\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nfrom qiskit.test import QiskitTestCase\nfrom qiskit import QiskitError\nfrom qiskit import QuantumRegister, QuantumCircuit\nfrom qiskit import transpile\nfrom qiskit.circuit.library import HGate, QFT\nfrom qiskit.providers.basicaer import QasmSimulatorPy\n\nfrom qiskit.quantum_info.random import random_unitary, random_statevector, random_pauli\nfrom qiskit.quantum_info.states import Statevector\nfrom qiskit.quantum_info.operators.operator import Operator\nfrom qiskit.quantum_info.operators.symplectic import Pauli, SparsePauliOp\nfrom qiskit.quantum_info.operators.predicates import matrix_equal\nfrom qiskit.visualization.state_visualization import numbers_to_latex_terms, state_to_latex\n\nlogger = logging.getLogger(__name__)\n\n\n@ddt\nclass TestStatevector(QiskitTestCase):\n \"\"\"Tests for Statevector class.\"\"\"\n\n @classmethod\n def rand_vec(cls, n, normalize=False):\n \"\"\"Return complex vector or statevector\"\"\"\n seed = np.random.randint(0, np.iinfo(np.int32).max)\n logger.debug(\"rand_vec default_rng seeded with seed=%s\", seed)\n rng = np.random.default_rng(seed)\n\n vec = rng.random(n) + 1j * rng.random(n)\n if normalize:\n vec /= np.sqrt(np.dot(vec, np.conj(vec)))\n return vec\n\n def test_init_array_qubit(self):\n \"\"\"Test subsystem initialization from N-qubit array.\"\"\"\n # Test automatic inference of qubit subsystems\n vec = self.rand_vec(8)\n for dims in [None, 8]:\n state = Statevector(vec, dims=dims)\n assert_allclose(state.data, vec)\n self.assertEqual(state.dim, 8)\n self.assertEqual(state.dims(), (2, 2, 2))\n self.assertEqual(state.num_qubits, 3)\n\n def test_init_array(self):\n \"\"\"Test initialization from array.\"\"\"\n vec = self.rand_vec(3)\n state = Statevector(vec)\n assert_allclose(state.data, vec)\n self.assertEqual(state.dim, 3)\n self.assertEqual(state.dims(), (3,))\n self.assertIsNone(state.num_qubits)\n\n vec = self.rand_vec(2 * 3 * 4)\n state = Statevector(vec, dims=[2, 3, 4])\n assert_allclose(state.data, vec)\n self.assertEqual(state.dim, 2 * 3 * 4)\n self.assertEqual(state.dims(), (2, 3, 4))\n self.assertIsNone(state.num_qubits)\n\n def test_init_circuit(self):\n \"\"\"Test initialization from circuit.\"\"\"\n circuit = QuantumCircuit(3)\n circuit.x(0)\n state = Statevector(circuit)\n\n self.assertEqual(state.dim, 8)\n self.assertEqual(state.dims(), (2, 2, 2))\n self.assertTrue(all(state.data == np.array([0, 1, 0, 0, 0, 0, 0, 0], dtype=complex)))\n self.assertEqual(state.num_qubits, 3)\n\n def test_init_array_except(self):\n \"\"\"Test initialization exception from array.\"\"\"\n vec = self.rand_vec(4)\n self.assertRaises(QiskitError, Statevector, vec, dims=[4, 2])\n self.assertRaises(QiskitError, Statevector, vec, dims=[2, 4])\n self.assertRaises(QiskitError, Statevector, vec, dims=5)\n\n def test_init_statevector(self):\n \"\"\"Test 
initialization from Statevector.\"\"\"\n vec1 = Statevector(self.rand_vec(4))\n vec2 = Statevector(vec1)\n self.assertEqual(vec1, vec2)\n\n def test_from_circuit(self):\n \"\"\"Test initialization from a circuit.\"\"\"\n # random unitaries\n u0 = random_unitary(2).data\n u1 = random_unitary(2).data\n # add to circuit\n qr = QuantumRegister(2)\n circ = QuantumCircuit(qr)\n circ.unitary(u0, [qr[0]])\n circ.unitary(u1, [qr[1]])\n target = Statevector(np.kron(u1, u0).dot([1, 0, 0, 0]))\n vec = Statevector.from_instruction(circ)\n self.assertEqual(vec, target)\n\n # Test tensor product of 1-qubit gates\n circuit = QuantumCircuit(3)\n circuit.h(0)\n circuit.x(1)\n circuit.ry(np.pi / 2, 2)\n target = Statevector.from_label(\"000\").evolve(Operator(circuit))\n psi = Statevector.from_instruction(circuit)\n self.assertEqual(psi, target)\n\n # Test decomposition of Controlled-Phase gate\n lam = np.pi / 4\n circuit = QuantumCircuit(2)\n circuit.h(0)\n circuit.h(1)\n circuit.cp(lam, 0, 1)\n target = Statevector.from_label(\"00\").evolve(Operator(circuit))\n psi = Statevector.from_instruction(circuit)\n self.assertEqual(psi, target)\n\n # Test decomposition of controlled-H gate\n circuit = QuantumCircuit(2)\n circ.x(0)\n circuit.ch(0, 1)\n target = Statevector.from_label(\"00\").evolve(Operator(circuit))\n psi = Statevector.from_instruction(circuit)\n self.assertEqual(psi, target)\n\n # Test custom controlled gate\n qc = QuantumCircuit(2)\n qc.x(0)\n qc.h(1)\n gate = qc.to_gate()\n gate_ctrl = gate.control()\n\n circuit = QuantumCircuit(3)\n circuit.x(0)\n circuit.append(gate_ctrl, range(3))\n target = Statevector.from_label(\"000\").evolve(Operator(circuit))\n psi = Statevector.from_instruction(circuit)\n self.assertEqual(psi, target)\n\n # Test initialize instruction\n target = Statevector([1, 0, 0, 1j]) / np.sqrt(2)\n circuit = QuantumCircuit(2)\n circuit.initialize(target.data, [0, 1])\n psi = Statevector.from_instruction(circuit)\n self.assertEqual(psi, target)\n\n # Test reset instruction\n target = Statevector([1, 0])\n circuit = QuantumCircuit(1)\n circuit.h(0)\n circuit.reset(0)\n psi = Statevector.from_instruction(circuit)\n self.assertEqual(psi, target)\n\n def test_from_instruction(self):\n \"\"\"Test initialization from an instruction.\"\"\"\n target = np.dot(HGate().to_matrix(), [1, 0])\n vec = Statevector.from_instruction(HGate()).data\n global_phase_equivalent = matrix_equal(vec, target, ignore_phase=True)\n self.assertTrue(global_phase_equivalent)\n\n def test_from_label(self):\n \"\"\"Test initialization from a label\"\"\"\n x_p = Statevector(np.array([1, 1]) / np.sqrt(2))\n x_m = Statevector(np.array([1, -1]) / np.sqrt(2))\n y_p = Statevector(np.array([1, 1j]) / np.sqrt(2))\n y_m = Statevector(np.array([1, -1j]) / np.sqrt(2))\n z_p = Statevector(np.array([1, 0]))\n z_m = Statevector(np.array([0, 1]))\n\n label = \"01\"\n target = z_p.tensor(z_m)\n self.assertEqual(target, Statevector.from_label(label))\n\n label = \"+-\"\n target = x_p.tensor(x_m)\n self.assertEqual(target, Statevector.from_label(label))\n\n label = \"rl\"\n target = y_p.tensor(y_m)\n self.assertEqual(target, Statevector.from_label(label))\n\n def test_equal(self):\n \"\"\"Test __eq__ method\"\"\"\n for _ in range(10):\n vec = self.rand_vec(4)\n self.assertEqual(Statevector(vec), Statevector(vec.tolist()))\n\n def test_getitem(self):\n \"\"\"Test __getitem__ method\"\"\"\n for _ in range(10):\n vec = self.rand_vec(4)\n state = Statevector(vec)\n for i in range(4):\n self.assertEqual(state[i], vec[i])\n 
self.assertEqual(state[format(i, \"b\")], vec[i])\n\n def test_getitem_except(self):\n \"\"\"Test __getitem__ method raises exceptions.\"\"\"\n for i in range(1, 4):\n state = Statevector(self.rand_vec(2 ** i))\n self.assertRaises(QiskitError, state.__getitem__, 2 ** i)\n self.assertRaises(QiskitError, state.__getitem__, -1)\n\n def test_copy(self):\n \"\"\"Test Statevector copy method\"\"\"\n for _ in range(5):\n vec = self.rand_vec(4)\n orig = Statevector(vec)\n cpy = orig.copy()\n cpy._data[0] += 1.0\n self.assertFalse(cpy == orig)\n\n def test_is_valid(self):\n \"\"\"Test is_valid method.\"\"\"\n state = Statevector([1, 1])\n self.assertFalse(state.is_valid())\n for _ in range(10):\n state = Statevector(self.rand_vec(4, normalize=True))\n self.assertTrue(state.is_valid())\n\n def test_to_operator(self):\n \"\"\"Test to_operator method for returning projector.\"\"\"\n for _ in range(10):\n vec = self.rand_vec(4)\n target = Operator(np.outer(vec, np.conj(vec)))\n op = Statevector(vec).to_operator()\n self.assertEqual(op, target)\n\n def test_evolve(self):\n \"\"\"Test _evolve method.\"\"\"\n for _ in range(10):\n op = random_unitary(4)\n vec = self.rand_vec(4)\n target = Statevector(np.dot(op.data, vec))\n evolved = Statevector(vec).evolve(op)\n self.assertEqual(target, evolved)\n\n def test_evolve_subsystem(self):\n \"\"\"Test subsystem _evolve method.\"\"\"\n # Test evolving single-qubit of 3-qubit system\n for _ in range(5):\n vec = self.rand_vec(8)\n state = Statevector(vec)\n op0 = random_unitary(2)\n op1 = random_unitary(2)\n op2 = random_unitary(2)\n\n # Test evolve on 1-qubit\n op = op0\n op_full = Operator(np.eye(4)).tensor(op)\n target = Statevector(np.dot(op_full.data, vec))\n self.assertEqual(state.evolve(op, qargs=[0]), target)\n\n # Evolve on qubit 1\n op_full = Operator(np.eye(2)).tensor(op).tensor(np.eye(2))\n target = Statevector(np.dot(op_full.data, vec))\n self.assertEqual(state.evolve(op, qargs=[1]), target)\n\n # Evolve on qubit 2\n op_full = op.tensor(np.eye(4))\n target = Statevector(np.dot(op_full.data, vec))\n self.assertEqual(state.evolve(op, qargs=[2]), target)\n\n # Test evolve on 2-qubits\n op = op1.tensor(op0)\n\n # Evolve on qubits [0, 2]\n op_full = op1.tensor(np.eye(2)).tensor(op0)\n target = Statevector(np.dot(op_full.data, vec))\n self.assertEqual(state.evolve(op, qargs=[0, 2]), target)\n\n # Evolve on qubits [2, 0]\n op_full = op0.tensor(np.eye(2)).tensor(op1)\n target = Statevector(np.dot(op_full.data, vec))\n self.assertEqual(state.evolve(op, qargs=[2, 0]), target)\n\n # Test evolve on 3-qubits\n op = op2.tensor(op1).tensor(op0)\n\n # Evolve on qubits [0, 1, 2]\n op_full = op\n target = Statevector(np.dot(op_full.data, vec))\n self.assertEqual(state.evolve(op, qargs=[0, 1, 2]), target)\n\n # Evolve on qubits [2, 1, 0]\n op_full = op0.tensor(op1).tensor(op2)\n target = Statevector(np.dot(op_full.data, vec))\n self.assertEqual(state.evolve(op, qargs=[2, 1, 0]), target)\n\n def test_evolve_global_phase(self):\n \"\"\"Test evolve circuit with global phase.\"\"\"\n state_i = Statevector([1, 0])\n qr = QuantumRegister(2)\n phase = np.pi / 4\n circ = QuantumCircuit(qr, global_phase=phase)\n circ.x(0)\n state_f = state_i.evolve(circ, qargs=[0])\n target = Statevector([0, 1]) * np.exp(1j * phase)\n self.assertEqual(state_f, target)\n\n def test_conjugate(self):\n \"\"\"Test conjugate method.\"\"\"\n for _ in range(10):\n vec = self.rand_vec(4)\n target = Statevector(np.conj(vec))\n state = Statevector(vec).conjugate()\n self.assertEqual(state, target)\n\n def 
test_expand(self):\n \"\"\"Test expand method.\"\"\"\n for _ in range(10):\n vec0 = self.rand_vec(2)\n vec1 = self.rand_vec(3)\n target = np.kron(vec1, vec0)\n state = Statevector(vec0).expand(Statevector(vec1))\n self.assertEqual(state.dim, 6)\n self.assertEqual(state.dims(), (2, 3))\n assert_allclose(state.data, target)\n\n def test_tensor(self):\n \"\"\"Test tensor method.\"\"\"\n for _ in range(10):\n vec0 = self.rand_vec(2)\n vec1 = self.rand_vec(3)\n target = np.kron(vec0, vec1)\n state = Statevector(vec0).tensor(Statevector(vec1))\n self.assertEqual(state.dim, 6)\n self.assertEqual(state.dims(), (3, 2))\n assert_allclose(state.data, target)\n\n def test_inner(self):\n \"\"\"Test inner method.\"\"\"\n for _ in range(10):\n vec0 = Statevector(self.rand_vec(4))\n vec1 = Statevector(self.rand_vec(4))\n target = np.vdot(vec0.data, vec1.data)\n result = vec0.inner(vec1)\n self.assertAlmostEqual(result, target)\n vec0 = Statevector(self.rand_vec(6), dims=(2, 3))\n vec1 = Statevector(self.rand_vec(6), dims=(2, 3))\n target = np.vdot(vec0.data, vec1.data)\n result = vec0.inner(vec1)\n self.assertAlmostEqual(result, target)\n\n def test_inner_except(self):\n \"\"\"Test inner method raises exceptions.\"\"\"\n vec0 = Statevector(self.rand_vec(4))\n vec1 = Statevector(self.rand_vec(3))\n self.assertRaises(QiskitError, vec0.inner, vec1)\n vec0 = Statevector(self.rand_vec(6), dims=(2, 3))\n vec1 = Statevector(self.rand_vec(6), dims=(3, 2))\n self.assertRaises(QiskitError, vec0.inner, vec1)\n\n def test_add(self):\n \"\"\"Test add method.\"\"\"\n for _ in range(10):\n vec0 = self.rand_vec(4)\n vec1 = self.rand_vec(4)\n state0 = Statevector(vec0)\n state1 = Statevector(vec1)\n self.assertEqual(state0 + state1, Statevector(vec0 + vec1))\n\n def test_add_except(self):\n \"\"\"Test add method raises exceptions.\"\"\"\n state1 = Statevector(self.rand_vec(2))\n state2 = Statevector(self.rand_vec(3))\n self.assertRaises(QiskitError, state1.__add__, state2)\n\n def test_subtract(self):\n \"\"\"Test subtract method.\"\"\"\n for _ in range(10):\n vec0 = self.rand_vec(4)\n vec1 = self.rand_vec(4)\n state0 = Statevector(vec0)\n state1 = Statevector(vec1)\n self.assertEqual(state0 - state1, Statevector(vec0 - vec1))\n\n def test_multiply(self):\n \"\"\"Test multiply method.\"\"\"\n for _ in range(10):\n vec = self.rand_vec(4)\n state = Statevector(vec)\n val = np.random.rand() + 1j * np.random.rand()\n self.assertEqual(val * state, Statevector(val * state))\n\n def test_negate(self):\n \"\"\"Test negate method\"\"\"\n for _ in range(10):\n vec = self.rand_vec(4)\n state = Statevector(vec)\n self.assertEqual(-state, Statevector(-1 * vec))\n\n def test_equiv(self):\n \"\"\"Test equiv method\"\"\"\n vec = np.array([1, 0, 0, -1j]) / np.sqrt(2)\n phase = np.exp(-1j * np.pi / 4)\n statevec = Statevector(vec)\n self.assertTrue(statevec.equiv(phase * vec))\n self.assertTrue(statevec.equiv(Statevector(phase * vec)))\n self.assertFalse(statevec.equiv(2 * vec))\n\n def test_equiv_on_circuit(self):\n \"\"\"Test the equiv method on different types of input.\"\"\"\n statevec = Statevector([1, 0])\n\n qc = QuantumCircuit(1)\n self.assertTrue(statevec.equiv(qc))\n qc.x(0)\n self.assertFalse(statevec.equiv(qc))\n\n def test_to_dict(self):\n \"\"\"Test to_dict method\"\"\"\n\n with self.subTest(msg=\"dims = (2, 3)\"):\n vec = Statevector(np.arange(1, 7), dims=(2, 3))\n target = {\"00\": 1, \"01\": 2, \"10\": 3, \"11\": 4, \"20\": 5, \"21\": 6}\n self.assertDictAlmostEqual(target, vec.to_dict())\n\n with self.subTest(msg=\"dims = 
(11, )\"):\n vec = Statevector(np.arange(1, 12), dims=(11,))\n target = {str(i): i + 1 for i in range(11)}\n self.assertDictAlmostEqual(target, vec.to_dict())\n\n with self.subTest(msg=\"dims = (2, 11)\"):\n vec = Statevector(np.arange(1, 23), dims=(2, 11))\n target = {}\n for i in range(11):\n for j in range(2):\n key = f\"{i},{j}\"\n target[key] = 2 * i + j + 1\n self.assertDictAlmostEqual(target, vec.to_dict())\n\n def test_probabilities_product(self):\n \"\"\"Test probabilities method for product state\"\"\"\n\n state = Statevector.from_label(\"+0\")\n\n # 2-qubit qargs\n with self.subTest(msg=\"P(None)\"):\n probs = state.probabilities()\n target = np.array([0.5, 0, 0.5, 0])\n self.assertTrue(np.allclose(probs, target))\n\n with self.subTest(msg=\"P([0, 1])\"):\n probs = state.probabilities([0, 1])\n target = np.array([0.5, 0, 0.5, 0])\n self.assertTrue(np.allclose(probs, target))\n\n with self.subTest(msg=\"P([1, 0]\"):\n probs = state.probabilities([1, 0])\n target = np.array([0.5, 0.5, 0, 0])\n self.assertTrue(np.allclose(probs, target))\n\n # 1-qubit qargs\n with self.subTest(msg=\"P([0])\"):\n probs = state.probabilities([0])\n target = np.array([1, 0])\n self.assertTrue(np.allclose(probs, target))\n\n with self.subTest(msg=\"P([1])\"):\n probs = state.probabilities([1])\n target = np.array([0.5, 0.5])\n self.assertTrue(np.allclose(probs, target))\n\n def test_probabilities_ghz(self):\n \"\"\"Test probabilities method for GHZ state\"\"\"\n\n state = (Statevector.from_label(\"000\") + Statevector.from_label(\"111\")) / np.sqrt(2)\n\n # 3-qubit qargs\n target = np.array([0.5, 0, 0, 0, 0, 0, 0, 0.5])\n for qargs in [[0, 1, 2], [2, 1, 0], [1, 2, 0], [1, 0, 2]]:\n with self.subTest(msg=f\"P({qargs})\"):\n probs = state.probabilities(qargs)\n self.assertTrue(np.allclose(probs, target))\n\n # 2-qubit qargs\n target = np.array([0.5, 0, 0, 0.5])\n for qargs in [[0, 1], [2, 1], [1, 2], [1, 2]]:\n with self.subTest(msg=f\"P({qargs})\"):\n probs = state.probabilities(qargs)\n self.assertTrue(np.allclose(probs, target))\n\n # 1-qubit qargs\n target = np.array([0.5, 0.5])\n for qargs in [[0], [1], [2]]:\n with self.subTest(msg=f\"P({qargs})\"):\n probs = state.probabilities(qargs)\n self.assertTrue(np.allclose(probs, target))\n\n def test_probabilities_w(self):\n \"\"\"Test probabilities method with W state\"\"\"\n\n state = (\n Statevector.from_label(\"001\")\n + Statevector.from_label(\"010\")\n + Statevector.from_label(\"100\")\n ) / np.sqrt(3)\n\n # 3-qubit qargs\n target = np.array([0, 1 / 3, 1 / 3, 0, 1 / 3, 0, 0, 0])\n for qargs in [[0, 1, 2], [2, 1, 0], [1, 2, 0], [1, 0, 2]]:\n with self.subTest(msg=f\"P({qargs})\"):\n probs = state.probabilities(qargs)\n self.assertTrue(np.allclose(probs, target))\n\n # 2-qubit qargs\n target = np.array([1 / 3, 1 / 3, 1 / 3, 0])\n for qargs in [[0, 1], [2, 1], [1, 2], [1, 2]]:\n with self.subTest(msg=f\"P({qargs})\"):\n probs = state.probabilities(qargs)\n self.assertTrue(np.allclose(probs, target))\n\n # 1-qubit qargs\n target = np.array([2 / 3, 1 / 3])\n for qargs in [[0], [1], [2]]:\n with self.subTest(msg=f\"P({qargs})\"):\n probs = state.probabilities(qargs)\n self.assertTrue(np.allclose(probs, target))\n\n def test_probabilities_dict_product(self):\n \"\"\"Test probabilities_dict method for product state\"\"\"\n\n state = Statevector.from_label(\"+0\")\n\n # 2-qubit qargs\n with self.subTest(msg=\"P(None)\"):\n probs = state.probabilities_dict()\n target = {\"00\": 0.5, \"10\": 0.5}\n self.assertDictAlmostEqual(probs, target)\n\n with 
self.subTest(msg=\"P([0, 1])\"):\n probs = state.probabilities_dict([0, 1])\n target = {\"00\": 0.5, \"10\": 0.5}\n self.assertDictAlmostEqual(probs, target)\n\n with self.subTest(msg=\"P([1, 0]\"):\n probs = state.probabilities_dict([1, 0])\n target = {\"00\": 0.5, \"01\": 0.5}\n self.assertDictAlmostEqual(probs, target)\n\n # 1-qubit qargs\n with self.subTest(msg=\"P([0])\"):\n probs = state.probabilities_dict([0])\n target = {\"0\": 1}\n self.assertDictAlmostEqual(probs, target)\n\n with self.subTest(msg=\"P([1])\"):\n probs = state.probabilities_dict([1])\n target = {\"0\": 0.5, \"1\": 0.5}\n self.assertDictAlmostEqual(probs, target)\n\n def test_probabilities_dict_ghz(self):\n \"\"\"Test probabilities_dict method for GHZ state\"\"\"\n\n state = (Statevector.from_label(\"000\") + Statevector.from_label(\"111\")) / np.sqrt(2)\n\n # 3-qubit qargs\n target = {\"000\": 0.5, \"111\": 0.5}\n for qargs in [[0, 1, 2], [2, 1, 0], [1, 2, 0], [1, 0, 2]]:\n with self.subTest(msg=f\"P({qargs})\"):\n probs = state.probabilities_dict(qargs)\n self.assertDictAlmostEqual(probs, target)\n\n # 2-qubit qargs\n target = {\"00\": 0.5, \"11\": 0.5}\n for qargs in [[0, 1], [2, 1], [1, 2], [1, 2]]:\n with self.subTest(msg=f\"P({qargs})\"):\n probs = state.probabilities_dict(qargs)\n self.assertDictAlmostEqual(probs, target)\n\n # 1-qubit qargs\n target = {\"0\": 0.5, \"1\": 0.5}\n for qargs in [[0], [1], [2]]:\n with self.subTest(msg=f\"P({qargs})\"):\n probs = state.probabilities_dict(qargs)\n self.assertDictAlmostEqual(probs, target)\n\n def test_probabilities_dict_w(self):\n \"\"\"Test probabilities_dict method with W state\"\"\"\n\n state = (\n Statevector.from_label(\"001\")\n + Statevector.from_label(\"010\")\n + Statevector.from_label(\"100\")\n ) / np.sqrt(3)\n\n # 3-qubit qargs\n target = np.array([0, 1 / 3, 1 / 3, 0, 1 / 3, 0, 0, 0])\n target = {\"001\": 1 / 3, \"010\": 1 / 3, \"100\": 1 / 3}\n for qargs in [[0, 1, 2], [2, 1, 0], [1, 2, 0], [1, 0, 2]]:\n with self.subTest(msg=f\"P({qargs})\"):\n probs = state.probabilities_dict(qargs)\n self.assertDictAlmostEqual(probs, target)\n\n # 2-qubit qargs\n target = {\"00\": 1 / 3, \"01\": 1 / 3, \"10\": 1 / 3}\n for qargs in [[0, 1], [2, 1], [1, 2], [1, 2]]:\n with self.subTest(msg=f\"P({qargs})\"):\n probs = state.probabilities_dict(qargs)\n self.assertDictAlmostEqual(probs, target)\n\n # 1-qubit qargs\n target = {\"0\": 2 / 3, \"1\": 1 / 3}\n for qargs in [[0], [1], [2]]:\n with self.subTest(msg=f\"P({qargs})\"):\n probs = state.probabilities_dict(qargs)\n self.assertDictAlmostEqual(probs, target)\n\n def test_sample_counts_ghz(self):\n \"\"\"Test sample_counts method for GHZ state\"\"\"\n\n shots = 2000\n threshold = 0.02 * shots\n state = (Statevector.from_label(\"000\") + Statevector.from_label(\"111\")) / np.sqrt(2)\n state.seed(100)\n\n # 3-qubit qargs\n target = {\"000\": shots / 2, \"111\": shots / 2}\n for qargs in [[0, 1, 2], [2, 1, 0], [1, 2, 0], [1, 0, 2]]:\n\n with self.subTest(msg=f\"counts (qargs={qargs})\"):\n counts = state.sample_counts(shots, qargs=qargs)\n self.assertDictAlmostEqual(counts, target, threshold)\n\n # 2-qubit qargs\n target = {\"00\": shots / 2, \"11\": shots / 2}\n for qargs in [[0, 1], [2, 1], [1, 2], [1, 2]]:\n\n with self.subTest(msg=f\"counts (qargs={qargs})\"):\n counts = state.sample_counts(shots, qargs=qargs)\n self.assertDictAlmostEqual(counts, target, threshold)\n\n # 1-qubit qargs\n target = {\"0\": shots / 2, \"1\": shots / 2}\n for qargs in [[0], [1], [2]]:\n\n with self.subTest(msg=f\"counts 
(qargs={qargs})\"):\n counts = state.sample_counts(shots, qargs=qargs)\n self.assertDictAlmostEqual(counts, target, threshold)\n\n def test_sample_counts_w(self):\n \"\"\"Test sample_counts method for W state\"\"\"\n shots = 3000\n threshold = 0.02 * shots\n state = (\n Statevector.from_label(\"001\")\n + Statevector.from_label(\"010\")\n + Statevector.from_label(\"100\")\n ) / np.sqrt(3)\n state.seed(100)\n\n target = {\"001\": shots / 3, \"010\": shots / 3, \"100\": shots / 3}\n for qargs in [[0, 1, 2], [2, 1, 0], [1, 2, 0], [1, 0, 2]]:\n\n with self.subTest(msg=f\"P({qargs})\"):\n counts = state.sample_counts(shots, qargs=qargs)\n self.assertDictAlmostEqual(counts, target, threshold)\n\n # 2-qubit qargs\n target = {\"00\": shots / 3, \"01\": shots / 3, \"10\": shots / 3}\n for qargs in [[0, 1], [2, 1], [1, 2], [1, 2]]:\n\n with self.subTest(msg=f\"P({qargs})\"):\n counts = state.sample_counts(shots, qargs=qargs)\n self.assertDictAlmostEqual(counts, target, threshold)\n\n # 1-qubit qargs\n target = {\"0\": 2 * shots / 3, \"1\": shots / 3}\n for qargs in [[0], [1], [2]]:\n\n with self.subTest(msg=f\"P({qargs})\"):\n counts = state.sample_counts(shots, qargs=qargs)\n self.assertDictAlmostEqual(counts, target, threshold)\n\n def test_sample_counts_qutrit(self):\n \"\"\"Test sample_counts method for qutrit state\"\"\"\n p = 0.3\n shots = 1000\n threshold = 0.03 * shots\n state = Statevector([np.sqrt(p), 0, np.sqrt(1 - p)])\n state.seed(100)\n\n with self.subTest(msg=\"counts\"):\n target = {\"0\": shots * p, \"2\": shots * (1 - p)}\n counts = state.sample_counts(shots=shots)\n self.assertDictAlmostEqual(counts, target, threshold)\n\n def test_sample_memory_ghz(self):\n \"\"\"Test sample_memory method for GHZ state\"\"\"\n\n shots = 2000\n state = (Statevector.from_label(\"000\") + Statevector.from_label(\"111\")) / np.sqrt(2)\n state.seed(100)\n\n # 3-qubit qargs\n target = {\"000\": shots / 2, \"111\": shots / 2}\n for qargs in [[0, 1, 2], [2, 1, 0], [1, 2, 0], [1, 0, 2]]:\n\n with self.subTest(msg=f\"memory (qargs={qargs})\"):\n memory = state.sample_memory(shots, qargs=qargs)\n self.assertEqual(len(memory), shots)\n self.assertEqual(set(memory), set(target))\n\n # 2-qubit qargs\n target = {\"00\": shots / 2, \"11\": shots / 2}\n for qargs in [[0, 1], [2, 1], [1, 2], [1, 2]]:\n\n with self.subTest(msg=f\"memory (qargs={qargs})\"):\n memory = state.sample_memory(shots, qargs=qargs)\n self.assertEqual(len(memory), shots)\n self.assertEqual(set(memory), set(target))\n\n # 1-qubit qargs\n target = {\"0\": shots / 2, \"1\": shots / 2}\n for qargs in [[0], [1], [2]]:\n\n with self.subTest(msg=f\"memory (qargs={qargs})\"):\n memory = state.sample_memory(shots, qargs=qargs)\n self.assertEqual(len(memory), shots)\n self.assertEqual(set(memory), set(target))\n\n def test_sample_memory_w(self):\n \"\"\"Test sample_memory method for W state\"\"\"\n shots = 3000\n state = (\n Statevector.from_label(\"001\")\n + Statevector.from_label(\"010\")\n + Statevector.from_label(\"100\")\n ) / np.sqrt(3)\n state.seed(100)\n\n target = {\"001\": shots / 3, \"010\": shots / 3, \"100\": shots / 3}\n for qargs in [[0, 1, 2], [2, 1, 0], [1, 2, 0], [1, 0, 2]]:\n\n with self.subTest(msg=f\"memory (qargs={qargs})\"):\n memory = state.sample_memory(shots, qargs=qargs)\n self.assertEqual(len(memory), shots)\n self.assertEqual(set(memory), set(target))\n\n # 2-qubit qargs\n target = {\"00\": shots / 3, \"01\": shots / 3, \"10\": shots / 3}\n for qargs in [[0, 1], [2, 1], [1, 2], [1, 2]]:\n\n with self.subTest(msg=f\"memory 
(qargs={qargs})\"):\n memory = state.sample_memory(shots, qargs=qargs)\n self.assertEqual(len(memory), shots)\n self.assertEqual(set(memory), set(target))\n\n # 1-qubit qargs\n target = {\"0\": 2 * shots / 3, \"1\": shots / 3}\n for qargs in [[0], [1], [2]]:\n\n with self.subTest(msg=f\"memory (qargs={qargs})\"):\n memory = state.sample_memory(shots, qargs=qargs)\n self.assertEqual(len(memory), shots)\n self.assertEqual(set(memory), set(target))\n\n def test_sample_memory_qutrit(self):\n \"\"\"Test sample_memory method for qutrit state\"\"\"\n p = 0.3\n shots = 1000\n state = Statevector([np.sqrt(p), 0, np.sqrt(1 - p)])\n state.seed(100)\n\n with self.subTest(msg=\"memory\"):\n memory = state.sample_memory(shots)\n self.assertEqual(len(memory), shots)\n self.assertEqual(set(memory), {\"0\", \"2\"})\n\n def test_reset_2qubit(self):\n \"\"\"Test reset method for 2-qubit state\"\"\"\n\n state = Statevector(np.array([1, 0, 0, 1]) / np.sqrt(2))\n state.seed(100)\n\n with self.subTest(msg=\"reset\"):\n psi = state.copy()\n value = psi.reset()\n target = Statevector(np.array([1, 0, 0, 0]))\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"reset\"):\n psi = state.copy()\n value = psi.reset([0, 1])\n target = Statevector(np.array([1, 0, 0, 0]))\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"reset [0]\"):\n psi = state.copy()\n value = psi.reset([0])\n targets = [Statevector(np.array([1, 0, 0, 0])), Statevector(np.array([0, 0, 1, 0]))]\n self.assertIn(value, targets)\n\n with self.subTest(msg=\"reset [0]\"):\n psi = state.copy()\n value = psi.reset([1])\n targets = [Statevector(np.array([1, 0, 0, 0])), Statevector(np.array([0, 1, 0, 0]))]\n self.assertIn(value, targets)\n\n def test_reset_qutrit(self):\n \"\"\"Test reset method for qutrit\"\"\"\n\n state = Statevector(np.array([1, 1, 1]) / np.sqrt(3))\n state.seed(200)\n value = state.reset()\n target = Statevector(np.array([1, 0, 0]))\n self.assertEqual(value, target)\n\n def test_measure_2qubit(self):\n \"\"\"Test measure method for 2-qubit state\"\"\"\n\n state = Statevector.from_label(\"+0\")\n seed = 200\n shots = 100\n\n with self.subTest(msg=\"measure\"):\n for i in range(shots):\n psi = state.copy()\n psi.seed(seed + i)\n outcome, value = psi.measure()\n self.assertIn(outcome, [\"00\", \"10\"])\n if outcome == \"00\":\n target = Statevector.from_label(\"00\")\n self.assertEqual(value, target)\n else:\n target = Statevector.from_label(\"10\")\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"measure [0, 1]\"):\n for i in range(shots):\n psi = state.copy()\n outcome, value = psi.measure([0, 1])\n self.assertIn(outcome, [\"00\", \"10\"])\n if outcome == \"00\":\n target = Statevector.from_label(\"00\")\n self.assertEqual(value, target)\n else:\n target = Statevector.from_label(\"10\")\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"measure [1, 0]\"):\n for i in range(shots):\n psi = state.copy()\n outcome, value = psi.measure([1, 0])\n self.assertIn(outcome, [\"00\", \"01\"])\n if outcome == \"00\":\n target = Statevector.from_label(\"00\")\n self.assertEqual(value, target)\n else:\n target = Statevector.from_label(\"10\")\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"measure [0]\"):\n for i in range(shots):\n psi = state.copy()\n outcome, value = psi.measure([0])\n self.assertEqual(outcome, \"0\")\n target = Statevector(np.array([1, 0, 1, 0]) / np.sqrt(2))\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"measure [1]\"):\n for i in range(shots):\n psi = 
state.copy()\n outcome, value = psi.measure([1])\n self.assertIn(outcome, [\"0\", \"1\"])\n if outcome == \"0\":\n target = Statevector.from_label(\"00\")\n self.assertEqual(value, target)\n else:\n target = Statevector.from_label(\"10\")\n self.assertEqual(value, target)\n\n def test_measure_qutrit(self):\n \"\"\"Test measure method for qutrit\"\"\"\n\n state = Statevector(np.array([1, 1, 1]) / np.sqrt(3))\n seed = 200\n shots = 100\n\n for i in range(shots):\n psi = state.copy()\n psi.seed(seed + i)\n outcome, value = psi.measure()\n self.assertIn(outcome, [\"0\", \"1\", \"2\"])\n if outcome == \"0\":\n target = Statevector([1, 0, 0])\n self.assertEqual(value, target)\n elif outcome == \"1\":\n target = Statevector([0, 1, 0])\n self.assertEqual(value, target)\n else:\n target = Statevector([0, 0, 1])\n self.assertEqual(value, target)\n\n def test_from_int(self):\n \"\"\"Test from_int method\"\"\"\n\n with self.subTest(msg=\"from_int(0, 4)\"):\n target = Statevector([1, 0, 0, 0])\n value = Statevector.from_int(0, 4)\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"from_int(3, 4)\"):\n target = Statevector([0, 0, 0, 1])\n value = Statevector.from_int(3, 4)\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"from_int(8, (3, 3))\"):\n target = Statevector([0, 0, 0, 0, 0, 0, 0, 0, 1], dims=(3, 3))\n value = Statevector.from_int(8, (3, 3))\n self.assertEqual(target, value)\n\n def test_expval(self):\n \"\"\"Test expectation_value method\"\"\"\n\n psi = Statevector([1, 0, 0, 1]) / np.sqrt(2)\n for label, target in [\n (\"II\", 1),\n (\"XX\", 1),\n (\"YY\", -1),\n (\"ZZ\", 1),\n (\"IX\", 0),\n (\"YZ\", 0),\n (\"ZX\", 0),\n (\"YI\", 0),\n ]:\n with self.subTest(msg=f\"<{label}>\"):\n op = Pauli(label)\n expval = psi.expectation_value(op)\n self.assertAlmostEqual(expval, target)\n\n psi = Statevector([np.sqrt(2), 0, 0, 0, 0, 0, 0, 1 + 1j]) / 2\n for label, target in [\n (\"XXX\", np.sqrt(2) / 2),\n (\"YYY\", -np.sqrt(2) / 2),\n (\"ZZZ\", 0),\n (\"XYZ\", 0),\n (\"YIY\", 0),\n ]:\n with self.subTest(msg=f\"<{label}>\"):\n op = Pauli(label)\n expval = psi.expectation_value(op)\n self.assertAlmostEqual(expval, target)\n\n labels = [\"XXX\", \"IXI\", \"YYY\", \"III\"]\n coeffs = [3.0, 5.5, -1j, 23]\n spp_op = SparsePauliOp.from_list(list(zip(labels, coeffs)))\n expval = psi.expectation_value(spp_op)\n target = 25.121320343559642 + 0.7071067811865476j\n self.assertAlmostEqual(expval, target)\n\n @data(\n \"II\",\n \"IX\",\n \"IY\",\n \"IZ\",\n \"XI\",\n \"XX\",\n \"XY\",\n \"XZ\",\n \"YI\",\n \"YX\",\n \"YY\",\n \"YZ\",\n \"ZI\",\n \"ZX\",\n \"ZY\",\n \"ZZ\",\n \"-II\",\n \"-IX\",\n \"-IY\",\n \"-IZ\",\n \"-XI\",\n \"-XX\",\n \"-XY\",\n \"-XZ\",\n \"-YI\",\n \"-YX\",\n \"-YY\",\n \"-YZ\",\n \"-ZI\",\n \"-ZX\",\n \"-ZY\",\n \"-ZZ\",\n \"iII\",\n \"iIX\",\n \"iIY\",\n \"iIZ\",\n \"iXI\",\n \"iXX\",\n \"iXY\",\n \"iXZ\",\n \"iYI\",\n \"iYX\",\n \"iYY\",\n \"iYZ\",\n \"iZI\",\n \"iZX\",\n \"iZY\",\n \"iZZ\",\n \"-iII\",\n \"-iIX\",\n \"-iIY\",\n \"-iIZ\",\n \"-iXI\",\n \"-iXX\",\n \"-iXY\",\n \"-iXZ\",\n \"-iYI\",\n \"-iYX\",\n \"-iYY\",\n \"-iYZ\",\n \"-iZI\",\n \"-iZX\",\n \"-iZY\",\n \"-iZZ\",\n )\n def test_expval_pauli(self, pauli):\n \"\"\"Test expectation_value method for Pauli op\"\"\"\n seed = 1020\n op = Pauli(pauli)\n state = random_statevector(2 ** op.num_qubits, seed=seed)\n target = state.expectation_value(op.to_matrix())\n expval = state.expectation_value(op)\n self.assertAlmostEqual(expval, target)\n\n @data([0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1])\n def 
test_expval_pauli_qargs(self, qubits):\n \"\"\"Test expectation_value method for Pauli op\"\"\"\n seed = 1020\n op = random_pauli(2, seed=seed)\n state = random_statevector(2 ** 3, seed=seed)\n target = state.expectation_value(op.to_matrix(), qubits)\n expval = state.expectation_value(op, qubits)\n self.assertAlmostEqual(expval, target)\n\n @data(*(qargs for i in range(4) for qargs in permutations(range(4), r=i + 1)))\n def test_probabilities_qargs(self, qargs):\n \"\"\"Test probabilities method with qargs\"\"\"\n # Get initial state\n nq = 4\n nc = len(qargs)\n state_circ = QuantumCircuit(nq, nc)\n for i in range(nq):\n state_circ.ry((i + 1) * np.pi / (nq + 1), i)\n\n # Get probabilities\n state = Statevector(state_circ)\n probs = state.probabilities(qargs)\n\n # Estimate target probs from simulator measurement\n sim = QasmSimulatorPy()\n shots = 5000\n seed = 100\n circ = transpile(state_circ, sim)\n circ.measure(qargs, range(nc))\n result = sim.run(circ, shots=shots, seed_simulator=seed).result()\n target = np.zeros(2 ** nc, dtype=float)\n for i, p in result.get_counts(0).int_outcomes().items():\n target[i] = p / shots\n # Compare\n delta = np.linalg.norm(probs - target)\n self.assertLess(delta, 0.05)\n\n def test_global_phase(self):\n \"\"\"Test global phase is handled correctly when evolving statevector.\"\"\"\n\n qc = QuantumCircuit(1)\n qc.rz(0.5, 0)\n qc2 = transpile(qc, basis_gates=[\"p\"])\n sv = Statevector.from_instruction(qc2)\n expected = np.array([0.96891242 - 0.24740396j, 0])\n self.assertEqual(float(qc2.global_phase), 2 * np.pi - 0.25)\n self.assertEqual(sv, Statevector(expected))\n\n def test_reverse_qargs(self):\n \"\"\"Test reverse_qargs method\"\"\"\n circ1 = QFT(5)\n circ2 = circ1.reverse_bits()\n\n state1 = Statevector.from_instruction(circ1)\n state2 = Statevector.from_instruction(circ2)\n self.assertEqual(state1.reverse_qargs(), state2)\n\n def test_drawings(self):\n \"\"\"Test draw method\"\"\"\n qc1 = QFT(5)\n sv = Statevector.from_instruction(qc1)\n with self.subTest(msg=\"str(statevector)\"):\n str(sv)\n for drawtype in [\"repr\", \"text\", \"latex\", \"latex_source\", \"qsphere\", \"hinton\", \"bloch\"]:\n with self.subTest(msg=f\"draw('{drawtype}')\"):\n sv.draw(drawtype)\n with self.subTest(msg=\" draw('latex', convention='vector')\"):\n sv.draw(\"latex\", convention=\"vector\")\n\n def test_state_to_latex_for_large_statevector(self):\n \"\"\"Test conversion of large dense state vector\"\"\"\n sv = Statevector(np.ones((2 ** 15, 1)))\n latex_representation = state_to_latex(sv)\n self.assertEqual(\n latex_representation,\n \" |000000000000000\\\\rangle+ |000000000000001\\\\rangle+ |000000000000010\\\\rangle+\"\n \" |000000000000011\\\\rangle+ |000000000000100\\\\rangle+ |000000000000101\\\\rangle +\"\n \" \\\\ldots + |111111111111011\\\\rangle+ |111111111111100\\\\rangle+\"\n \" |111111111111101\\\\rangle+ |111111111111110\\\\rangle+ |111111111111111\\\\rangle\",\n )\n\n def test_state_to_latex_for_large_sparse_statevector(self):\n \"\"\"Test conversion of large sparse state vector\"\"\"\n sv = Statevector(np.eye(2 ** 15, 1))\n latex_representation = state_to_latex(sv)\n self.assertEqual(latex_representation, \" |000000000000000\\\\rangle\")\n\n def test_number_to_latex_terms(self):\n \"\"\"Test conversions of complex numbers to latex terms\"\"\"\n\n cases = [\n ([1 - 8e-17, 0], [\"\", None]),\n ([0, -1], [None, \"-\"]),\n ([0, 1], [None, \"\"]),\n ([0, 1j], [None, \"i\"]),\n ([-1, 1], [\"-\", \"+\"]),\n ([0, 1j], [None, \"i\"]),\n ([-1, 1j], [\"-\", 
\"+i\"]),\n ([1e-16 + 1j], [\"i\"]),\n ([-1 + 1e-16 * 1j], [\"-\"]),\n ([-1, -1 - 1j], [\"-\", \"+ (-1 - i)\"]),\n ([np.sqrt(2) / 2, np.sqrt(2) / 2], [\"\\\\frac{\\\\sqrt{2}}{2}\", \"+\\\\frac{\\\\sqrt{2}}{2}\"]),\n ([1 + np.sqrt(2)], [\"(1 + \\\\sqrt{2})\"]),\n ]\n for numbers, latex_terms in cases:\n terms = numbers_to_latex_terms(numbers)\n self.assertListEqual(terms, latex_terms)\n\n def test_statevector_draw_latex_regression(self):\n \"\"\"Test numerical rounding errors are not printed\"\"\"\n sv = Statevector(np.array([1 - 8e-17, 8.32667268e-17j]))\n latex_string = sv.draw(output=\"latex_source\")\n self.assertTrue(latex_string.startswith(\" |0\\\\rangle\"))\n self.assertNotIn(\"|1\\\\rangle\", latex_string)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
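For orientation, here is a hedged usage sketch of the Statevector methods the tests above exercise (from_instruction, probabilities, probabilities_dict, expectation_value, seed, sample_counts). It assumes a working Qiskit installation; the commented values are the ideal expectations, and sampled counts will vary around a 50/50 split between '00' and '11'.

from qiskit import QuantumCircuit
from qiskit.quantum_info import Statevector, Pauli

bell = QuantumCircuit(2)
bell.h(0)
bell.cx(0, 1)

state = Statevector.from_instruction(bell)    # (|00> + |11>)/sqrt(2)
print(state.probabilities())                  # ~[0.5, 0.0, 0.0, 0.5]
print(state.probabilities_dict(qargs=[0]))    # ~{'0': 0.5, '1': 0.5}
print(state.expectation_value(Pauli("ZZ")))   # ~(1+0j)
state.seed(42)                                # fix the sampler for reproducibility
print(state.sample_counts(shots=100))         # roughly half '00', half '11'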
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nTools for building optimal circuits out of XX interactions.\n\nInputs:\n + A set of native XX operations, described as strengths.\n + A right-angled path, computed using the methods in `paths.py`.\n\nOutput:\n + A circuit which implements the target operation (expressed exactly as the exponential of\n `a XX + b YY + c ZZ`) using the native operations and local gates.\n\"\"\"\n\nfrom functools import reduce\nimport math\nfrom operator import itemgetter\n\nimport numpy as np\n\nfrom qiskit.circuit.quantumcircuit import QuantumCircuit\nfrom qiskit.circuit.library.standard_gates import RXXGate, RYYGate, RZGate\nfrom qiskit.exceptions import QiskitError\n\nfrom .paths import decomposition_hop\nfrom .utilities import EPSILON, safe_arccos\nfrom .weyl import (\n apply_reflection,\n apply_shift,\n canonical_rotation_circuit,\n reflection_options,\n shift_options,\n)\n\n\n# pylint:disable=invalid-name\ndef decompose_xxyy_into_xxyy_xx(a_target, b_target, a_source, b_source, interaction):\n \"\"\"\n Consumes a target canonical interaction CAN(a_target, b_target) and source interactions\n CAN(a1, b1), CAN(a2), then manufactures a circuit identity of the form\n\n CAN(a_target, b_target) = (Zr, Zs) CAN(a_source, b_source) (Zu, Zv) CAN(interaction) (Zx, Zy).\n\n Returns the 6-tuple (r, s, u, v, x, y).\n \"\"\"\n\n cplus, cminus = np.cos(a_source + b_source), np.cos(a_source - b_source)\n splus, sminus = np.sin(a_source + b_source), np.sin(a_source - b_source)\n ca, sa = np.cos(interaction), np.sin(interaction)\n\n uplusv = (\n 1\n / 2\n * safe_arccos(\n cminus ** 2 * ca ** 2 + sminus ** 2 * sa ** 2 - np.cos(a_target - b_target) ** 2,\n 2 * cminus * ca * sminus * sa,\n )\n )\n uminusv = (\n 1\n / 2\n * safe_arccos(\n cplus ** 2 * ca ** 2 + splus ** 2 * sa ** 2 - np.cos(a_target + b_target) ** 2,\n 2 * cplus * ca * splus * sa,\n )\n )\n\n u, v = (uplusv + uminusv) / 2, (uplusv - uminusv) / 2\n\n # NOTE: the target matrix is phase-free\n middle_matrix = reduce(\n np.dot,\n [\n RXXGate(2 * a_source).to_matrix() @ RYYGate(2 * b_source).to_matrix(),\n np.kron(RZGate(2 * u).to_matrix(), RZGate(2 * v).to_matrix()),\n RXXGate(2 * interaction).to_matrix(),\n ],\n )\n\n phase_solver = np.array(\n [\n [\n 1 / 4,\n 1 / 4,\n 1 / 4,\n 1 / 4,\n ],\n [\n 1 / 4,\n -1 / 4,\n -1 / 4,\n 1 / 4,\n ],\n [\n 1 / 4,\n 1 / 4,\n -1 / 4,\n -1 / 4,\n ],\n [\n 1 / 4,\n -1 / 4,\n 1 / 4,\n -1 / 4,\n ],\n ]\n )\n inner_phases = [\n np.angle(middle_matrix[0, 0]),\n np.angle(middle_matrix[1, 1]),\n np.angle(middle_matrix[1, 2]) + np.pi / 2,\n np.angle(middle_matrix[0, 3]) + np.pi / 2,\n ]\n r, s, x, y = np.dot(phase_solver, inner_phases)\n\n # If there's a phase discrepancy, need to conjugate by an extra Z/2 (x) Z/2.\n generated_matrix = reduce(\n np.dot,\n [\n np.kron(RZGate(2 * r).to_matrix(), RZGate(2 * s).to_matrix()),\n middle_matrix,\n np.kron(RZGate(2 * x).to_matrix(), RZGate(2 * y).to_matrix()),\n ],\n )\n if (abs(np.angle(generated_matrix[3, 0]) - np.pi / 2) < 0.01 and a_target > b_target) or (\n abs(np.angle(generated_matrix[3, 0]) + np.pi / 
2) < 0.01 and a_target < b_target\n ):\n x += np.pi / 4\n y += np.pi / 4\n r -= np.pi / 4\n s -= np.pi / 4\n\n return r, s, u, v, x, y\n\n\ndef xx_circuit_step(source, strength, target, embodiment):\n \"\"\"\n Builds a single step in an XX-based circuit.\n\n `source` and `target` are positive canonical coordinates; `strength` is the interaction strength\n at this step in the circuit as a canonical coordinate (so that CX = RZX(pi/2) corresponds to\n pi/4); and `embodiment` is a Qiskit circuit which enacts the canonical gate of the prescribed\n interaction `strength`.\n \"\"\"\n\n permute_source_for_overlap, permute_target_for_overlap = None, None\n\n # apply all possible reflections, shifts to the source\n for source_reflection_name in reflection_options:\n reflected_source_coord, source_reflection, reflection_phase_shift = apply_reflection(\n source_reflection_name, source\n )\n for source_shift_name in shift_options:\n shifted_source_coord, source_shift, shift_phase_shift = apply_shift(\n source_shift_name, reflected_source_coord\n )\n\n # check for overlap, back out permutation\n source_shared, target_shared = None, None\n for i, j in [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2)]:\n\n if (\n abs(np.mod(abs(shifted_source_coord[i] - target[j]), np.pi)) < EPSILON\n or abs(np.mod(abs(shifted_source_coord[i] - target[j]), np.pi) - np.pi)\n < EPSILON\n ):\n source_shared, target_shared = i, j\n break\n if source_shared is None:\n continue\n\n # pick out the other coordinates\n source_first, source_second = (x for x in [0, 1, 2] if x != source_shared)\n target_first, target_second = (x for x in [0, 1, 2] if x != target_shared)\n\n # check for arccos validity\n r, s, u, v, x, y = decompose_xxyy_into_xxyy_xx(\n float(target[target_first]),\n float(target[target_second]),\n float(shifted_source_coord[source_first]),\n float(shifted_source_coord[source_second]),\n float(strength),\n )\n if any(math.isnan(val) for val in (r, s, u, v, x, y)):\n continue\n\n # OK: this combination of things works.\n # save the permutation which rotates the shared coordinate into ZZ.\n permute_source_for_overlap = canonical_rotation_circuit(source_first, source_second)\n permute_target_for_overlap = canonical_rotation_circuit(target_first, target_second)\n break\n\n if permute_source_for_overlap is not None:\n break\n\n if permute_source_for_overlap is None:\n raise QiskitError(\n \"Error during RZX decomposition: Could not find a suitable Weyl \"\n f\"reflection to match {source} to {target} along {strength}.\"\n )\n\n prefix_circuit, affix_circuit = QuantumCircuit(2), QuantumCircuit(2)\n\n # the basic formula we're trying to work with is:\n # target^p_t_f_o =\n # rs * (source^s_reflection * s_shift)^p_s_f_o * uv * operation * xy\n # but we're rearranging it into the form\n # target = affix source prefix\n # and computing just the prefix / affix circuits.\n\n # the outermost prefix layer comes from the (inverse) target permutation.\n prefix_circuit.compose(permute_target_for_overlap.inverse(), inplace=True)\n # the middle prefix layer comes from the local Z rolls.\n prefix_circuit.rz(2 * x, [0])\n prefix_circuit.rz(2 * y, [1])\n prefix_circuit.compose(embodiment, inplace=True)\n prefix_circuit.rz(2 * u, [0])\n prefix_circuit.rz(2 * v, [1])\n # the innermost prefix layer is source_reflection, shifted by source_shift,\n # finally conjugated by p_s_f_o.\n prefix_circuit.compose(permute_source_for_overlap, inplace=True)\n prefix_circuit.compose(source_reflection, inplace=True)\n 
prefix_circuit.global_phase += -np.log(reflection_phase_shift).imag\n prefix_circuit.global_phase += -np.log(shift_phase_shift).imag\n\n # the affix circuit is constructed in reverse.\n # first (i.e., innermost), we install the other half of the source transformations and p_s_f_o.\n affix_circuit.compose(source_reflection.inverse(), inplace=True)\n affix_circuit.compose(source_shift, inplace=True)\n affix_circuit.compose(permute_source_for_overlap.inverse(), inplace=True)\n # then, the other local rolls in the middle.\n affix_circuit.rz(2 * r, [0])\n affix_circuit.rz(2 * s, [1])\n # finally, the other half of the p_t_f_o conjugation.\n affix_circuit.compose(permute_target_for_overlap, inplace=True)\n\n return {\"prefix_circuit\": prefix_circuit, \"affix_circuit\": affix_circuit}\n\n\ndef canonical_xx_circuit(target, strength_sequence, basis_embodiments):\n \"\"\"\n Assembles a Qiskit circuit from a specified `strength_sequence` of XX-type interactions which\n emulates the canonical gate at canonical coordinate `target`. The circuits supplied by\n `basis_embodiments` are used to instantiate the individual XX actions.\n\n NOTE: The elements of `strength_sequence` are expected to be normalized so that np.pi/2\n corresponds to RZX(np.pi/2) = CX; `target` is taken to be a positive canonical coordinate;\n and `basis_embodiments` maps `strength_sequence` elements to circuits which instantiate\n these gates.\n \"\"\"\n # empty decompositions are easy!\n if len(strength_sequence) == 0:\n return QuantumCircuit(2)\n\n # assemble the prefix / affix circuits\n prefix_circuit, affix_circuit = QuantumCircuit(2), QuantumCircuit(2)\n while len(strength_sequence) > 1:\n source = decomposition_hop(target, strength_sequence)\n strength = strength_sequence[-1]\n\n preceding_prefix_circuit, preceding_affix_circuit = itemgetter(\n \"prefix_circuit\", \"affix_circuit\"\n )(xx_circuit_step(source, strength / 2, target, basis_embodiments[strength]))\n\n prefix_circuit.compose(preceding_prefix_circuit, inplace=True)\n affix_circuit.compose(preceding_affix_circuit, inplace=True, front=True)\n\n target, strength_sequence = source, strength_sequence[:-1]\n\n circuit = prefix_circuit\n\n # lastly, deal with the \"leading\" gate.\n if target[0] <= np.pi / 4:\n circuit.compose(basis_embodiments[strength_sequence[0]], inplace=True)\n else:\n _, source_reflection, reflection_phase_shift = apply_reflection(\"reflect XX, YY\", [0, 0, 0])\n _, source_shift, shift_phase_shift = apply_shift(\"X shift\", [0, 0, 0])\n\n circuit.compose(source_reflection, inplace=True)\n circuit.compose(basis_embodiments[strength_sequence[0]], inplace=True)\n circuit.compose(source_reflection.inverse(), inplace=True)\n circuit.compose(source_shift, inplace=True)\n circuit.global_phase += -np.log(shift_phase_shift).imag\n circuit.global_phase += -np.log(reflection_phase_shift).imag\n\n circuit.compose(affix_circuit, inplace=True)\n\n return circuit\n",
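A hedged numerical aside on the background fact the module above leans on: the two-qubit operators XX, YY and ZZ mutually commute, so the target exponential of a XX + b YY + c ZZ factors into RXX, RYY and RZZ layers in any order. The check below uses only standard Qiskit gates; the angles are arbitrary examples and nothing here calls the decomposition code above.

import numpy as np
from qiskit.circuit.library import RXXGate, RYYGate, RZZGate

a, b, c = 0.7, 0.4, 0.1
xx = RXXGate(2 * a).to_matrix()   # exp(-i * a * XX)
yy = RYYGate(2 * b).to_matrix()   # exp(-i * b * YY)
zz = RZZGate(2 * c).to_matrix()   # exp(-i * c * ZZ)

# Pairwise commutation implies the product is order-independent.
print(np.allclose(xx @ yy, yy @ xx))            # True
print(np.allclose(xx @ yy @ zz, zz @ yy @ xx))  # True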
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n# pylint: disable=invalid-name\n\n\"\"\"Tests for Operator matrix linear operator class.\"\"\"\n\nimport unittest\nimport logging\nimport copy\nimport numpy as np\nfrom numpy.testing import assert_allclose\nimport scipy.linalg as la\n\nfrom qiskit import QiskitError\nfrom qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\nfrom qiskit.circuit.library import HGate, CHGate, CXGate, QFT\nfrom qiskit.test import QiskitTestCase\nfrom qiskit.quantum_info.operators.operator import Operator\nfrom qiskit.quantum_info.operators.predicates import matrix_equal\n\nlogger = logging.getLogger(__name__)\n\n\nclass OperatorTestCase(QiskitTestCase):\n \"\"\"Test utils for Operator\"\"\"\n\n # Pauli-matrix unitaries\n UI = np.eye(2)\n UX = np.array([[0, 1], [1, 0]])\n UY = np.array([[0, -1j], [1j, 0]])\n UZ = np.diag([1, -1])\n UH = np.array([[1, 1], [1, -1]]) / np.sqrt(2)\n\n @classmethod\n def rand_rho(cls, n):\n \"\"\"Return random density matrix\"\"\"\n seed = np.random.randint(0, np.iinfo(np.int32).max)\n logger.debug(\"rand_rho default_rng seeded with seed=%s\", seed)\n rng = np.random.default_rng(seed)\n\n psi = rng.random(n) + 1j * rng.random(n)\n rho = np.outer(psi, psi.conj())\n rho /= np.trace(rho)\n return rho\n\n @classmethod\n def rand_matrix(cls, rows, cols=None, real=False):\n \"\"\"Return a random matrix.\"\"\"\n seed = np.random.randint(0, np.iinfo(np.int32).max)\n logger.debug(\"rand_matrix default_rng seeded with seed=%s\", seed)\n rng = np.random.default_rng(seed)\n\n if cols is None:\n cols = rows\n if real:\n return rng.random(size=(rows, cols))\n return rng.random(size=(rows, cols)) + 1j * rng.random(size=(rows, cols))\n\n def simple_circuit_no_measure(self):\n \"\"\"Return a unitary circuit and the corresponding unitary array.\"\"\"\n qr = QuantumRegister(3)\n circ = QuantumCircuit(qr)\n circ.h(qr[0])\n circ.x(qr[1])\n circ.ry(np.pi / 2, qr[2])\n y90 = (1 / np.sqrt(2)) * np.array([[1, -1], [1, 1]])\n target = Operator(np.kron(y90, np.kron(self.UX, self.UH)))\n return circ, target\n\n def simple_circuit_with_measure(self):\n \"\"\"Return a unitary circuit with measurement.\"\"\"\n qr = QuantumRegister(2)\n cr = ClassicalRegister(2)\n circ = QuantumCircuit(qr, cr)\n circ.h(qr[0])\n circ.x(qr[1])\n circ.measure(qr, cr)\n return circ\n\n\nclass TestOperator(OperatorTestCase):\n \"\"\"Tests for Operator linear operator class.\"\"\"\n\n def test_init_array_qubit(self):\n \"\"\"Test subsystem initialization from N-qubit array.\"\"\"\n # Test automatic inference of qubit subsystems\n mat = self.rand_matrix(8, 8)\n op = Operator(mat)\n assert_allclose(op.data, mat)\n self.assertEqual(op.dim, (8, 8))\n self.assertEqual(op.input_dims(), (2, 2, 2))\n self.assertEqual(op.output_dims(), (2, 2, 2))\n self.assertEqual(op.num_qubits, 3)\n\n op = Operator(mat, input_dims=8, output_dims=8)\n assert_allclose(op.data, mat)\n self.assertEqual(op.dim, (8, 8))\n self.assertEqual(op.input_dims(), (2, 2, 2))\n self.assertEqual(op.output_dims(), (2, 2, 2))\n self.assertEqual(op.num_qubits, 3)\n\n def 
test_init_array(self):\n \"\"\"Test initialization from array.\"\"\"\n mat = np.eye(3)\n op = Operator(mat)\n assert_allclose(op.data, mat)\n self.assertEqual(op.dim, (3, 3))\n self.assertEqual(op.input_dims(), (3,))\n self.assertEqual(op.output_dims(), (3,))\n self.assertIsNone(op.num_qubits)\n\n mat = self.rand_matrix(2 * 3 * 4, 4 * 5)\n op = Operator(mat, input_dims=[4, 5], output_dims=[2, 3, 4])\n assert_allclose(op.data, mat)\n self.assertEqual(op.dim, (4 * 5, 2 * 3 * 4))\n self.assertEqual(op.input_dims(), (4, 5))\n self.assertEqual(op.output_dims(), (2, 3, 4))\n self.assertIsNone(op.num_qubits)\n\n def test_init_array_except(self):\n \"\"\"Test initialization exception from array.\"\"\"\n mat = self.rand_matrix(4, 4)\n self.assertRaises(QiskitError, Operator, mat, input_dims=[4, 2])\n self.assertRaises(QiskitError, Operator, mat, input_dims=[2, 4])\n self.assertRaises(QiskitError, Operator, mat, input_dims=5)\n\n def test_init_operator(self):\n \"\"\"Test initialization from Operator.\"\"\"\n op1 = Operator(self.rand_matrix(4, 4))\n op2 = Operator(op1)\n self.assertEqual(op1, op2)\n\n def test_circuit_init(self):\n \"\"\"Test initialization from a circuit.\"\"\"\n # Test tensor product of 1-qubit gates\n circuit = QuantumCircuit(3)\n circuit.h(0)\n circuit.x(1)\n circuit.ry(np.pi / 2, 2)\n op = Operator(circuit)\n y90 = (1 / np.sqrt(2)) * np.array([[1, -1], [1, 1]])\n target = np.kron(y90, np.kron(self.UX, self.UH))\n global_phase_equivalent = matrix_equal(op.data, target, ignore_phase=True)\n self.assertTrue(global_phase_equivalent)\n\n # Test decomposition of Controlled-Phase gate\n lam = np.pi / 4\n circuit = QuantumCircuit(2)\n circuit.cp(lam, 0, 1)\n op = Operator(circuit)\n target = np.diag([1, 1, 1, np.exp(1j * lam)])\n global_phase_equivalent = matrix_equal(op.data, target, ignore_phase=True)\n self.assertTrue(global_phase_equivalent)\n\n # Test decomposition of controlled-H gate\n circuit = QuantumCircuit(2)\n circuit.ch(0, 1)\n op = Operator(circuit)\n target = np.kron(self.UI, np.diag([1, 0])) + np.kron(self.UH, np.diag([0, 1]))\n global_phase_equivalent = matrix_equal(op.data, target, ignore_phase=True)\n self.assertTrue(global_phase_equivalent)\n\n def test_instruction_init(self):\n \"\"\"Test initialization from a circuit.\"\"\"\n gate = CXGate()\n op = Operator(gate).data\n target = gate.to_matrix()\n global_phase_equivalent = matrix_equal(op, target, ignore_phase=True)\n self.assertTrue(global_phase_equivalent)\n\n gate = CHGate()\n op = Operator(gate).data\n had = HGate().to_matrix()\n target = np.kron(had, np.diag([0, 1])) + np.kron(np.eye(2), np.diag([1, 0]))\n global_phase_equivalent = matrix_equal(op, target, ignore_phase=True)\n self.assertTrue(global_phase_equivalent)\n\n def test_circuit_init_except(self):\n \"\"\"Test initialization from circuit with measure raises exception.\"\"\"\n circuit = self.simple_circuit_with_measure()\n self.assertRaises(QiskitError, Operator, circuit)\n\n def test_equal(self):\n \"\"\"Test __eq__ method\"\"\"\n mat = self.rand_matrix(2, 2, real=True)\n self.assertEqual(Operator(np.array(mat, dtype=complex)), Operator(mat))\n mat = self.rand_matrix(4, 4)\n self.assertEqual(Operator(mat.tolist()), Operator(mat))\n\n def test_data(self):\n \"\"\"Test Operator representation string property.\"\"\"\n mat = self.rand_matrix(2, 2)\n op = Operator(mat)\n assert_allclose(mat, op.data)\n\n def test_dim(self):\n \"\"\"Test Operator dim property.\"\"\"\n mat = self.rand_matrix(4, 4)\n self.assertEqual(Operator(mat).dim, (4, 4))\n 
self.assertEqual(Operator(mat, input_dims=[4], output_dims=[4]).dim, (4, 4))\n self.assertEqual(Operator(mat, input_dims=[2, 2], output_dims=[2, 2]).dim, (4, 4))\n\n def test_input_dims(self):\n \"\"\"Test Operator input_dims method.\"\"\"\n op = Operator(self.rand_matrix(2 * 3 * 4, 4 * 5), input_dims=[4, 5], output_dims=[2, 3, 4])\n self.assertEqual(op.input_dims(), (4, 5))\n self.assertEqual(op.input_dims(qargs=[0, 1]), (4, 5))\n self.assertEqual(op.input_dims(qargs=[1, 0]), (5, 4))\n self.assertEqual(op.input_dims(qargs=[0]), (4,))\n self.assertEqual(op.input_dims(qargs=[1]), (5,))\n\n def test_output_dims(self):\n \"\"\"Test Operator output_dims method.\"\"\"\n op = Operator(self.rand_matrix(2 * 3 * 4, 4 * 5), input_dims=[4, 5], output_dims=[2, 3, 4])\n self.assertEqual(op.output_dims(), (2, 3, 4))\n self.assertEqual(op.output_dims(qargs=[0, 1, 2]), (2, 3, 4))\n self.assertEqual(op.output_dims(qargs=[2, 1, 0]), (4, 3, 2))\n self.assertEqual(op.output_dims(qargs=[2, 0, 1]), (4, 2, 3))\n self.assertEqual(op.output_dims(qargs=[0]), (2,))\n self.assertEqual(op.output_dims(qargs=[1]), (3,))\n self.assertEqual(op.output_dims(qargs=[2]), (4,))\n self.assertEqual(op.output_dims(qargs=[0, 2]), (2, 4))\n self.assertEqual(op.output_dims(qargs=[2, 0]), (4, 2))\n\n def test_reshape(self):\n \"\"\"Test Operator reshape method.\"\"\"\n op = Operator(self.rand_matrix(8, 8))\n reshaped1 = op.reshape(input_dims=[8], output_dims=[8])\n reshaped2 = op.reshape(input_dims=[4, 2], output_dims=[2, 4])\n self.assertEqual(op.output_dims(), (2, 2, 2))\n self.assertEqual(op.input_dims(), (2, 2, 2))\n self.assertEqual(reshaped1.output_dims(), (8,))\n self.assertEqual(reshaped1.input_dims(), (8,))\n self.assertEqual(reshaped2.output_dims(), (2, 4))\n self.assertEqual(reshaped2.input_dims(), (4, 2))\n\n def test_reshape_num_qubits(self):\n \"\"\"Test Operator reshape method with num_qubits.\"\"\"\n op = Operator(self.rand_matrix(8, 8), input_dims=(4, 2), output_dims=(2, 4))\n reshaped = op.reshape(num_qubits=3)\n self.assertEqual(reshaped.num_qubits, 3)\n self.assertEqual(reshaped.output_dims(), (2, 2, 2))\n self.assertEqual(reshaped.input_dims(), (2, 2, 2))\n\n def test_reshape_raise(self):\n \"\"\"Test Operator reshape method with invalid args.\"\"\"\n op = Operator(self.rand_matrix(3, 3))\n self.assertRaises(QiskitError, op.reshape, num_qubits=2)\n\n def test_copy(self):\n \"\"\"Test Operator copy method\"\"\"\n mat = np.eye(2)\n with self.subTest(\"Deep copy\"):\n orig = Operator(mat)\n cpy = orig.copy()\n cpy._data[0, 0] = 0.0\n self.assertFalse(cpy == orig)\n with self.subTest(\"Shallow copy\"):\n orig = Operator(mat)\n clone = copy.copy(orig)\n clone._data[0, 0] = 0.0\n self.assertTrue(clone == orig)\n\n def test_is_unitary(self):\n \"\"\"Test is_unitary method.\"\"\"\n # X-90 rotation\n X90 = la.expm(-1j * 0.5 * np.pi * np.array([[0, 1], [1, 0]]) / 2)\n self.assertTrue(Operator(X90).is_unitary())\n # Non-unitary should return false\n self.assertFalse(Operator([[1, 0], [0, 0]]).is_unitary())\n\n def test_to_operator(self):\n \"\"\"Test to_operator method.\"\"\"\n op1 = Operator(self.rand_matrix(4, 4))\n op2 = op1.to_operator()\n self.assertEqual(op1, op2)\n\n def test_conjugate(self):\n \"\"\"Test conjugate method.\"\"\"\n matr = self.rand_matrix(2, 4, real=True)\n mati = self.rand_matrix(2, 4, real=True)\n op = Operator(matr + 1j * mati)\n uni_conj = op.conjugate()\n self.assertEqual(uni_conj, Operator(matr - 1j * mati))\n\n def test_transpose(self):\n \"\"\"Test transpose method.\"\"\"\n matr = 
self.rand_matrix(2, 4, real=True)\n mati = self.rand_matrix(2, 4, real=True)\n op = Operator(matr + 1j * mati)\n uni_t = op.transpose()\n self.assertEqual(uni_t, Operator(matr.T + 1j * mati.T))\n\n def test_adjoint(self):\n \"\"\"Test adjoint method.\"\"\"\n matr = self.rand_matrix(2, 4, real=True)\n mati = self.rand_matrix(2, 4, real=True)\n op = Operator(matr + 1j * mati)\n uni_adj = op.adjoint()\n self.assertEqual(uni_adj, Operator(matr.T - 1j * mati.T))\n\n def test_compose_except(self):\n \"\"\"Test compose different dimension exception\"\"\"\n self.assertRaises(QiskitError, Operator(np.eye(2)).compose, Operator(np.eye(3)))\n self.assertRaises(QiskitError, Operator(np.eye(2)).compose, 2)\n\n def test_compose(self):\n \"\"\"Test compose method.\"\"\"\n\n op1 = Operator(self.UX)\n op2 = Operator(self.UY)\n\n targ = Operator(np.dot(self.UY, self.UX))\n self.assertEqual(op1.compose(op2), targ)\n self.assertEqual(op1 & op2, targ)\n\n targ = Operator(np.dot(self.UX, self.UY))\n self.assertEqual(op2.compose(op1), targ)\n self.assertEqual(op2 & op1, targ)\n\n def test_dot(self):\n \"\"\"Test dot method.\"\"\"\n op1 = Operator(self.UY)\n op2 = Operator(self.UX)\n\n targ = Operator(np.dot(self.UY, self.UX))\n self.assertEqual(op1.dot(op2), targ)\n\n targ = Operator(np.dot(self.UX, self.UY))\n self.assertEqual(op2.dot(op1), targ)\n\n def test_compose_front(self):\n \"\"\"Test front compose method.\"\"\"\n\n opYX = Operator(self.UY).compose(Operator(self.UX), front=True)\n matYX = np.dot(self.UY, self.UX)\n self.assertEqual(opYX, Operator(matYX))\n\n opXY = Operator(self.UX).compose(Operator(self.UY), front=True)\n matXY = np.dot(self.UX, self.UY)\n self.assertEqual(opXY, Operator(matXY))\n\n def test_compose_subsystem(self):\n \"\"\"Test subsystem compose method.\"\"\"\n # 3-qubit operator\n mat = self.rand_matrix(8, 8)\n mat_a = self.rand_matrix(2, 2)\n mat_b = self.rand_matrix(2, 2)\n mat_c = self.rand_matrix(2, 2)\n op = Operator(mat)\n op1 = Operator(mat_a)\n op2 = Operator(np.kron(mat_b, mat_a))\n op3 = Operator(np.kron(mat_c, np.kron(mat_b, mat_a)))\n\n # op3 qargs=[0, 1, 2]\n targ = np.dot(np.kron(mat_c, np.kron(mat_b, mat_a)), mat)\n self.assertEqual(op.compose(op3, qargs=[0, 1, 2]), Operator(targ))\n self.assertEqual(op.compose(op3([0, 1, 2])), Operator(targ))\n self.assertEqual(op & op3([0, 1, 2]), Operator(targ))\n # op3 qargs=[2, 1, 0]\n targ = np.dot(np.kron(mat_a, np.kron(mat_b, mat_c)), mat)\n self.assertEqual(op.compose(op3, qargs=[2, 1, 0]), Operator(targ))\n self.assertEqual(op & op3([2, 1, 0]), Operator(targ))\n\n # op2 qargs=[0, 1]\n targ = np.dot(np.kron(np.eye(2), np.kron(mat_b, mat_a)), mat)\n self.assertEqual(op.compose(op2, qargs=[0, 1]), Operator(targ))\n self.assertEqual(op & op2([0, 1]), Operator(targ))\n # op2 qargs=[2, 0]\n targ = np.dot(np.kron(mat_a, np.kron(np.eye(2), mat_b)), mat)\n self.assertEqual(op.compose(op2, qargs=[2, 0]), Operator(targ))\n self.assertEqual(op & op2([2, 0]), Operator(targ))\n\n # op1 qargs=[0]\n targ = np.dot(np.kron(np.eye(4), mat_a), mat)\n self.assertEqual(op.compose(op1, qargs=[0]), Operator(targ))\n self.assertEqual(op & op1([0]), Operator(targ))\n # op1 qargs=[1]\n targ = np.dot(np.kron(np.eye(2), np.kron(mat_a, np.eye(2))), mat)\n self.assertEqual(op.compose(op1, qargs=[1]), Operator(targ))\n self.assertEqual(op & op1([1]), Operator(targ))\n # op1 qargs=[2]\n targ = np.dot(np.kron(mat_a, np.eye(4)), mat)\n self.assertEqual(op.compose(op1, qargs=[2]), Operator(targ))\n self.assertEqual(op & op1([2]), Operator(targ))\n\n def 
test_dot_subsystem(self):\n \"\"\"Test subsystem dot method.\"\"\"\n # 3-qubit operator\n mat = self.rand_matrix(8, 8)\n mat_a = self.rand_matrix(2, 2)\n mat_b = self.rand_matrix(2, 2)\n mat_c = self.rand_matrix(2, 2)\n op = Operator(mat)\n op1 = Operator(mat_a)\n op2 = Operator(np.kron(mat_b, mat_a))\n op3 = Operator(np.kron(mat_c, np.kron(mat_b, mat_a)))\n\n # op3 qargs=[0, 1, 2]\n targ = np.dot(mat, np.kron(mat_c, np.kron(mat_b, mat_a)))\n self.assertEqual(op.dot(op3, qargs=[0, 1, 2]), Operator(targ))\n self.assertEqual(op.dot(op3([0, 1, 2])), Operator(targ))\n # op3 qargs=[2, 1, 0]\n targ = np.dot(mat, np.kron(mat_a, np.kron(mat_b, mat_c)))\n self.assertEqual(op.dot(op3, qargs=[2, 1, 0]), Operator(targ))\n self.assertEqual(op.dot(op3([2, 1, 0])), Operator(targ))\n\n # op2 qargs=[0, 1]\n targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_b, mat_a)))\n self.assertEqual(op.dot(op2, qargs=[0, 1]), Operator(targ))\n self.assertEqual(op.dot(op2([0, 1])), Operator(targ))\n # op2 qargs=[2, 0]\n targ = np.dot(mat, np.kron(mat_a, np.kron(np.eye(2), mat_b)))\n self.assertEqual(op.dot(op2, qargs=[2, 0]), Operator(targ))\n self.assertEqual(op.dot(op2([2, 0])), Operator(targ))\n\n # op1 qargs=[0]\n targ = np.dot(mat, np.kron(np.eye(4), mat_a))\n self.assertEqual(op.dot(op1, qargs=[0]), Operator(targ))\n self.assertEqual(op.dot(op1([0])), Operator(targ))\n # op1 qargs=[1]\n targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_a, np.eye(2))))\n self.assertEqual(op.dot(op1, qargs=[1]), Operator(targ))\n self.assertEqual(op.dot(op1([1])), Operator(targ))\n # op1 qargs=[2]\n targ = np.dot(mat, np.kron(mat_a, np.eye(4)))\n self.assertEqual(op.dot(op1, qargs=[2]), Operator(targ))\n self.assertEqual(op.dot(op1([2])), Operator(targ))\n\n def test_compose_front_subsystem(self):\n \"\"\"Test subsystem front compose method.\"\"\"\n # 3-qubit operator\n mat = self.rand_matrix(8, 8)\n mat_a = self.rand_matrix(2, 2)\n mat_b = self.rand_matrix(2, 2)\n mat_c = self.rand_matrix(2, 2)\n op = Operator(mat)\n op1 = Operator(mat_a)\n op2 = Operator(np.kron(mat_b, mat_a))\n op3 = Operator(np.kron(mat_c, np.kron(mat_b, mat_a)))\n\n # op3 qargs=[0, 1, 2]\n targ = np.dot(mat, np.kron(mat_c, np.kron(mat_b, mat_a)))\n self.assertEqual(op.compose(op3, qargs=[0, 1, 2], front=True), Operator(targ))\n # op3 qargs=[2, 1, 0]\n targ = np.dot(mat, np.kron(mat_a, np.kron(mat_b, mat_c)))\n self.assertEqual(op.compose(op3, qargs=[2, 1, 0], front=True), Operator(targ))\n\n # op2 qargs=[0, 1]\n targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_b, mat_a)))\n self.assertEqual(op.compose(op2, qargs=[0, 1], front=True), Operator(targ))\n # op2 qargs=[2, 0]\n targ = np.dot(mat, np.kron(mat_a, np.kron(np.eye(2), mat_b)))\n self.assertEqual(op.compose(op2, qargs=[2, 0], front=True), Operator(targ))\n\n # op1 qargs=[0]\n targ = np.dot(mat, np.kron(np.eye(4), mat_a))\n self.assertEqual(op.compose(op1, qargs=[0], front=True), Operator(targ))\n\n # op1 qargs=[1]\n targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_a, np.eye(2))))\n self.assertEqual(op.compose(op1, qargs=[1], front=True), Operator(targ))\n\n # op1 qargs=[2]\n targ = np.dot(mat, np.kron(mat_a, np.eye(4)))\n self.assertEqual(op.compose(op1, qargs=[2], front=True), Operator(targ))\n\n def test_power(self):\n \"\"\"Test power method.\"\"\"\n X90 = la.expm(-1j * 0.5 * np.pi * np.array([[0, 1], [1, 0]]) / 2)\n op = Operator(X90)\n self.assertEqual(op.power(2), Operator([[0, -1j], [-1j, 0]]))\n self.assertEqual(op.power(4), Operator(-1 * np.eye(2)))\n self.assertEqual(op.power(8), 
Operator(np.eye(2)))\n\n def test_expand(self):\n \"\"\"Test expand method.\"\"\"\n mat1 = self.UX\n mat2 = np.eye(3, dtype=complex)\n\n mat21 = np.kron(mat2, mat1)\n op21 = Operator(mat1).expand(Operator(mat2))\n self.assertEqual(op21.dim, (6, 6))\n assert_allclose(op21.data, Operator(mat21).data)\n\n mat12 = np.kron(mat1, mat2)\n op12 = Operator(mat2).expand(Operator(mat1))\n self.assertEqual(op12.dim, (6, 6))\n assert_allclose(op12.data, Operator(mat12).data)\n\n def test_tensor(self):\n \"\"\"Test tensor method.\"\"\"\n mat1 = self.UX\n mat2 = np.eye(3, dtype=complex)\n\n mat21 = np.kron(mat2, mat1)\n op21 = Operator(mat2).tensor(Operator(mat1))\n self.assertEqual(op21.dim, (6, 6))\n assert_allclose(op21.data, Operator(mat21).data)\n\n mat12 = np.kron(mat1, mat2)\n op12 = Operator(mat1).tensor(Operator(mat2))\n self.assertEqual(op12.dim, (6, 6))\n assert_allclose(op12.data, Operator(mat12).data)\n\n def test_power_except(self):\n \"\"\"Test power method raises exceptions if not square.\"\"\"\n op = Operator(self.rand_matrix(2, 3))\n # Non-integer power raises error\n self.assertRaises(QiskitError, op.power, 0.5)\n\n def test_add(self):\n \"\"\"Test add method.\"\"\"\n mat1 = self.rand_matrix(4, 4)\n mat2 = self.rand_matrix(4, 4)\n op1 = Operator(mat1)\n op2 = Operator(mat2)\n self.assertEqual(op1._add(op2), Operator(mat1 + mat2))\n self.assertEqual(op1 + op2, Operator(mat1 + mat2))\n self.assertEqual(op1 - op2, Operator(mat1 - mat2))\n\n def test_add_except(self):\n \"\"\"Test add method raises exceptions.\"\"\"\n op1 = Operator(self.rand_matrix(2, 2))\n op2 = Operator(self.rand_matrix(3, 3))\n self.assertRaises(QiskitError, op1._add, op2)\n\n def test_add_qargs(self):\n \"\"\"Test add method with qargs.\"\"\"\n mat = self.rand_matrix(8, 8)\n mat0 = self.rand_matrix(2, 2)\n mat1 = self.rand_matrix(2, 2)\n\n op = Operator(mat)\n op0 = Operator(mat0)\n op01 = Operator(np.kron(mat1, mat0))\n\n with self.subTest(msg=\"qargs=[0]\"):\n value = op + op0([0])\n target = op + Operator(np.kron(np.eye(4), mat0))\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"qargs=[1]\"):\n value = op + op0([1])\n target = op + Operator(np.kron(np.kron(np.eye(2), mat0), np.eye(2)))\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"qargs=[2]\"):\n value = op + op0([2])\n target = op + Operator(np.kron(mat0, np.eye(4)))\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"qargs=[0, 1]\"):\n value = op + op01([0, 1])\n target = op + Operator(np.kron(np.eye(2), np.kron(mat1, mat0)))\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"qargs=[1, 0]\"):\n value = op + op01([1, 0])\n target = op + Operator(np.kron(np.eye(2), np.kron(mat0, mat1)))\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"qargs=[0, 2]\"):\n value = op + op01([0, 2])\n target = op + Operator(np.kron(mat1, np.kron(np.eye(2), mat0)))\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"qargs=[2, 0]\"):\n value = op + op01([2, 0])\n target = op + Operator(np.kron(mat0, np.kron(np.eye(2), mat1)))\n self.assertEqual(value, target)\n\n def test_sub_qargs(self):\n \"\"\"Test subtract method with qargs.\"\"\"\n mat = self.rand_matrix(8, 8)\n mat0 = self.rand_matrix(2, 2)\n mat1 = self.rand_matrix(2, 2)\n\n op = Operator(mat)\n op0 = Operator(mat0)\n op01 = Operator(np.kron(mat1, mat0))\n\n with self.subTest(msg=\"qargs=[0]\"):\n value = op - op0([0])\n target = op - Operator(np.kron(np.eye(4), mat0))\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"qargs=[1]\"):\n value = op - 
op0([1])\n target = op - Operator(np.kron(np.kron(np.eye(2), mat0), np.eye(2)))\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"qargs=[2]\"):\n value = op - op0([2])\n target = op - Operator(np.kron(mat0, np.eye(4)))\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"qargs=[0, 1]\"):\n value = op - op01([0, 1])\n target = op - Operator(np.kron(np.eye(2), np.kron(mat1, mat0)))\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"qargs=[1, 0]\"):\n value = op - op01([1, 0])\n target = op - Operator(np.kron(np.eye(2), np.kron(mat0, mat1)))\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"qargs=[0, 2]\"):\n value = op - op01([0, 2])\n target = op - Operator(np.kron(mat1, np.kron(np.eye(2), mat0)))\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"qargs=[2, 0]\"):\n value = op - op01([2, 0])\n target = op - Operator(np.kron(mat0, np.kron(np.eye(2), mat1)))\n self.assertEqual(value, target)\n\n def test_multiply(self):\n \"\"\"Test multiply method.\"\"\"\n mat = self.rand_matrix(4, 4)\n val = np.exp(5j)\n op = Operator(mat)\n self.assertEqual(op._multiply(val), Operator(val * mat))\n self.assertEqual(val * op, Operator(val * mat))\n\n def test_multiply_except(self):\n \"\"\"Test multiply method raises exceptions.\"\"\"\n op = Operator(self.rand_matrix(2, 2))\n self.assertRaises(QiskitError, op._multiply, \"s\")\n self.assertRaises(QiskitError, op.__rmul__, \"s\")\n self.assertRaises(QiskitError, op._multiply, op)\n self.assertRaises(QiskitError, op.__rmul__, op)\n\n def test_negate(self):\n \"\"\"Test negate method\"\"\"\n mat = self.rand_matrix(4, 4)\n op = Operator(mat)\n self.assertEqual(-op, Operator(-1 * mat))\n\n def test_equiv(self):\n \"\"\"Test negate method\"\"\"\n mat = np.diag([1, np.exp(1j * np.pi / 2)])\n phase = np.exp(-1j * np.pi / 4)\n op = Operator(mat)\n self.assertTrue(op.equiv(phase * mat))\n self.assertTrue(op.equiv(Operator(phase * mat)))\n self.assertFalse(op.equiv(2 * mat))\n\n def test_reverse_qargs(self):\n \"\"\"Test reverse_qargs method\"\"\"\n circ1 = QFT(5)\n circ2 = circ1.reverse_bits()\n\n state1 = Operator(circ1)\n state2 = Operator(circ2)\n self.assertEqual(state1.reverse_qargs(), state2)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n# pylint: disable=invalid-name\n\n\"\"\"Tests for Choi quantum channel representation class.\"\"\"\n\nimport copy\nimport unittest\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nfrom qiskit import QiskitError\nfrom qiskit.quantum_info.states import DensityMatrix\nfrom qiskit.quantum_info.operators.channel import Choi\nfrom .channel_test_case import ChannelTestCase\n\n\nclass TestChoi(ChannelTestCase):\n \"\"\"Tests for Choi channel representation.\"\"\"\n\n def test_init(self):\n \"\"\"Test initialization\"\"\"\n mat4 = np.eye(4) / 2.0\n chan = Choi(mat4)\n assert_allclose(chan.data, mat4)\n self.assertEqual(chan.dim, (2, 2))\n self.assertEqual(chan.num_qubits, 1)\n\n mat8 = np.eye(8) / 2.0\n chan = Choi(mat8, input_dims=4)\n assert_allclose(chan.data, mat8)\n self.assertEqual(chan.dim, (4, 2))\n self.assertIsNone(chan.num_qubits)\n\n chan = Choi(mat8, input_dims=2)\n assert_allclose(chan.data, mat8)\n self.assertEqual(chan.dim, (2, 4))\n self.assertIsNone(chan.num_qubits)\n\n mat16 = np.eye(16) / 4\n chan = Choi(mat16)\n assert_allclose(chan.data, mat16)\n self.assertEqual(chan.dim, (4, 4))\n self.assertEqual(chan.num_qubits, 2)\n\n # Wrong input or output dims should raise exception\n self.assertRaises(QiskitError, Choi, mat8, input_dims=[4], output_dims=[4])\n\n def test_circuit_init(self):\n \"\"\"Test initialization from a circuit.\"\"\"\n circuit, target = self.simple_circuit_no_measure()\n op = Choi(circuit)\n target = Choi(target)\n self.assertEqual(op, target)\n\n def test_circuit_init_except(self):\n \"\"\"Test initialization from circuit with measure raises exception.\"\"\"\n circuit = self.simple_circuit_with_measure()\n self.assertRaises(QiskitError, Choi, circuit)\n\n def test_equal(self):\n \"\"\"Test __eq__ method\"\"\"\n mat = self.rand_matrix(4, 4)\n self.assertEqual(Choi(mat), Choi(mat))\n\n def test_copy(self):\n \"\"\"Test copy method\"\"\"\n mat = np.eye(2)\n with self.subTest(\"Deep copy\"):\n orig = Choi(mat)\n cpy = orig.copy()\n cpy._data[0, 0] = 0.0\n self.assertFalse(cpy == orig)\n with self.subTest(\"Shallow copy\"):\n orig = Choi(mat)\n clone = copy.copy(orig)\n clone._data[0, 0] = 0.0\n self.assertTrue(clone == orig)\n\n def test_clone(self):\n \"\"\"Test clone method\"\"\"\n mat = np.eye(4)\n orig = Choi(mat)\n clone = copy.copy(orig)\n clone._data[0, 0] = 0.0\n self.assertTrue(clone == orig)\n\n def test_is_cptp(self):\n \"\"\"Test is_cptp method.\"\"\"\n self.assertTrue(Choi(self.depol_choi(0.25)).is_cptp())\n # Non-CPTP should return false\n self.assertFalse(Choi(1.25 * self.choiI - 0.25 * self.depol_choi(1)).is_cptp())\n\n def test_conjugate(self):\n \"\"\"Test conjugate method.\"\"\"\n # Test channel measures in Z basis and prepares in Y basis\n # Zp -> Yp, Zm -> Ym\n Zp, Zm = np.diag([1, 0]), np.diag([0, 1])\n Yp, Ym = np.array([[1, -1j], [1j, 1]]) / 2, np.array([[1, 1j], [-1j, 1]]) / 2\n chan = Choi(np.kron(Zp, Yp) + np.kron(Zm, Ym))\n # Conjugate channel swaps Y-basis states\n targ = Choi(np.kron(Zp, Ym) + np.kron(Zm, Yp))\n chan_conj = 
chan.conjugate()\n self.assertEqual(chan_conj, targ)\n\n def test_transpose(self):\n \"\"\"Test transpose method.\"\"\"\n # Test channel measures in Z basis and prepares in Y basis\n # Zp -> Yp, Zm -> Ym\n Zp, Zm = np.diag([1, 0]), np.diag([0, 1])\n Yp, Ym = np.array([[1, -1j], [1j, 1]]) / 2, np.array([[1, 1j], [-1j, 1]]) / 2\n chan = Choi(np.kron(Zp, Yp) + np.kron(Zm, Ym))\n # Transpose channel swaps basis\n targ = Choi(np.kron(Yp, Zp) + np.kron(Ym, Zm))\n chan_t = chan.transpose()\n self.assertEqual(chan_t, targ)\n\n def test_adjoint(self):\n \"\"\"Test adjoint method.\"\"\"\n # Test channel measures in Z basis and prepares in Y basis\n # Zp -> Yp, Zm -> Ym\n Zp, Zm = np.diag([1, 0]), np.diag([0, 1])\n Yp, Ym = np.array([[1, -1j], [1j, 1]]) / 2, np.array([[1, 1j], [-1j, 1]]) / 2\n chan = Choi(np.kron(Zp, Yp) + np.kron(Zm, Ym))\n # Ajoint channel swaps Y-basis elements and Z<->Y bases\n targ = Choi(np.kron(Ym, Zp) + np.kron(Yp, Zm))\n chan_adj = chan.adjoint()\n self.assertEqual(chan_adj, targ)\n\n def test_compose_except(self):\n \"\"\"Test compose different dimension exception\"\"\"\n self.assertRaises(QiskitError, Choi(np.eye(4)).compose, Choi(np.eye(8)))\n self.assertRaises(QiskitError, Choi(np.eye(4)).compose, 2)\n\n def test_compose(self):\n \"\"\"Test compose method.\"\"\"\n # UnitaryChannel evolution\n chan1 = Choi(self.choiX)\n chan2 = Choi(self.choiY)\n chan = chan1.compose(chan2)\n targ = Choi(self.choiZ)\n self.assertEqual(chan, targ)\n\n # 50% depolarizing channel\n chan1 = Choi(self.depol_choi(0.5))\n chan = chan1.compose(chan1)\n targ = Choi(self.depol_choi(0.75))\n self.assertEqual(chan, targ)\n\n # Measure and rotation\n Zp, Zm = np.diag([1, 0]), np.diag([0, 1])\n Xp, Xm = np.array([[1, 1], [1, 1]]) / 2, np.array([[1, -1], [-1, 1]]) / 2\n chan1 = Choi(np.kron(Zp, Xp) + np.kron(Zm, Xm))\n chan2 = Choi(self.choiX)\n # X-gate second does nothing\n targ = Choi(np.kron(Zp, Xp) + np.kron(Zm, Xm))\n self.assertEqual(chan1.compose(chan2), targ)\n self.assertEqual(chan1 & chan2, targ)\n # X-gate first swaps Z states\n targ = Choi(np.kron(Zm, Xp) + np.kron(Zp, Xm))\n self.assertEqual(chan2.compose(chan1), targ)\n self.assertEqual(chan2 & chan1, targ)\n\n # Compose different dimensions\n chan1 = Choi(np.eye(8) / 4, input_dims=2, output_dims=4)\n chan2 = Choi(np.eye(8) / 2, input_dims=4, output_dims=2)\n chan = chan1.compose(chan2)\n self.assertEqual(chan.dim, (2, 2))\n chan = chan2.compose(chan1)\n self.assertEqual(chan.dim, (4, 4))\n\n def test_dot(self):\n \"\"\"Test dot method.\"\"\"\n # UnitaryChannel evolution\n chan1 = Choi(self.choiX)\n chan2 = Choi(self.choiY)\n targ = Choi(self.choiZ)\n self.assertEqual(chan1.dot(chan2), targ)\n\n # 50% depolarizing channel\n chan1 = Choi(self.depol_choi(0.5))\n targ = Choi(self.depol_choi(0.75))\n self.assertEqual(chan1.dot(chan1), targ)\n\n # Measure and rotation\n Zp, Zm = np.diag([1, 0]), np.diag([0, 1])\n Xp, Xm = np.array([[1, 1], [1, 1]]) / 2, np.array([[1, -1], [-1, 1]]) / 2\n chan1 = Choi(np.kron(Zp, Xp) + np.kron(Zm, Xm))\n chan2 = Choi(self.choiX)\n # X-gate second does nothing\n targ = Choi(np.kron(Zp, Xp) + np.kron(Zm, Xm))\n self.assertEqual(chan2.dot(chan1), targ)\n # X-gate first swaps Z states\n targ = Choi(np.kron(Zm, Xp) + np.kron(Zp, Xm))\n self.assertEqual(chan1.dot(chan2), targ)\n\n # Compose different dimensions\n chan1 = Choi(np.eye(8) / 4, input_dims=2, output_dims=4)\n chan2 = Choi(np.eye(8) / 2, input_dims=4, output_dims=2)\n chan = chan1.dot(chan2)\n self.assertEqual(chan.dim, (4, 4))\n chan = chan2.dot(chan1)\n 
self.assertEqual(chan.dim, (2, 2))\n\n def test_compose_front(self):\n \"\"\"Test front compose method.\"\"\"\n # UnitaryChannel evolution\n chan1 = Choi(self.choiX)\n chan2 = Choi(self.choiY)\n chan = chan1.compose(chan2, front=True)\n targ = Choi(self.choiZ)\n self.assertEqual(chan, targ)\n\n # 50% depolarizing channel\n chan1 = Choi(self.depol_choi(0.5))\n chan = chan1.compose(chan1, front=True)\n targ = Choi(self.depol_choi(0.75))\n self.assertEqual(chan, targ)\n\n # Measure and rotation\n Zp, Zm = np.diag([1, 0]), np.diag([0, 1])\n Xp, Xm = np.array([[1, 1], [1, 1]]) / 2, np.array([[1, -1], [-1, 1]]) / 2\n chan1 = Choi(np.kron(Zp, Xp) + np.kron(Zm, Xm))\n chan2 = Choi(self.choiX)\n # X-gate second does nothing\n chan = chan2.compose(chan1, front=True)\n targ = Choi(np.kron(Zp, Xp) + np.kron(Zm, Xm))\n self.assertEqual(chan, targ)\n # X-gate first swaps Z states\n chan = chan1.compose(chan2, front=True)\n targ = Choi(np.kron(Zm, Xp) + np.kron(Zp, Xm))\n self.assertEqual(chan, targ)\n\n # Compose different dimensions\n chan1 = Choi(np.eye(8) / 4, input_dims=2, output_dims=4)\n chan2 = Choi(np.eye(8) / 2, input_dims=4, output_dims=2)\n chan = chan1.compose(chan2, front=True)\n self.assertEqual(chan.dim, (4, 4))\n chan = chan2.compose(chan1, front=True)\n self.assertEqual(chan.dim, (2, 2))\n\n def test_expand(self):\n \"\"\"Test expand method.\"\"\"\n rho0, rho1 = np.diag([1, 0]), np.diag([0, 1])\n rho_init = DensityMatrix(np.kron(rho0, rho0))\n chan1 = Choi(self.choiI)\n chan2 = Choi(self.choiX)\n\n # X \\otimes I\n chan = chan1.expand(chan2)\n rho_targ = DensityMatrix(np.kron(rho1, rho0))\n self.assertEqual(chan.dim, (4, 4))\n self.assertEqual(rho_init.evolve(chan), rho_targ)\n\n # I \\otimes X\n chan = chan2.expand(chan1)\n rho_targ = DensityMatrix(np.kron(rho0, rho1))\n self.assertEqual(chan.dim, (4, 4))\n self.assertEqual(rho_init.evolve(chan), rho_targ)\n\n # Completely depolarizing\n chan_dep = Choi(self.depol_choi(1))\n chan = chan_dep.expand(chan_dep)\n rho_targ = DensityMatrix(np.diag([1, 1, 1, 1]) / 4)\n self.assertEqual(chan.dim, (4, 4))\n self.assertEqual(rho_init.evolve(chan), rho_targ)\n\n def test_tensor(self):\n \"\"\"Test tensor method.\"\"\"\n rho0, rho1 = np.diag([1, 0]), np.diag([0, 1])\n rho_init = DensityMatrix(np.kron(rho0, rho0))\n chan1 = Choi(self.choiI)\n chan2 = Choi(self.choiX)\n\n # X \\otimes I\n rho_targ = DensityMatrix(np.kron(rho1, rho0))\n chan = chan2.tensor(chan1)\n self.assertEqual(chan.dim, (4, 4))\n self.assertEqual(rho_init.evolve(chan), rho_targ)\n chan = chan2 ^ chan1\n self.assertEqual(chan.dim, (4, 4))\n self.assertEqual(rho_init.evolve(chan), rho_targ)\n\n # I \\otimes X\n rho_targ = DensityMatrix(np.kron(rho0, rho1))\n chan = chan1.tensor(chan2)\n self.assertEqual(chan.dim, (4, 4))\n self.assertEqual(rho_init.evolve(chan), rho_targ)\n chan = chan1 ^ chan2\n self.assertEqual(chan.dim, (4, 4))\n self.assertEqual(rho_init.evolve(chan), rho_targ)\n\n # Completely depolarizing\n rho_targ = DensityMatrix(np.diag([1, 1, 1, 1]) / 4)\n chan_dep = Choi(self.depol_choi(1))\n chan = chan_dep.tensor(chan_dep)\n self.assertEqual(chan.dim, (4, 4))\n self.assertEqual(rho_init.evolve(chan), rho_targ)\n chan = chan_dep ^ chan_dep\n self.assertEqual(chan.dim, (4, 4))\n self.assertEqual(rho_init.evolve(chan), rho_targ)\n\n def test_power(self):\n \"\"\"Test power method.\"\"\"\n # 10% depolarizing channel\n p_id = 0.9\n depol = Choi(self.depol_choi(1 - p_id))\n\n # Compose 3 times\n p_id3 = p_id ** 3\n chan3 = depol.power(3)\n targ3 = Choi(self.depol_choi(1 - 
p_id3))\n self.assertEqual(chan3, targ3)\n\n def test_add(self):\n \"\"\"Test add method.\"\"\"\n mat1 = 0.5 * self.choiI\n mat2 = 0.5 * self.depol_choi(1)\n chan1 = Choi(mat1)\n chan2 = Choi(mat2)\n targ = Choi(mat1 + mat2)\n self.assertEqual(chan1._add(chan2), targ)\n self.assertEqual(chan1 + chan2, targ)\n targ = Choi(mat1 - mat2)\n self.assertEqual(chan1 - chan2, targ)\n\n def test_add_qargs(self):\n \"\"\"Test add method with qargs.\"\"\"\n mat = self.rand_matrix(8 ** 2, 8 ** 2)\n mat0 = self.rand_matrix(4, 4)\n mat1 = self.rand_matrix(4, 4)\n\n op = Choi(mat)\n op0 = Choi(mat0)\n op1 = Choi(mat1)\n op01 = op1.tensor(op0)\n eye = Choi(self.choiI)\n\n with self.subTest(msg=\"qargs=[0]\"):\n value = op + op0([0])\n target = op + eye.tensor(eye).tensor(op0)\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"qargs=[1]\"):\n value = op + op0([1])\n target = op + eye.tensor(op0).tensor(eye)\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"qargs=[2]\"):\n value = op + op0([2])\n target = op + op0.tensor(eye).tensor(eye)\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"qargs=[0, 1]\"):\n value = op + op01([0, 1])\n target = op + eye.tensor(op1).tensor(op0)\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"qargs=[1, 0]\"):\n value = op + op01([1, 0])\n target = op + eye.tensor(op0).tensor(op1)\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"qargs=[0, 2]\"):\n value = op + op01([0, 2])\n target = op + op1.tensor(eye).tensor(op0)\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"qargs=[2, 0]\"):\n value = op + op01([2, 0])\n target = op + op0.tensor(eye).tensor(op1)\n self.assertEqual(value, target)\n\n def test_sub_qargs(self):\n \"\"\"Test subtract method with qargs.\"\"\"\n mat = self.rand_matrix(8 ** 2, 8 ** 2)\n mat0 = self.rand_matrix(4, 4)\n mat1 = self.rand_matrix(4, 4)\n\n op = Choi(mat)\n op0 = Choi(mat0)\n op1 = Choi(mat1)\n op01 = op1.tensor(op0)\n eye = Choi(self.choiI)\n\n with self.subTest(msg=\"qargs=[0]\"):\n value = op - op0([0])\n target = op - eye.tensor(eye).tensor(op0)\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"qargs=[1]\"):\n value = op - op0([1])\n target = op - eye.tensor(op0).tensor(eye)\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"qargs=[2]\"):\n value = op - op0([2])\n target = op - op0.tensor(eye).tensor(eye)\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"qargs=[0, 1]\"):\n value = op - op01([0, 1])\n target = op - eye.tensor(op1).tensor(op0)\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"qargs=[1, 0]\"):\n value = op - op01([1, 0])\n target = op - eye.tensor(op0).tensor(op1)\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"qargs=[0, 2]\"):\n value = op - op01([0, 2])\n target = op - op1.tensor(eye).tensor(op0)\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"qargs=[2, 0]\"):\n value = op - op01([2, 0])\n target = op - op0.tensor(eye).tensor(op1)\n self.assertEqual(value, target)\n\n def test_add_except(self):\n \"\"\"Test add method raises exceptions.\"\"\"\n chan1 = Choi(self.choiI)\n chan2 = Choi(np.eye(8))\n self.assertRaises(QiskitError, chan1._add, chan2)\n self.assertRaises(QiskitError, chan1._add, 5)\n\n def test_multiply(self):\n \"\"\"Test multiply method.\"\"\"\n chan = Choi(self.choiI)\n val = 0.5\n targ = Choi(val * self.choiI)\n self.assertEqual(chan._multiply(val), targ)\n self.assertEqual(val * chan, targ)\n\n def test_multiply_except(self):\n \"\"\"Test multiply method raises 
exceptions.\"\"\"\n chan = Choi(self.choiI)\n self.assertRaises(QiskitError, chan._multiply, \"s\")\n self.assertRaises(QiskitError, chan.__rmul__, \"s\")\n self.assertRaises(QiskitError, chan._multiply, chan)\n self.assertRaises(QiskitError, chan.__rmul__, chan)\n\n def test_negate(self):\n \"\"\"Test negate method\"\"\"\n chan = Choi(self.choiI)\n targ = Choi(-1 * self.choiI)\n self.assertEqual(-chan, targ)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2019, 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n# =============================================================================\n\n\"\"\" Test Quantum Gradient Framework \"\"\"\n\nimport unittest\nfrom test.python.opflow import QiskitOpflowTestCase\nfrom itertools import product\nimport numpy as np\nfrom ddt import ddt, data, idata, unpack\n\ntry:\n import jax.numpy as jnp\n\n _HAS_JAX = True\nexcept ImportError:\n _HAS_JAX = False\n\nfrom qiskit import QuantumCircuit, QuantumRegister, BasicAer\nfrom qiskit.test import slow_test\nfrom qiskit.utils import QuantumInstance\nfrom qiskit.exceptions import MissingOptionalLibraryError\nfrom qiskit.utils import algorithm_globals\nfrom qiskit.algorithms import VQE\nfrom qiskit.algorithms.optimizers import CG\nfrom qiskit.opflow import (\n I,\n X,\n Y,\n Z,\n StateFn,\n CircuitStateFn,\n ListOp,\n CircuitSampler,\n TensoredOp,\n SummedOp,\n)\nfrom qiskit.opflow.gradients import Gradient, NaturalGradient, Hessian\nfrom qiskit.opflow.gradients.qfi import QFI\nfrom qiskit.opflow.gradients.circuit_qfis import LinCombFull, OverlapBlockDiag, OverlapDiag\nfrom qiskit.circuit import Parameter\nfrom qiskit.circuit import ParameterVector\nfrom qiskit.circuit.library import RealAmplitudes, EfficientSU2\n\n\n@ddt\nclass TestGradients(QiskitOpflowTestCase):\n \"\"\"Test Qiskit Gradient Framework\"\"\"\n\n def setUp(self):\n super().setUp()\n algorithm_globals.random_seed = 50\n\n @data(\"lin_comb\", \"param_shift\", \"fin_diff\")\n def test_gradient_p(self, method):\n \"\"\"Test the state gradient for p\n |psi> = 1/sqrt(2)[[1, exp(ia)]]\n Tr(|psi><psi|Z) = 0\n Tr(|psi><psi|X) = cos(a)\n d<H>/da = - 0.5 sin(a)\n \"\"\"\n ham = 0.5 * X - 1 * Z\n a = Parameter(\"a\")\n params = a\n q = QuantumRegister(1)\n qc = QuantumCircuit(q)\n qc.h(q)\n qc.p(a, q[0])\n op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.0)\n\n state_grad = Gradient(grad_method=method).convert(operator=op, params=params)\n values_dict = [{a: np.pi / 4}, {a: 0}, {a: np.pi / 2}]\n correct_values = [-0.5 / np.sqrt(2), 0, -0.5]\n\n for i, value_dict in enumerate(values_dict):\n np.testing.assert_array_almost_equal(\n state_grad.assign_parameters(value_dict).eval(), correct_values[i], decimal=1\n )\n\n @data(\"lin_comb\", \"param_shift\", \"fin_diff\")\n def test_gradient_u(self, method):\n \"\"\"Test the state gradient for U\n Tr(|psi><psi|Z) = - 0.5 sin(a)cos(c)\n Tr(|psi><psi|X) = cos^2(a/2) cos(b+c) - sin^2(a/2) cos(b-c)\n \"\"\"\n\n ham = 0.5 * X - 1 * Z\n a = Parameter(\"a\")\n b = Parameter(\"b\")\n c = Parameter(\"c\")\n\n q = QuantumRegister(1)\n qc = QuantumCircuit(q)\n qc.h(q)\n qc.u(a, b, c, q[0])\n\n op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.0)\n params = [a, b, c]\n state_grad = Gradient(grad_method=method).convert(operator=op, params=params)\n values_dict = [{a: np.pi / 4, b: 0, c: 0}, {a: np.pi / 4, b: np.pi / 4, c: np.pi / 4}]\n correct_values = [[0.3536, 0, 0], [0.3232, -0.42678, -0.92678]]\n for i, value_dict in enumerate(values_dict):\n np.testing.assert_array_almost_equal(\n 
state_grad.assign_parameters(value_dict).eval(), correct_values[i], decimal=1\n )\n\n # Tr(|psi><psi|Z) = - 0.5 sin(a)cos(c)\n # Tr(|psi><psi|X) = cos^2(a/2) cos(b+c) - sin^2(a/2) cos(b-c)\n # dTr(|psi><psi|H)/da = 0.5(cos(2a)) + 0.5()\n\n q = QuantumRegister(1)\n qc = QuantumCircuit(q)\n qc.h(q)\n qc.u(a, a, a, q[0])\n\n op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.0)\n params = [a]\n state_grad = Gradient(grad_method=method).convert(operator=op, params=params)\n values_dict = [{a: np.pi / 4}, {a: np.pi / 2}]\n correct_values = [[-1.03033], [-1]]\n for i, value_dict in enumerate(values_dict):\n np.testing.assert_array_almost_equal(\n state_grad.assign_parameters(value_dict).eval(), correct_values[i], decimal=1\n )\n\n @data(\"lin_comb\", \"param_shift\")\n def test_gradient_efficient_su2(self, method):\n \"\"\"Test the state gradient for EfficientSU2\"\"\"\n observable = SummedOp(\n [\n 0.2252 * (I ^ I),\n 0.5716 * (Z ^ Z),\n 0.3435 * (I ^ Z),\n -0.4347 * (Z ^ I),\n 0.091 * (Y ^ Y),\n 0.091 * (X ^ X),\n ]\n ).reduce()\n\n d = 2\n ansatz = EfficientSU2(observable.num_qubits, reps=d)\n\n # Define a set of initial parameters\n parameters = ansatz.ordered_parameters\n\n operator = ~StateFn(observable) @ StateFn(ansatz)\n\n values_dict = [\n {param: np.pi / 4 for param in parameters},\n {param: np.pi / 2 for param in parameters},\n ]\n correct_values = [\n [\n -0.38617868191914206 + 0j,\n -0.014055349300198364 + 0j,\n -0.06385049040183734 + 0j,\n 0.13620629212619334 + 0j,\n -0.15180743339043595 + 0j,\n -0.2378393653877069 + 0j,\n 0.0024060546876464237 + 0j,\n 0.09977051760912459 + 0j,\n 0.40357721595080603 + 0j,\n 0.010453846462186653 + 0j,\n -0.04578581127401049 + 0j,\n 0.04578581127401063 + 0j,\n ],\n [\n 0.4346999999999997 + 0j,\n 0.0,\n 0.0,\n 0.6625999999999991 + 0j,\n 0.0,\n 0.0,\n -0.34349999999999986 + 0j,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n ],\n ]\n\n state_grad = Gradient(method).convert(operator, parameters)\n for i, value_dict in enumerate(values_dict):\n np.testing.assert_array_almost_equal(\n state_grad.assign_parameters(value_dict).eval(), correct_values[i], decimal=1\n )\n\n @data(\"lin_comb\", \"param_shift\", \"fin_diff\")\n def test_gradient_rxx(self, method):\n \"\"\"Test the state gradient for XX rotation\"\"\"\n ham = TensoredOp([Z, X])\n a = Parameter(\"a\")\n\n q = QuantumRegister(2)\n qc = QuantumCircuit(q)\n qc.h(q[0])\n qc.rxx(a, q[0], q[1])\n\n op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.0)\n params = [a]\n state_grad = Gradient(grad_method=method).convert(operator=op, params=params)\n values_dict = [{a: np.pi / 4}, {a: np.pi / 2}]\n correct_values = [[-0.707], [-1.0]]\n for i, value_dict in enumerate(values_dict):\n np.testing.assert_array_almost_equal(\n state_grad.assign_parameters(value_dict).eval(), correct_values[i], decimal=1\n )\n\n @data(\"lin_comb\", \"param_shift\", \"fin_diff\")\n def test_gradient_ryy(self, method):\n \"\"\"Test the state gradient for YY rotation\"\"\"\n alpha = Parameter(\"alpha\")\n ham = TensoredOp([Y, alpha * Y])\n a = Parameter(\"a\")\n\n q = QuantumRegister(2)\n qc = QuantumCircuit(q)\n qc.ryy(a, q[0], q[1])\n\n op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.0)\n state_grad = Gradient(grad_method=method).convert(operator=op, params=a)\n values_dict = [{a: np.pi / 8}, {a: np.pi}]\n correct_values = [[0], [0]]\n for i, value_dict in enumerate(values_dict):\n value_dict[alpha] = 1.0\n np.testing.assert_array_almost_equal(\n state_grad.assign_parameters(value_dict).eval(), correct_values[i], 
decimal=1\n )\n\n @data(\"lin_comb\", \"param_shift\", \"fin_diff\")\n def test_gradient_rzz(self, method):\n \"\"\"Test the state gradient for ZZ rotation\"\"\"\n ham = Z ^ X\n a = Parameter(\"a\")\n\n q = QuantumRegister(2)\n qc = QuantumCircuit(q)\n qc.h(q[0])\n qc.rzz(a, q[0], q[1])\n\n op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.0)\n params = [a]\n state_grad = Gradient(grad_method=method).convert(operator=op, params=params)\n values_dict = [{a: np.pi / 4}, {a: np.pi / 2}]\n correct_values = [[-0.707], [-1.0]]\n for i, value_dict in enumerate(values_dict):\n np.testing.assert_array_almost_equal(\n state_grad.assign_parameters(value_dict).eval(), correct_values[i], decimal=1\n )\n\n @data(\"lin_comb\", \"param_shift\", \"fin_diff\")\n def test_gradient_rzx(self, method):\n \"\"\"Test the state gradient for ZX rotation\"\"\"\n ham = Z ^ Z\n a = Parameter(\"a\")\n\n q = QuantumRegister(2)\n qc = QuantumCircuit(q)\n qc.h(q)\n qc.rzx(a, q[0], q[1])\n\n op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.0)\n params = [a]\n state_grad = Gradient(grad_method=method).convert(operator=op, params=params)\n values_dict = [{a: np.pi / 8}, {a: np.pi / 2}]\n correct_values = [[0.0], [0.0]]\n for i, value_dict in enumerate(values_dict):\n np.testing.assert_array_almost_equal(\n state_grad.assign_parameters(value_dict).eval(), correct_values[i], decimal=1\n )\n\n @data(\"lin_comb\", \"param_shift\", \"fin_diff\")\n def test_state_gradient1(self, method):\n \"\"\"Test the state gradient\n\n Tr(|psi><psi|Z) = sin(a)sin(b)\n Tr(|psi><psi|X) = cos(a)\n d<H>/da = - 0.5 sin(a) - 1 cos(a)sin(b)\n d<H>/db = - 1 sin(a)cos(b)\n \"\"\"\n\n ham = 0.5 * X - 1 * Z\n a = Parameter(\"a\")\n b = Parameter(\"b\")\n params = [a, b]\n\n q = QuantumRegister(1)\n qc = QuantumCircuit(q)\n qc.h(q)\n qc.rz(params[0], q[0])\n qc.rx(params[1], q[0])\n op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.0)\n\n state_grad = Gradient(grad_method=method).convert(operator=op, params=params)\n values_dict = [\n {a: np.pi / 4, b: np.pi},\n {params[0]: np.pi / 4, params[1]: np.pi / 4},\n {params[0]: np.pi / 2, params[1]: np.pi / 4},\n ]\n correct_values = [\n [-0.5 / np.sqrt(2), 1 / np.sqrt(2)],\n [-0.5 / np.sqrt(2) - 0.5, -1 / 2.0],\n [-0.5, -1 / np.sqrt(2)],\n ]\n\n for i, value_dict in enumerate(values_dict):\n np.testing.assert_array_almost_equal(\n state_grad.assign_parameters(value_dict).eval(), correct_values[i], decimal=1\n )\n\n @data(\"lin_comb\", \"param_shift\", \"fin_diff\")\n def test_state_gradient2(self, method):\n \"\"\"Test the state gradient 2\n\n Tr(|psi><psi|Z) = sin(a)sin(a)\n Tr(|psi><psi|X) = cos(a)\n d<H>/da = - 0.5 sin(a) - 2 cos(a)sin(a)\n \"\"\"\n ham = 0.5 * X - 1 * Z\n a = Parameter(\"a\")\n # b = Parameter('b')\n params = [a]\n\n q = QuantumRegister(1)\n qc = QuantumCircuit(q)\n qc.h(q)\n qc.rz(a, q[0])\n qc.rx(a, q[0])\n op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.0)\n\n state_grad = Gradient(grad_method=method).convert(operator=op, params=params)\n values_dict = [{a: np.pi / 4}, {a: 0}, {a: np.pi / 2}]\n correct_values = [-1.353553, -0, -0.5]\n\n for i, value_dict in enumerate(values_dict):\n np.testing.assert_array_almost_equal(\n state_grad.assign_parameters(value_dict).eval(), correct_values[i], decimal=1\n )\n\n @data(\"lin_comb\", \"param_shift\", \"fin_diff\")\n def test_state_gradient3(self, method):\n \"\"\"Test the state gradient 3\n\n Tr(|psi><psi|Z) = sin(a)sin(c(a)) = sin(a)sin(cos(a)+1)\n Tr(|psi><psi|X) = cos(a)\n d<H>/da = - 0.5 sin(a) - 1 
cos(a)sin(cos(a)+1) + 1 sin^2(a)cos(cos(a)+1)\n \"\"\"\n ham = 0.5 * X - 1 * Z\n a = Parameter(\"a\")\n # b = Parameter('b')\n params = a\n c = np.cos(a) + 1\n q = QuantumRegister(1)\n qc = QuantumCircuit(q)\n qc.h(q)\n qc.rz(a, q[0])\n qc.rx(c, q[0])\n op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.0)\n\n state_grad = Gradient(grad_method=method).convert(operator=op, params=params)\n values_dict = [{a: np.pi / 4}, {a: 0}, {a: np.pi / 2}]\n correct_values = [-1.1220, -0.9093, 0.0403]\n for i, value_dict in enumerate(values_dict):\n np.testing.assert_array_almost_equal(\n state_grad.assign_parameters(value_dict).eval(), correct_values[i], decimal=1\n )\n\n @data(\"lin_comb\", \"param_shift\", \"fin_diff\")\n def test_state_gradient4(self, method):\n \"\"\"Test the state gradient 4\n Tr(|psi><psi|ZX) = -cos(a)\n daTr(|psi><psi|ZX) = sin(a)\n \"\"\"\n\n ham = X ^ Z\n a = Parameter(\"a\")\n params = a\n\n q = QuantumRegister(2)\n qc = QuantumCircuit(q)\n qc.x(q[0])\n qc.h(q[1])\n qc.crz(a, q[0], q[1])\n op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.0)\n\n state_grad = Gradient(grad_method=method).convert(operator=op, params=params)\n values_dict = [{a: np.pi / 4}, {a: 0}, {a: np.pi / 2}]\n correct_values = [1 / np.sqrt(2), 0, 1]\n\n for i, value_dict in enumerate(values_dict):\n np.testing.assert_array_almost_equal(\n state_grad.assign_parameters(value_dict).eval(), correct_values[i], decimal=1\n )\n\n @data(\"lin_comb\", \"param_shift\", \"fin_diff\")\n def test_state_gradient5(self, method):\n \"\"\"Test the state gradient\n\n Tr(|psi><psi|Z) = sin(a0)sin(a1)\n Tr(|psi><psi|X) = cos(a0)\n d<H>/da0 = - 0.5 sin(a0) - 1 cos(a0)sin(a1)\n d<H>/da1 = - 1 sin(a0)cos(a1)\n \"\"\"\n\n ham = 0.5 * X - 1 * Z\n a = ParameterVector(\"a\", 2)\n params = a\n\n q = QuantumRegister(1)\n qc = QuantumCircuit(q)\n qc.h(q)\n qc.rz(params[0], q[0])\n qc.rx(params[1], q[0])\n op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.0)\n\n state_grad = Gradient(grad_method=method).convert(operator=op, params=params)\n values_dict = [\n {a: [np.pi / 4, np.pi]},\n {a: [np.pi / 4, np.pi / 4]},\n {a: [np.pi / 2, np.pi / 4]},\n ]\n correct_values = [\n [-0.5 / np.sqrt(2), 1 / np.sqrt(2)],\n [-0.5 / np.sqrt(2) - 0.5, -1 / 2.0],\n [-0.5, -1 / np.sqrt(2)],\n ]\n\n for i, value_dict in enumerate(values_dict):\n np.testing.assert_array_almost_equal(\n state_grad.assign_parameters(value_dict).eval(), correct_values[i], decimal=1\n )\n\n @data(\"lin_comb\", \"param_shift\", \"fin_diff\")\n def test_state_hessian(self, method):\n \"\"\"Test the state Hessian\n\n Tr(|psi><psi|Z) = sin(a)sin(b)\n Tr(|psi><psi|X) = cos(a)\n d^2<H>/da^2 = - 0.5 cos(a) + 1 sin(a)sin(b)\n d^2<H>/dbda = - 1 cos(a)cos(b)\n d^2<H>/dbda = - 1 cos(a)cos(b)\n d^2<H>/db^2 = + 1 sin(a)sin(b)\n \"\"\"\n\n ham = 0.5 * X - 1 * Z\n params = ParameterVector(\"a\", 2)\n\n q = QuantumRegister(1)\n qc = QuantumCircuit(q)\n qc.h(q)\n qc.rz(params[0], q[0])\n qc.rx(params[1], q[0])\n\n op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.0)\n state_hess = Hessian(hess_method=method).convert(operator=op)\n\n values_dict = [\n {params[0]: np.pi / 4, params[1]: np.pi},\n {params[0]: np.pi / 4, params[1]: np.pi / 4},\n ]\n correct_values = [\n [[-0.5 / np.sqrt(2), 1 / np.sqrt(2)], [1 / np.sqrt(2), 0]],\n [[-0.5 / np.sqrt(2) + 0.5, -1 / 2.0], [-1 / 2.0, 0.5]],\n ]\n\n for i, value_dict in enumerate(values_dict):\n np.testing.assert_array_almost_equal(\n state_hess.assign_parameters(value_dict).eval(), correct_values[i], decimal=1\n )\n\n 
@unittest.skipIf(not _HAS_JAX, \"Skipping test due to missing jax module.\")\n @data(\"lin_comb\", \"param_shift\", \"fin_diff\")\n def test_state_hessian_custom_combo_fn(self, method):\n \"\"\"Test the state Hessian with on an operator which includes\n a user-defined combo_fn.\n\n Tr(|psi><psi|Z) = sin(a)sin(b)\n Tr(|psi><psi|X) = cos(a)\n d^2<H>/da^2 = - 0.5 cos(a) + 1 sin(a)sin(b)\n d^2<H>/dbda = - 1 cos(a)cos(b)\n d^2<H>/dbda = - 1 cos(a)cos(b)\n d^2<H>/db^2 = + 1 sin(a)sin(b)\n \"\"\"\n\n ham = 0.5 * X - 1 * Z\n a = Parameter(\"a\")\n b = Parameter(\"b\")\n params = [(a, a), (a, b), (b, b)]\n\n q = QuantumRegister(1)\n qc = QuantumCircuit(q)\n qc.h(q)\n qc.rz(a, q[0])\n qc.rx(b, q[0])\n\n op = ListOp(\n [~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.0)],\n combo_fn=lambda x: x[0] ** 3 + 4 * x[0],\n )\n state_hess = Hessian(hess_method=method).convert(operator=op, params=params)\n\n values_dict = [\n {a: np.pi / 4, b: np.pi},\n {a: np.pi / 4, b: np.pi / 4},\n {a: np.pi / 2, b: np.pi / 4},\n ]\n\n correct_values = [\n [-1.28163104, 2.56326208, 1.06066017],\n [-0.04495626, -2.40716991, 1.8125],\n [2.82842712, -1.5, 1.76776695],\n ]\n\n for i, value_dict in enumerate(values_dict):\n np.testing.assert_array_almost_equal(\n state_hess.assign_parameters(value_dict).eval(), correct_values[i], decimal=1\n )\n\n @data(\"lin_comb\", \"param_shift\", \"fin_diff\")\n def test_prob_grad(self, method):\n \"\"\"Test the probability gradient\n\n dp0/da = cos(a)sin(b) / 2\n dp1/da = - cos(a)sin(b) / 2\n dp0/db = sin(a)cos(b) / 2\n dp1/db = - sin(a)cos(b) / 2\n \"\"\"\n\n a = Parameter(\"a\")\n b = Parameter(\"b\")\n params = [a, b]\n\n q = QuantumRegister(1)\n qc = QuantumCircuit(q)\n qc.h(q)\n qc.rz(params[0], q[0])\n qc.rx(params[1], q[0])\n\n op = CircuitStateFn(primitive=qc, coeff=1.0)\n\n prob_grad = Gradient(grad_method=method).convert(operator=op, params=params)\n values_dict = [\n {a: np.pi / 4, b: 0},\n {params[0]: np.pi / 4, params[1]: np.pi / 4},\n {params[0]: np.pi / 2, params[1]: np.pi},\n ]\n correct_values = [\n [[0, 0], [1 / (2 * np.sqrt(2)), -1 / (2 * np.sqrt(2))]],\n [[1 / 4, -1 / 4], [1 / 4, -1 / 4]],\n [[0, 0], [-1 / 2, 1 / 2]],\n ]\n for i, value_dict in enumerate(values_dict):\n for j, prob_grad_result in enumerate(prob_grad.assign_parameters(value_dict).eval()):\n np.testing.assert_array_almost_equal(\n prob_grad_result, correct_values[i][j], decimal=1\n )\n\n @data(\"lin_comb\", \"param_shift\", \"fin_diff\")\n def test_prob_hess(self, method):\n \"\"\"Test the probability Hessian using linear combination of unitaries method\n\n d^2p0/da^2 = - sin(a)sin(b) / 2\n d^2p1/da^2 = sin(a)sin(b) / 2\n d^2p0/dadb = cos(a)cos(b) / 2\n d^2p1/dadb = - cos(a)cos(b) / 2\n \"\"\"\n\n a = Parameter(\"a\")\n b = Parameter(\"b\")\n params = [(a, a), (a, b)]\n\n q = QuantumRegister(1)\n qc = QuantumCircuit(q)\n qc.h(q)\n qc.rz(a, q[0])\n qc.rx(b, q[0])\n\n op = CircuitStateFn(primitive=qc, coeff=1.0)\n\n prob_hess = Hessian(hess_method=method).convert(operator=op, params=params)\n values_dict = [{a: np.pi / 4, b: 0}, {a: np.pi / 4, b: np.pi / 4}, {a: np.pi / 2, b: np.pi}]\n correct_values = [\n [[0, 0], [1 / (2 * np.sqrt(2)), -1 / (2 * np.sqrt(2))]],\n [[-1 / 4, 1 / 4], [1 / 4, -1 / 4]],\n [[0, 0], [0, 0]],\n ]\n for i, value_dict in enumerate(values_dict):\n for j, prob_hess_result in enumerate(prob_hess.assign_parameters(value_dict).eval()):\n np.testing.assert_array_almost_equal(\n prob_hess_result, correct_values[i][j], decimal=1\n )\n\n @idata(\n product(\n [\"lin_comb\", 
\"param_shift\", \"fin_diff\"],\n [None, \"lasso\", \"ridge\", \"perturb_diag\", \"perturb_diag_elements\"],\n )\n )\n @unpack\n def test_natural_gradient(self, method, regularization):\n \"\"\"Test the natural gradient\"\"\"\n try:\n for params in (ParameterVector(\"a\", 2), [Parameter(\"a\"), Parameter(\"b\")]):\n ham = 0.5 * X - 1 * Z\n\n q = QuantumRegister(1)\n qc = QuantumCircuit(q)\n qc.h(q)\n qc.rz(params[0], q[0])\n qc.rx(params[1], q[0])\n\n op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.0)\n nat_grad = NaturalGradient(\n grad_method=method, regularization=regularization\n ).convert(operator=op)\n values_dict = [{params[0]: np.pi / 4, params[1]: np.pi / 2}]\n\n # reference values obtained by classically computing the natural gradients\n correct_values = [[-3.26, 1.63]] if regularization == \"ridge\" else [[-4.24, 0]]\n\n for i, value_dict in enumerate(values_dict):\n np.testing.assert_array_almost_equal(\n nat_grad.assign_parameters(value_dict).eval(), correct_values[i], decimal=1\n )\n except MissingOptionalLibraryError as ex:\n self.skipTest(str(ex))\n\n def test_natural_gradient2(self):\n \"\"\"Test the natural gradient 2\"\"\"\n with self.assertRaises(TypeError):\n _ = NaturalGradient().convert(None, None)\n\n @idata(\n zip(\n [\"lin_comb_full\", \"overlap_block_diag\", \"overlap_diag\"],\n [LinCombFull, OverlapBlockDiag, OverlapDiag],\n )\n )\n @unpack\n def test_natural_gradient3(self, qfi_method, circuit_qfi):\n \"\"\"Test the natural gradient 3\"\"\"\n nat_grad = NaturalGradient(qfi_method=qfi_method)\n self.assertIsInstance(nat_grad.qfi_method, circuit_qfi)\n\n @idata(\n product(\n [\"lin_comb\", \"param_shift\", \"fin_diff\"],\n [\"lin_comb_full\", \"overlap_block_diag\", \"overlap_diag\"],\n [None, \"ridge\", \"perturb_diag\", \"perturb_diag_elements\"],\n )\n )\n @unpack\n def test_natural_gradient4(self, grad_method, qfi_method, regularization):\n \"\"\"Test the natural gradient 4\"\"\"\n\n # Avoid regularization = lasso intentionally because it does not converge\n try:\n ham = 0.5 * X - 1 * Z\n a = Parameter(\"a\")\n params = a\n\n q = QuantumRegister(1)\n qc = QuantumCircuit(q)\n qc.h(q)\n qc.rz(a, q[0])\n\n op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.0)\n nat_grad = NaturalGradient(\n grad_method=grad_method, qfi_method=qfi_method, regularization=regularization\n ).convert(operator=op, params=params)\n values_dict = [{a: np.pi / 4}]\n correct_values = [[0.0]] if regularization == \"ridge\" else [[-1.41421342]]\n for i, value_dict in enumerate(values_dict):\n np.testing.assert_array_almost_equal(\n nat_grad.assign_parameters(value_dict).eval(), correct_values[i], decimal=3\n )\n except MissingOptionalLibraryError as ex:\n self.skipTest(str(ex))\n\n @unittest.skipIf(not _HAS_JAX, \"Skipping test due to missing jax module.\")\n @idata(product([\"lin_comb\", \"param_shift\", \"fin_diff\"], [True, False]))\n @unpack\n def test_jax_chain_rule(self, method: str, autograd: bool):\n \"\"\"Test the chain rule functionality using Jax\n\n d<H>/d<X> = 2<X>\n d<H>/d<Z> = - sin(<Z>)\n <Z> = Tr(|psi><psi|Z) = sin(a)sin(b)\n <X> = Tr(|psi><psi|X) = cos(a)\n d<H>/da = d<H>/d<X> d<X>/da + d<H>/d<Z> d<Z>/da = - 2 cos(a)sin(a)\n - sin(sin(a)sin(b)) * cos(a)sin(b)\n d<H>/db = d<H>/d<X> d<X>/db + d<H>/d<Z> d<Z>/db = - sin(sin(a)sin(b)) * sin(a)cos(b)\n \"\"\"\n\n a = Parameter(\"a\")\n b = Parameter(\"b\")\n params = [a, b]\n\n q = QuantumRegister(1)\n qc = QuantumCircuit(q)\n qc.h(q)\n qc.rz(params[0], q[0])\n qc.rx(params[1], q[0])\n\n def combo_fn(x):\n 
return jnp.power(x[0], 2) + jnp.cos(x[1])\n\n def grad_combo_fn(x):\n return np.array([2 * x[0], -np.sin(x[1])])\n\n op = ListOp(\n [\n ~StateFn(X) @ CircuitStateFn(primitive=qc, coeff=1.0),\n ~StateFn(Z) @ CircuitStateFn(primitive=qc, coeff=1.0),\n ],\n combo_fn=combo_fn,\n grad_combo_fn=None if autograd else grad_combo_fn,\n )\n\n state_grad = Gradient(grad_method=method).convert(operator=op, params=params)\n values_dict = [\n {a: np.pi / 4, b: np.pi},\n {params[0]: np.pi / 4, params[1]: np.pi / 4},\n {params[0]: np.pi / 2, params[1]: np.pi / 4},\n ]\n correct_values = [[-1.0, 0.0], [-1.2397, -0.2397], [0, -0.45936]]\n for i, value_dict in enumerate(values_dict):\n np.testing.assert_array_almost_equal(\n state_grad.assign_parameters(value_dict).eval(), correct_values[i], decimal=1\n )\n\n @data(\"lin_comb\", \"param_shift\", \"fin_diff\")\n def test_grad_combo_fn_chain_rule(self, method):\n \"\"\"Test the chain rule for a custom gradient combo function.\"\"\"\n np.random.seed(2)\n\n def combo_fn(x):\n amplitudes = x[0].primitive.data\n pdf = np.multiply(amplitudes, np.conj(amplitudes))\n return np.sum(np.log(pdf)) / (-len(amplitudes))\n\n def grad_combo_fn(x):\n amplitudes = x[0].primitive.data\n pdf = np.multiply(amplitudes, np.conj(amplitudes))\n grad = []\n for prob in pdf:\n grad += [-1 / prob]\n return grad\n\n qc = RealAmplitudes(2, reps=1)\n grad_op = ListOp([StateFn(qc.decompose())], combo_fn=combo_fn, grad_combo_fn=grad_combo_fn)\n grad = Gradient(grad_method=method).convert(grad_op)\n value_dict = dict(zip(qc.ordered_parameters, np.random.rand(len(qc.ordered_parameters))))\n correct_values = [\n [(-0.16666259133549044 + 0j)],\n [(-7.244949702732864 + 0j)],\n [(-2.979791752749964 + 0j)],\n [(-5.310186078432614 + 0j)],\n ]\n np.testing.assert_array_almost_equal(\n grad.assign_parameters(value_dict).eval(), correct_values\n )\n\n def test_grad_combo_fn_chain_rule_nat_grad(self):\n \"\"\"Test the chain rule for a custom gradient combo function.\"\"\"\n np.random.seed(2)\n\n def combo_fn(x):\n amplitudes = x[0].primitive.data\n pdf = np.multiply(amplitudes, np.conj(amplitudes))\n return np.sum(np.log(pdf)) / (-len(amplitudes))\n\n def grad_combo_fn(x):\n amplitudes = x[0].primitive.data\n pdf = np.multiply(amplitudes, np.conj(amplitudes))\n grad = []\n for prob in pdf:\n grad += [-1 / prob]\n return grad\n\n try:\n qc = RealAmplitudes(2, reps=1)\n grad_op = ListOp(\n [StateFn(qc.decompose())], combo_fn=combo_fn, grad_combo_fn=grad_combo_fn\n )\n grad = NaturalGradient(grad_method=\"lin_comb\", regularization=\"ridge\").convert(\n grad_op, qc.ordered_parameters\n )\n value_dict = dict(\n zip(qc.ordered_parameters, np.random.rand(len(qc.ordered_parameters)))\n )\n correct_values = [[0.20777236], [-18.92560338], [-15.89005475], [-10.44002031]]\n np.testing.assert_array_almost_equal(\n grad.assign_parameters(value_dict).eval(), correct_values, decimal=3\n )\n except MissingOptionalLibraryError as ex:\n self.skipTest(str(ex))\n\n @data(\"lin_comb\", \"param_shift\", \"fin_diff\")\n def test_operator_coefficient_gradient(self, method):\n \"\"\"Test the operator coefficient gradient\n\n Tr( | psi > < psi | Z) = sin(a)sin(b)\n Tr( | psi > < psi | X) = cos(a)\n \"\"\"\n a = Parameter(\"a\")\n b = Parameter(\"b\")\n q = QuantumRegister(1)\n qc = QuantumCircuit(q)\n qc.h(q)\n qc.rz(a, q[0])\n qc.rx(b, q[0])\n\n coeff_0 = Parameter(\"c_0\")\n coeff_1 = Parameter(\"c_1\")\n ham = coeff_0 * X + coeff_1 * Z\n op = StateFn(ham, is_measurement=True) @ CircuitStateFn(primitive=qc, coeff=1.0)\n 
gradient_coeffs = [coeff_0, coeff_1]\n coeff_grad = Gradient(grad_method=method).convert(op, gradient_coeffs)\n values_dict = [\n {coeff_0: 0.5, coeff_1: -1, a: np.pi / 4, b: np.pi},\n {coeff_0: 0.5, coeff_1: -1, a: np.pi / 4, b: np.pi / 4},\n ]\n correct_values = [[1 / np.sqrt(2), 0], [1 / np.sqrt(2), 1 / 2]]\n for i, value_dict in enumerate(values_dict):\n np.testing.assert_array_almost_equal(\n coeff_grad.assign_parameters(value_dict).eval(), correct_values[i], decimal=1\n )\n\n @data(\"lin_comb\", \"param_shift\", \"fin_diff\")\n def test_operator_coefficient_hessian(self, method):\n \"\"\"Test the operator coefficient hessian\n\n <Z> = Tr( | psi > < psi | Z) = sin(a)sin(b)\n <X> = Tr( | psi > < psi | X) = cos(a)\n d<H>/dc_0 = 2 * c_0 * <X> + c_1 * <Z>\n d<H>/dc_1 = c_0 * <Z>\n d^2<H>/dc_0^2 = 2 * <X>\n d^2<H>/dc_0dc_1 = <Z>\n d^2<H>/dc_1dc_0 = <Z>\n d^2<H>/dc_1^2 = 0\n \"\"\"\n a = Parameter(\"a\")\n b = Parameter(\"b\")\n q = QuantumRegister(1)\n qc = QuantumCircuit(q)\n qc.h(q)\n qc.rz(a, q[0])\n qc.rx(b, q[0])\n\n coeff_0 = Parameter(\"c_0\")\n coeff_1 = Parameter(\"c_1\")\n ham = coeff_0 * coeff_0 * X + coeff_1 * coeff_0 * Z\n op = StateFn(ham, is_measurement=True) @ CircuitStateFn(primitive=qc, coeff=1.0)\n gradient_coeffs = [(coeff_0, coeff_0), (coeff_0, coeff_1), (coeff_1, coeff_1)]\n coeff_grad = Hessian(hess_method=method).convert(op, gradient_coeffs)\n values_dict = [\n {coeff_0: 0.5, coeff_1: -1, a: np.pi / 4, b: np.pi},\n {coeff_0: 0.5, coeff_1: -1, a: np.pi / 4, b: np.pi / 4},\n ]\n\n correct_values = [[2 / np.sqrt(2), 0, 0], [2 / np.sqrt(2), 1 / 2, 0]]\n\n for i, value_dict in enumerate(values_dict):\n np.testing.assert_array_almost_equal(\n coeff_grad.assign_parameters(value_dict).eval(), correct_values[i], decimal=1\n )\n\n @data(\"lin_comb\", \"param_shift\", \"fin_diff\")\n def test_circuit_sampler(self, method):\n \"\"\"Test the gradient with circuit sampler\n\n Tr(|psi><psi|Z) = sin(a)sin(b)\n Tr(|psi><psi|X) = cos(a)\n d<H>/da = - 0.5 sin(a) - 1 cos(a)sin(b)\n d<H>/db = - 1 sin(a)cos(b)\n \"\"\"\n\n ham = 0.5 * X - 1 * Z\n a = Parameter(\"a\")\n b = Parameter(\"b\")\n params = [a, b]\n\n q = QuantumRegister(1)\n qc = QuantumCircuit(q)\n qc.h(q)\n qc.rz(params[0], q[0])\n qc.rx(params[1], q[0])\n op = ~StateFn(ham) @ CircuitStateFn(primitive=qc, coeff=1.0)\n\n shots = 8000\n if method == \"fin_diff\":\n np.random.seed(8)\n state_grad = Gradient(grad_method=method, epsilon=shots ** (-1 / 6.0)).convert(\n operator=op\n )\n else:\n state_grad = Gradient(grad_method=method).convert(operator=op)\n values_dict = [\n {a: np.pi / 4, b: np.pi},\n {params[0]: np.pi / 4, params[1]: np.pi / 4},\n {params[0]: np.pi / 2, params[1]: np.pi / 4},\n ]\n correct_values = [\n [-0.5 / np.sqrt(2), 1 / np.sqrt(2)],\n [-0.5 / np.sqrt(2) - 0.5, -1 / 2.0],\n [-0.5, -1 / np.sqrt(2)],\n ]\n\n backend = BasicAer.get_backend(\"qasm_simulator\")\n q_instance = QuantumInstance(backend=backend, shots=shots)\n\n for i, value_dict in enumerate(values_dict):\n sampler = CircuitSampler(backend=q_instance).convert(\n state_grad, params={k: [v] for k, v in value_dict.items()}\n )\n np.testing.assert_array_almost_equal(sampler.eval()[0], correct_values[i], decimal=1)\n\n @data(\"lin_comb\", \"param_shift\", \"fin_diff\")\n def test_circuit_sampler2(self, method):\n \"\"\"Test the probability gradient with the circuit sampler\n\n dp0/da = cos(a)sin(b) / 2\n dp1/da = - cos(a)sin(b) / 2\n dp0/db = sin(a)cos(b) / 2\n dp1/db = - sin(a)cos(b) / 2\n \"\"\"\n\n a = Parameter(\"a\")\n b = Parameter(\"b\")\n 
params = [a, b]\n\n q = QuantumRegister(1)\n qc = QuantumCircuit(q)\n qc.h(q)\n qc.rz(params[0], q[0])\n qc.rx(params[1], q[0])\n\n op = CircuitStateFn(primitive=qc, coeff=1.0)\n\n shots = 8000\n if method == \"fin_diff\":\n np.random.seed(8)\n prob_grad = Gradient(grad_method=method, epsilon=shots ** (-1 / 6.0)).convert(\n operator=op, params=params\n )\n else:\n prob_grad = Gradient(grad_method=method).convert(operator=op, params=params)\n values_dict = [\n {a: [np.pi / 4], b: [0]},\n {params[0]: [np.pi / 4], params[1]: [np.pi / 4]},\n {params[0]: [np.pi / 2], params[1]: [np.pi]},\n ]\n correct_values = [\n [[0, 0], [1 / (2 * np.sqrt(2)), -1 / (2 * np.sqrt(2))]],\n [[1 / 4, -1 / 4], [1 / 4, -1 / 4]],\n [[0, 0], [-1 / 2, 1 / 2]],\n ]\n\n backend = BasicAer.get_backend(\"qasm_simulator\")\n q_instance = QuantumInstance(backend=backend, shots=shots)\n\n for i, value_dict in enumerate(values_dict):\n sampler = CircuitSampler(backend=q_instance).convert(prob_grad, params=value_dict)\n result = sampler.eval()[0]\n self.assertTrue(np.allclose(result[0].toarray(), correct_values[i][0], atol=0.1))\n self.assertTrue(np.allclose(result[1].toarray(), correct_values[i][1], atol=0.1))\n\n @idata([\"statevector_simulator\", \"qasm_simulator\"])\n def test_gradient_wrapper(self, backend_type):\n \"\"\"Test the gradient wrapper for probability gradients\n dp0/da = cos(a)sin(b) / 2\n dp1/da = - cos(a)sin(b) / 2\n dp0/db = sin(a)cos(b) / 2\n dp1/db = - sin(a)cos(b) / 2\n \"\"\"\n method = \"param_shift\"\n a = Parameter(\"a\")\n b = Parameter(\"b\")\n params = [a, b]\n\n q = QuantumRegister(1)\n qc = QuantumCircuit(q)\n qc.h(q)\n qc.rz(params[0], q[0])\n qc.rx(params[1], q[0])\n\n op = CircuitStateFn(primitive=qc, coeff=1.0)\n\n shots = 8000\n backend = BasicAer.get_backend(backend_type)\n q_instance = QuantumInstance(\n backend=backend, shots=shots, seed_simulator=2, seed_transpiler=2\n )\n if method == \"fin_diff\":\n np.random.seed(8)\n prob_grad = Gradient(grad_method=method, epsilon=shots ** (-1 / 6.0)).gradient_wrapper(\n operator=op, bind_params=params, backend=q_instance\n )\n else:\n prob_grad = Gradient(grad_method=method).gradient_wrapper(\n operator=op, bind_params=params, backend=q_instance\n )\n values = [[np.pi / 4, 0], [np.pi / 4, np.pi / 4], [np.pi / 2, np.pi]]\n correct_values = [\n [[0, 0], [1 / (2 * np.sqrt(2)), -1 / (2 * np.sqrt(2))]],\n [[1 / 4, -1 / 4], [1 / 4, -1 / 4]],\n [[0, 0], [-1 / 2, 1 / 2]],\n ]\n for i, value in enumerate(values):\n result = prob_grad(value)\n if backend_type == \"qasm_simulator\": # sparse result\n result = [result[0].toarray(), result[1].toarray()]\n\n self.assertTrue(np.allclose(result[0], correct_values[i][0], atol=0.1))\n self.assertTrue(np.allclose(result[1], correct_values[i][1], atol=0.1))\n\n @data((\"statevector_simulator\", 1e-7), (\"qasm_simulator\", 2e-1))\n @unpack\n def test_gradient_wrapper2(self, backend_type, atol):\n \"\"\"Test the gradient wrapper for gradients checking that statevector and qasm gives the\n same results\n\n dp0/da = cos(a)sin(b) / 2\n dp1/da = - cos(a)sin(b) / 2\n dp0/db = sin(a)cos(b) / 2\n dp1/db = - sin(a)cos(b) / 2\n \"\"\"\n method = \"lin_comb\"\n a = Parameter(\"a\")\n b = Parameter(\"b\")\n params = [a, b]\n\n qc = QuantumCircuit(2)\n qc.h(1)\n qc.h(0)\n qc.sdg(1)\n qc.cz(0, 1)\n qc.ry(params[0], 0)\n qc.rz(params[1], 0)\n qc.h(1)\n\n obs = (Z ^ X) - (Y ^ Y)\n op = StateFn(obs, is_measurement=True) @ CircuitStateFn(primitive=qc)\n\n shots = 8192 if backend_type == \"qasm_simulator\" else 1\n\n values = [[0, np.pi 
/ 2], [np.pi / 4, np.pi / 4], [np.pi / 3, np.pi / 9]]\n correct_values = [[-4.0, 0], [-2.0, -4.82842712], [-0.68404029, -7.01396121]]\n for i, value in enumerate(values):\n backend = BasicAer.get_backend(backend_type)\n q_instance = QuantumInstance(\n backend=backend, shots=shots, seed_simulator=2, seed_transpiler=2\n )\n grad = NaturalGradient(grad_method=method).gradient_wrapper(\n operator=op, bind_params=params, backend=q_instance\n )\n result = grad(value)\n self.assertTrue(np.allclose(result, correct_values[i], atol=atol))\n\n @slow_test\n def test_vqe(self):\n \"\"\"Test VQE with gradients\"\"\"\n\n method = \"lin_comb\"\n backend = \"qasm_simulator\"\n q_instance = QuantumInstance(\n BasicAer.get_backend(backend), seed_simulator=79, seed_transpiler=2\n )\n # Define the Hamiltonian\n h2_hamiltonian = (\n -1.05 * (I ^ I) + 0.39 * (I ^ Z) - 0.39 * (Z ^ I) - 0.01 * (Z ^ Z) + 0.18 * (X ^ X)\n )\n h2_energy = -1.85727503\n\n # Define the Ansatz\n wavefunction = QuantumCircuit(2)\n params = ParameterVector(\"theta\", length=8)\n itr = iter(params)\n wavefunction.ry(next(itr), 0)\n wavefunction.ry(next(itr), 1)\n wavefunction.rz(next(itr), 0)\n wavefunction.rz(next(itr), 1)\n wavefunction.cx(0, 1)\n wavefunction.ry(next(itr), 0)\n wavefunction.ry(next(itr), 1)\n wavefunction.rz(next(itr), 0)\n wavefunction.rz(next(itr), 1)\n\n # Conjugate Gradient algorithm\n optimizer = CG(maxiter=10)\n\n grad = Gradient(grad_method=method)\n\n # Gradient callable\n vqe = VQE(\n ansatz=wavefunction, optimizer=optimizer, gradient=grad, quantum_instance=q_instance\n )\n\n result = vqe.compute_minimum_eigenvalue(operator=h2_hamiltonian)\n np.testing.assert_almost_equal(result.optimal_value, h2_energy, decimal=0)\n\n def test_qfi_overlap_works_with_bound_parameters(self):\n \"\"\"Test all QFI methods work if the circuit contains a gate with bound parameters.\"\"\"\n\n x = Parameter(\"x\")\n circuit = QuantumCircuit(1)\n circuit.ry(np.pi / 4, 0)\n circuit.rx(x, 0)\n state = StateFn(circuit)\n\n methods = [\"lin_comb_full\", \"overlap_diag\", \"overlap_block_diag\"]\n reference = 0.5\n\n for method in methods:\n with self.subTest(method):\n qfi = QFI(method)\n value = np.real(qfi.convert(state, [x]).bind_parameters({x: 0.12}).eval())\n self.assertAlmostEqual(value[0][0], reference)\n\n\n@ddt\nclass TestParameterGradients(QiskitOpflowTestCase):\n \"\"\"Test taking the gradient of parameter expressions.\"\"\"\n\n def test_grad(self):\n \"\"\"Test taking the gradient of parameter expressions.\"\"\"\n x, y = Parameter(\"x\"), Parameter(\"y\")\n with self.subTest(\"linear\"):\n expr = 2 * x + y\n\n grad = expr.gradient(x)\n self.assertEqual(grad, 2)\n\n grad = expr.gradient(y)\n self.assertEqual(grad, 1)\n\n with self.subTest(\"polynomial\"):\n expr = x * x * x - x * y + y * y\n\n grad = expr.gradient(x)\n self.assertEqual(grad, 3 * x * x - y)\n\n grad = expr.gradient(y)\n self.assertEqual(grad, -1 * x + 2 * y)\n\n def test_converted_to_float_if_bound(self):\n \"\"\"Test the gradient is a float when no free symbols are left.\"\"\"\n x = Parameter(\"x\")\n expr = 2 * x + 1\n grad = expr.gradient(x)\n self.assertIsInstance(grad, float)\n\n def test_converted_to_complex_if_bound(self):\n \"\"\"Test the gradient is a complex when no free symbols are left.\"\"\"\n x = Parameter(\"x\")\n x2 = 1j * x\n expr = 2 * x2 + 1\n grad = expr.gradient(x)\n self.assertIsInstance(grad, complex)\n\n\n@ddt\nclass TestQFI(QiskitOpflowTestCase):\n \"\"\"Tests for the quantum Fisher information.\"\"\"\n\n @data(\"lin_comb_full\", 
\"overlap_block_diag\", \"overlap_diag\")\n def test_qfi_simple(self, method):\n \"\"\"Test if the quantum fisher information calculation is correct for a simple test case.\n\n QFI = [[1, 0], [0, 1]] - [[0, 0], [0, cos^2(a)]]\n \"\"\"\n # create the circuit\n a, b = Parameter(\"a\"), Parameter(\"b\")\n qc = QuantumCircuit(1)\n qc.h(0)\n qc.rz(a, 0)\n qc.rx(b, 0)\n\n # convert the circuit to a QFI object\n op = CircuitStateFn(qc)\n qfi = QFI(qfi_method=method).convert(operator=op)\n\n # test for different values\n values_dict = [{a: np.pi / 4, b: 0.1}, {a: np.pi, b: 0.1}, {a: np.pi / 2, b: 0.1}]\n correct_values = [[[1, 0], [0, 0.5]], [[1, 0], [0, 0]], [[1, 0], [0, 1]]]\n\n for i, value_dict in enumerate(values_dict):\n actual = qfi.assign_parameters(value_dict).eval()\n np.testing.assert_array_almost_equal(actual, correct_values[i], decimal=1)\n\n def test_qfi_maxcut(self):\n \"\"\"Test the QFI for a simple MaxCut problem.\n\n This is interesting because it contains the same parameters in different gates.\n \"\"\"\n # create maxcut circuit for the hamiltonian\n # H = (I ^ I ^ Z ^ Z) + (I ^ Z ^ I ^ Z) + (Z ^ I ^ I ^ Z) + (I ^ Z ^ Z ^ I)\n\n x = ParameterVector(\"x\", 2)\n ansatz = QuantumCircuit(4)\n\n # initial hadamard layer\n ansatz.h(ansatz.qubits)\n\n # e^{iZZ} layers\n def expiz(qubit0, qubit1):\n ansatz.cx(qubit0, qubit1)\n ansatz.rz(2 * x[0], qubit1)\n ansatz.cx(qubit0, qubit1)\n\n expiz(2, 1)\n expiz(3, 0)\n expiz(2, 0)\n expiz(1, 0)\n\n # mixer layer with RX gates\n for i in range(ansatz.num_qubits):\n ansatz.rx(2 * x[1], i)\n\n point = {x[0]: 0.4, x[1]: 0.69}\n\n # reference computed via finite difference\n reference = np.array([[16.0, -5.551], [-5.551, 18.497]])\n\n # QFI from gradient framework\n qfi = QFI().convert(CircuitStateFn(ansatz), params=x[:])\n actual = np.array(qfi.bind_parameters(point).eval()).real\n np.testing.assert_array_almost_equal(actual, reference, decimal=3)\n\n def test_qfi_circuit_shared_params(self):\n \"\"\"Test the QFI circuits for parameters shared across some gates.\"\"\"\n # create the test circuit\n x = Parameter(\"x\")\n circuit = QuantumCircuit(1)\n circuit.rx(x, 0)\n circuit.rx(x, 0)\n\n # construct the QFI circuits used in the evaluation\n\n circuit1 = QuantumCircuit(2)\n circuit1.h(1)\n circuit1.x(1)\n circuit1.cx(1, 0)\n circuit1.x(1)\n circuit1.cx(1, 0)\n # circuit1.rx(x, 0) # trimmed\n # circuit1.rx(x, 0) # trimmed\n circuit1.h(1)\n\n circuit2 = QuantumCircuit(2)\n circuit2.h(1)\n circuit2.x(1)\n circuit2.cx(1, 0)\n circuit2.x(1)\n circuit2.rx(x, 0)\n circuit2.cx(1, 0)\n # circuit2.rx(x, 0) # trimmed\n circuit2.h(1)\n\n circuit3 = QuantumCircuit(2)\n circuit3.h(1)\n circuit3.cx(1, 0)\n circuit3.x(1)\n circuit3.rx(x, 0)\n circuit3.cx(1, 0)\n # circuit3.rx(x, 0) # trimmed\n circuit3.x(1)\n circuit3.h(1)\n\n circuit4 = QuantumCircuit(2)\n circuit4.h(1)\n circuit4.rx(x, 0)\n circuit4.x(1)\n circuit4.cx(1, 0)\n circuit4.x(1)\n circuit4.cx(1, 0)\n # circuit4.rx(x, 0) # trimmed\n circuit4.h(1)\n\n # this naming and adding of register is required bc circuit's are only equal if the\n # register have the same names\n circuit5 = QuantumCircuit(2)\n circuit5.h(1)\n circuit5.sdg(1)\n circuit5.cx(1, 0)\n # circuit5.rx(x, 0) # trimmed\n circuit5.h(1)\n\n circuit6 = QuantumCircuit(2)\n circuit6.h(1)\n circuit6.sdg(1)\n circuit6.rx(x, 0)\n circuit6.cx(1, 0)\n circuit6.h(1)\n\n # compare\n qfi = QFI().convert(StateFn(circuit), params=[x])\n\n circuit_sets = (\n [circuit1, circuit2, circuit3, circuit4],\n [circuit5, circuit6],\n [circuit5, circuit6],\n )\n 
list_ops = (\n qfi.oplist[0].oplist[0].oplist[:-1],\n qfi.oplist[0].oplist[0].oplist[-1].oplist[0].oplist,\n qfi.oplist[0].oplist[0].oplist[-1].oplist[1].oplist,\n )\n\n # compose both on the same circuit such that the comparison works\n base = QuantumCircuit(2)\n\n for i, (circuit_set, list_op) in enumerate(zip(circuit_sets, list_ops)):\n for j, (reference, composed_op) in enumerate(zip(circuit_set, list_op)):\n with self.subTest(f\"set {i} circuit {j}\"):\n self.assertEqual(\n base.compose(composed_op[1].primitive), base.compose(reference)\n )\n\n def test_overlap_qfi_bound_parameters(self):\n \"\"\"Test the overlap QFI works on a circuit with multi-parameter bound gates.\"\"\"\n x = Parameter(\"x\")\n circuit = QuantumCircuit(1)\n circuit.u(1, 2, 3, 0)\n circuit.rx(x, 0)\n\n qfi = QFI(\"overlap_diag\").convert(StateFn(circuit), [x])\n value = qfi.bind_parameters({x: 1}).eval()[0][0]\n ref = 0.87737713\n self.assertAlmostEqual(value, ref)\n\n def test_overlap_qfi_raises_on_multiparam(self):\n \"\"\"Test the overlap QFI raises an appropriate error on multi-param unbound gates.\"\"\"\n x = ParameterVector(\"x\", 2)\n circuit = QuantumCircuit(1)\n circuit.u(x[0], x[1], 2, 0)\n\n with self.assertRaises(NotImplementedError):\n _ = QFI(\"overlap_diag\").convert(StateFn(circuit), [x])\n\n def test_overlap_qfi_raises_on_unsupported_gate(self):\n \"\"\"Test the overlap QFI raises an appropriate error on multi-param unbound gates.\"\"\"\n x = Parameter(\"x\")\n circuit = QuantumCircuit(1)\n circuit.p(x, 0)\n\n with self.assertRaises(NotImplementedError):\n _ = QFI(\"overlap_diag\").convert(StateFn(circuit), [x])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.cosh",
"numpy.asarray",
"numpy.cos",
"numpy.sin",
"numpy.floor",
"numpy.exp",
"numpy.array",
"numpy.tanh"
],
[
"numpy.dot",
"numpy.sqrt",
"numpy.conj",
"numpy.allclose",
"numpy.arange",
"numpy.eye",
"numpy.kron",
"numpy.linalg.norm",
"numpy.ones",
"numpy.iinfo",
"numpy.random.rand",
"numpy.testing.assert_allclose",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.random.default_rng",
"numpy.vdot"
],
[
"numpy.dot",
"numpy.log",
"numpy.cos",
"numpy.sin",
"numpy.angle",
"numpy.array"
],
[
"numpy.diag",
"numpy.dot",
"numpy.sqrt",
"numpy.eye",
"numpy.kron",
"numpy.iinfo",
"numpy.testing.assert_allclose",
"numpy.exp",
"numpy.array",
"numpy.trace",
"numpy.random.default_rng"
],
[
"numpy.diag",
"numpy.eye",
"numpy.kron",
"numpy.testing.assert_allclose",
"numpy.array"
],
[
"numpy.log",
"numpy.sqrt",
"numpy.random.seed",
"numpy.conj",
"numpy.allclose",
"numpy.cos",
"numpy.sin",
"numpy.testing.assert_almost_equal",
"numpy.array",
"numpy.testing.assert_array_almost_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JunjieChen-2020/ColossalAI | [
"0e121a256ac4f628f5d26a16dc553cd0024ca2d5",
"0e121a256ac4f628f5d26a16dc553cd0024ca2d5"
] | [
"colossalai/context/moe_context.py",
"tests/test_moe/test_grad_handler.py"
] | [
"import torch\r\nimport torch.distributed as dist\r\nfrom .parallel_mode import ParallelMode\r\nfrom typing import Tuple\r\n\r\n\r\ndef _check_sanity():\r\n from colossalai.core import global_context as gpc\r\n if gpc.tensor_parallel_size > 1 or gpc.pipeline_parallel_size > 1:\r\n raise NotImplementedError(\"Moe is not compatible with tensor or \"\r\n \"pipeline parallel at present.\")\r\n\r\n\r\nclass MoeParallelInfo:\r\n \"\"\"Moe parallelism information, storing parallel sizes and groups.\r\n \"\"\"\r\n\r\n def __init__(self, ep_size: int, dp_size: int):\r\n _check_sanity()\r\n self.ep_size = ep_size\r\n self.dp_size = dp_size\r\n self.ep_group = None\r\n # data parallel group for experts, since ep_group is different\r\n # we may have different dp_group from get_group(ParallelMode.DATA)\r\n self.dp_group = None\r\n\r\n # Here we assume tensor parallel size = 1\r\n # Otherwise, MoE can't be used\r\n # Since TENSOR parallel group and DATA parallel group\r\n # have been created, we can use them directly.\r\n if ep_size == 1:\r\n from colossalai.core import global_context as gpc\r\n self.ep_group = gpc.get_group(ParallelMode.TENSOR)\r\n self.dp_group = gpc.get_group(ParallelMode.DATA)\r\n return\r\n\r\n if dp_size == 1:\r\n from colossalai.core import global_context as gpc\r\n self.ep_group = gpc.get_group(ParallelMode.DATA)\r\n self.dp_group = gpc.get_group(ParallelMode.TENSOR)\r\n return\r\n\r\n rank = dist.get_rank()\r\n # Create expert parallel group\r\n for i in range(dp_size):\r\n ranks = [i * ep_size + j for j in range(ep_size)]\r\n group = dist.new_group(ranks)\r\n if rank in ranks:\r\n self.ep_group = group\r\n\r\n # Create data parallel group\r\n for j in range(ep_size):\r\n ranks = [i * ep_size + j for i in range(dp_size)]\r\n group = dist.new_group(ranks)\r\n if rank in ranks:\r\n self.dp_group = group\r\n\r\n\r\nclass MoeContext:\r\n \"\"\"MoE parallel context manager. 
This class manages different\r\n parallel groups in MoE context and MoE loss in training.\r\n \"\"\"\r\n __instance = None\r\n\r\n @staticmethod\r\n def get_instance():\r\n if MoeContext.__instance is None:\r\n MoeContext.__instance = MoeContext()\r\n return MoeContext.__instance\r\n\r\n def __init__(self):\r\n self.world_size = 1\r\n # Users may want to set maximum expert parallel size smaller than the world size\r\n # since very low bandwidth across nodes may constrain the performance of MoE\r\n # When we have a maximum expert parallel size, we have a minimum data parallel size naturally\r\n self.max_ep_size = 1\r\n self.min_dp_size = 1\r\n self.aux_loss = None\r\n self.use_kernel_optim = True\r\n\r\n self.has_setup = False\r\n self._parallel_info_dict = dict()\r\n\r\n @property\r\n def parallel_info_dict(self):\r\n return self._parallel_info_dict\r\n\r\n @property\r\n def is_initialized(self):\r\n return self.has_setup\r\n\r\n def setup(self, seed: int, use_kernel_optim: bool = True):\r\n\r\n assert not self.is_initialized, \"MoE distributed context shouldn't be set up again\"\r\n _check_sanity()\r\n assert torch.cuda.is_available(), \"MoE requires to enable CUDA first\"\r\n\r\n self.world_size = dist.get_world_size()\r\n\r\n from colossalai.core import global_context as gpc\r\n self.max_ep_size = gpc.config.get('max_ep_size', self.world_size)\r\n assert self.world_size % self.max_ep_size == 0, \\\r\n \"Maximum epxert parallel size must be a factor of the number of GPUs\"\r\n self.min_dp_size = self.world_size // self.max_ep_size\r\n\r\n # Enabling kernel optimization may raise error in some cases\r\n # Users can close kernel optimization manually\r\n self.use_kernel_optim = use_kernel_optim\r\n\r\n from .random import moe_set_seed\r\n moe_set_seed(seed)\r\n self.has_setup = True\r\n\r\n def get_info(self, num_experts: int) -> Tuple[int, MoeParallelInfo]:\r\n \"\"\"Calculate the Data Parallel Group and Expert Parallel Group.\r\n\r\n Parameters\r\n ----------\r\n num_experts : int\r\n The number experts\r\n\r\n Returns\r\n -------\r\n int, MoeParallelInfo\r\n number of local experts, the MoeParallelInfo of the current ep_size\r\n \"\"\"\r\n\r\n gt_flag = num_experts % self.max_ep_size == 0 # check whether num_experts is greater\r\n lt_flag = self.max_ep_size % num_experts == 0 # check whether num_experts is less\r\n\r\n assert gt_flag or lt_flag, \"Automatic experts placement dose not not support expert number\"\\\r\n \" is not a multiple of ep size or vice versa.\"\r\n\r\n # If the number of experts is greater than maximum expert parallel size. 
a.k.a ep_size,\r\n # there are multiple experts in each GPU and each GPU has different experts\r\n # So it's data parallel size is 1\r\n # Otherwise, there is only one expert in each GPU\r\n # The data parallel size should be calculated\r\n dp_size = 1 if gt_flag else self.max_ep_size // num_experts\r\n ep_size = self.max_ep_size // dp_size\r\n\r\n # Calculate the number of experts for each GPU\r\n num_local_experts = 1 if lt_flag else num_experts // self.max_ep_size\r\n\r\n # Don't forget to multiply minimum data parallel size\r\n dp_size *= self.min_dp_size\r\n if not (ep_size in self.parallel_info_dict):\r\n self.parallel_info_dict[ep_size] = MoeParallelInfo(ep_size, dp_size)\r\n\r\n return num_local_experts, self.parallel_info_dict[ep_size]\r\n\r\n def set_kernel_not_use(self):\r\n self.use_kernel_optim = False\r\n\r\n def reset_loss(self):\r\n self.aux_loss = 0\r\n\r\n def add_loss(self, loss):\r\n self.aux_loss += loss\r\n\r\n def get_loss(self):\r\n return self.aux_loss\r\n",
"from functools import partial\r\nimport pytest\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.multiprocessing as mp\r\nimport torch.distributed as dist\r\nimport colossalai\r\nfrom colossalai.utils import free_port, get_current_device\r\nfrom colossalai.nn.layer.moe import Top1Router, UniformNoiseGenerator, MoeLayer, Experts\r\nfrom colossalai.core import MOE_CONTEXT\r\nfrom colossalai.utils.moe import sync_moe_model_param\r\nfrom colossalai.engine.gradient_handler import MoeGradientHandler\r\nfrom colossalai.testing import assert_equal_in_group\r\n\r\nBATCH_SIZE = 4\r\nDIM = 16\r\nCONFIG = dict()\r\n\r\n\r\ndef run_test(rank, world_size, port):\r\n colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')\r\n expert_module = nn.Linear\r\n expert_factor = dict(in_features=DIM, out_features=DIM, device=get_current_device())\r\n\r\n MOE_CONTEXT.setup(42) # MOE initialization\r\n noisy_func = UniformNoiseGenerator()\r\n router = Top1Router(noisy_func=noisy_func)\r\n num_experts_list = [1, 2, 4]\r\n layer_list = []\r\n for num_experts in num_experts_list:\r\n exp = Experts(expert_module, num_experts, **expert_factor)\r\n moe_layer = MoeLayer(DIM, num_experts, router, exp)\r\n layer_list.append(moe_layer)\r\n\r\n model = nn.Sequential(*layer_list)\r\n model = model.to(get_current_device())\r\n sync_moe_model_param(model)\r\n\r\n dist_dict = MOE_CONTEXT.parallel_info_dict\r\n assert_equal_in_group(layer_list[0].experts.experts[0].weight.data, dist_dict[1].dp_group)\r\n assert_equal_in_group(layer_list[1].experts.experts[0].weight.data, dist_dict[2].dp_group)\r\n # MoE model synchronization passed\r\n\r\n grad_handler = MoeGradientHandler(model, 0)\r\n\r\n rank = dist.get_rank()\r\n torch.cuda.manual_seed(78 + rank)\r\n data = torch.randn(BATCH_SIZE, DIM, device=get_current_device())\r\n grad = torch.randn_like(data)\r\n\r\n MOE_CONTEXT.reset_loss()\r\n outputs = model(data)\r\n outputs.backward(grad)\r\n grad_handler.handle_gradient()\r\n\r\n assert_equal_in_group(layer_list[0].experts.experts[0].weight.grad, dist_dict[1].dp_group)\r\n assert_equal_in_group(layer_list[0].experts.experts[0].bias.grad, dist_dict[1].dp_group)\r\n\r\n assert_equal_in_group(layer_list[1].experts.experts[0].weight.grad, dist_dict[2].dp_group)\r\n assert_equal_in_group(layer_list[1].experts.experts[0].bias.grad, dist_dict[2].dp_group)\r\n # MoE grad handler test passed\r\n\r\n\r\[email protected]\r\ndef test_grad_handler():\r\n world_size = 4\r\n run_func = partial(run_test, world_size=world_size, port=free_port())\r\n mp.spawn(run_func, nprocs=world_size)\r\n\r\n\r\nif __name__ == '__main__':\r\n test_grad_handler()\r\n"
] | [
[
"torch.distributed.get_rank",
"torch.distributed.get_world_size",
"torch.distributed.new_group",
"torch.cuda.is_available"
],
[
"torch.randn_like",
"torch.nn.Sequential",
"torch.multiprocessing.spawn",
"torch.cuda.manual_seed",
"torch.distributed.get_rank"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dprada/molsysmt | [
"83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d",
"83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d",
"83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d"
] | [
"molsysmt/structure/get_sasa.py",
"molsysmt/forms/api_nglview_NGLWidget.py",
"molsysmt/structure/fit.py"
] | [
"from molsysmt import puw\nfrom molsysmt.basic import convert, select, get\nfrom molsysmt._private_tools._digestion import digest_engine, digest_target\nimport numpy as np\n\ndef get_sasa (molecular_system, target='atom', selection='all', frame_indices='all', syntaxis='MolSysMT',\n engine='MDTraj'):\n\n engine = digest_engine(engine)\n target = digest_target(target)\n\n if engine == 'MDTraj':\n\n from mdtraj import shrake_rupley\n\n tmp_item = convert(molecular_system, frame_indices=frame_indices, to_form='mdtraj.Trajectory')\n\n sasa_array = shrake_rupley(tmp_item, mode='atom') # tiene probe_radius y n_sphere_points\n\n if target=='atom':\n\n if selection is not 'all':\n\n atom_indices = select(molecular_system, selection=selection, syntaxis=syntaxis)\n sasa_array = sasa_array[:,atom_indices]\n\n else:\n\n sets_atoms = get(molecular_system, target=target, selection=selection, syntaxis=syntaxis, atom_index=True)\n\n n_sets = len(sets_atoms)\n n_frames = sasa_array.shape[0]\n\n new_sasa_array = np.empty([n_frames, n_sets], dtype='float')\n for ii in range(n_sets):\n new_sasa_array[:,ii] = sasa_array[:,sets_atoms[ii].astype(int)].sum(axis=1)\n sasa_array = new_sasa_array\n\n sasa_array = puw.quantity(sasa_array, 'nm**2')\n sasa_array = puw.standardize(sasa_array)\n\n else:\n\n raise NotImplementedError(\"Engine not implemented yet\")\n\n return sasa_array\n\n",
"from molsysmt._private_tools.exceptions import *\nfrom molsysmt.forms.common_gets import *\nimport numpy as np\nimport importlib\nimport sys\nfrom molsysmt.native.molecular_system import molecular_system_components\nfrom molsysmt import puw\n\nform_name='nglview.NGLWidget'\n\nis_form = {\n 'nglview.NGLWidget': form_name\n }\n\ninfo=[\"NGLView visualization native object\",\"http://nglviewer.org/nglview/latest/_modules/nglview/widget.html\"]\n\nhas = molecular_system_components.copy()\nfor ii in ['elements', 'coordinates', 'box']:\n has[ii]=True\n\ndef to_molsysmt_Topology(item, molecular_system=None, atom_indices='all', frame_indices='all'):\n\n from molsysmt.native.io.topology import from_nglview_NGLWidget as nglview_NGLWidget_to_molsysmt_Topology\n\n tmp_item, tmp_molecular_system = nglview_NGLWidget_to_molsysmt_Topology(item, molecular_system=molecular_system, atom_indices=atom_indices, frame_indices='all')\n\n return tmp_item, tmp_molecular_system\n\ndef to_molsysmt_Trajectory(item, molecular_system=None, atom_indices='all', frame_indices='all'):\n\n from molsysmt.native.io.trajectory import from_nglview_NGLWidget as nglview_NGLWidget_to_molsysmt_Trajectory\n\n tmp_item, tmp_molecular_system = nglview_NGLWidget_to_molsysmt_Trajectory(item,\n molecular_system=molecular_system, atom_indices=atom_indices,\n frame_indices=frame_indices)\n\n return tmp_item, tmp_molecular_system\n\ndef to_molsysmt_MolSys(item, molecular_system=None, atom_indices='all', frame_indices='all'):\n\n from molsysmt.native.io.molsys import from_nglview_NGLWidget as nglview_NGLWidget_to_molsysmt_MolSys\n\n tmp_item, tmp_molecular_system = nglview_NGLWidget_to_molsysmt_MolSys(item,\n molecular_system=molecular_system, atom_indices=atom_indices,\n frame_indices=frame_indices)\n\n return tmp_item, tmp_molecular_system\n\ndef to_openmm_Topology(item, molecular_system=None, atom_indices='all', frame_indices='all'):\n\n from molsysmt.forms.api_string_pdb_text import to_openmm_Topology as string_pdb_to_openmm_Topology\n from molsysmt.forms.api_openmm_Topology import to_openmm_Topology as openmm_Topology_to_openmm_Topology\n\n try:\n tmp_item = item.component_0.get_structure_string()\n except:\n tmp_item = item.get_state()['_ngl_msg_archive'][0]['args'][0]['data']\n\n tmp_item, _ = string_pdb_to_openmm_Topology(tmp_item)\n\n if molecular_system is not None:\n tmp_molecular_system = molecular_system.combine_with_items(tmp_item)\n else:\n tmp_molecular_system = None\n\n tmp_item, tmp_molecular_system = openmm_Topology_to_openmm_Topology(tmp_item,\n molecular_system=tmp_molecular_system, atom_indices=atom_indices, copy_if_all=False)\n\n return tmp_item, tmp_molecular_system\n\ndef to_string_pdb_text(item, molecular_system=None, atom_indices='all', frame_indices='all'):\n\n from molsysmt.forms.api_molsysmt_MolSys import to_string_pdb_text as molsysmt_MolSys_to_string_pdb\n\n tmp_item, tmp_molecular_system = to_molsysmt_MolSys(item, molecular_system=molecular_system,\n atom_indices=atom_indices, frame_indices=frame_indices)\n\n tmp_item, tmp_molecular_system = molsysmt_MolSys_to_string_pdb_text(tmp_item,\n molecular_system=tmp_molecular_system)\n\n return tmp_item, tmp_molecular_system\n\ndef to_string_aminoacids1(item, molecular_system=None, atom_indices='all', frame_indices='all'):\n\n from molsysmt.forms.api_molsysmt_MolSys import to_string_aminoacids1 as molsysmt_MolSys_to_string_aminoacids1\n\n tmp_item, tmp_molecular_system = to_molsysmt_MolSys(item, molecular_system=molecular_system,\n atom_indices=atom_indices, 
frame_indices=frame_indices)\n\n tmp_item, tmp_molecular_system = molsysmt_MolSys_to_string_aminoacids1(tmp_item,\n molecular_system=tmp_molecular_system)\n\n return tmp_item, tmp_molecular_system\n\ndef to_nglview_NGLWidget(item, molecular_system=None, atom_indices='all', frame_indices='all', copy_if_all=True):\n\n tmp_molecular_system = None\n\n if (atom_indices is 'all') and (frame_indices is 'all'):\n if copy_if_all:\n tmp_item = extract(item)\n if tmp_molecular_system is not None:\n tmp_molecular_system = molecular_system.combine_with_items(tmp_item)\n else:\n tmp_item = item\n if tmp_molecular_system is not None:\n tmp_molecular_system = molecular_system\n else:\n tmp_item = extract(item, atom_indices=atom_indices, frame_indices=frame_indices)\n if tmp_molecular_system is not None:\n tmp_molecular_system = molecular_system.combine_with_items(tmp_item, atom_indices=atom_indices, frame_indices=frame_indices)\n\n return tmp_item, tmp_molecular_system\n\ndef extract(item, atom_indices='all', frame_indices='all'):\n\n if (atom_indices is 'all') and (frame_indices is 'all'):\n from copy import copy\n return copy(item)\n else:\n from molsysmt.forms.api_molsysmt_MolSys import to_nglview_NGLWidget as molsysmt_MolSys_to_nglview_NGLWidget\n tmp_item, _ = to_molsysmt_MolSys(item, atom_indices=atom_indices, frame_indices=frame_indices)\n tmp_item, _ = molsysmt_MolSys_to_nglview_NGLWidget(tmp_item)\n return tmp_item\n\n return tmp_item\n\ndef merge(item_1, item_2):\n\n from molsysmt.forms.api_molsysmt_MolSys import to_nglview_NGLWidget as molsysmt_MolSys_to_nglview_NGLWidget\n from molsysmt.forms.api_molsysmt_MolSys import merge as merge_molsysmt_MolSys\n tmp_item_1, _ = to_molsysmt_MolSys(item_1)\n tmp_item_2, _ = to_molsysmt_MolSys(item_2)\n tmp_item = merge_molsysmt_MolSys(tmp_item_1, tmp_item_2)\n tmp_item, _ = molsysmt_MolSys_to_nglview_NGLWidget(tmp_item)\n return tmp_item\n\ndef add(to_item, item):\n\n raise NotWithThisForm()\n\ndef append_frames(item, step=None, time=None, coordinates=None, box=None):\n\n raise NotWithThisForm()\n\ndef concatenate_frames(item, step=None, time=None, coordinates=None, box=None):\n\n from molsysmt.forms.api_molsysmt_MolSys import to_nglview_NGLWidget as molsysmt_MolSys_to_nglview_NGLWidget\n from molsysmt.forms.api_molsysmt_MolSys import append_frames as append_frames_molsysmt_MolSys\n tmp_item, _ = to_molsysmt_MolSys(item)\n append_frames_molsysmt_MolSys(tmp_item, step=step, time=time, coordinates=coordinates, box=box)\n tmp_item, _ = molsysmt_MolSys_to_nglview_NGLWidget(tmp_item)\n return tmp_item\n\n###### Get\n\ndef aux_get(item, indices='all', frame_indices='all'):\n\n from molsysmt.forms import forms\n\n method_name = sys._getframe(1).f_code.co_name\n\n if 'openmm.Topology' in forms:\n\n tmp_item, _ = to_openmm_Topology(item, frame_indices=frame_indices)\n module = importlib.import_module('molsysmt.forms.api_openmm_Topology')\n _get = getattr(module, method_name)\n output = _get(tmp_item, indices=indices)\n\n else:\n\n raise NotImplementedError\n\n return output\n\n## Atom\n\ndef get_atom_index_from_atom(item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_atom_id_from_atom(item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_atom_name_from_atom(item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_atom_type_from_atom(item, indices='all', 
frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_group_index_from_atom (item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_component_index_from_atom (item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_chain_index_from_atom (item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_molecule_index_from_atom (item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_entity_index_from_atom (item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_inner_bonded_atoms_from_atom (item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_n_inner_bonds_from_atom (item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_coordinates_from_atom(item, indices='all', frame_indices='all'):\n\n if frame_indices is 'all':\n n_frames = get_n_frames_from_system(item)\n frame_indices = np.arange(n_frames)\n\n coordinates = []\n\n for ii in frame_indices:\n if indices is 'all':\n coordinates.append(item.component_0.get_coordinates(ii))\n else:\n coordinates.append(item.component_0.get_coordinates(ii)[indices,:])\n\n coordinates = np.array(coordinates)\n coordinates = puw.quantity(coordinates, unit='angstroms')\n coordinates = puw.standardize(coordinates)\n\n return coordinates\n\ndef get_frame_from_atom(item, indices='all', frame_indices='all'):\n\n coordinates = get_coordinates_from_atom(item, indices=indices, frame_indices=frame_indices)\n box = get_box_from_system(item, frame_indices=frame_indices)\n step = get_step_from_system(item, frame_indices=frame_indices)\n time = get_time_from_system(item, frame_indices=frame_indices)\n\n return step, time, coordinates, box\n\n## group\n\ndef get_group_id_from_group(item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_group_name_from_group(item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_group_type_from_group(item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\n## component\n\ndef get_component_id_from_component (item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_component_name_from_component (item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_component_type_from_component (item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\n## molecule\n\ndef get_molecule_id_from_molecule (item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_molecule_name_from_molecule (item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_molecule_type_from_molecule (item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\n## chain\n\ndef 
get_chain_id_from_chain (item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_chain_name_from_chain (item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_chain_type_from_chain (item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\n## entity\n\ndef get_entity_id_from_entity (item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_entity_name_from_entity (item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_entity_type_from_entity (item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\n## system\n\ndef get_n_atoms_from_system(item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_n_groups_from_system(item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_n_components_from_system(item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_n_chains_from_system(item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_n_molecules_from_system(item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_n_entities_from_system(item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_n_bonds_from_system(item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_coordinates_from_system(item, indices='all', frame_indices='all'):\n\n if frame_indices is 'all':\n n_frames = get_n_frames_from_system(item)\n frame_indices = np.arange(n_frames)\n\n coordinates = []\n\n for ii in frame_indices:\n coordinates.append(item.component_0.get_coordinates(ii))\n\n coordinates = np.array(coordinates)\n coordinates = puw.quantity(coordinates, unit='angstroms')\n coordinates = puw.standardize(coordinates)\n\n return\n\ndef get_box_from_system(item, indices='all', frame_indices='all'):\n\n # We can only get the box from frame 0\n\n from molsysmt.forms.api_openmm_Topology import get_box_from_system as get_box_from_system_openmm_Topology\n\n if frame_indices is 'all':\n n_frames = get_n_frames_from_system(item)\n else:\n n_frames = frame_indices.shape[0]\n\n openmm_Topology, _ = to_openmm_Topology(item, atom_indices='all', frame_indices=0)\n\n aux_box = get_box_from_system_openmm_Topology(openmm_Topology)\n\n if aux_box is not None:\n aux_box_value_frame_0 = puw.get_value(aux_box[0])\n aux_box_unit = puw.get_unit(aux_box)\n\n box = [aux_box_value_frame_0 for ii in range(n_frames)]\n box = np.array(box)\n box = puw.quantity(box, unit=aux_box_unit)\n box = puw.standardize(box)\n else:\n box = None\n\n return box\n\ndef get_box_shape_from_system(item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_box_lengths_from_system(item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_box_angles_from_system(item, indices='all', 
frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_box_volume_from_system(item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_time_from_system(item, indices='all', frame_indices='all'):\n\n return None\n\ndef get_step_from_system(item, indices='all', frame_indices='all'):\n\n return None\n\ndef get_n_frames_from_system(item, indices='all', frame_indices='all'):\n\n if frame_indices is 'all':\n n_frames = item.component_0.n_frames\n else:\n n_frames = frame_indices.shape[0]\n\n return n_frames\n\ndef get_bonded_atoms_from_system(item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\n## bond\n\ndef get_bond_order_from_bond(item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_bond_type_from_bond(item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\ndef get_atom_index_from_bond(item, indices='all', frame_indices='all'):\n\n return aux_get(item, indices=indices, frame_indices=frame_indices)\n\n###### Set\n\ndef set_box_to_system(item, indices='all', frame_indices='all', value=None):\n\n raise NotImplementedError\n\ndef set_coordinates_to_system(item, indices='all', frame_indices='all', value=None):\n\n raise NotImplementedError\n\n",
"from molsysmt._private_tools._digestion import digest_molecular_system, digest_engine\nfrom molsysmt._private_tools._digestion import digest_frame_indices\nfrom molsysmt.basic import select, get, set, convert, copy\nimport numpy as np\nfrom molsysmt.lib import rmsd as librmsd\nfrom molsysmt import puw\n\ndef fit (molecular_system=None, selection='backbone', frame_indices='all',\n reference_molecular_system=None, reference_selection=None, reference_frame_index=0,\n to_form=None, parallel=True, syntaxis='MolSysMT', method='least rmsd', engine='MolSysMT'):\n\n molecular_system = digest_molecular_system(molecular_system)\n engine = digest_engine(engine)\n\n if engine=='MolSysMT':\n\n n_atoms, n_frames = get(molecular_system, n_atoms=True, n_frames=True)\n atom_indices = select(molecular_system, selection=selection, syntaxis=syntaxis)\n n_atom_indices = atom_indices.shape[0]\n frame_indices = digest_frame_indices(frame_indices)\n if frame_indices is 'all':\n frame_indices = np.arange(n_frames)\n n_frame_indices = frame_indices.shape[0]\n\n if reference_molecular_system is None:\n reference_molecular_system = molecular_system\n\n if reference_selection is None:\n reference_selection = selection\n\n reference_atom_indices = select(reference_molecular_system, selection=reference_selection, syntaxis=syntaxis)\n\n reference_coordinates = get(reference_molecular_system, target='atom', indices=reference_atom_indices,\n frame_indices=reference_frame_index, coordinates=True)\n\n coordinates = get(molecular_system, coordinates=True, frame_indices='all')\n units = puw.get_unit(coordinates)\n coordinates = np.asfortranarray(puw.get_value(coordinates), dtype='float64')\n reference_coordinates = np.asfortranarray(puw.get_value(reference_coordinates, to_unit=units), dtype='float64')\n\n if reference_coordinates.shape[1]!=n_atom_indices:\n raise ValueError(\"reference selection and selection needs to have the same number of atoms\")\n\n librmsd.least_rmsd_fit(coordinates, atom_indices, reference_coordinates, frame_indices,\n n_atoms, n_frames, n_atom_indices, n_frame_indices)\n\n coordinates=np.ascontiguousarray(coordinates)*units\n coordinates=puw.standardize(coordinates)\n\n if to_form is None:\n tmp_molecular_system = copy(molecular_system)\n else:\n tmp_molecular_system = convert(molecular_system, to_form=to_form)\n\n set(tmp_molecular_system, target='system', coordinates=coordinates)\n del(coordinates, units)\n return tmp_molecular_system\n\n elif engine=='MDTraj':\n\n #tmp_item.superpose(tmp_ref_item,frame=ref_frame_indices,atom_indices=atom_indices,ref_atom_indices=ref_atom_indices,parallel=parallel)\n\n #if in_form==x_form:\n # item=tmp_item\n #elif in_form=='molsysmt.Trajectory':\n # item._import_mdtraj_data(tmp_item)\n #elif in_form=='molsysmt.MolSys':\n # item.trajectory._import_mdtraj_data(tmp_item)\n #else:\n # item=_convert(tmp_item, to_form=in_form)\n\n raise NotImplementedError\n\n else:\n\n raise NotImplementedError\n\n"
] | [
[
"numpy.empty"
],
[
"numpy.arange",
"numpy.array"
],
[
"numpy.ascontiguousarray",
"numpy.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
agrinh/nih_prediction | [
"e94ae81935452e7928cda6b101ef58163525d81c"
] | [
"nih_prediction/data.py"
] | [
"\"\"\"Produce metadata and datasets of NIH Chest Xray images\n\"\"\"\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\n\ndef get_metadata(path):\n \"\"\"Produce metadata with relevant columns from NIH Chest Xray images\n\n Args:\n path: Path to NIH dataset\n\n Returns:\n metadata Dataframe with image and label\n \"\"\"\n raw_meta = pd.read_csv(os.path.join(path, 'Data_Entry_2017.csv'))\n meta = raw_meta[['Image Index', 'Finding Labels']].copy()\n meta.columns = ['image', 'label']\n meta.image = os.path.join(path, 'images/') + meta.image\n return meta\n\n\ndef build_dataset(meta, mean=None, std=None, num_parallel_calls=32):\n \"\"\"Produce tf Dataset from metadata\n\n If mean and std are provided those values will be used to normalise the\n image intensities to zero mean and unit variance.\n\n Args:\n meta: Dataframe with paths to images under column name image\n mean:\n std: If both provided will be used to normalize images\n num_parallel_calls: Number of threads for loading images\n \"\"\"\n encoded_labels = meta.label.str.get_dummies(sep='|').sort_index(axis=1)\n ds = tf.data.Dataset.from_tensor_slices({\n 'index': meta.index,\n 'path': meta['image'].values,\n 'label': encoded_labels.values.astype(np.float32)\n })\n if None in (mean, std):\n mean = 0\n std = 1\n return ds.map(\n lambda item: normalize_image(decode_image(read_file(item)), mean, std),\n num_parallel_calls=num_parallel_calls\n )\n\n\ndef read_file(item):\n \"\"\"Read file in key path into key image\n \"\"\"\n item['image'] = tf.read_file(item['path'])\n return item\n\n\ndef decode_image(item):\n \"\"\"Decode raw image file into float32 image tensor with key image\n \"\"\"\n decoded = tf.image.decode_image(item['image'])\n item['image'] = tf.image.convert_image_dtype(decoded, tf.float32)\n # All images are B&W, but some seem to have the channel replicated,\n # to avoid issues we simply select the first channel\n item['image'] = tf.expand_dims(item['image'][:, :, 0], axis=-1)\n item['image'].set_shape([None, None, 1])\n return item\n\n\ndef normalize_image(item, mean, std):\n \"\"\"Normalize image with key image to zero mean and unit variance\n \"\"\"\n item['image'] = (item['image'] - mean) / std\n return item\n"
] | [
[
"tensorflow.expand_dims",
"tensorflow.image.decode_image",
"tensorflow.image.convert_image_dtype",
"tensorflow.read_file"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
Matrix-King-Studio/MaskDetection | [
"5fed65833a8c08380299d606f66e14df814b022f",
"5fed65833a8c08380299d606f66e14df814b022f"
] | [
"yolov5/detect.py",
"yolov5/utils/activations.py"
] | [
"import argparse\r\nimport time\r\nfrom pathlib import Path\r\n\r\nimport cv2\r\nimport torch\r\nimport torch.backends.cudnn as cudnn\r\nfrom numpy import random\r\n\r\nfrom models.experimental import attempt_load\r\nfrom utils.datasets import LoadStreams, LoadImages\r\nfrom utils.general import check_img_size, check_requirements, non_max_suppression, apply_classifier, scale_coords\r\nfrom utils.general import xyxy2xywh, strip_optimizer, set_logging, increment_path\r\nfrom utils.plots import plot_one_box\r\nfrom utils.torch_utils import select_device, load_classifier, time_synchronized\r\n\r\nfrom utils.draw_name import draw_name\r\n\r\n\r\ndef detect(save_img=False):\r\n source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size\r\n webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(\r\n ('rtsp://', 'rtmp://', 'http://'))\r\n\r\n # Directories\r\n save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run\r\n (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir\r\n\r\n # Initialize\r\n set_logging()\r\n device = select_device(opt.device)\r\n half = device.type != 'cpu' # half precision only supported on CUDA\r\n\r\n # Load model\r\n model = attempt_load(weights, map_location=device) # load FP32 model\r\n imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size\r\n if half:\r\n model.half() # to FP16\r\n\r\n # Second-stage classifier\r\n classify = False\r\n if classify:\r\n modelc = load_classifier(name='resnet101', n=2) # initialize\r\n modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']).to(device).eval()\r\n\r\n # Set Dataloader\r\n vid_path, vid_writer = None, None\r\n if webcam:\r\n view_img = True\r\n cudnn.benchmark = True # set True to speed up constant image size inference\r\n dataset = LoadStreams(source, img_size=imgsz)\r\n else:\r\n save_img = True\r\n dataset = LoadImages(source, img_size=imgsz)\r\n\r\n # Get names and colors\r\n names = model.module.names if hasattr(model, 'module') else model.names\r\n colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]\r\n\r\n # Run inference\r\n t0 = time.time()\r\n img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img\r\n _ = model(img.half() if half else img) if device.type != 'cpu' else None # run once\r\n for path, img, im0s, vid_cap in dataset:\r\n img = torch.from_numpy(img).to(device)\r\n img = img.half() if half else img.float() # uint8 to fp16/32\r\n img /= 255.0 # 0 - 255 to 0.0 - 1.0\r\n if img.ndimension() == 3:\r\n img = img.unsqueeze(0)\r\n\r\n # Inference\r\n t1 = time_synchronized()\r\n pred = model(img, augment=opt.augment)[0]\r\n\r\n # Apply NMS\r\n pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)\r\n t2 = time_synchronized()\r\n\r\n # Apply Classifier\r\n if classify:\r\n pred = apply_classifier(pred, modelc, img, im0s)\r\n\r\n # Process detections\r\n for i, det in enumerate(pred): # detections per image\r\n if webcam: # batch_size >= 1\r\n p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count\r\n else:\r\n p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)\r\n\r\n p = Path(p) # to Path\r\n save_path = str(save_dir / p.name) # img.jpg\r\n txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt\r\n s += '%gx%g ' % img.shape[2:] # print 
string\r\n gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh\r\n if len(det):\r\n # Rescale boxes from img_size to im0 size\r\n det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()\r\n\r\n # Print results\r\n for c in det[:, -1].unique():\r\n n = (det[:, -1] == c).sum() # detections per class\r\n s += f'{n} {names[int(c)]}s, ' # add to string\r\n\r\n # Write results\r\n for *xyxy, conf, cls in reversed(det):\r\n if save_txt: # Write to file\r\n xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh\r\n line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format\r\n with open(txt_path + '.txt', 'a') as f:\r\n f.write(('%g ' * len(line)).rstrip() % line + '\\n')\r\n\r\n if save_img or view_img: # Add bbox to image\r\n cv2.imwrite(\"img.jpg\", im0)\r\n im0 = draw_name(im0, colors[int(cls)]) # 填上人名\r\n label = f'{names[int(cls)]} {conf:.2f}'\r\n plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)\r\n\r\n # Print time (inference + NMS)\r\n print(f'{s}Done. ({t2 - t1:.3f}s)')\r\n\r\n # Stream results\r\n if view_img:\r\n cv2.imshow('Masks detect', im0)\r\n\r\n # Save results (image with detections)\r\n if save_img:\r\n if dataset.mode == 'image':\r\n cv2.imwrite(save_path, im0)\r\n else: # 'video'\r\n if vid_path != save_path: # new video\r\n vid_path = save_path\r\n if isinstance(vid_writer, cv2.VideoWriter):\r\n vid_writer.release() # release previous video writer\r\n\r\n fourcc = 'mp4v' # output video codec\r\n fps = vid_cap.get(cv2.CAP_PROP_FPS)\r\n w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))\r\n h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\r\n vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))\r\n vid_writer.write(im0)\r\n\r\n if save_txt or save_img:\r\n s = f\"\\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}\" if save_txt else ''\r\n print(f\"Results saved to {save_dir}{s}\")\r\n\r\n print(f'Done. ({time.time() - t0:.3f}s)')\r\n\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument('--weights', nargs='+', type=str, default='runs/train/exp/weights/best.pt',\r\n help='model.pt path(s)')\r\n parser.add_argument('--source', type=str, default='0', help='source') # file/folder, 0 for webcam\r\n parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')\r\n parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')\r\n parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')\r\n parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu')\r\n parser.add_argument('--view-img', action='store_true', help='display results')\r\n parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')\r\n parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')\r\n parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')\r\n parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')\r\n parser.add_argument('--augment', action='store_true', help='augmented inference')\r\n parser.add_argument('--update', action='store_true', help='update all models')\r\n parser.add_argument('--project', default='runs/detect', help='save results to project/name')\r\n parser.add_argument('--name', default='exp', help='save results to project/name')\r\n parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')\r\n global opt\r\n opt = parser.parse_args()\r\n print(opt)\r\n check_requirements()\r\n\r\n with torch.no_grad():\r\n if opt.update: # update all models (to fix SourceChangeWarning)\r\n for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:\r\n detect()\r\n strip_optimizer(opt.weights)\r\n else:\r\n detect()\r\n",
"# Activation functions\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\n\r\n# SiLU https://arxiv.org/pdf/1905.02244.pdf ----------------------------------------------------------------------------\r\nclass SiLU(nn.Module): # export-friendly version of nn.SiLU()\r\n @staticmethod\r\n def forward(x):\r\n return x * torch.sigmoid(x)\r\n\r\n\r\nclass Hardswish(nn.Module): # export-friendly version of nn.Hardswish()\r\n @staticmethod\r\n def forward(x):\r\n # return x * F.hardsigmoid(x) # for torchscript and CoreML\r\n return x * F.hardtanh(x + 3, 0., 6.) / 6. # for torchscript, CoreML and ONNX\r\n\r\n\r\nclass MemoryEfficientSwish(nn.Module):\r\n class F(torch.autograd.Function):\r\n @staticmethod\r\n def forward(ctx, x):\r\n ctx.save_for_backward(x)\r\n return x * torch.sigmoid(x)\r\n\r\n @staticmethod\r\n def backward(ctx, grad_output):\r\n x = ctx.saved_tensors[0]\r\n sx = torch.sigmoid(x)\r\n return grad_output * (sx * (1 + x * (1 - sx)))\r\n\r\n def forward(self, x):\r\n return self.F.apply(x)\r\n\r\n\r\n# Mish https://github.com/digantamisra98/Mish --------------------------------------------------------------------------\r\nclass Mish(nn.Module):\r\n @staticmethod\r\n def forward(x):\r\n return x * F.softplus(x).tanh()\r\n\r\n\r\nclass MemoryEfficientMish(nn.Module):\r\n class F(torch.autograd.Function):\r\n @staticmethod\r\n def forward(ctx, x):\r\n ctx.save_for_backward(x)\r\n return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x)))\r\n\r\n @staticmethod\r\n def backward(ctx, grad_output):\r\n x = ctx.saved_tensors[0]\r\n sx = torch.sigmoid(x)\r\n fx = F.softplus(x).tanh()\r\n return grad_output * (fx + x * sx * (1 - fx * fx))\r\n\r\n def forward(self, x):\r\n return self.F.apply(x)\r\n\r\n\r\n# FReLU https://arxiv.org/abs/2007.11824 -------------------------------------------------------------------------------\r\nclass FReLU(nn.Module):\r\n def __init__(self, c1, k=3): # ch_in, kernel\r\n super().__init__()\r\n self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)\r\n self.bn = nn.BatchNorm2d(c1)\r\n\r\n def forward(self, x):\r\n return torch.max(x, self.bn(self.conv(x)))\r\n"
] | [
[
"torch.zeros",
"torch.load",
"torch.from_numpy",
"torch.tensor",
"torch.no_grad",
"numpy.random.randint"
],
[
"torch.sigmoid",
"torch.nn.Conv2d",
"torch.nn.functional.hardtanh",
"torch.nn.BatchNorm2d",
"torch.nn.functional.softplus"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rolandbernard/adventofcode-2021 | [
"9249815af62d0fcf79b71357330a1456ea3be1ed",
"9249815af62d0fcf79b71357330a1456ea3be1ed",
"9249815af62d0fcf79b71357330a1456ea3be1ed"
] | [
"20.trench-map/py/part1.py",
"20.trench-map/py/part2.py",
"09.smoke-basin/py/part1.py"
] | [
"\nimport sys\nimport numpy as np\n\nrawalgo, rawimg = sys.stdin.read().strip().split('\\n\\n')\n\nalgo = np.array([1 if c == '#' else 0 for c in rawalgo], dtype=np.int8)\nimg = np.array([[1 if c == '#' else 0 for c in line] for line in rawimg.split('\\n')], dtype=np.int8)\n\ndef enhance(img, algo):\n img = np.pad(img, 2, 'edge')\n new = np.copy(img)\n for i in range(1, img.shape[0] - 1):\n for j in range(1, img.shape[1] - 1):\n values = img[i-1:i+2,j-1:j+2].flatten()\n index = (values * 2**np.arange(9)[::-1]).sum()\n new[i,j] = algo[index]\n return new[1:-1,1:-1]\n\nimg = np.pad(img, 1)\nfor _ in range(2):\n img = enhance(img, algo)\n\nprint(\"Result:\", img.sum())\n\n",
"\nimport sys\nimport numpy as np\n\nfrom scipy.ndimage import convolve\n\nrawalgo, rawimg = sys.stdin.read().strip().split('\\n\\n')\n\nalgo = np.array([1 if c == '#' else 0 for c in rawalgo])\nimg = np.array([[1 if c == '#' else 0 for c in line] for line in rawimg.split('\\n')])\n\nbin = 2**np.arange(9).reshape(3, 3)\n\ndef enhance(img, algo):\n img = np.pad(img, 1, 'edge')\n return algo[convolve(img, bin)]\n\nimg = np.pad(img, 1)\nfor _ in range(50):\n img = enhance(img, algo)\n\nprint(\"Result:\", img.sum())\n\n",
"\nimport sys\nimport numpy as np\n\nraw = sys.stdin.read()\nmap = np.array([[c for c in l] for l in raw.split('\\n') if len(l) != 0], dtype=int)\n\npadded = np.pad(map, 1, constant_values = 9)\nad = np.dstack([ np.roll(padded, i, axis=j) for i, j in [(x, y) for x in [1, -1] for y in [0, 1]] ])\nmin = ad.min(2)[1:-1,1:-1]\n\nprint('Result:', (map[min > map] + 1).sum())\n\n"
] | [
[
"numpy.arange",
"numpy.copy",
"numpy.array",
"numpy.pad"
],
[
"numpy.arange",
"numpy.array",
"numpy.pad",
"scipy.ndimage.convolve"
],
[
"numpy.roll",
"numpy.pad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |