repo_name: stringlengths 6–130
hexsha: list
file_path: list
code: list
apis: list
Becky-pty/Wind
[ "af19aca04f9967f85c2a471293b6ca827b4ffe71" ]
[ "windpowerlib/wind_turbine.py" ]
[ "\"\"\"\nThe ``wind_turbine`` module contains the class WindTurbine that implements\na wind turbine in the windpowerlib and functions needed for the modelling of a\nwind turbine.\n\nSPDX-FileCopyrightText: 2019 oemof developer group <[email protected]>\nSPDX-License-Identifier: MIT\n\"\"\"\nimport pandas as pd\nimport logging\nimport warnings\nimport requests\nimport os\nfrom windpowerlib.tools import WindpowerlibUserWarning\nfrom typing import NamedTuple\n\n\nclass WindTurbine(object):\n r\"\"\"\n Defines a standard set of wind turbine attributes.\n\n Parameters\n ----------\n hub_height : float\n Hub height of the wind turbine in m.\n power_curve : :pandas:`pandas.DataFrame<frame>` or dict (optional)\n If provided directly sets the power curve. DataFrame/dictionary must\n have 'wind_speed' and 'value' columns/keys with wind speeds in m/s and\n the corresponding power curve value in W. If not set the value is\n retrieved from 'power_curve.csv' file in `path`. In that case a\n `turbine_type` is needed. Default: None.\n power_coefficient_curve : :pandas:`pandas.DataFrame<frame>` or dict (optional)\n If provided directly sets the power coefficient curve.\n DataFrame/dictionary must have 'wind_speed' and 'value' columns/keys\n with wind speeds in m/s and the corresponding power coefficient curve\n value. If not set the value is retrieved from\n 'power_coefficient_curve.csv' file in `path`. In that case a\n `turbine_type` is needed. Default: None.\n turbine_type : str (optional)\n Name of the wind turbine type. Must be provided if power (coefficient)\n curve, nominal power or rotor diameter is retrieved from self-provided\n or oedb turbine library csv files. If turbine_type is None it is not\n possible to retrieve turbine data from file.\n Use :py:func:`~.get_turbine_types` to see a table of all wind turbines\n for which power (coefficient) curve data and other turbine data is\n provided in the oedb turbine library.\n Default: None.\n rotor_diameter : float (optional)\n Diameter of the rotor in m. If not set the value is\n retrieved from 'turbine_data.csv' file in `path`. In that case a\n `turbine_type` is needed.\n The rotor diameter only needs to be set if power output\n is calculated using the power coefficient curve. Default: None.\n nominal_power : float (optional)\n The nominal power of the wind turbine in W. If not set the value is\n retrieved from 'turbine_data.csv' file in `path`. In that case a\n `turbine_type` is needed. Default: None.\n path : str (optional)\n Directory where the turbine database files are located. The files need\n to be named 'power_coefficient_curve.csv', 'power_curve.csv', and\n 'turbine_data.csv'. By default the oedb turbine library files are used.\n Set path to `None` to ignore turbine data from files. Default: 'oedb'.\n\n Attributes\n ----------\n turbine_type : str\n Name of the wind turbine.\n hub_height : float\n Hub height of the wind turbine in m.\n rotor_diameter : None or float\n Diameter of the rotor in m. Default: None.\n power_coefficient_curve : None, pandas.DataFrame or dictionary\n Power coefficient curve of the wind turbine. DataFrame/dictionary\n containing 'wind_speed' and 'value' columns/keys with wind speeds\n in m/s and the corresponding power coefficients. Default: None.\n power_curve : None, pandas.DataFrame or dictionary\n Power curve of the wind turbine. DataFrame/dictionary containing\n 'wind_speed' and 'value' columns/keys with wind speeds in m/s and the\n corresponding power curve value in W. 
Default: None.\n nominal_power : None or float\n The nominal output of the wind turbine in W. Default: None.\n \n Notes\n ------\n Your wind turbine object needs to have a power coefficient or power curve.\n By default they are fetched from the oedb turbine library that is provided\n along with the windpowerlib. In that case `turbine_type` must be specified.\n You can also set the curves directly or provide your own csv files with\n power coefficient and power curves. See `example_power_curves.csv',\n `example_power_coefficient_curves.csv` and `example_turbine_data.csv`\n in example/data for the required format of such csv files.\n\n Examples\n --------\n >>> import os\n >>> from windpowerlib import WindTurbine\n >>> enerconE126={\n ... 'hub_height': 135,\n ... 'turbine_type': 'E-126/4200'}\n >>> e126=WindTurbine(**enerconE126)\n >>> print(e126.nominal_power)\n 4200000.0\n >>> # Example with own path\n >>> path=os.path.join(os.path.dirname(__file__), '../tests/data')\n >>> example_turbine={\n ... 'hub_height': 100,\n ... 'rotor_diameter': 70,\n ... 'turbine_type': 'DUMMY 3',\n ... 'path' : path}\n >>> e_t_1=WindTurbine(**example_turbine)\n >>> print(e_t_1.power_curve['value'][7])\n 18000.0\n >>> print(e_t_1.nominal_power)\n 1500000.0\n \"\"\"\n\n def __init__(\n self,\n hub_height,\n nominal_power=None,\n path=\"oedb\",\n power_curve=None,\n power_coefficient_curve=None,\n rotor_diameter=None,\n turbine_type=None,\n **kwargs,\n ):\n\n self.hub_height = hub_height\n self.turbine_type = turbine_type\n self.rotor_diameter = rotor_diameter\n self.nominal_power = nominal_power\n self.power_curve = power_curve\n self.power_coefficient_curve = power_coefficient_curve\n\n if path == \"oedb\":\n path = os.path.join(os.path.dirname(__file__), \"oedb\")\n\n if turbine_type is not None and path is not None:\n if power_curve is None:\n try:\n fn = os.path.join(path, \"power_curves.csv\")\n self.power_curve = get_turbine_data_from_file(\n self.turbine_type, fn\n )\n except KeyError:\n msg = \"No power curve found for {0}\"\n logging.debug(msg.format(self.turbine_type))\n if power_coefficient_curve is None:\n try:\n fn = os.path.join(path, \"power_coefficient_curves.csv\")\n self.power_coefficient_curve = get_turbine_data_from_file(\n self.turbine_type, fn\n )\n except KeyError:\n msg = \"No power coefficient curve found for {0}\"\n logging.debug(msg.format(self.turbine_type))\n\n if nominal_power is None or (\n rotor_diameter is None\n and self.power_coefficient_curve is not None\n ):\n turbine_data = None\n try:\n fn = os.path.join(path, \"turbine_data.csv\")\n turbine_data = get_turbine_data_from_file(\n self.turbine_type, fn\n )\n except KeyError:\n msg = \"No turbine data found for {0}\"\n logging.debug(msg.format(self.turbine_type))\n\n if self.nominal_power is None and turbine_data is not None:\n self.nominal_power = float(turbine_data[\"nominal_power\"])\n if self.rotor_diameter is None and turbine_data is not None:\n self.rotor_diameter = float(turbine_data[\"rotor_diameter\"])\n\n if self.rotor_diameter:\n if self.hub_height <= 0.5 * self.rotor_diameter:\n msg = \"1/2rotor_diameter cannot be greater than hub_height\"\n raise ValueError(msg)\n\n if self.power_curve is None and self.power_coefficient_curve is None:\n msg = (\n \"The WindTurbine has been initialised without a power curve\"\n \" and without a power coefficient curve.\\nYou will not be\"\n \" able to calculate the power output.\\n\"\n \" Check if the turbine type {0} is in your database file\"\n \" or if you passed a valid 
curve.\"\n )\n warnings.warn(msg.format(turbine_type), WindpowerlibUserWarning)\n else:\n # power (coefficient) curve to pd.DataFrame in case of being dict\n if isinstance(self.power_curve, dict):\n self.power_curve = pd.DataFrame(self.power_curve)\n if isinstance(self.power_coefficient_curve, dict):\n self.power_coefficient_curve = pd.DataFrame(\n self.power_coefficient_curve\n )\n # sort power (coefficient) curve by wind speed\n if isinstance(self.power_curve, pd.DataFrame):\n self.power_curve.sort_values(by=\"wind_speed\")\n elif self.power_curve is not None:\n msg = (\n \"Type of power curve of {} is {} but should be \"\n \"pd.DataFrame or dict.\"\n )\n raise TypeError(\n msg.format(self.__repr__(), type(self.power_curve))\n )\n if isinstance(self.power_coefficient_curve, pd.DataFrame):\n self.power_coefficient_curve.sort_values(by=\"wind_speed\")\n elif self.power_coefficient_curve is not None:\n msg = (\n \"Type of power coefficient curve of {} is {} but \"\n \"should be pd.DataFrame or dict.\"\n )\n raise TypeError(\n msg.format(\n self.__repr__(), type(self.power_coefficient_curve)\n )\n )\n\n def __repr__(self):\n info = []\n if self.nominal_power is not None:\n info.append(\"nominal power={} W\".format(self.nominal_power))\n if self.hub_height is not None:\n info.append(\"hub height={} m\".format(self.hub_height))\n if self.rotor_diameter is not None:\n info.append(\"rotor diameter={} m\".format(self.rotor_diameter))\n if self.power_coefficient_curve is not None:\n info.append(\"power_coefficient_curve={}\".format(\"True\"))\n else:\n info.append(\"power_coefficient_curve={}\".format(\"False\"))\n if self.power_curve is not None:\n info.append(\"power_curve={}\".format(\"True\"))\n else:\n info.append(\"power_curve={}\".format(\"False\"))\n\n if self.turbine_type is not None:\n turbine_repr = \"Wind turbine: {name} {info}\".format(\n name=self.turbine_type, info=info\n )\n else:\n turbine_repr = \"Wind turbine: {info}\".format(info=info)\n\n return turbine_repr\n\n def to_group(self, number_turbines=None, total_capacity=None):\n r\"\"\"\n Creates a :class:`~windpowerlib.wind_turbine.WindTurbineGroup`, a\n NamedTuple data container with the fields 'number_of_turbines' and\n 'wind_turbine'. If no parameter is passed the number of turbines is\n set to one.\n\n It can be used to calculate the number of turbines for a given total\n capacity or to create a namedtuple that can be used to define a\n :class:`~windpowerlib.wind_farm.WindFarm` object.\n\n Parameters\n ----------\n number_turbines : float\n Number of turbines of the defined type. Default: 1\n total_capacity : float\n Total capacity of the group of wind turbines of the same type.\n\n Returns\n -------\n :class:`~windpowerlib.wind_turbine.WindTurbineGroup`\n A namedtuple with two fields: 'number_of_turbines' and\n 'wind_turbine'.\n\n Examples\n --------\n >>> from windpowerlib import WindTurbine\n >>> enerconE126={\n ... 'hub_height': 135,\n ... 
'turbine_type': 'E-126/4200'}\n >>> e126=WindTurbine(**enerconE126)\n >>> e126.to_group(5).number_of_turbines\n 5\n >>> e126.to_group().number_of_turbines\n 1\n >>> e126.to_group(number_turbines=7).number_of_turbines\n 7\n >>> e126.to_group(total_capacity=12600000).number_of_turbines\n 3.0\n >>> e126.to_group(total_capacity=14700000).number_of_turbines\n 3.5\n >>> e126.to_group(total_capacity=12600000).wind_turbine.nominal_power\n 4200000.0\n >>> type(e126.to_group(5))\n <class 'windpowerlib.wind_turbine.WindTurbineGroup'>\n >>> e126.to_group(5) # doctest: +NORMALIZE_WHITESPACE\n WindTurbineGroup(wind_turbine=Wind turbine: E-126/4200 ['nominal\n power=4200000.0 W', 'hub height=135 m', 'rotor diameter=127.0 m',\n 'power_coefficient_curve=True', 'power_curve=True'],\n number_of_turbines=5)\n \"\"\"\n\n if number_turbines is not None and total_capacity is not None:\n raise ValueError(\n \"The 'number' and the 'total_capacity' parameter \"\n \"are mutually exclusive. Use just one of them.\"\n )\n elif total_capacity is not None:\n number_turbines = total_capacity / self.nominal_power\n elif number_turbines is None:\n number_turbines = 1\n\n return WindTurbineGroup(\n wind_turbine=self, number_of_turbines=number_turbines\n )\n\n\n# This is working for Python >= 3.5.\n# There a cleaner solutions for Python >= 3.6, once the support of 3.5 is\n# dropped: https://stackoverflow.com/a/50038614\nclass WindTurbineGroup(\n NamedTuple(\n \"WindTurbineGroup\",\n [(\"wind_turbine\", WindTurbine), (\"number_of_turbines\", float)],\n )\n):\n \"\"\"\n A simple data container to define more than one turbine of the same type.\n Use the :func:`~windpowerlib.wind_turbine.WindTurbine.to_group` method to\n easily create a WindTurbineGroup from a\n :class:`~windpowerlib.wind_turbine.WindTurbine` object.\n\n Parameters\n ----------\n 'wind_turbine' : WindTurbine\n A WindTurbine object with all necessary attributes.\n 'number_of_turbines' : float\n The number of turbines. The number is not restricted to integer values.\n \"\"\"\n\n __slots__ = ()\n\n\nWindTurbineGroup.wind_turbine.__doc__ = (\n \"A :class:`~windpowerlib.wind_farm.WindTurbine` object.\"\n)\nWindTurbineGroup.number_of_turbines.__doc__ = (\n \"Number of turbines of type WindTurbine\"\n)\n\n\ndef get_turbine_data_from_file(turbine_type, path):\n r\"\"\"\n Fetches turbine data from a csv file.\n\n See `example_power_curves.csv', `example_power_coefficient_curves.csv` and\n `example_turbine_data.csv` in example/data for the required format of\n a csv file. Make sure to provide wind speeds in m/s and power in W or\n convert units after loading the data.\n\n Parameters\n ----------\n turbine_type : str\n Specifies the turbine type data is fetched for.\n path : str\n Specifies the source of the turbine data.\n See the example below for how to use the example data.\n\n Returns\n -------\n :pandas:`pandas.DataFrame<frame>` or float\n Power curve or power coefficient curve (pandas.DataFrame) or nominal\n power (float) of one wind turbine type. Power (coefficient) curve\n DataFrame contains power coefficient curve values (dimensionless) or\n power curve values (in dimension given in file) with the corresponding\n wind speeds (in dimension given in file).\n\n Examples\n --------\n >>> from windpowerlib import wind_turbine\n >>> import os\n >>> my_path = os.path.join(os.path.dirname(__file__), '../tests/data',\n ... 
'power_curves.csv')\n >>> d3 = get_turbine_data_from_file('DUMMY 3', my_path)\n >>> print(d3['value'][7])\n 18000.0\n >>> print(d3['value'].max())\n 1500000.0\n \"\"\"\n\n try:\n df = pd.read_csv(path, index_col=0)\n except FileNotFoundError:\n raise FileNotFoundError(\"The file '{}' was not found.\".format(path))\n wpp_df = df[df.index == turbine_type].copy()\n # if turbine not in data file\n if wpp_df.shape[0] == 0:\n msg = \"Wind converter type {0} not provided. Possible types: {1}\"\n raise KeyError(msg.format(turbine_type, list(df.index)))\n # if turbine in data file\n # get nominal power or power (coefficient) curve\n if \"turbine_data\" in path:\n return wpp_df\n else:\n wpp_df.dropna(axis=1, inplace=True)\n wpp_df = wpp_df.transpose().reset_index()\n wpp_df.columns = [\"wind_speed\", \"value\"]\n # transform wind speeds to floats\n wpp_df[\"wind_speed\"] = wpp_df[\"wind_speed\"].apply(lambda x: float(x))\n return wpp_df\n\n\ndef create_power_curve(wind_speed, power):\n \"\"\"\n A list, numpy.array, pandas.Series or other iterables can be passed to\n define the wind speed and the power output. Make sure that the order is\n not mutable because, values from both parameters will be used as value\n pairs.\n\n Parameters\n ----------\n wind_speed : iterable\n A series of wind speed values in meter per second [m/s].\n power : iterable\n A series of power values in Watt [W].\n\n Returns\n -------\n pandas.DataFrame\n \"\"\"\n return pd.DataFrame(data={\"value\": power, \"wind_speed\": wind_speed})\n\n\ndef load_turbine_data_from_oedb(schema=\"supply\", table=\"wind_turbine_library\"):\n r\"\"\"\n Loads turbine library from the OpenEnergy database (oedb).\n\n Turbine data is saved to csv files ('oedb_power_curves.csv',\n 'oedb_power_coefficient_curves.csv' and 'oedb_nominal_power') for offline\n usage of the windpowerlib. If the files already exist they are overwritten.\n\n Parameters\n ----------\n schema : str\n Database schema of the turbine library.\n table : str\n Table name of the turbine library.\n\n Returns\n -------\n :pandas:`pandas.DataFrame<frame>`\n Turbine data of different turbines such as 'manufacturer',\n 'turbine_type', 'nominal_power'.\n\n \"\"\"\n # url of OpenEnergy Platform that contains the oedb\n oep_url = \"http://oep.iks.cs.ovgu.de/\"\n url = oep_url + \"/api/v0/schema/{}/tables/{}/rows/?\".format(schema, table)\n\n # load data\n result = requests.get(url)\n print(result.json())\n if not result.status_code == 200:\n raise ConnectionError(\n \"Database (oep) connection not successful. 
\\nURL: {2}\\n\"\n \"Response: [{0}] \\n{1}\".format(\n result.status_code, result.text, url\n )\n )\n\n # extract data to dataframe\n turbine_data = pd.DataFrame(result.json())\n # standard file name for saving data\n filename = os.path.join(os.path.dirname(__file__), \"oedb\", \"{}.csv\")\n # get all power (coefficient) curves and save to file\n # for curve_type in ['power_curve', 'power_coefficient_curve']:\n for curve_type in [\"power_curve\", \"power_coefficient_curve\"]:\n curves_df = pd.DataFrame(columns=[\"wind_speed\"])\n for index in turbine_data.index:\n if (\n turbine_data[\"{}_wind_speeds\".format(curve_type)][index]\n and turbine_data[\"{}_values\".format(curve_type)][index]\n ):\n df = (\n pd.DataFrame(\n data=[\n eval(\n turbine_data[\n \"{}_wind_speeds\".format(curve_type)\n ][index]\n ),\n eval(\n turbine_data[\"{}_values\".format(curve_type)][\n index\n ]\n ),\n ]\n )\n .transpose()\n .rename(\n columns={\n 0: \"wind_speed\",\n 1: turbine_data[\"turbine_type\"][index],\n }\n )\n )\n curves_df = pd.merge(\n left=curves_df, right=df, how=\"outer\", on=\"wind_speed\"\n )\n curves_df = curves_df.set_index(\"wind_speed\").sort_index().transpose()\n # power curve values in W\n if curve_type == \"power_curve\":\n curves_df *= 1000\n curves_df.index.name = \"turbine_type\"\n curves_df.to_csv(filename.format(\"{}s\".format(curve_type)))\n\n # get turbine data and save to file (excl. curves)\n turbine_data_df = turbine_data.drop(\n [\n \"power_curve_wind_speeds\",\n \"power_curve_values\",\n \"power_coefficient_curve_wind_speeds\",\n \"power_coefficient_curve_values\",\n \"thrust_coefficient_curve_wind_speeds\",\n \"thrust_coefficient_curve_values\",\n ],\n axis=1,\n ).set_index(\"turbine_type\")\n # nominal power in W\n turbine_data_df[\"nominal_power\"] *= 1000\n turbine_data_df.to_csv(filename.format(\"turbine_data\"))\n return turbine_data\n\n\ndef get_turbine_types(turbine_library=\"local\", print_out=True, filter_=True):\n r\"\"\"\n Get all provided wind turbine types provided.\n\n Choose by `turbine_library` whether to get wind turbine types provided by\n the OpenEnergy Database ('oedb') or wind turbine types provided in your\n local file(s) ('local').\n By default only turbine types for which a power coefficient curve or power\n curve is provided are returned. Set `filter_=False` to see all turbine\n types for which any data (e.g. hub height, rotor diameter, ...) is\n provided.\n\n Parameters\n ----------\n turbine_library : str\n Specifies if the oedb turbine library ('oedb') or your local turbine\n data file ('local') is evaluated. Default: 'local'.\n print_out : bool\n Directly prints a tabular containing the turbine types in column\n 'turbine_type', the manufacturer in column 'manufacturer' and\n information about whether a power (coefficient) curve exists (True) or\n not (False) in columns 'has_power_curve' and 'has_cp_curve'.\n Default: True.\n filter_ : bool\n If True only turbine types for which a power coefficient curve or\n power curve is provided in the oedb turbine library are\n returned. 
Default: True.\n\n Returns\n -------\n :pandas:`pandas.DataFrame<frame>`\n Contains turbine types in column 'turbine_type', the manufacturer in\n column 'manufacturer' and information about whether a power\n (coefficient) curve exists (True) or not (False) in columns\n 'has_power_curve' and 'has_cp_curve'.\n\n Notes\n -----\n If the power (coefficient) curve of the desired turbine type (or the\n turbine type itself) is missing you can contact us via github or\n [email protected]. You can help us by providing data in the\n format as shown in\n `the data base <https://openenergy-platform.org/dataedit/view/supply/wind_turbine_library>`_.\n\n Examples\n --------\n >>> from windpowerlib import wind_turbine\n >>> my_df=wind_turbine.get_turbine_types(print_out=False)\n >>> print(my_df[my_df[\"turbine_type\"].str.contains(\"E-126\")].iloc[0])\n manufacturer Enercon\n turbine_type E-126/4200\n has_power_curve True\n has_cp_curve True\n Name: 5, dtype: object\n >>> print(my_df[my_df[\"manufacturer\"].str.contains(\"Enercon\")].iloc[0])\n manufacturer Enercon\n turbine_type E-101/3050\n has_power_curve True\n has_cp_curve True\n Name: 1, dtype: object\n\n \"\"\"\n if turbine_library == \"local\":\n filename = os.path.join(\n os.path.dirname(__file__), \"oedb\", \"turbine_data.csv\"\n )\n df = pd.read_csv(filename, index_col=0).reset_index()\n elif turbine_library == \"oedb\":\n df = load_turbine_data_from_oedb()\n else:\n raise ValueError(\n \"`turbine_library` is '{}' \".format(turbine_library)\n + \"but must be 'local' or 'oedb'.\"\n )\n if filter_:\n cp_curves_df = df.loc[df[\"has_cp_curve\"]][\n [\"manufacturer\", \"turbine_type\", \"has_cp_curve\"]\n ]\n p_curves_df = df.loc[df[\"has_power_curve\"]][\n [\"manufacturer\", \"turbine_type\", \"has_power_curve\"]\n ]\n curves_df = pd.merge(\n p_curves_df, cp_curves_df, how=\"outer\", sort=True\n ).fillna(False)\n else:\n curves_df = df[\n [\"manufacturer\", \"turbine_type\", \"has_power_curve\", \"has_cp_curve\"]\n ]\n if print_out:\n pd.set_option(\"display.max_rows\", len(curves_df))\n print(curves_df)\n pd.reset_option(\"display.max_rows\")\n return curves_df\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv", "pandas.merge", "pandas.reset_option" ] ]
amandalynne/Seattle-Mobility-Index
[ "f21d2fa6913ce9474aedc298e9e4a6e7c9390e64" ]
[ "seamo/tests/test_geocoder.py" ]
[ "\"\"\"\nThis is a test file for universal_geocoder.py\n\"\"\"\nimport init\nimport geocoder \nimport constants as cn\nimport unittest\nimport pandas as pd\nimport os\n#from scripts.core import geopandas_geocoder\n# from .core import geopandas_geocoder\n# import core.geopandas_geocoder as gg\n\nclass UniGeoTest(unittest.TestCase):\n\tdef setUp(self):\n\t\tpass\n\n\tdef test_get_reference(self):\n\t\treference_gdf = geocoder.get_reference()\n\t\tEXPECTED_COLUMNS = ['key', 'geometry', 'geography']\n\t\tself.assertTrue(all(EXPECTED_COLUMNS == reference_gdf.columns))\n\t\tself.assertGreater(len(reference_gdf), 0)\n\n\tdef test_geocode_csv_shape(self):\n\t\tEXPECTED_ROWS = 2\n\t\tEXPECTED_COLUMNS = 8\n\t\tDATA = os.path.join(cn.TEST_DIR, 'test.csv')\n\t\ttest_data = geocoder.geocode_csv(DATA)\n\t\tself.assertTrue(test_data.shape[0] == EXPECTED_ROWS)\n\t\tself.assertTrue(test_data.shape[1] == EXPECTED_COLUMNS)\n\n\tdef test_geocode_csv_blkgroup_classification_small(self):\n\t\tRANDOM_POINTS = os.path.join(cn.TEST_DIR, 'blkgrp_classification.csv')\n\t\tRANDOM_POINT_KEYS = os.path.join(cn.TEST_DIR, 'blkgrp_classification_key.csv')\n\t\tkeys = pd.read_csv(RANDOM_POINT_KEYS)\n\t\ttest_data = geocoder.geocode_csv(RANDOM_POINTS)\n\t\tself.assertTrue(all(test_data[cn.BLOCK_GROUP] == keys['key']))\n\n\t\t\n\tdef test_geocode_csv_blkgroup_classification_LARGE(self):\n\t\tRANDOM_POINTS = os.path.join(cn.TEST_DIR, 'blkgrp_classification_LARGE.csv')\n\t\tRANDOM_POINT_KEYS = os.path.join(cn.TEST_DIR, 'blkgrp_classification_key_LARGE.csv')\n\t\tkeys = pd.read_csv(RANDOM_POINT_KEYS)\n\t\ttest_data = geocoder.geocode_csv(RANDOM_POINTS)\n\t\tself.assertTrue(all(test_data[cn.BLOCK_GROUP] == keys['key']))\n\n\n\n\nsuite = unittest.TestLoader().loadTestsFromTestCase(UniGeoTest)\n_ = unittest.TextTestRunner().run(suite)" ]
[ [ "pandas.read_csv" ] ]
castacks/cvar-energy-risk-deep-model
[ "a0b2295e45139d19570a82daeb081f63d6f9b671" ]
[ "Risk.py" ]
[ "import numpy as np\n \nclass Risk:\n \"\"\"\n A class to find the risk.\n\n ...\n\n Attributes\n ----------\n powers : np.ndarray\n array of power on a path across several monte carlo runs\n limit : float\n value for confidence\n a : float\n coeff a of risk profile\n b : float\n coeff b of risk profile\n B : float\n battery capacity\n\n Methods\n -------\n cvar(new_limit=None):\n Returns the value of risky power value for a path using CVaR\n var(new_limit=None):\n Returns the value of risky power value for a path using VaR\n \"\"\"\n \n def __init__(self, powers, limit=95.,a = None ,b = 300 ,B = 1200):\n \"\"\"\n Constructs all the necessary attributes for the risk object.\n\n Parameters\n ----------\n powers : np.ndarray\n array of power on a path across several monte carlo runs\n limit : float\n value for confidence\n \"\"\"\n self.powers = powers\n self.limit = limit\n if a == None:\n self.a = np.log(2)*b\n else:\n self.a = a\n \n self.b = b\n self.B = B\n \n self.risk_profile()\n \n def risk_profile(self):\n '''\n Returns the risk value for a path using CVaR.\n\n Parameters:\n None\n\n Returns:\n risk (np.ndarray): Array of risk values\n ''' \n\n self.risk = np.exp(np.divide(self.a,np.maximum(self.B-self.powers,self.b))) - 1\n \n \n \n def cvar(self, new_limit=None):\n '''\n Returns the value of risky power value for a path using CVaR.\n\n Parameters:\n new_limit (float): percentage of confidence\n\n Returns:\n cvar (float): Value for risk power based on cvar\n '''\n \n if new_limit != None:\n self.limit = new_limit\n \n assert self.limit < 100\n\n var = np.percentile(self.risk, self.limit)\n cvar = self.risk[self.risk >= var].mean()\n \n return cvar\n \n def var(self, new_limit=None):\n '''\n Returns the value of risky power value for a path using VaR.\n\n Parameters:\n new_limit (float): percentage of confidence\n\n Returns:\n var (float): Value for risk power based on cvar\n '''\n \n if new_limit != None:\n self.limit = new_limit\n \n assert self.limit < 100\n\n var = np.percentile(self.risk, self.limit)\n \n return var" ]
[ [ "numpy.percentile", "numpy.log", "numpy.maximum" ] ]
joshuawall/amuse
[ "c2034074ee76c08057c4faa96c32044ab40952e9" ]
[ "src/amuse/datamodel/grids.py" ]
[ "from amuse.support.core import CompositeDictionary, late\nfrom amuse.units import constants\nfrom amuse.units import units\nfrom amuse.units import generic_unit_system\nfrom amuse.units import quantities\nfrom amuse.units.quantities import Quantity\nfrom amuse.units.quantities import VectorQuantity\nfrom amuse.units.quantities import new_quantity\nfrom amuse.units.quantities import zero\nfrom amuse.units.quantities import column_stack\nfrom amuse.support import exceptions\nfrom amuse.datamodel.base import *\nfrom amuse.datamodel.memory_storage import *\nimport numpy\n\nfrom amuse.datamodel import indexing\n\nclass AbstractGrid(AbstractSet):\n \n GLOBAL_DERIVED_ATTRIBUTES = {}\n \n def _get_value_of_attribute(self, particle, index, attribute):\n if attribute in self._derived_attributes:\n return self._derived_attributes[attribute].get_value_for_entity(self, particle, index)\n else:\n return self._convert_to_entities_or_quantities(self.get_values_in_store(index, [attribute])[0])\n \n def _set_value_of_attribute(self, key, attribute, value): \n if attribute in self._derived_attributes: \t \t \n return self._derived_attributes[attribute].set_value_for_entity(self, key, value) \t \t \n else:\n return self.set_values_in_store(key, [attribute], [value])\n \n def _get_values_for_entity(self, key, attributes):\n return self.get_values_in_store(key, attributes)\n \n def _set_values_for_entity(self, key, attributes, values):\n return self.set_values_in_store(key, attributes, values)\n \n def _get_particle(self, index):\n return GridPoint(index, self._original_set())\n \n \n def previous_state(self):\n return self._private.previous\n \n \n def savepoint(self, timestamp=None, **attributes):\n try:\n instance = type(self)()\n instance._private.attribute_storage = self._private.attribute_storage.copy()\n except:\n instance=self.copy() # for the case of subgrid, maybe always ok\n\n instance.collection_attributes.timestamp = timestamp\n \n for name, value in attributes.iteritems():\n setattr(instance.collection_attributes, name, value)\n \n instance._private.previous = self._private.previous\n self._private.previous = instance\n return instance\n \n \n def get_timestamp(self):\n return self.collection_attributes.timestamp\n \n def new_channel_to(self, other):\n return GridInformationChannel(self, other)\n def new_remapping_channel_to(self, other, remapper):\n return GridRemappingChannel(self, other, remapper)\n \n def copy(self, memento = None, keep_structure = False, filter_attributes = lambda particle_set, x : True):\n attributes = self.get_attribute_names_defined_in_store()\n attributes = [x for x in attributes if filter_attributes(self, x)]\n \n values = self.get_values_in_store(Ellipsis, attributes)\n result = self._factory_for_new_collection()(*self.shape)\n \n if memento is None:\n memento = {}\n memento[id(self._original_set())] = result\n \n converted = []\n for x in values:\n if isinstance(x, LinkedArray):\n converted.append(x.copy(memento, keep_structure))\n else:\n converted.append(x)\n result.set_values_in_store(Ellipsis, attributes, converted)\n \n object.__setattr__(result, \"_derived_attributes\", CompositeDictionary(self._derived_attributes))\n result._private.collection_attributes = self._private.collection_attributes._copy_for_collection(result)\n return result\n \n \n def _factory_for_new_collection(self):\n return self.__class__\n \n def empty_copy(self):\n result = self._factory_for_new_collection()(*self.shape)\n result.set_values_in_store(None, [],[])\n object.__setattr__(result, 
\"_derived_attributes\", CompositeDictionary(self._derived_attributes))\n result._private.collection_attributes = self._private.collection_attributes._copy_for_collection(result)\n return result\n \n def samplePoint(self, position=None, method=\"nearest\", **kwargs):\n if method in [\"nearest\"]:\n return SamplePointOnCellCenter(self, position=position, **kwargs)\n elif method in [\"interpolation\", \"linear\"]:\n return SamplePointWithInterpolation(self, position=position, **kwargs)\n else:\n raise Exception(\"unknown sample method\")\n \n def samplePoints(self, positions=None, method=\"nearest\", **kwargs):\n if method in [\"nearest\"]:\n return SamplePointsOnGrid(self, positions, SamplePointOnCellCenter, **kwargs)\n elif method in [\"interpolation\", \"linear\"]:\n return SamplePointsOnGrid(self, positions, SamplePointWithInterpolation, **kwargs)\n else:\n raise Exception(\"unknown sample method\")\n\n \n def __len__(self):\n return self.shape[0]\n \n def __iter__(self):\n for i in range(self.shape[0]):\n yield self[i]\n \n def get_all_indices_in_store(self):\n return self.get_all_keys_in_store()\n \n def can_extend_attributes(self):\n return self._original_set().can_extend_attributes()\n \n def __str__(self):\n dimensionstr = ' x '.join(([str(x) for x in self.shape]))\n attributes=self.get_attribute_names_defined_in_store()\n settable=self.get_defined_settable_attribute_names()\n strings=[a if a in settable else a+\" (ro)\" for a in attributes]\n attrstr= ', '.join(strings)\n return \"{0}({1}) ( {2} )\".format(\n self.__class__.__name__, \n dimensionstr,\n attrstr\n )\n\n def iter_history(self):\n raise Exception(\"not implemented\")\n \n @property\n def history(self):\n return reversed(list(self.iter_history()))\n\n def get_timeline_of_attribute(self, attribute):\n timeline = []\n for x in self.history:\n timeline.append((x.collection_attributes.timestamp, getattr(x,attribute)))\n return timeline\n\n def get_timeline_of_attribute_as_vector(self, attribute):\n timestamps = AdaptingVectorQuantity()\n timeline = AdaptingVectorQuantity()\n for x in self.history:\n timestamps.append(x.collection_attributes.timestamp)\n timeline.append(getattr(x,attribute))\n return timestamps,timeline\n \nclass BaseGrid(AbstractGrid):\n def __init__(self, *args, **kwargs):\n AbstractGrid.__init__(self)\n \n if \"storage\" in kwargs:\n self._private.attribute_storage = kwargs['storage']\n else:\n self._private.attribute_storage = InMemoryGridAttributeStorage(*args)\n \n self._private.previous = None\n self.collection_attributes.timestamp = None\n \n def can_extend_attributes(self):\n return self._private.attribute_storage.can_extend_attributes()\n \n def get_values_in_store(self, indices, attributes, by_key = True):\n result = self._private.attribute_storage.get_values_in_store(indices, attributes)\n return result\n \n def set_values_in_store(self, indices, attributes, values, by_key = True):\n self._private.attribute_storage.set_values_in_store(indices, attributes, values)\n \n def set_values_in_store_async(self, indices, attributes, values, by_key = True):\n return self._private.attribute_storage.set_values_in_store_async(indices, attributes, values)\n\n def get_attribute_names_defined_in_store(self):\n return self._private.attribute_storage.get_defined_attribute_names()\n \n def get_defined_settable_attribute_names(self):\n return self._private.attribute_storage.get_defined_settable_attribute_names()\n\n\n def _original_set(self):\n return self\n \n def get_all_keys_in_store(self):\n return 
self._private.attribute_storage.get_all_keys_in_store()\n \n def __getitem__(self, index):\n return new_subgrid_from_index(self, index)\n \n def iter_cells(self):\n shape = numpy.asarray(self.shape)\n \n index = 0 * shape\n \n while index[0] < shape[0]:\n yield self._get_gridpoint(tuple(index))\n \n index[-1] += 1\n for i in range(len(self.shape) - 1, 0, -1):\n if index[i] >= shape[i]:\n index[i] = 0\n index[i-1] += 1\n \n \n def _get_gridpoint(self, index):\n return GridPoint(index, self)\n \n def number_of_dimensions(self):\n return len(self.shape)\n \n @property\n def shape(self):\n return self._private.attribute_storage.storage_shape()\n \n @property\n def size(self):\n return numpy.prod(self.shape)\n \n \n def indices(self):\n return numpy.indices(self.shape)\n \n def iter_history(self):\n current = self._private.previous\n while not current is None:\n yield current\n current = current._private.previous\n \n @classmethod\n def create(cls,*args,**kwargs):\n print (\"Grid.create deprecated, use new_regular_grid instead\")\n return new_regular_grid(*args,**kwargs)\n\n def get_axes_names(self):\n if \"position\" in self.GLOBAL_DERIVED_ATTRIBUTES:\n result=self.GLOBAL_DERIVED_ATTRIBUTES[\"position\"].attribute_names\n elif \"position\" in self._derived_attributes:\n result=self._derived_attributes[\"position\"].attribute_names\n else:\n try:\n result=self._axes_names\n except:\n raise Exception(\"do not know how to find axes_names\")\n return list(result)\n\nclass UnstructuredGrid(BaseGrid):\n GLOBAL_DERIVED_ATTRIBUTES=CompositeDictionary(BaseGrid.GLOBAL_DERIVED_ATTRIBUTES)\nclass StructuredBaseGrid(BaseGrid):\n GLOBAL_DERIVED_ATTRIBUTES=CompositeDictionary(BaseGrid.GLOBAL_DERIVED_ATTRIBUTES)\nclass StructuredGrid(StructuredBaseGrid):\n GLOBAL_DERIVED_ATTRIBUTES=CompositeDictionary(StructuredBaseGrid.GLOBAL_DERIVED_ATTRIBUTES)\nclass RectilinearBaseGrid(StructuredBaseGrid):\n GLOBAL_DERIVED_ATTRIBUTES=CompositeDictionary(StructuredBaseGrid.GLOBAL_DERIVED_ATTRIBUTES)\nclass RectilinearGrid(RectilinearBaseGrid):\n GLOBAL_DERIVED_ATTRIBUTES=CompositeDictionary(RectilinearBaseGrid.GLOBAL_DERIVED_ATTRIBUTES)\nclass RegularBaseGrid(RectilinearBaseGrid):\n GLOBAL_DERIVED_ATTRIBUTES=CompositeDictionary(RectilinearBaseGrid.GLOBAL_DERIVED_ATTRIBUTES)\nclass RegularGrid(RegularBaseGrid):\n GLOBAL_DERIVED_ATTRIBUTES=CompositeDictionary(RegularBaseGrid.GLOBAL_DERIVED_ATTRIBUTES)\nclass CartesianBaseGrid(RegularBaseGrid):\n GLOBAL_DERIVED_ATTRIBUTES=CompositeDictionary(RegularBaseGrid.GLOBAL_DERIVED_ATTRIBUTES)\nclass CartesianGrid(CartesianBaseGrid):\n GLOBAL_DERIVED_ATTRIBUTES=CompositeDictionary(CartesianBaseGrid.GLOBAL_DERIVED_ATTRIBUTES)\n\n# maintains compatibility with previous def.\nclass Grid(RegularGrid):\n GLOBAL_DERIVED_ATTRIBUTES=CompositeDictionary(RegularBaseGrid.GLOBAL_DERIVED_ATTRIBUTES)\n \ndef new_cartesian_grid(shape, cellsize, axes_names = \"xyz\",offset=None):\n \"\"\"Returns a cartesian grid with cells of size cellsize.\n \"\"\"\n if len(axes_names)<len(shape):\n raise Exception(\"provide enough axes names\")\n\n result = CartesianGrid(*shape)\n \n all_indices = numpy.indices(shape)+0.5\n \n if offset is None:\n offset=[0.*cellsize]*len(shape)\n \n def positions(indices):\n return cellsize * indices\n \n for indices, n, axis_name, of in zip(all_indices, shape, axes_names,offset):\n setattr(result, axis_name, positions(indices)+of)\n \n result.add_vector_attribute(\"position\", axes_names[0:len(shape)])\n\n object.__setattr__(result,\"_grid_type\",\"cartesian\") # for now for 
convenience, eventually to be converted in seperate classes\n object.__setattr__(result,\"_cellsize\",cellsize)\n \n return result\n\ndef new_regular_grid(shape, lengths, axes_names = \"xyz\",offset=None):\n \"\"\"Returns a regular grid with cells between 0 and lengths.\n \"\"\"\n if len(axes_names)<len(shape):\n raise Exception(\"provide enough axes names\")\n if len(lengths)!=len(shape):\n raise Exception(\"shape and lengths do not conform\")\n\n result = RegularGrid(*shape)\n \n all_indices = numpy.indices(shape)+0.5\n \n if offset is None:\n offset=[0.*l for l in lengths]\n \n def positions(indices, length, n):\n return length * (indices/n)\n \n for indices, length, n, axis_name, of in zip(all_indices, lengths, shape, axes_names,offset):\n setattr(result, axis_name, positions(indices, length, n)+of)\n \n result.add_vector_attribute(\"position\", axes_names[0:len(shape)])\n\n object.__setattr__(result,\"_grid_type\",\"regular\")\n object.__setattr__(result,\"_lengths\",lengths)\n \n return result\n\ndef new_rectilinear_grid(shape, axes_cell_boundaries=None, cell_centers=None, axes_names = \"xyz\",offset=None):\n \"\"\"Returns a rectilinear grid with cells at positions midway given cell boundaries.\n \"\"\"\n if len(axes_names)<len(shape):\n raise Exception(\"provide enough axes names\")\n if not (axes_cell_boundaries or cell_centers):\n raise Exception(\"provide cell boundaries or cell_centers\")\n if axes_cell_boundaries and len(axes_cell_boundaries)!=len(shape):\n raise Exception(\"length of shape and axes positions do not conform\")\n if axes_cell_boundaries:\n for s,b in zip(shape,axes_cell_boundaries):\n if len(b)!=s+1:\n raise Exception(\"number of cell boundary arrays error (must be {0} instead of {1})\".format(s+1,len(b)))\n if cell_centers and len(cell_centers)!=len(shape):\n raise Exception(\"length of shape and axes positions do not conform\")\n if cell_centers:\n for s,b in zip(shape,cell_centers):\n if len(b)!=s:\n raise Exception(\"number of cell_center arrays error (must be {0} instead of {1})\".format(s+1,len(b)))\n\n result = RectilinearGrid(*shape)\n\n all_indices = numpy.indices(shape)\n \n #~ axes_cell_boundaries=[numpy.sort(b) for b in axes_cell_boundaries]\n \n if axes_cell_boundaries:\n positions=[(b[1:]+b[:-1])/2 for b in axes_cell_boundaries]\n if cell_centers:\n positions=cell_centers\n \n if offset is None:\n offset=[0.*l[0] for l in positions]\n \n for indices, axis_pos, axis_name, of in zip(all_indices, positions, axes_names, offset):\n setattr(result, axis_name, axis_pos[indices]+of)\n \n result.add_vector_attribute(\"position\", axes_names[0:len(shape)])\n \n object.__setattr__(result,\"_grid_type\",\"rectilinear\")\n object.__setattr__(result,\"_axes_cell_boundaries\",axes_cell_boundaries)\n object.__setattr__(result,\"_cell_centers\",cell_centers)\n \n return result\n\ndef new_structured_grid(shape, cell_corners, cell_positions=None, axes_names = \"xyz\", offset=None):\n \"\"\"Returns a structured grid with cells with given corners and cell_positions.\n if not present, cell positions default to average of corner positions.\n \"\"\"\n if len(axes_names)<len(shape):\n raise Exception(\"provide enough axes names\")\n if len(cell_corners)!=len(shape):\n raise Exception(\"dimensions of shape and cell_boundaries do not conform\")\n for c in cell_corners:\n if not numpy.all([s1==s2+1 for s1,s2 in zip(c.shape,shape)]):\n shape1=[s+1 for s in shape]\n raise Exception(\"size of cell_corner arrays must be {0} instead of 
{1}\".format(shape1.__str__(),c.shape.__str__())) \n\n if cell_positions is None:\n cell_positions=[]\n for cc in cell_corners:\n cp=numpy.zeros(shape) * cc.flat[0]\n for i in range(2**len(shape)):\n slicing=[]\n for j in range(len(shape)):\n if i & 2**j:\n slicing.append(slice(1,None)) \n else:\n slicing.append(slice(None,-1))\n cp=cp+cc[slicing] \n cell_positions.append(cp/2**len(shape)) \n\n if len(cell_positions)!=len(shape):\n raise Exception(\"dimensions of shape and cell_positions do not conform\")\n for c in cell_positions:\n if not numpy.all([s1==s2 for s1,s2 in zip(c.shape,shape)]):\n raise Exception(\"size of cell_position arrays must be {0} instead of {1}\".format(shape1.__str__(),c.shape.__str__())) \n\n if offset is None:\n offset=[0.*l.flat[0] for l in cell_positions]\n\n result = StructuredGrid(*shape)\n\n for axis_name, pos, of in zip(axes_names, cell_positions, offset):\n setattr(result, axis_name, pos + of)\n\n result.add_vector_attribute(\"position\", axes_names[0:len(shape)])\n \n object.__setattr__(result,\"_grid_type\",\"structured\")\n object.__setattr__(result,\"_cell_corners\", cell_corners)\n \n return result\n\ndef new_unstructured_grid(size, num_corners, cell_corners, cell_positions=None, axes_names=\"xyz\", offset=None):\n \"\"\"Returns an unstructured grid with cells with given corners and cell_positions.\n if not present, cell positions default to average of corner positions.\n \"\"\"\n dimensions = cell_corners.size / (num_corners * size)\n if len(axes_names)<dimensions:\n raise Exception(\"provide enough axes names\")\n if len(cell_corners.shape) != 2:\n raise Exception(\"incorrect shape for cell_corners, the number of dimensions of the array should be exactly three (dimensions, size, corners)\")\n if cell_corners.shape[0] != dimensions:\n raise Exception(\"incorrect shape for cell_corners, first dimension should equal the number of dimensions of the space in which the grid is defined\")\n if cell_corners.shape[1] != size * num_corners:\n raise Exception(\"incorrect shape for cell_corners, second dimension should equal the grid size times the number of corners per cell\")\n\n if cell_positions is None:\n cell_positions=[]\n for cc in cell_corners:\n c = cc.reshape(size, num_corners)\n cp=numpy.zeros(size)\n for i in range(size):\n cp[i] = c[i].sum() / num_corners\n\n cell_positions.append(cp)\n\n cell_positions = numpy.array(cell_positions)\n\n if len(cell_positions.shape) != 2:\n raise Exception(\"incorrect shape for cell_positions, the number of dimensions of the array should be exactly two (dimensions, size)\")\n if cell_positions.shape[0] != dimensions:\n raise Exception(\"dimensions of cell_positions and cell_corners do not conform\")\n if cell_positions.shape[1] != size:\n raise Exception(\"size of cell_positions and size do not conform\")\n\n if offset is None:\n offset=[0.*l.flat[0] for l in cell_positions]\n\n result = UnstructuredGrid(size)\n\n for axis_name, pos, of in zip(axes_names, cell_positions, offset):\n setattr(result, axis_name, pos + of)\n\n result.add_vector_attribute(\"position\", axes_names[0:dimensions])\n \n object.__setattr__(result,\"_grid_type\",\"unstructured\")\n object.__setattr__(result,\"_num_corners\", num_corners)\n object.__setattr__(result,\"_cell_corners\", cell_corners)\n \n return result\n\nclass SubGrid(AbstractGrid):\n def __init__(self, grid, indices):\n AbstractGrid.__init__(self, grid)\n \n self._private.previous=None\n self._private.grid = grid\n self._private.indices = 
indexing.normalize_slices(grid.shape,indices)\n self._private.collection_attributes=grid.collection_attributes\n \n def _original_set(self):\n return self._private.grid\n \n def previous_state(self):\n previous=self._private.previous\n if previous:\n return previous\n previous=self._private.grid.previous_state()\n if previous:\n return previous[self._private.indices]\n return previous\n \n def get_values_in_store(self, indices, attributes, by_key = True):\n normalized_indices = indexing.normalize_slices(self.shape,indices)\n combined_index = indexing.combine_indices(self._private.indices, normalized_indices)\n result = self._private.grid.get_values_in_store(combined_index, attributes)\n return result\n \n def set_values_in_store(self, indices, attributes, values, by_key = True):\n normalized_indices = indexing.normalize_slices(self.shape,indices)\n combined_index = indexing.combine_indices(self._private.indices, normalized_indices)\n self._private.grid.set_values_in_store(combined_index, attributes, values)\n \n def set_values_in_store_async(self, indices, attributes, values, by_key = True):\n normalized_indices = indexing.normalize_slices(self.shape,indices)\n combined_index = indexing.combine_indices(self._private.indices, normalized_indices)\n return self._private.grid.set_values_in_store_async(combined_index, attributes, values)\n \n def get_all_keys_in_store(self):\n return Ellipsis\n\n def number_of_dimensions(self):\n return indexing.number_of_dimensions_after_index(self._original_set().number_of_dimensions(), self._private.indices)\n \n def __getitem__(self, index):\n normalized_index= indexing.normalize_slices(self.shape,index)\n combined_index = indexing.combine_indices(self._private.indices, normalized_index)\n return new_subgrid_from_index(self._original_set(), combined_index)\n \n def get_attribute_names_defined_in_store(self):\n return self._private.grid.get_attribute_names_defined_in_store()\n \n def get_defined_settable_attribute_names(self):\n return self._private.grid.get_defined_settable_attribute_names()\n \n @property\n def shape(self):\n return indexing.shape_after_index(self._private.grid.shape, self._private.indices )\n\n def indices(self):\n return [x[self._private.indices] for x in self._original_set().indices()]\n \n def __eq__(self, other):\n if self._private.grid!=other._private.grid:\n return False\n else:\n if numpy.all(numpy.array(self.indices())==numpy.array(other.indices())):\n return True\n else:\n return False\n \n def __ne__(self,other):\n return not(self==other)\n\n def _factory_for_new_collection(self):\n return Grid\n\n def iter_history(self):\n if self._private.previous:\n current = self._private.previous\n while not current is None:\n yield current\n current = current._private.previous\n return\n\n current = self._original_set().previous_state()\n while not current is None:\n yield current[self._private.indices]\n current = current.previous_state()\n \nclass GridPoint(object):\n\n def __init__(self, index, grid):\n object.__setattr__(self,\"index\",index)\n object.__setattr__(self,\"grid\",grid) \n \n def __setattr__(self, name_of_the_attribute, new_value_for_the_attribute):\n try:\n self.grid._set_value_of_attribute(self.index, name_of_the_attribute, new_value_for_the_attribute)\n except Exception as ex:\n raise\n raise AttributeError(\"Could not assign to attribute {0}.\".format(name_of_the_attribute))\n \n def __getattr__(self, name_of_the_attribute):\n return self.grid._get_value_of_attribute(self, self.index, name_of_the_attribute)\n \n def 
__eq__(self, other):\n return isinstance(other, type(self)) and other.index == self.index and other.grid == self.grid\n\n def __ne__(self, other):\n return not(isinstance(other, type(self)) and other.index == self.index and other.grid == self.grid)\n \n def get_containing_set(self):\n return self.grid\n\n def iter_history(self):\n current = self.get_containing_set().previous_state()\n while not current is None:\n yield current[self.index]\n current = current.previous_state()\n\n @property\n def history(self):\n return reversed(list(self.iter_history()))\n\n def get_timeline_of_attribute(self, attribute):\n timeline = []\n for x in self.history:\n timeline.append((x.grid.collection_attributes.timestamp, getattr(x,attribute)))\n return timeline\n\n def get_timeline_of_attribute_as_vector(self, attribute):\n timestamps = AdaptingVectorQuantity()\n timeline = AdaptingVectorQuantity()\n for x in self.history:\n timestamps.append(x.grid.collection_attributes.timestamp)\n timeline.append(getattr(x,attribute))\n return timestamps,timeline\n\n\n \ndef new_subgrid_from_index(grid, index):\n if indexing.number_of_dimensions_after_index(grid.number_of_dimensions(), index) == 0:\n return GridPoint(index, grid)\n else:\n return SubGrid(grid, index)\n\nclass GridRemappingChannel(object):\n \"\"\"\n A channel to remap attributes from one grid to another.\n \"\"\"\n \n def __init__(self, source, target, remapper):\n self.source = source\n self.target = target\n if callable(remapper):\n self.remapper = remapper( source, target)\n else:\n self.remapper = remapper\n\n def get_overlapping_attributes(self):\n from_names = self.source.get_attribute_names_defined_in_store()\n to_names = self.target.get_defined_settable_attribute_names()\n names_to_copy = set(from_names).intersection(set(to_names))\n return list(names_to_copy)\n\n def copy_attributes(self, attributes):\n self.remapper.forward_mapping(attributes)\n \n def copy(self):\n if not self.target.can_extend_attributes():\n self.copy_overlapping_attributes()\n else:\n self.copy_all_attributes()\n \n def copy_all_attributes(self):\n names_to_copy = self.source.get_attribute_names_defined_in_store()\n self.copy_attributes(list(names_to_copy)) \n\n def copy_overlapping_attributes(self):\n names_to_copy = self.get_overlapping_attributes()\n self.copy_attributes(names_to_copy) \n\n\nclass GridInformationChannel(object):\n \"\"\"\n A channel to copy attributes from one grid to another.\n For each dimension copies cells from 0 - min(grid0.size, grid1.size).\n \"\"\"\n \n def __init__(self, source, target):\n self.source = source\n self.target = target\n self._reindex()\n \n def _reindex(self):\n source_shape = self.source.shape\n target_shape = self.target.shape\n if len(source_shape) != len(target_shape):\n raise exceptions.AmuseException(\"The source and target grids do not have the same dimensions, cannot use this channel\")\n index = [numpy.s_[0:min(x,y)] for x,y in zip(source_shape, target_shape)]\n index = tuple(index)\n \n self.index = index\n \n def get_values(self, attributes):\n values = self.source.get_values_in_store(self.index, attributes)\n converted = []\n for x in values:\n if isinstance(x, LinkedArray):\n converted.append(x.copy_with_link_transfer(self.source, self.target))\n else:\n converted.append(x)\n return converted\n\n def get_overlapping_attributes(self):\n from_names = self.source.get_attribute_names_defined_in_store()\n to_names = self.target.get_defined_settable_attribute_names()\n names_to_copy = 
set(from_names).intersection(set(to_names))\n return list(names_to_copy)\n \n def copy_attributes(self, attributes):\n converted=self.get_values(attributes) \n self.target.set_values_in_store(self.index, attributes, converted)\n \n def copy(self):\n if not self.target.can_extend_attributes():\n self.copy_overlapping_attributes()\n else:\n self.copy_all_attributes()\n \n def copy_all_attributes(self):\n names_to_copy = self.source.get_attribute_names_defined_in_store()\n self.copy_attributes(list(names_to_copy)) \n\n def copy_overlapping_attributes(self):\n names_to_copy = self.get_overlapping_attributes()\n self.copy_attributes(names_to_copy)\n\n def transform_values(self, attributes, f):\n values = self.source.get_values_in_store(self.index, attributes)\n return f(*values)\n \n def transform(self, target, function, source):\n \"\"\" Copy and transform values of one attribute from the source set to the target set.\n\n :argument target: name of the attributes in the target set\n :argument function: function used for transform, should return tuple\n :argument source: name of the attribute in the source set\n\n >>> from amuse.datamodel import Grid\n >>> grid1 = Grid(2)\n >>> grid2 = Grid(2)\n >>> grid1.attribute1 = 1\n >>> grid1.attribute2 = 2\n >>> channel = grid1.new_channel_to(grid2)\n >>> channel.transform([\"attribute3\",\"attribute4\"], lambda x,y: (y+x,y-x), [\"attribute1\",\"attribute2\"])\n >>> print grid2.attribute3\n [3 3]\n >>> print grid2.attribute4\n [1 1]\n\n \"\"\"\n if function is None:\n function=lambda *x : x\n \n if not self.target.can_extend_attributes():\n target_attributes = self.target.get_defined_settable_attribute_names()\n if not set(target).issubset(set(target_attributes)):\n raise Exception(\"trying to set unsettable attributes {0}\".format(\n list(set(target)-set(target_attributes))) )\n converted=self.transform_values(source, function)\n if len(converted) != len(target):\n raise Exception(\"function {0} returns {1} values while target attributes are {2} of length {3}\".format(\n function.__name__, len(converted), target, len(target)))\n self.target.set_values_in_store(self.index, target, converted) \n\nclass SamplePointOnCellCenter(object):\n def __init__(self, grid, point=None, **kwargs):\n self.grid = grid\n self.point = self.grid._get_array_of_positions_from_arguments(pos=point, **kwargs)\n \n @late\n def position(self):\n return self.cell.position\n \n @late\n def index(self):\n return self.grid.get_index(self.point)\n \n @late\n def isvalid(self):\n return numpy.logical_and(\n numpy.all(self.index >= self.grid.get_minimum_index()),\n numpy.all(self.index <= self.grid.get_maximum_index())\n )\n \n @late\n def cell(self):\n return self.grid[tuple(self.index)]\n \n def get_value_of_attribute(self, name_of_the_attribute):\n return getattr(self.cell, name_of_the_attribute)\n \n def __getattr__(self, name_of_the_attribute):\n return self.get_value_of_attribute(name_of_the_attribute)\n\nclass SamplePointWithInterpolation(object):\n \"\"\"\n Vxyz =\n V000 (1 - x) (1 - y) (1 - z) +\n V100 x (1 - y) (1 - z) + \n V010 (1 - x) y (1 - z) + \n V001 (1 - x) (1 - y) z +\n V101 x (1 - y) z + \n V011 (1 - x) y z + \n V110 x y (1 - z) + \n V111 x y z\n \"\"\"\n \n def __init__(self, grid, point=None, **kwargs):\n self.grid = grid\n self.point = self.grid._get_array_of_positions_from_arguments(pos=point, **kwargs)\n \n @late\n def position(self):\n return self.point\n \n @late\n def index(self):\n return self.grid.get_index(self.point)\n \n @late\n def 
index_for_000_cell(self):\n        offset = self.point - self.grid[0,0,0].position\n        indices = (offset / self.grid.cellsize())\n        return numpy.floor(indices).astype(numpy.int)\n\n    @late\n    def index_for_111_cell(self):\n        return self.index_for_000_cell + [1,1,1]\n    \n    @late\n    def surrounding_cell_indices(self):\n        cell000 = self.index_for_000_cell\n        translations = [\n            [0, 0, 0],\n            [1, 0, 0],\n            [0, 1, 0],\n            [0, 0, 1],\n            [1, 0, 1],\n            [0, 1, 1],\n            [1, 1, 0],\n            [1, 1, 1],\n        ]\n        return cell000 + translations\n    \n    @late\n    def weighing_factors(self):\n        x0,y0,z0 = self.grid[tuple(self.index_for_000_cell)].position\n        x1,y1,z1 = self.grid[tuple(self.index_for_000_cell + [1,1,1])].position\n        x,y,z = self.point\n        \n        \n        dx1 = (x1 - x) / (x1 - x0)\n        dy1 = (y1 - y) / (y1 - y0)\n        dz1 = (z1 - z) / (z1 - z0)\n        dx0 = (x - x0) / (x1 - x0)\n        dy0 = (y - y0) / (y1 - y0)\n        dz0 = (z - z0) / (z1 - z0)\n        \n        result = numpy.asarray([\n            dx1 * dy1 * dz1,\n            dx0 * dy1 * dz1,\n            dx1 * dy0 * dz1,\n            dx1 * dy1 * dz0,\n            dx0 * dy1 * dz0,\n            dx1 * dy0 * dz0,\n            dx0 * dy0 * dz1,\n            dx0 * dy0 * dz0\n        ] )\n        return result \n    \n    @late\n    def surrounding_cells(self):\n        return [self.grid[tuple(x)] for x in self.surrounding_cell_indices]\n    \n    @late\n    def isvalid(self):\n        return numpy.logical_and(\n            numpy.all(self.index_for_000_cell >= self.grid.get_minimum_index()),\n            numpy.all(self.index_for_111_cell <= self.grid.get_maximum_index())\n        )\n    \n    def get_values_of_attribute(self, name_of_the_attribute):\n        result = quantities.AdaptingVectorQuantity()\n        for x in self.surrounding_cells:\n            result.append(getattr(x, name_of_the_attribute))\n        return result\n    \n    def __getattr__(self, name_of_the_attribute):\n        values = self.get_values_of_attribute(name_of_the_attribute)\n        return (values * self.weighing_factors).sum()\n    \n    \n\n
class SamplePointsOnGrid(object):\n    \n    def __init__(self, grid, points=None, samples_factory = SamplePointWithInterpolation, **kwargs):\n        self.grid = grid\n        points=self.grid._get_array_of_positions_from_arguments(pos=points,**kwargs)\n        self.samples = [samples_factory(grid, x) for x in points]\n        self.samples = [x for x in self.samples if x.isvalid ]\n    \n    @late\n    def indices(self):\n        for x in self.samples:\n            yield x.index\n    \n    @late\n    def positions(self):\n        for x in self.samples:\n            yield x.position\n    \n    def __getattr__(self, name_of_the_attribute):\n        result = quantities.AdaptingVectorQuantity()\n        for x in self.samples:\n            result.append(getattr(x, name_of_the_attribute))\n        return result\n    \n    def __iter__(self):\n        for x in range(len(self)):\n            yield self[x]\n    \n    def __getitem__(self, index):\n        return self.samples[index]\n    \n    def __len__(self):\n        return len(self.samples)\n\n
class SamplePointsOnMultipleGrids(object):\n    \n    def __init__(self, grids, points, samples_factory = SamplePointWithInterpolation, index_factory = None):\n        self.grids = grids\n        self.points = points\n        self.samples_factory = samples_factory\n        if index_factory is None:\n            self.index = None\n        else:\n            self.index = index_factory(self.grids)\n    \n    def _grid_for_point(self, point):\n        if self.index is None:\n            for grid in self.grids:\n                if (numpy.all(point >= grid.get_minimum_position()) and\n                    numpy.all(point < grid.get_maximum_position())):\n                    return grid\n            return None \n        else:\n            return self.index.grid_for_point(point)\n    \n    def filterout_duplicate_indices(self):\n        previous_grid = None\n        previous_index = None \n        filteredout = [] \n        for x in self.samples:\n            if x.grid is previous_grid and numpy.all(x.index == previous_index):\n                pass\n            else:\n                previous_grid= x.grid\n                previous_index = x.index\n                filteredout.append(x)\n        self.samples = filteredout\n    \n    def get_samples(self):\n        result = []\n        for x in self.points:\n            grid = self._grid_for_point(x)\n            if grid is None:\n                continue\n            sample = self.samples_factory(grid, x)\n            if not sample.isvalid:\n                continue\n            result.append(sample)\n        return result\n    \n    @late\n    def samples(self):\n        result = []\n        for x in self.points:\n            grid = self._grid_for_point(x)\n            if grid is None:\n                continue\n            sample = self.samples_factory(grid, x)\n            if not sample.isvalid:\n                continue\n            result.append(sample)\n        return result\n    \n    @late\n    def indices(self):\n        for x in self.samples:\n            yield x.index\n    \n    @late\n    def positions(self):\n        for x in self.samples:\n            yield x.position\n    \n    def __getattr__(self, name_of_the_attribute):\n        self.get_samples()\n        result = quantities.AdaptingVectorQuantity()\n        for x in self.samples:\n            result.append(getattr(x, name_of_the_attribute))\n        return result\n    \n    def __iter__(self):\n        for x in range(len(self)):\n            yield self[x]\n    \n    def __getitem__(self, index):\n        return self.samples[index]\n    \n    def __len__(self):\n        return len(self.samples)\n    \n    \n\n
class NonOverlappingGridsIndexer(object):\n    \n    def __init__(self, grids):\n        self.grids = grids\n        self.setup_index()\n    \n    @late\n    def minimum_position(self):\n        result = self.grids[0].get_minimum_position()\n        for x in self.grids[1:]:\n            minimum = x.get_minimum_position()\n            result = result.minimum(minimum)\n        return result\n    \n    def setup_index(self):\n        smallest_boxsize = None\n        for x in self.grids:\n            boxsize = x.get_maximum_position() - x.get_minimum_position()\n            if smallest_boxsize is None:\n                smallest_boxsize = boxsize\n            else:\n                smallest_boxsize = boxsize.minimum(smallest_boxsize)\n        \n        self.smallest_boxsize = smallest_boxsize\n        max_index = [0,0,0]\n        \n        for x in self.grids:\n            index = (x.get_maximum_position() / smallest_boxsize)\n            index = numpy.floor(index).astype(numpy.int)\n            max_index = numpy.where(index > max_index, index, max_index)\n        \n        self.grids_on_index = numpy.zeros(max_index, 'int')\n        \n        for index,x in enumerate(self.grids):\n            bottom_left = x.get_minimum_position()\n            index_of_grid = (bottom_left / smallest_boxsize)\n            size = ((x.get_maximum_position() - x.get_minimum_position()) / smallest_boxsize)\n            i,j,k = numpy.floor(index_of_grid).astype(numpy.int)\n            ni,nj,nk = numpy.floor(size).astype(numpy.int)\n            self.grids_on_index[i:i+ni,j:j+nj,k:k+nk] = index\n        \n        \n    def grid_for_point(self, position):\n        index = ((position - self.minimum_position) / self.smallest_boxsize)\n        index = numpy.floor(index).astype(numpy.int)\n        index_of_grid = self.grids_on_index[tuple(index)]\n        return self.grids[index_of_grid]\n    \n    def grids_for_points(self, points):\n        index = ((points - self.minimum_position) / self.smallest_boxsize)\n        index = numpy.floor(index).astype(numpy.int)\n        index_of_grid = self.grids_on_index[tuple(index)]\n        return self.grids[index_of_grid]\n\n\n# convenience function to convert input arguments to positions (or vector of \"points\")\ndef _get_array_of_positions_from_arguments(axes_names, **kwargs):\n    if kwargs.get('pos',None):\n        return kwargs['pos']\n    if kwargs.get('position',None):\n        return kwargs['position']\n    \n    coordinates=[kwargs[x] for x in axes_names]\n    if numpy.ndim(coordinates[0])==0:\n        return VectorQuantity.new_from_scalar_quantities(*coordinates)\n    return column_stack(coordinates)\n" ]
[ [ "numpy.array", "numpy.asarray", "numpy.zeros", "numpy.where", "numpy.prod", "numpy.ndim", "numpy.all", "numpy.indices", "numpy.floor" ] ]
Michaelzhouisnotwhite/motion-detection-dashboard-restfulapi
[ "772064e9d2a53855488ad66b34abc2fd9edb1f7c" ]
[ "video_streamer/utils/augmentations.py" ]
[ "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nImage augmentation functions\n\"\"\"\n\nimport math\nimport random\n\nimport cv2\nimport numpy as np\n\nfrom .general import LOGGER, check_version, colorstr, resample_segments, segment2box\nfrom .metrics import bbox_ioa\n\n\nclass Albumentations:\n # YOLOv5 Albumentations class (optional, only used if package is installed)\n def __init__(self):\n self.transform = None\n try:\n import albumentations as A\n check_version(A.__version__, '1.0.3', hard=True) # version requirement\n\n self.transform = A.Compose([\n A.Blur(p=0.01),\n A.MedianBlur(p=0.01),\n A.ToGray(p=0.01),\n A.CLAHE(p=0.01),\n A.RandomBrightnessContrast(p=0.0),\n A.RandomGamma(p=0.0),\n A.ImageCompression(quality_lower=75, p=0.0)],\n bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels']))\n\n LOGGER.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p))\n except ImportError: # package not installed, skip\n pass\n except Exception as e:\n LOGGER.info(colorstr('albumentations: ') + f'{e}')\n\n def __call__(self, im, labels, p=1.0):\n if self.transform and random.random() < p:\n new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed\n im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])])\n return im, labels\n\n\ndef augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):\n # HSV color-space augmentation\n if hgain or sgain or vgain:\n r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains\n hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV))\n dtype = im.dtype # uint8\n\n x = np.arange(0, 256, dtype=r.dtype)\n lut_hue = ((x * r[0]) % 180).astype(dtype)\n lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)\n lut_val = np.clip(x * r[2], 0, 255).astype(dtype)\n\n im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))\n cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed\n\n\ndef hist_equalize(im, clahe=True, bgr=False):\n # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255\n yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)\n if clahe:\n c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n yuv[:, :, 0] = c.apply(yuv[:, :, 0])\n else:\n yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram\n return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB\n\n\ndef replicate(im, labels):\n # Replicate labels\n h, w = im.shape[:2]\n boxes = labels[:, 1:].astype(int)\n x1, y1, x2, y2 = boxes.T\n s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)\n for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices\n x1b, y1b, x2b, y2b = boxes[i]\n bh, bw = y2b - y1b, x2b - x1b\n yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y\n x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]\n im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax]\n labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)\n\n return im, labels\n\n\ndef letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):\n # Resize and pad image while meeting stride-multiple constraints\n shape = im.shape[:2] # current shape [height, width]\n if isinstance(new_shape, int):\n new_shape = (new_shape, new_shape)\n\n # Scale ratio (new / old)\n r = min(new_shape[0] / shape[0], new_shape[1] / 
shape[1])\n if not scaleup: # only scale down, do not scale up (for better val mAP)\n r = min(r, 1.0)\n\n # Compute padding\n ratio = r, r # width, height ratios\n new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))\n dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding\n if auto: # minimum rectangle\n dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding\n elif scaleFill: # stretch\n dw, dh = 0.0, 0.0\n new_unpad = (new_shape[1], new_shape[0])\n ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios\n\n dw /= 2 # divide padding into 2 sides\n dh /= 2\n\n if shape[::-1] != new_unpad: # resize\n im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)\n top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))\n left, right = int(round(dw - 0.1)), int(round(dw + 0.1))\n im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border\n return im, ratio, (dw, dh)\n\n\ndef random_perspective(im, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,\n border=(0, 0)):\n # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10))\n # targets = [cls, xyxy]\n\n height = im.shape[0] + border[0] * 2 # shape(h,w,c)\n width = im.shape[1] + border[1] * 2\n\n # Center\n C = np.eye(3)\n C[0, 2] = -im.shape[1] / 2 # x translation (pixels)\n C[1, 2] = -im.shape[0] / 2 # y translation (pixels)\n\n # Perspective\n P = np.eye(3)\n P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)\n P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)\n\n # Rotation and Scale\n R = np.eye(3)\n a = random.uniform(-degrees, degrees)\n # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations\n s = random.uniform(1 - scale, 1 + scale)\n # s = 2 ** random.uniform(-scale, scale)\n R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)\n\n # Shear\n S = np.eye(3)\n S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)\n S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)\n\n # Translation\n T = np.eye(3)\n T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)\n T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)\n\n # Combined rotation matrix\n M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT\n if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed\n if perspective:\n im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))\n else: # affine\n im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))\n\n # Visualize\n # import matplotlib.pyplot as plt\n # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()\n # ax[0].imshow(im[:, :, ::-1]) # base\n # ax[1].imshow(im2[:, :, ::-1]) # warped\n\n # Transform label coordinates\n n = len(targets)\n if n:\n use_segments = any(x.any() for x in segments)\n new = np.zeros((n, 4))\n if use_segments: # warp segments\n segments = resample_segments(segments) # upsample\n for i, segment in enumerate(segments):\n xy = np.ones((len(segment), 3))\n xy[:, :2] = segment\n xy = xy @ M.T # transform\n xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine\n\n # clip\n new[i] = segment2box(xy, width, height)\n\n else: # warp boxes\n xy = np.ones((n * 4, 
3))\n xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1\n xy = xy @ M.T # transform\n xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine\n\n # create new boxes\n x = xy[:, [0, 2, 4, 6]]\n y = xy[:, [1, 3, 5, 7]]\n new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T\n\n # clip\n new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)\n new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)\n\n # filter candidates\n i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)\n targets = targets[i]\n targets[:, 1:5] = new[i]\n\n return im, targets\n\n\ndef copy_paste(im, labels, segments, p=0.5):\n # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy)\n n = len(segments)\n if p and n:\n h, w, c = im.shape # height, width, channels\n im_new = np.zeros(im.shape, np.uint8)\n for j in random.sample(range(n), k=round(p * n)):\n l, s = labels[j], segments[j]\n box = w - l[3], l[2], w - l[1], l[4]\n ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area\n if (ioa < 0.30).all(): # allow 30% obscuration of existing labels\n labels = np.concatenate((labels, [[l[0], *box]]), 0)\n segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1))\n cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED)\n\n result = cv2.bitwise_and(src1=im, src2=im_new)\n result = cv2.flip(result, 1) # augment segments (flip left-right)\n i = result > 0 # pixels to replace\n # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch\n im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug\n\n return im, labels, segments\n\n\ndef cutout(im, labels, p=0.5):\n # Applies image cutout augmentation https://arxiv.org/abs/1708.04552\n if random.random() < p:\n h, w = im.shape[:2]\n scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction\n for s in scales:\n mask_h = random.randint(1, int(h * s)) # create random masks\n mask_w = random.randint(1, int(w * s))\n\n # box\n xmin = max(0, random.randint(0, w) - mask_w // 2)\n ymin = max(0, random.randint(0, h) - mask_h // 2)\n xmax = min(w, xmin + mask_w)\n ymax = min(h, ymin + mask_h)\n\n # apply random color mask\n im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]\n\n # return unobscured labels\n if len(labels) and s > 0.03:\n box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)\n ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area\n labels = labels[ioa < 0.60] # remove >60% obscured labels\n\n return labels\n\n\ndef mixup(im, labels, im2, labels2):\n # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf\n r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0\n im = (im * r + im2 * (1 - r)).astype(np.uint8)\n labels = np.concatenate((labels, labels2), 0)\n return im, labels\n\n\ndef box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n)\n # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio\n w1, h1 = box1[2] - box1[0], box1[3] - box1[1]\n w2, h2 = box2[2] - box2[0], box2[3] - box2[1]\n ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio\n return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates\n" ]
[ [ "numpy.concatenate", "numpy.clip", "numpy.array", "numpy.zeros", "numpy.ones", "numpy.eye", "numpy.random.beta", "numpy.random.uniform", "numpy.arange", "numpy.append", "numpy.mod", "numpy.maximum" ] ]
paulfjacobs/py-mep
[ "da418a10f0c13537560c91b05b699c433076f314" ]
[ "tests/mep/genetics/test_gene.py" ]
[ "import unittest\nfrom mep.genetics.gene import VariableGene, OperatorGene\nimport numpy as np\n\n\nclass TestGene(unittest.TestCase):\n \"\"\"\n Tests for the genes.\n \"\"\"\n\n def test_basic_constant(self):\n \"\"\"\n Simple check of a constant gene with just 1 gene in the chromosome.\n \"\"\"\n # construct\n constant_index = 0\n gene = VariableGene(constant_index, is_feature=False)\n\n # simple eval matrix; 1 gene in a chromosome, 3 examples, 2 constants\n num_examples = 2\n num_genes = 1\n num_features = 3\n\n # create\n constants = [1., 2.]\n eval_matrix = np.zeros((num_genes, num_examples))\n data_matrix = np.zeros((num_examples, num_features))\n targets = [0] * num_examples\n\n # expected; only one gene and it is going to be using the first constant;\n gene_index = 0\n expected_eval_matrix = np.array([[constants[constant_index], constants[constant_index]]])\n\n # run the evaluate\n error = gene.evaluate(gene_index, eval_matrix, data_matrix, constants, targets)\n self.assertTrue(np.array_equal(expected_eval_matrix, eval_matrix))\n self.assertEqual((1. - 0) + (1. - 0), error)\n\n def test_basic_feature_gene(self):\n \"\"\"\n Simple check of a feature/input gene with just 1 gene in the chromosome.\n \"\"\"\n # construct\n feature_index = 1\n gene = VariableGene(feature_index, is_feature=True)\n\n # simple eval matrix; 1 gene in a chromosome, 3 examples, 2 constants\n num_examples = 2\n num_genes = 1\n num_features = 3\n\n # create\n constants = [1., 2.]\n eval_matrix = np.zeros((num_genes, num_examples))\n data_matrix = np.zeros((num_examples, num_features))\n targets = [0] * num_examples\n\n # set the data matrix for the feature that we care about\n data_matrix[0, feature_index] = 5.\n data_matrix[1, feature_index] = 7.\n\n # expected; only one gene and it is going to be using the first constant;\n gene_index = 0\n expected_eval_matrix = np.array([[data_matrix[0, feature_index], data_matrix[1, feature_index]]])\n\n # run the evaluate\n error = gene.evaluate(gene_index, eval_matrix, data_matrix, constants, targets)\n self.assertTrue(np.array_equal(expected_eval_matrix, eval_matrix))\n self.assertEqual((5. - 0.) + (7. - 0.), error)\n\n def test_constant_and_feature_gene(self):\n \"\"\"\n Intermix constant and feature genes.\n \"\"\"\n # construct\n feature_index = 1\n constant_index = 0\n constant_gene = VariableGene(constant_index, is_feature=False)\n feature_gene = VariableGene(feature_index, is_feature=True)\n\n # simple eval matrix; 1 gene in a chromosome, 3 examples, 2 constants\n num_examples = 2\n num_genes = 2\n num_features = 3\n\n # create\n constants = [1., 2.]\n eval_matrix = np.zeros((num_genes, num_examples))\n data_matrix = np.zeros((num_examples, num_features))\n targets = [0] * num_examples\n\n # set the data matrix for the feature that we care about\n data_matrix[0, feature_index] = 5.\n data_matrix[1, feature_index] = 7.\n\n # expected;\n expected_eval_matrix = np.array([[data_matrix[0, feature_index], data_matrix[1, feature_index]],\n [constants[constant_index], constants[constant_index]]])\n\n # run the evaluate\n feature_error = feature_gene.evaluate(0, eval_matrix, data_matrix, constants, targets)\n constant_error = constant_gene.evaluate(1, eval_matrix, data_matrix, constants, targets)\n self.assertTrue(np.array_equal(expected_eval_matrix, eval_matrix))\n\n def test_operator_gene_basic(self):\n \"\"\"\n This is a test of the operator gene. 
We need at least two genes as the operator needs to be able to reference\n another gene evaluation.\n \"\"\"\n # construct; using the same address on both sides of the operator; in other words we will be adding the previous\n # gene (at 0) to itself\n address_index = 0\n gene = OperatorGene(lambda a, b: a + b, address1=address_index, address2=address_index)\n\n # simple eval matrix; 2 gene in a chromosome, 3 examples, 0 constants\n num_examples = 1\n num_genes = 2\n num_features = 3\n targets = [0] * num_examples\n\n # create\n constants = []\n eval_matrix = np.zeros((num_genes, num_examples))\n data_matrix = np.zeros((num_examples, num_features))\n\n # simulate the use of a constant in the other, first gene,\n eval_matrix[0, 0] = 2\n\n # expected; first gene is unchanged; the 2nd one is the sum of the first with itself (i.e. 4)\n expected_eval_matrix = np.array([[2], [4]])\n\n # run the evaluate\n error = gene.evaluate(1, eval_matrix, data_matrix, constants, targets)\n self.assertTrue(np.array_equal(expected_eval_matrix, eval_matrix))\n" ]
[ [ "numpy.array", "numpy.array_equal", "numpy.zeros" ] ]
mcusi/pytorch-faster-rcnn
[ "da2df26ff1386a5c7df16f3845e9154a0ad1aa44" ]
[ "tools/combineNetAnswers.py" ]
[ "#!/usr/bin/env python\n\n# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Xinlei Chen, based on code from Ross Girshick\n# --------------------------------------------------------\n\n\"\"\"\nDemo script showing detections in sample images.\n\nSee README.md for installation instructions before running.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport _init_paths\nfrom model.config import cfg, cfg_from_file, cfg_from_list\nfrom model.test import im_detect\nfrom model.nms_wrapper import nms\n\nfrom utils.timer import Timer\nimport matplotlib\nmatplotlib.use('Agg') ##can't visualize inside terminal\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os, cv2\nimport argparse\nimport json\n\n#use symlink to get to om2\nimport gammatonegram as gtg\n\nfrom nets.vgg16 import vgg16\nfrom nets.resnet_v1 import resnetv1\n\nimport torch\n\nCLASSES = ('__background__',\n 'noise','tone')\n\nNETS = {'vgg16': ('vgg16_faster_rcnn_iter_%d.pth',),'res101': ('res101_faster_rcnn_iter_%d.pth',)}\nDATASETS= {'pascal_voc': ('voc_2007_trainval',),'pascal_voc_0712': ('voc_2007_trainval+voc_2012_trainval',)}\n\n\ndef vis_detections(im, class_name, dets, fn, thresh=0.5):\n \"\"\"Draw detected bounding boxes.\"\"\"\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return\n\n im = im[:, :, (2, 1, 0)]\n fig, ax = plt.subplots(figsize=(12, 12))\n ax.imshow(im, aspect='equal')\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n\n bboxcolor = ['red','green','blue'][i % 3]\n ax.add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0],\n bbox[3] - bbox[1], fill=False,\n edgecolor=bboxcolor, linewidth=3.5)\n )\n ax.text(bbox[0], bbox[1] - 2,\n '{:s} {:.3f}'.format(class_name, score),\n bbox=dict(facecolor='blue', alpha=0.5),\n fontsize=14, color='white')\n\n ax.set_title(('{} detections with '\n 'p({} | box) >= {:.1f}').format(class_name, class_name,\n thresh),\n fontsize=14)\n plt.axis('off')\n plt.tight_layout()\n plt.savefig(fn)\n\ndef convert_detections(class_name, dets, t, f, thresh=0.5):\n \n \"\"\"Draw detected bounding boxes.\"\"\"\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return []\n\n elements = []\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n\n \"\"\"Convert bounding boxes to times and frequencies\"\"\"\n onset = t[int(bbox[0])]\n duration = t[int(bbox[2])] - t[int(bbox[0])]\n centreBin = f[int((bbox[3] - bbox[1])/2. 
+ bbox[1])]\n logf0 = np.log(centreBin) if class_name == 'tone' else -1\n\n \"\"\"Add element to list\"\"\"\n elements.append({\"onset\":np.float64(onset), \"duration\":np.float64(duration),\"voice\":class_name,\"logf0\":np.float64(logf0),\"score\":np.float64(score)})\n\n return elements\n\ndef compute_conversions(params, timepx):\n y = np.zeros(params['sr'])\n sxx, cfs = gtg.gammatonegram(y, sr=params['sr'],twin=params['twin'],thop=params['thop'],\n N=params[\"nFilts\"],fmin=50,fmax=int(params['sr']/2.),width=params[\"filtWidth\"])\n # samples in image * 1 second per np.shape(sxx)[1] samples\n t = np.arange(0,timepx)*(1./np.shape(sxx)[1])\n return t, cfs\n\n\ndef basademo(nets, image_name, dataname, exptnames):\n \"\"\"Detect object classes in an image using pre-computed object proposals.\"\"\"\n\n # Load the demo image\n #im_file = os.path.join('/om/user/mcusi/nnInit/pytorch-faster-rcnn/data/bASA/JPEGImages', image_name)\n im_file = '/om/user/mcusi/nnInit/pytorch-faster-rcnn/data/'+dataname+'/demos/' + image_name + '.jpg'\n print(im_file)\n im = cv2.imread(im_file)\n\n # Detect all object classes and regress object bounds\n timer = Timer()\n timer.tic()\n scores={};boxes={};\n for exptname in exptnames:\n scores[exptname], boxes[exptname] = im_detect(nets[exptname], im)\n timer.toc()\n print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time(), boxes[exptname].shape[0]))\n\n # Find frequency and time conversions\n with open('/om/user/mcusi/nnInit/pytorch-faster-rcnn/data/' + dataname + '/' +dataname+'dream.json') as infile:\n params = json.load(infile)\n t, f = compute_conversions(params, np.shape(im)[1])\n\n # Visualize detections for each class\n CONF_THRESH = 0.8\n NMS_THRESH = 0.5\n elements = []\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 # because we skipped background\n cls_boxes={};cls_scores={};_dets={};\n for exptname in exptnames:\n cls_boxes[exptname] = boxes[exptname][:, 4*cls_ind:4*(cls_ind + 1)]\n cls_scores[exptname] = scores[exptname][:, cls_ind]\n _dets[exptname] = np.hstack((cls_boxes[exptname],\n cls_scores[exptname][:, np.newaxis])).astype(np.float32)\n dets=np.vstack((_dets['vgg16_ar4'],_dets['vgg16_ar8'],_dets['vgg16_ar16']))\n keep = nms(torch.from_numpy(dets), NMS_THRESH)\n dets = dets[keep.numpy(), :]\n fn = '/om/user/mcusi/nnInit/pytorch-faster-rcnn/data/'+dataname+'/demos/' + image_name + '_' + cls + '_combined.jpg'\n vis_detections(im, cls, dets, fn, thresh=CONF_THRESH) \n els = convert_detections(cls, dets, t, f, thresh=CONF_THRESH)\n for e in els:\n elements.append(e)\n\n jfn = '/om/user/mcusi/nnInit/pytorch-faster-rcnn/data/'+dataname+'/demos/' + image_name + '_combined.json'\n with open(jfn, 'w') as outfile:\n outfile.write(json.dumps(elements))\n\nif __name__ == '__main__':\n\n dataname = os.environ.get('dataname','bASAGP')\n exptnames = ['vgg16_ar4','vgg16_ar8','vgg16_ar16']\n nets={}\n for exptname in exptnames:\n folder = ''\n iteration = '50000'\n\n cfg_from_file('experiments/cfgs/vgg16.yml') # Add usual config options for network type \n cfg_from_file('output/vgg16/'+dataname+'_train/default/' + folder + exptname + '.yml') # add config options for that particular trained network\n cfg.TEST.HAS_RPN = True # Use RPN for proposals\n\n # model path\n saved_model = 'output/vgg16/'+dataname+'_train/default/' + folder + exptname + '_iter_' + iteration + '.pth'\n\n # load network\n nets[exptname] = vgg16()\n nets[exptname].create_architecture(3,tag='default', anchor_scales=cfg.ANCHOR_SCALES, 
anchor_ratios=cfg.ANCHOR_RATIOS)\n nets[exptname].load_state_dict(torch.load(saved_model))\n nets[exptname].eval()\n nets[exptname].cuda()\n print('Loaded network {:s}'.format(saved_model))\n\n #im_names = ['%06d' % i for i in range(50002,50022)]\n im_names = ['continuity','Track01fast','Track01slow','Track15higherBetweens','Track15lowerBetweens','Track16capture','Track16nocapture','Track17X','Track32fast','Track32slow','1i','1ii','1iii','1iv','1v','2i','2ii','2iii','2iv','2v','3ai','3aii','3aiii','3aiv','3av','3bi','3bii','3biii','3biv','3bv','4i','4ii','4iii','4iv','4v'];\n for im_name in im_names:\n print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\n print('Demo for data/bASA/JPEGImages/{}'.format(im_name))\n basademo(nets, im_name, dataname, exptnames)\n\n plt.show()\n" ]
[ [ "matplotlib.use", "numpy.zeros", "numpy.log", "matplotlib.pyplot.savefig", "numpy.hstack", "matplotlib.pyplot.subplots", "numpy.shape", "torch.from_numpy", "numpy.where", "numpy.float64", "numpy.arange", "matplotlib.pyplot.tight_layout", "numpy.vstack", "torch.load", "matplotlib.pyplot.Rectangle", "matplotlib.pyplot.show", "matplotlib.pyplot.axis" ] ]
portchester1989/neural-style-keras
[ "7f6b6784c4b28d82cf844928402ae01ff3fe877e" ]
[ "layers.py" ]
[ "'''\nCustom Keras layers used on the pastiche model.\n'''\n\nimport tensorflow as tf\nimport keras\nfrom keras import initializers\nfrom keras.layers import ZeroPadding2D, Layer, InputSpec\n\n# Extending the ZeroPadding2D layer to do reflection padding instead.\nclass ReflectionPadding2D(ZeroPadding2D):\n def call(self, x, mask=None):\n pattern = [[0, 0],\n [self.top_pad, self.bottom_pad],\n [self.left_pad, self.right_pad],\n [0, 0]]\n return tf.pad(x, pattern, mode='REFLECT')\n\n\nclass InstanceNormalization(Layer):\n def __init__(self, epsilon=1e-5, weights=None,\n beta_init='zero', gamma_init='one', **kwargs):\n self.beta_init = initializers.get(beta_init)\n self.gamma_init = initializers.get(gamma_init)\n self.epsilon = epsilon\n super(InstanceNormalization, self).__init__(**kwargs)\n\n def build(self, input_shape):\n # This currently only works for 4D inputs: assuming (B, H, W, C)\n self.input_spec = [InputSpec(shape=input_shape)]\n shape = (1, 1, 1, input_shape[-1])\n\n self.gamma = self.gamma_init(shape, name='{}_gamma'.format(self.name))\n self.beta = self.beta_init(shape, name='{}_beta'.format(self.name))\n self.trainable_weights = [self.gamma, self.beta]\n\n self.built = True\n\n def call(self, x, mask=None):\n # Do not regularize batch axis\n reduction_axes = [1, 2]\n\n mean, var = tf.nn.moments(x, reduction_axes,\n shift=None, name=None, keep_dims=True)\n x_normed = tf.nn.batch_normalization(x, mean, var, self.beta, self.gamma, self.epsilon)\n return x_normed\n\n def get_config(self):\n config = {\"epsilon\": self.epsilon}\n base_config = super(InstanceNormalization, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass ConditionalInstanceNormalization(InstanceNormalization):\n def __init__(self, targets, nb_classes, **kwargs):\n self.targets = targets\n self.nb_classes = nb_classes\n super(ConditionalInstanceNormalization, self).__init__(**kwargs)\n\n def build(self, input_shape):\n # This currently only works for 4D inputs: assuming (B, H, W, C)\n self.input_spec = [InputSpec(shape=input_shape)]\n shape = (self.nb_classes, 1, 1, input_shape[-1])\n\n self.gamma = self.gamma_init(shape, name='{}_gamma'.format(self.name))\n self.beta = self.beta_init(shape, name='{}_beta'.format(self.name))\n self.trainable_weights = [self.gamma, self.beta]\n\n self.built = True\n\n def call(self, x, mask=None):\n # Do not regularize batch axis\n reduction_axes = [1, 2]\n\n mean, var = tf.nn.moments(x, reduction_axes,\n shift=None, name=None, keep_dims=True)\n\n # Get the appropriate lines of gamma and beta\n beta = tf.gather(self.beta, self.targets)\n gamma = tf.gather(self.gamma, self.targets)\n x_normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, self.epsilon)\n\n return x_normed\n" ]
[ [ "tensorflow.pad", "tensorflow.nn.batch_normalization", "tensorflow.nn.moments", "tensorflow.gather" ] ]
hawkaa/trimesh
[ "f4c152208dd197443162f1e2ddf8fbd226bdafb1" ]
[ "examples/offscreen_render.py" ]
[ "\nimport numpy as np\nimport trimesh\n\n\nif __name__ == '__main__':\n # print logged messages\n trimesh.util.attach_to_log()\n\n # load a mesh\n mesh = trimesh.load('../models/featuretype.STL')\n\n # get a scene object containing the mesh, this is equivalent to:\n # scene = trimesh.scene.Scene(mesh)\n scene = mesh.scene()\n\n # a 45 degree homogeneous rotation matrix around\n # the Y axis at the scene centroid\n rotate = trimesh.transformations.rotation_matrix(\n angle=np.radians(10.0),\n direction=[0, 1, 0],\n point=scene.centroid)\n\n for i in range(4):\n trimesh.constants.log.info('Saving image %d', i)\n\n # rotate the camera view transform\n camera_old, _geometry = scene.graph[scene.camera.name]\n camera_new = np.dot(rotate, camera_old)\n\n # apply the new transform\n scene.graph[scene.camera.name] = camera_new\n\n # saving an image requires an opengl context, so if -nw\n # is passed don't save the image\n try:\n # increment the file name\n file_name = 'render_' + str(i) + '.png'\n # save a render of the object as a png\n png = scene.save_image(resolution=[1920, 1080], visible=True)\n with open(file_name, 'wb') as f:\n f.write(png)\n f.close()\n\n except BaseException as E:\n print(\"unable to save image\", str(E))\n" ]
[ [ "numpy.radians", "numpy.dot" ] ]
YichaoOU/pyDNA_melting
[ "78744c8849fef8bca99a9868bffef772d6853e70" ]
[ "pyDNA_melting/utils.py" ]
[ "\r\n\r\nimport os\r\nimport uuid\r\nimport pandas as pd\r\nimport numpy as np\r\nimport argparse\r\np_dir = os.path.dirname(os.path.realpath(__file__)) + \"/\"\r\ncode_dir = p_dir+\"scripts/\"\r\ndef run_matlab_code(seq):\r\n\t\r\n\t# step 1: copy matlab code to users folder because matlab scripts can't take parameters\r\n\trandom_folder = \".\"+str(uuid.uuid4()).split(\"-\")[-1]\r\n\tos.system(\"mkdir {0};cd {0};ln -s {1}* .\".format(random_folder,code_dir))\r\n\t\r\n\t# step 2: run matlab\r\n\tcommand = \"\"\"cd %s;matlab -nodisplay -nodesktop -nosplash -nojvm -r \"weka_run(char('%s'),10,0.1,6); exit ; exit()\" \"\"\"%(random_folder,seq)\r\n\tprint (command)\r\n\tos.system(command)\r\n\t\r\n\t# step3 parse job.out\r\n\tdf = pd.read_csv(\"%s/job.out\"%(random_folder))\r\n\tprint (df.head())\r\n\t\r\n\t# step4 delete dir\r\n\tos.system(\"rm -r %s\"%(random_folder))\r\n\t\r\n\treturn df\r\n\r\n\r\ndef ML_features(seq):\r\n\t\"\"\"Return a list of 8 values for ML table\"\"\"\r\n\tdf = run_matlab_code(seq)\t\r\n\tmelting_mean = df['predicted'].mean()\r\n\tdf['ratio'] = df['predicted']/melting_mean\r\n\tdf['ratio'] = df['ratio'].apply(lambda x: np.log2(x))\r\n\tfirst_two = df['ratio'].tolist()[:2]\r\n\tlast_two = df['ratio'].tolist()[-2:]\r\n\tratio_max = df['ratio'].max()\r\n\tratio_min = df['ratio'].min()\r\n\tratio_std = df['ratio'].std()\r\n\treturn first_two+last_two+[melting_mean,ratio_max,ratio_min,ratio_std]\r\n\r\n\r\n" ]
[ [ "pandas.read_csv", "numpy.log2" ] ]
BuildJet/distdl
[ "28b0dcf2c0a762de924cc310398a2eab9c35297f" ]
[ "examples/ex_halo_mixin.py" ]
[ "import numpy as np\nfrom mpi4py import MPI\n\nfrom distdl.backends.mpi.partition import MPIPartition\nfrom distdl.nn.mixins.halo_mixin import HaloMixin\nfrom distdl.nn.mixins.pooling_mixin import PoolingMixin\nfrom distdl.utilities.debug import print_sequential\n\n\nclass MockPoolLayer(HaloMixin, PoolingMixin):\n pass\n\n\nP_world = MPIPartition(MPI.COMM_WORLD)\nranks = np.arange(P_world.size)\n\nshape = [1, 1, 4]\nP_size = np.prod(shape)\nuse_ranks = ranks[:P_size]\n\nP = P_world.create_subpartition(use_ranks)\nP_x = P.create_cartesian_subpartition(shape)\nrank = P_x.rank\ncart_comm = P_x._comm\n\nlayer = MockPoolLayer()\n\nif P_x.active:\n x_global_shape = np.array([1, 1, 10])\n kernel_size = np.array([2])\n stride = np.array([2])\n padding = np.array([0])\n dilation = np.array([1])\n\n halo_shape, recv_buffer_shape, send_buffer_shape, needed_ranges = \\\n layer._compute_exchange_info(x_global_shape,\n kernel_size,\n stride,\n padding,\n dilation,\n P_x.active,\n P_x.shape,\n P_x.index)\n\n print_sequential(cart_comm, f'rank = {rank}:\\nhalo_shape =\\n{halo_shape}\\n\\\nrecv_buffer_shape =\\n{recv_buffer_shape}\\nsend_buffer_shape =\\n{send_buffer_shape}\\nneeded_ranges =\\n{needed_ranges}')\n" ]
[ [ "numpy.array", "numpy.prod", "numpy.arange" ] ]
sns-chops/multiphonon
[ "508ab3111e584eb5684f1c6f1408f81e0b0c9ce5" ]
[ "tests/dos/dos_interp_TestCase.py" ]
[ "#!/usr/bin/env python\n#\n\ninteractive = False\n\nimport os, numpy as np, histogram.hdf as hh\nhere = os.path.dirname(__file__)\n\nfrom multiphonon.dos import interp\n\nimport unittest\nclass TestCase(unittest.TestCase):\n\n def test1(self):\n \"interp\"\n dos = hh.load(os.path.join(here, 'dos_to_interp.h5'))\n newE = np.arange(0, 45, 0.5)\n newdos = interp(dos, newE)\n expected = hh.load(os.path.join(here, './expected/dos_interped.h5'))\n np.testing.assert_allclose(newdos.I, expected.I)\n np.testing.assert_allclose(newdos.E2, expected.E2)\n return\n pass # end of TestCase\n\n\nif __name__ == \"__main__\":\n interactive = True\n unittest.main()\n\n# End of file\n" ]
[ [ "numpy.testing.assert_allclose", "numpy.arange" ] ]
AIVIS-inc/SegPC2021
[ "d73a6b1c7818f756f2dc8cded972adf27f04a108" ]
[ "demo/getResultImage2.py" ]
[ "from argparse import ArgumentParser\n\nfrom mmdet.apis import inference_detector, init_detector, show_result_pyplot\nimport numpy as np\nimport os\nfrom PIL import Image, ImageOps\nimport pycocotools.mask as maskUtils\nfrom fvcore.common.file_io import PathManager\nimport mmcv\nimport cv2\nimport math\n\n\ndef allfiles(path):\n res = []\n \n for root, dirs, files in os.walk(path):\n rootpath = os.path.join(os.path.abspath(path), root)\n\n for file in files:\n filepath = os.path.join(rootpath, file)\n res.append(filepath)\n\n return res\n\ndef read_image(file_name, format=None):\n \"\"\"\n Read an image into the given format.\n Will apply rotation and flipping if the image has such exif information.\n\n Args:\n file_name (str): image file path\n format (str): one of the supported image modes in PIL, or \"BGR\"\n\n Returns:\n image (np.ndarray): an HWC image in the given format.\n \"\"\"\n with PathManager.open(file_name, \"rb\") as f:\n image = Image.open(f)\n\n # capture and ignore this bug: https://github.com/python-pillow/Pillow/issues/3973\n try:\n image = ImageOps.exif_transpose(image)\n except Exception:\n pass\n\n if format is not None:\n # PIL only supports RGB, so convert to RGB and flip channels over below\n conversion_format = format\n if format == \"BGR\":\n conversion_format = \"RGB\"\n image = image.convert(conversion_format)\n image = np.asarray(image)\n if format == \"BGR\":\n # flip channels if needed\n image = image[:, :, ::-1]\n # PIL squeezes out the channel dimension for \"L\", so make it HWC\n if format == \"L\":\n image = np.expand_dims(image, -1)\n return image\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument('img', help='Image file')\n parser.add_argument('output', help='Image file')\n parser.add_argument('config', help='Config file')\n parser.add_argument('checkpoint', help='Checkpoint file')\n parser.add_argument(\n '--device', default='cuda:0', help='Device used for inference')\n parser.add_argument(\n '--score-thr', type=float, default=0.01, help='bbox score threshold')\n args = parser.parse_args()\n\n # build the model from a config file and a checkpoint file\n model = init_detector(args.config, args.checkpoint, device=args.device) \n for file in allfiles(args.img):\n result = inference_detector(model, file)\n # show the results\n #show_result_pyplot(model, args.img, result, score_thr=args.score_thr)\n img = mmcv.imread(file)\n img = img.copy()\n\n bbox_result, segm_result = result\n bboxes = np.vstack(bbox_result)\n labels = [\n np.full(bbox.shape[0], i, dtype=np.int32)\n for i, bbox in enumerate(bbox_result)\n ]\n labels = np.concatenate(labels)\n segms = None\n if segm_result is not None: \n segms = mmcv.concat_list(segm_result)\n inds = np.where(bboxes[:, -1] > args.score_thr)[0]\n # np.random.seed(42)\n # color_masks = [\n # np.random.randint(0, 256, (1, 3), dtype=np.uint8)\n # for _ in range(max(labels) + 1)\n # ]\n # for i in inds:\n # i = int(i)\n # color_mask = color_masks[labels[i]]\n # mask = maskUtils.decode(segms[i]).astype(np.bool)\n # img[mask] = img[mask] * 0.5 + color_mask * 0.5\n\n pred_masks_nuc = []\n pred_masks_cyp = []\n pred_boxes_nuc = []\n pred_boxes_cyp = []\n for i in inds:\n mask = segms[i].astype(np.bool)\n if labels[i] == 0:\n pred_masks_cyp.append(mask)\n pred_boxes_cyp.append(bboxes[i])\n else :\n pred_masks_nuc.append(mask)\n pred_boxes_nuc.append(bboxes[i])\n kernel = np.ones((3,3), np.uint8) \n\n idx_cyps = []\n for i in range(len(pred_masks_cyp)) :\n idx_cyps.append([])\n\n last_filename_idx = 0\n for i in 
range(len(pred_masks_nuc)) :\n pred_masks_nuc_ = np.array(pred_masks_nuc[i]).astype(np.uint8)\n pred_masks_nuc_ = cv2.dilate(pred_masks_nuc_, kernel, iterations = 2)\n pred_masks_nuc_dilate = cv2.dilate(pred_masks_nuc_, kernel, iterations = 5)\n\n cnt_fuse = 0\n for j in range(len(pred_masks_cyp)) :\n pred_masks_cyp_ = np.array(pred_masks_cyp[j]).astype(np.uint8)\n mul_nuc_cyp = np.multiply(pred_masks_nuc_dilate, pred_masks_cyp_)\n sum_true = sum(sum(mul_nuc_cyp[:]))\n if sum_true > 0 : \n pred_masks_nucCyp = np.add(40 * pred_masks_nuc_, 20 * pred_masks_cyp_)\n \n index = pred_masks_nucCyp[:,:] > 40\n index_ = np.argwhere(index)\n pred_masks_nucCyp[index_[:, 0], index_[:, 1]] = 40 \n pred_masks_nucCyp.astype(np.uint8)\n\n output_path = args.output \n fileName = file.split('/')[-1].split('.')[0] + \"_\" + str(last_filename_idx) + \".png\"\n print(fileName + \"_\" + str(pred_boxes_nuc[i][4]))\n out_filename = output_path + '/' + fileName\n cv2.imwrite(out_filename, pred_masks_nucCyp) \n \n idx_cyps[j].append(i) \n last_filename_idx += 1\n cnt_fuse += 1\n \n if cnt_fuse == 0 :\n pred_masks_nucCyp = np.add(40 * pred_masks_nuc_, 20 * pred_masks_nuc_dilate)\n index = pred_masks_nucCyp[:,:] > 40\n index_ = np.argwhere(index)\n pred_masks_nucCyp[index_[:, 0], index_[:, 1]] = 40 \n pred_masks_nucCyp.astype(np.uint8)\n \n output_path = args.output \n fileName = file.split('/')[-1].split('.')[0] + \"_\" + str(last_filename_idx) + \".png\"\n print(\"No CyptoPlasm - \" + fileName + \"_\" + str(pred_boxes_nuc[i][4]))\n out_filename = output_path + '/' + fileName\n cv2.imwrite(out_filename, pred_masks_nucCyp) \n last_filename_idx += 1\n\n for i in range(len(idx_cyps)) :\n if len(idx_cyps[i]) == 0 :\n pred_masks_cyp_ = 20 * np.array(pred_masks_cyp[i]).astype(np.uint8) \n\n output_path = args.output \n fileName = file.split('/')[-1].split('.')[0] + \"_\" + str(last_filename_idx) + \".png\"\n print(\"No Nuclei - \" + fileName + \"_\" + str(pred_boxes_cyp[i][4]))\n out_filename = output_path + '/' + fileName\n cv2.imwrite(out_filename, pred_masks_cyp_) \n last_filename_idx += 1\n \n # elif len(idx_cyps[i]) >= 2 :\n # pred_masks_cyp_ = 20 * np.array(pred_masks_cyp[i]).astype(np.uint8)\n\n # for j in range(len(idx_cyps[i])) :\n # idx = idx_cyps[i][j] \n # pred_masks_nuc_ = np.array(pred_masks_nuc[idx]).astype(np.uint8)\n # pred_masks_nuc_ = cv2.dilate(pred_masks_nuc_, kernel, iterations = 2)\n \n # pred_masks_cyp_ = np.add(40 * pred_masks_nuc_, pred_masks_cyp_) \n # index = pred_masks_cyp_[:,:] > 40\n # index_ = np.argwhere(index)\n # pred_masks_cyp_[index_[:, 0], index_[:, 1]] = 40 \n # pred_masks_cyp_.astype(np.uint8)\n\n # output_path = args.output \n # fileName = file.split('/')[-1].split('.')[0] + \"_\" + str(last_filename_idx) + \".png\"\n # print(\"Many Nuclei - \" + fileName + \"_\" + str(pred_boxes_cyp[i][4]))\n # out_filename = output_path + '/' + fileName\n # cv2.imwrite(out_filename, pred_masks_cyp_) \n # last_filename_idx += 1\n \n # out_file = None\n # mmcv.imshow_det_bboxes(\n # img,\n # bboxes,\n # labels,\n # class_names=None,\n # score_thr=args.score_thr,\n # show=False,\n # wait_time=0,\n # out_file=out_file)\n\n # fileName = file.split('/')[-1].split('.')[0] + \".png\"\n # output_path = args.output + '/' + fileName\n # cv2.imwrite(output_path, img) \n\n\nif __name__ == '__main__':\n main()" ]
[ [ "numpy.concatenate", "numpy.full", "numpy.array", "numpy.add", "numpy.asarray", "numpy.ones", "numpy.multiply", "numpy.where", "numpy.vstack", "numpy.argwhere", "numpy.expand_dims" ] ]
Michaelrising/sac-discrete.pytorch
[ "93ae779f5980726db0302c3471fd143c7d1d35ed" ]
[ "sacd/agent/sacd.py" ]
[ "import os\nimport numpy as np\nimport torch\nfrom torch.optim import Adam\n\nfrom .base import BaseAgent\nfrom sacd.model import TwinnedQNetwork, CateoricalPolicy\nfrom sacd.utils import disable_gradients\n\n\nclass SacdAgent(BaseAgent):\n\n def __init__(self, env, test_env, log_dir, num_steps=100000, batch_size=64,\n lr=0.0003, memory_size=1000000, gamma=0.99, multi_step=1,\n target_entropy_ratio=0.98, start_steps=20000,\n update_interval=4, target_update_interval=8000,\n use_per=False, dueling_net=False, num_eval_steps=125000,\n max_episode_steps=27000, log_interval=10, eval_interval=1000,\n cuda=True, seed=0):\n super().__init__(\n env, test_env, log_dir, num_steps, batch_size, memory_size, gamma,\n multi_step, target_entropy_ratio, start_steps, update_interval,\n target_update_interval, use_per, num_eval_steps, max_episode_steps,\n log_interval, eval_interval, cuda, seed)\n\n \n # Define networks.\n #Actor network \n self.policy = CateoricalPolicy(\n self.env.observation_space.shape[0], self.env.action_space.n\n ).to(self.device)\n # critic network\n self.online_critic = TwinnedQNetwork(\n self.env.observation_space.shape[0],self.env.action_space.n,\n dueling_net=dueling_net).to(device=self.device)\n self.target_critic = TwinnedQNetwork(\n self.env.observation_space.shape[0], self.env.action_space.n,\n dueling_net=dueling_net).to(device=self.device).eval()\n\n # Copy parameters of the learning network to the target network.\n self.target_critic.load_state_dict(self.online_critic.state_dict())\n\n # Disable gradient calculations of the target network.\n disable_gradients(self.target_critic)\n\n self.policy_optim = Adam(self.policy.parameters(), lr=lr)\n self.q1_optim = Adam(self.online_critic.Q1.parameters(), lr=lr)\n self.q2_optim = Adam(self.online_critic.Q2.parameters(), lr=lr)\n\n # Target entropy is -log(1/|A|) * ratio (= maximum entropy * ratio).\n self.target_entropy = \\\n -np.log(1.0 / self.env.action_space.n) * target_entropy_ratio\n\n # We optimize log(alpha), instead of alpha.\n self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)\n self.alpha = self.log_alpha.exp()\n self.alpha_optim = Adam([self.log_alpha], lr=lr)\n\n def explore(self, state):\n # Act with randomness.\n state = torch.ByteTensor(\n state[None, ...]).to(self.device).float() / 255.\n with torch.no_grad():\n action, _, _ = self.policy.sample(state)\n return action.item()\n\n def exploit(self, state):\n # Act without randomness.\n state = torch.ByteTensor(\n state[None, ...]).to(self.device).float() / 255.\n with torch.no_grad():\n action = self.policy.act(state)\n return action.item()\n\n def update_target(self):\n self.target_critic.load_state_dict(self.online_critic.state_dict())\n\n def calc_current_q(self, states, actions, rewards):\n curr_q1, curr_q2 = self.online_critic(states)\n curr_q1 = curr_q1.gather(1, actions.long())\n curr_q2 = curr_q2.gather(1, actions.long())\n return curr_q1, curr_q2\n\n def calc_target_q(self, states, actions, rewards, next_states, dones):\n with torch.no_grad():\n _, action_probs, log_action_probs = self.policy.sample(next_states)\n next_q1, next_q2 = self.target_critic(next_states)\n next_q = (action_probs * (\n torch.min(next_q1, next_q2) - self.alpha * log_action_probs\n )).sum(dim=1, keepdim=True)\n\n assert rewards.shape == next_q.shape\n return rewards + (1.0 - dones) * self.gamma_n * next_q\n\n def calc_critic_loss(self, batch, weights):\n batch_state, batch_action, batch_reward, non_final_next_states, non_final_mask, empty_next_state_values = 
zip(*batch)\n \n curr_q1, curr_q2 = self.calc_current_q(batch_state, batch_action, batch_reward)\n \n target_q = self.calc_target_q(batch_state, batch_action, batch_reward, non_final_next_states,non_final_mask)\n\n # TD errors for updating priority weights\n errors = torch.abs(curr_q1.detach() - target_q)\n\n # We log means of Q to monitor training.\n mean_q1 = curr_q1.detach().mean().item()\n mean_q2 = curr_q2.detach().mean().item()\n\n # Critic loss is mean squared TD errors with priority weights.\n q1_loss = torch.mean((curr_q1 - target_q).pow(2) * weights)\n q2_loss = torch.mean((curr_q2 - target_q).pow(2) * weights)\n\n return q1_loss, q2_loss, errors, mean_q1, mean_q2\n\n def calc_policy_loss(self, batch, weights):\n states, actions, rewards, next_states, dones = batch\n\n # (Log of) probabilities to calculate expectations of Q and entropies.\n _, action_probs, log_action_probs = self.policy.sample(states)\n\n with torch.no_grad():\n # Q for every actions to calculate expectations of Q.\n q1, q2 = self.online_critic(states)\n q = torch.min(q1, q2)\n\n # Expectations of entropies.\n entropies = -torch.sum(\n action_probs * log_action_probs, dim=1, keepdim=True)\n\n # Expectations of Q.\n q = torch.sum(torch.min(q1, q2) * action_probs, dim=1, keepdim=True)\n\n # Policy objective is maximization of (Q + alpha * entropy) with\n # priority weights.\n policy_loss = (weights * (- q - self.alpha * entropies)).mean()\n\n return policy_loss, entropies.detach()\n\n def calc_entropy_loss(self, entropies, weights):\n assert not entropies.requires_grad\n\n # Intuitively, we increse alpha when entropy is less than target\n # entropy, vice versa.\n entropy_loss = -torch.mean(\n self.log_alpha * (self.target_entropy - entropies)\n * weights)\n return entropy_loss\n\n def save_models(self, save_dir):\n super().save_models(save_dir)\n self.policy.save(os.path.join(save_dir, 'policy.pth'))\n self.online_critic.save(os.path.join(save_dir, 'online_critic.pth'))\n self.target_critic.save(os.path.join(save_dir, 'target_critic.pth'))\n" ]
[ [ "torch.zeros", "torch.min", "numpy.log", "torch.no_grad", "torch.optim.Adam", "torch.ByteTensor", "torch.mean", "torch.sum" ] ]
coreyryanhanson/tanzania_water_project
[ "ec00a62181d2ebf33d0571e1fbc0fa563e0169c1" ]
[ "visualization_functions.py" ]
[ "import functools\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass Multiplot(object):\n \"\"\"An object to quickly generate multiple plots for each column in a DataFrame\"\"\"\n\n def __init__(self, df, n_cols=3, figsize=(15, 15), style=\"darkgrid\"):\n \"\"\"Sets up the general parameters to be used across all graphs.\"\"\"\n\n self.df = df\n self.columns = self.df.columns\n self.figsize = figsize\n self.set_cols(n_cols)\n self.linearity_plots = 5\n self.style = style\n\n def _multicol_plot_wrapper(func):\n \"\"\"Decorator to be used to wrap plotting function to generate and plot\n multiple matplotlib figures and axes objects for multiple columns.\"\"\"\n\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n self.fig, self.axes = self._generate_subplots()\n for self.ax_i, self.last_col in enumerate(self.columns):\n self._determine_ax()\n func(self, *args, **kwargs)\n plt.show()\n\n return wrapper\n\n def _determine_ax(self):\n \"\"\"Sets current axis based on iterator and axes object. If only one\n column, it does not look for a column index.\"\"\"\n\n row, col = self.ax_i // self.n_cols, self.ax_i % self.n_cols\n if self.n_cols == 1:\n self.last_ax = self.axes[row]\n else:\n self.last_ax = self.axes[row][col]\n\n def _generate_subplots(self):\n \"\"\"Creates subplots based on current parameter attributes\"\"\"\n\n sns.set_style(self.style)\n return plt.subplots(nrows=self.n_rows, ncols=self.n_cols, figsize=self.figsize)\n\n def _plot_qq_manual(self, comparison_df):\n \"\"\"Class no longer uses this. Replaced with the generated plots from\n statsmodels.\"\"\"\n\n columns = comparison_df.columns\n ax_kwargs = {x: y for x, y in zip([\"x\", \"y\"], columns)}\n qq_data = pd.DataFrame(columns=columns)\n for column in columns:\n qq_data[column] = np.quantile(comparison_df[column], np.arange(0, 1, .01))\n return sns.scatterplot(data=qq_data, ax=self.last_ax, **ax_kwargs)\n\n def _plot_ccpr(self, model):\n \"\"\"Creates a Component and Component Plus Residual plot\"\"\"\n\n sm.graphics.plot_ccpr(model, 1, ax=self.last_ax)\n self.last_ax.lines[1].set_color(\"r\")\n\n def _plot_qq(self, model):\n \"\"\"Creates a qq plot to test residuals for normality.\"\"\"\n\n sm.graphics.qqplot(model.resid, dist=scs.norm, line='45', fit=True, ax=self.last_ax)\n\n def _plot_resid(self, model):\n \"\"\"Plots a scatterplot of residuals along a dependant variable\"\"\"\n\n resid, x = model.resid, df[self.last_col]\n line = np.array([[x.min(), 0], [x.max(), 0]]).T\n sns.scatterplot(x, resid, ax=self.last_ax)\n sns.lineplot(x=line[0], y=line[1], ax=self.last_ax, **{\"color\": \"r\"})\n self.last_ax.set_title('Residual_plot')\n self.last_ax.set_ylabel('Residual values')\n\n def _plot_resid_hist(self, model):\n sns.distplot(model.resid, ax=self.last_ax)\n self.last_ax.set_title('Residual_distribution')\n self.last_ax.set_xlabel('Residual values')\n\n def _plot_yfit_y_pred_v_x(self, model):\n \"\"\"Plots a y and y fitted vs x graph\"\"\"\n\n sm.graphics.plot_fit(model, 1, ax=self.last_ax)\n\n def _prediction_df(self, predictions, actual):\n \"\"\"Currently unused function that combines predictions and test data\n into a single dataframe.\"\"\"\n\n columns, pred_list = [\"predicted\", \"actual\"], np.stack((predictions, actual))\n return pd.DataFrame(pred_list.T, columns=columns)\n\n def _sb_linearity_plots(self, model):\n \"\"\"For loop that creates the axes and plots for linearity checks\"\"\"\n\n self.fig, self.axes = self._generate_subplots()\n 
for self.ax_i in np.arange(self.linearity_plots):\n self._determine_ax()\n self._sb_linearity_switch(model, self.ax_i)\n plt.show()\n\n def _sb_linearity_switch(self, model, i):\n \"\"\"Uses if statement switches to allow different functions to be inserted\n in the for loop that dynamically sets the axes.\"\"\"\n\n if i == 0:\n self._plot_yfit_y_pred_v_x(model)\n if i == 1:\n self._plot_resid(model)\n if i == 2:\n self._plot_ccpr(model)\n if i == 3:\n self._plot_resid_hist(model)\n if i == 4:\n self._plot_qq(model)\n\n def _set_rows(self, n_plots=False):\n \"\"\"Determines the amount of row axes needed depending on the total\n plots and the column size\"\"\"\n\n if not n_plots:\n n_plots = self.df.columns.size\n self.n_rows = math.ceil(n_plots / self.n_cols)\n\n def _test_goldfeld_quandt(self, model, lq, uq):\n \"\"\"Runs a Goldfeld Quandt test for heteroscadasticity.\"\"\"\n\n column = self.last_col\n lwr = self.df[column].quantile(q=lq)\n upr = self.df[column].quantile(q=uq)\n middle_idx = self.df[(self.df[column] >= lwr) & (self.df[column] <= upr)].index\n\n idx = [x - 1 for x in self.df.index if x not in middle_idx]\n gq_labels = ['F statistic', 'p-value']\n gq = sms.het_goldfeldquandt(model.resid.iloc[idx], model.model.exog[idx])\n return list(zip(gq_labels, gq))\n\n def _test_jarque_bera(self, model):\n \"\"\"Runs a Jarque-Bera test for normality\"\"\"\n\n jb_labels = ['Jarque-Bera', 'Prob', 'Skew', 'Kurtosis']\n jb = sms.jarque_bera(model.resid)\n return list(zip(jb_labels, jb))\n\n def _xyz(self, terms, iterable):\n \"\"\"Grabs axis values from a dictionary and inserts the iterable into\n the first empty instance. Returns a dictionary of only filled axes.\"\"\"\n\n x, y, z = terms.get(\"x\"), terms.get(\"y\"), terms.get(\"z\")\n var_list = [x, y, z]\n for i, var in enumerate(var_list):\n if not var:\n var_list[i] = iterable\n break\n var_dict = {key: value for key, value in zip([\"x\", \"y\", \"z\"], filter(None, var_list))}\n return var_dict\n\n def _xyz_to_kwargs(self, kwargs, iterable, return_axes=False):\n axes = self._xyz(kwargs, iterable)\n new_kwargs = kwargs.copy()\n new_kwargs.update(axes)\n if return_axes:\n return new_kwargs, axes\n else:\n return new_kwargs\n\n def modify_col_list(self, columns, drop=True):\n \"\"\"Allows changes to what columns will be graphed. 
Default is to drop, but\n can add columns as well.\"\"\"\n\n if drop:\n self.columns = self.columns.drop(columns)\n else:\n columns = pd.Index(columns)\n self.columns = self.columns.append(columns)\n self.columns = self.columns.drop_duplicates()\n self._set_rows()\n\n def set_cols(self, n_cols):\n \"\"\"Changes the amount of plot columns to display and adjusting the\n rows needed accordingly.\"\"\"\n\n self.n_cols = n_cols\n self._set_rows()\n\n def sb_linearity_test(self, column, target):\n \"\"\"Tests for linearity along a single independant feature and plots\n associated visualizations.\"\"\"\n\n self.last_col = column\n self._set_rows(self.linearity_plots)\n formula = f'{target}~{column}'\n model = smf.ols(formula=formula, data=self.df).fit()\n r_squared, mse = model.rsquared, model.mse_model,\n rmse, p_values = math.sqrt(mse), model.pvalues\n coef, intercept = model.params[1], model.params[0]\n\n jb = self._test_jarque_bera(model)\n gq = self._test_goldfeld_quandt(model, .45, .55)\n\n print(f\"{column} predicting {target}:\")\n print(f\"R2: {r_squared}, MSE: {mse}, RMSE: {rmse}:\")\n print(f\"Coeficient: {coef}, Intercept: {intercept}\")\n print(\"\")\n print(\"P-values:\")\n print(p_values)\n print(\"\")\n print(\"Jarque-Bera:\")\n print(*jb)\n print(\"\")\n print(\"Goldfeld-Quandt:\")\n print(*gq)\n self._sb_linearity_plots(model)\n\n # Resets rows to their defaults\n self._set_rows()\n\n @_multicol_plot_wrapper\n def sb_multiplot(self, func, kwargs=None, default_axis=False):\n \"\"\"Flexible way of calling iterating through plots of a passed\n Seaborn function. Default axis determines what axis the iterated\n variables will take on. Leave blank for one dimensional plots.\"\"\"\n\n if default_axis and kwargs:\n kwargs = self._xyz_to_kwargs(kwargs, self.last_col)\n return func(data=self.df, ax=self.last_ax, **kwargs)\n else:\n return func(self.df[self.last_col], ax=self.last_ax, **kwargs)\n\n\n#Changes long numeric values and replaces them with more human readable abbreviations.\ndef scale_units(value):\n if value < .99:\n new_val = str(round(value,3))\n elif value < 1000:\n new_val = str(round(value))\n elif value < 1000000:\n new_val = str(round(value/1000))+\"k\"\n elif value < 1 * 10**9:\n new_val = str(round(value/(10**6)))+\"M\"\n elif value < 1 * 10**12:\n new_val = str(round(value/(10**9)))+\"B\"\n elif value < 1 * 10**15:\n new_val = str(round(value/(10**12)))+\"T\"\n else:\n new_val = str(value)\n return new_val\n\n#Inverts the log functions put on features. 
To be applied on ticks, so that the scale is visually condensed but the values\n# are human readable.\ndef unlog_plot(values, base):\n to_series = pd.Series(values)\n exponented = base**to_series\n return exponented.map(scale_units).values.tolist()\n\n#Shows the full breadth of possilbe values and nans for a column of a dataframe.\ndef full_value_counts(df, column):\n unique = df[column].unique().size\n totalna = df[column].isna().sum()\n percent_na = totalna/df[column].size\n print(f\"There are {unique} unique values with {totalna} nan values making up {percent_na*100:.1f}%\")\n for value, count in df[column].value_counts().iteritems():\n print(f\"{count}-{value}\")\n\n# Modifications to masked heatmap parameters from lecture notes.\ndef trimmed_heatmap(df, columns, font_scale=1, annot=True):\n plt.figure(figsize=(15, 10))\n corr = df[columns].corr()\n sns.set(style=\"white\")\n\n # Generate a mask for the upper triangle\n mask = np.zeros_like(corr, dtype=np.bool)\n mask[np.triu_indices_from(mask)] = True\n\n # Set up the matplotlib figure\n f, ax = plt.subplots(figsize=(11, 9))\n\n # Generate a custom diverging colormap\n cmap = sns.diverging_palette(220, 10, as_cmap=True)\n sns.set_context('talk', font_scale=font_scale)\n\n # Draw the heatmap with the mask and correct aspect ratio\n sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.95, center=0,\n square=True, linewidths=.5, cbar_kws={\"shrink\": .5}, annot=annot)\n\n return plt.show()" ]
[ [ "numpy.zeros_like", "pandas.Index", "pandas.DataFrame", "numpy.triu_indices_from", "matplotlib.pyplot.subplots", "matplotlib.pyplot.figure", "numpy.stack", "numpy.arange", "pandas.Series", "matplotlib.pyplot.show" ] ]
techthiyanes/annotated_deep_learning_paper_implementations
[ "8af24da2dd39a9a87482a4d18c2dc829bbd3fd47" ]
[ "labml_nn/optimizers/adam_warmup_cosine_decay.py" ]
[ "\"\"\"\n---\ntitle: Adam optimizer with warm-up and cosine decay\nsummary: A PyTorch implementation/tutorial of Adam optimizer with warm-up and cosine decay for GPT.\n---\n\n# Adam Optimizer with Warmup and Cosine Decay\n\nThis extends [AMSGrad optimizer](adam.html) and adds a warmup stage.\n\"\"\"\nimport math\nfrom typing import Dict\n\nfrom labml_nn.optimizers import WeightDecay\nfrom labml_nn.optimizers.amsgrad import AMSGrad\n\n\nclass AdamWarmupCosineDecay(AMSGrad):\n \"\"\"\n <a id=\"EmbeddingsWithPositionalEncoding\"></a>\n\n ## Adam Optimizer with Warmup and Cosine Decay\n\n This class extends from AMSGrad optimizer defined in [`amsgrad.py`](amsgrad.html).\n \"\"\"\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-16,\n weight_decay: WeightDecay = WeightDecay(),\n optimized_update: bool = True,\n amsgrad=False, warmup=0, total_steps=1e10, defaults=None):\n \"\"\"\n ### Initialize the optimizer\n\n * `params` is the list of parameters\n * `lr` is the learning rate $\\alpha$\n * `betas` is a tuple of ($\\beta_1$, $\\beta_2$)\n * `eps` is $\\hat{\\epsilon}$ or $\\epsilon$ based on `optimized_update`\n * `weight_decay` is an instance of class `WeightDecay` defined in [`__init__.py`](index.html)\n * 'optimized_update' is a flag whether to optimize the bias correction of the second moment\n by doing it after adding $\\epsilon$\n * `amsgrad` is a flag indicating whether to use AMSGrad or fallback to plain Adam\n * `warmup` number of warmup steps\n * `total_steps` total number of steps. Cosine decay reaches 0 at this,\n but stays at 10% of `lr` because we take $\\alpha * \\max(0.1, decay)$\n * `defaults` is a dictionary of default for group values.\n This is useful when you want to extend the class `AdamWarmup`.\n \"\"\"\n\n defaults = {} if defaults is None else defaults\n defaults.update(dict(warmup=warmup, total_steps=total_steps))\n super().__init__(params, lr, betas, eps, weight_decay, optimized_update, amsgrad, defaults)\n\n def get_lr(self, state: Dict[str, any], group: Dict[str, any]):\n \"\"\"\n ### Get learning-rate\n\n $$\\alpha \\min \\bigg(1, \\frac{t}{w}\\bigg)$$\n where $w$ is the number of warmup steps.\n \"\"\"\n # If we are in warmup stage\n if group['warmup'] > state['step']:\n # A linearly increasing learning rate from $0$ to $\\alpha$\n return 1e-8 + state['step'] * group['lr'] / group['warmup']\n else:\n # Constant learning rate $\\alpha$\n progress = (state['step'] - group['warmup']) / max(1, group['total_steps'] - group['warmup'])\n return group['lr'] * max(0.1, 0.5 * (1.0 + math.cos(math.pi * progress)))\n\n\ndef _test_lr():\n \"\"\"\n ### Plot learning rate for different warmups and model sizes\n\n ![Plot of learning rate](noam_lr.png)\n \"\"\"\n import matplotlib.pyplot as plt\n import numpy as np\n from torch import nn\n\n model = nn.Linear(10, 10)\n opt = AdamWarmupCosineDecay(model.parameters(), warmup=5000, lr=1e-4, total_steps=4e6)\n steps = 20_000\n plt.plot(np.arange(1, steps), [opt.get_lr({'step': i}, opt.defaults) for i in range(1, steps)])\n plt.legend([\"5000:4e6\", \"5000:2e6\", \"5000:1e6\"])\n plt.title(\"Learning Rate\")\n plt.show()\n\n steps = int(6e6)\n step_size = 1000\n plt.plot(np.arange(1, steps, step_size), [opt.get_lr({'step': i}, opt.defaults) for i in range(1, steps, step_size)])\n plt.legend([\"5000:4e6\", \"5000:2e6\", \"5000:1e6\"])\n plt.title(\"Learning Rate\")\n plt.show()\n\n\nif __name__ == '__main__':\n _test_lr()\n" ]
[ [ "torch.nn.Linear", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "numpy.arange", "matplotlib.pyplot.show" ] ]
joanRVAllen/drowsiness_detector
[ "af3b7b567134369a43343edc06303b6251beefd5" ]
[ "video_feed/video.py" ]
[ "import cv2\nimport numpy as np\nimport time\nimport tensorflow as tf\nfrom keras.preprocessing.image import array_to_img, img_to_array\nfrom tensorflow.keras.models import load_model\n\nclass Drowsiness:\n def __init__(self):\n self.model = load_model('../model/model_trial')\n self.face_cascade = cv2.CascadeClassifier('../video_feed/haarcascade_frontalface_default.xml')\n self.eye_cascade = cv2.CascadeClassifier('../video_feed/haarcascade_eye.xml')\n \n # TODO: better model for open/closed detection\n # for asian eyes\n # for squinting\n # for higher angle\n\n def detect_eyes(self, img):\n gray_picture = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # grayscale\n det_face = self.face_cascade.detectMultiScale(gray_picture, 1.3, 5)\n for (x, y, w, h) in det_face: # draw square on face\n cv2.rectangle(img, (x,y), (x+w, y+h), (255,255,0),2)\n if det_face == ():\n return img\n x,y,w,h = det_face[0]\n gray_face = gray_picture[y:y+h, x:x+w]\n face = img[y:y+h, x:x+w] # coordinates of face\n\n # crop the face\n det_eyes = self.eye_cascade.detectMultiScale(gray_face)\n \n # TODO: improve eye detection\n # sometimes, it detects ears, nose and background as face/eyes\n # set the face detection to just the largest square (driver)\n # eyes should be leveled and one eye on the right, one on the left\n half = int(np.size(face, 0) / 2)\n upper = [i for i in det_eyes if (i[1]<half)]\n if len(upper) <= 2:\n upper.append([0,0,1,1])\n upper.append([0,0,1,1])\n for (ex,ey,ew,eh) in upper:\n cv2.rectangle(face,(ex,ey),(ex+ew,ey+eh),(0,255,255),2)\n elif len(upper) > 2:\n up_eyes = [i[3] for i in upper]\n biggest = sorted(up_eyes)[-2:]\n ind = [up_eyes.index(i) for i in biggest]\n upper = [upper[i] for i in ind]\n for (ex,ey,ew,eh) in upper:\n cv2.rectangle(face,(ex, ey),(ex+ew, ey+eh),(0,255,255),2)\n left = upper[0]\n right = upper[1]\n self.left = face[left[1]:left[1]+left[3], left[0]:left[0]+left[2]]\n self.right = face[right[1]:right[1]+right[3], right[0]:right[0]+right[2]]\n\n\n def pred(self, array):\n input_ = tf.reshape(array, (-1, 6400))\n prediction = (self.model.predict(input_) > 0.5).astype('int32')[0][0]\n if prediction == 0:\n return 'close'\n elif prediction == 1:\n return 'open'\n \n def drowsy(self, image):\n def format(img):\n image = array_to_img(img)\n image = image.convert('L').resize((80,80))\n return img_to_array(image)\n l = self.pred(format(self.left))\n r = self.pred(format(self.right))\n font = cv2.FONT_HERSHEY_SIMPLEX\n if l == 'open' and r == 'open':\n self.tag = 'open'\n return cv2.putText(image, \n 'OPEN', \n (500, 50), \n font, 2, \n (0, 0, 0), \n 6, \n cv2.LINE_4)\n else:\n self.tag = 'close'\n return cv2.putText(image, \n 'CLOSE', \n (500, 50), \n font, 2, \n (0, 0, 0), \n 6, \n cv2.LINE_4)\n \n def video_feed(self):\n vid = cv2.VideoCapture(0)\n # clo = time.time()\n # TODO: account for drowsy/sleepy detection and lag\n # make another counter for sleepy/drowsy and establish a threshold\n counter = 0\n while (True):\n _, frame = vid.read()\n self.detect_eyes(frame)\n\n self.drowsy(frame)\n\n cv2.imshow('frame', frame)\n\n if self.tag == 'close':\n # print('close', time.time() - clo)\n counter += 1\n # clo = time.time()\n \n elif self.tag == 'open':\n # print('open', time.time() - clo)\n counter = 0\n # clo = time.time()\n if counter > 3 and counter < 6:\n print('You are drowsy')\n # TODO: make alarm instead of printing a statement\n \n elif counter > 5:\n print('you are sleepy')\n # TODO: make alarm instead of printing a statement\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n 
break\n\n vid.release()\n cv2.destroyAllWindows()\n\n" ]
[ [ "tensorflow.keras.models.load_model", "numpy.size", "tensorflow.reshape" ] ]
h521822/zvt
[ "c6bcc2b340406da55d920a411f59ab8d4cc7e76d" ]
[ "zvt/factors/pattern/pattern.py" ]
[ "# -*- coding: utf-8 -*-\nfrom enum import Enum\nfrom typing import List, Union, Optional\n\nimport pandas as pd\n\nfrom zvt.contract import EntityMixin\nfrom zvt.contract import IntervalLevel, AdjustType\nfrom zvt.contract.drawer import Rect\nfrom zvt.contract.factor import Transformer, Accumulator\nfrom zvt.domain import Stock\nfrom zvt.factors import TechnicalFactor\nfrom zvt.factors.algorithm import intersect\nfrom zvt.utils import pd_is_not_null\n\n\nclass Direction(Enum):\n up = 'up'\n down = 'down'\n\n def opposite(self):\n if self == Direction.up:\n return Direction.down\n if self == Direction.down:\n return Direction.up\n\n\nclass Fenxing(object):\n def __init__(self, state, kdata, index) -> None:\n super().__init__()\n self.state = state\n self.kdata = kdata\n self.index = index\n\n\nclass KState(Enum):\n # 顶分型\n bi_ding = 'bi_ding'\n # 底分型\n bi_di = 'bi_di'\n # 临时\n tmp_ding = 'tmp_ding'\n tmp_di = 'tmp_di'\n # 候选(candidate)\n can_ding = 'can_ding'\n can_di = 'can_di'\n\n\nclass DuanState(Enum):\n up = 'up'\n down = 'down'\n # Bardo,中阴阶段,不定,变化,易\n yi = 'yi'\n\n\ndef a_include_b(a: pd.Series, b: pd.Series) -> bool:\n \"\"\"\n kdata a includes kdata b\n\n :param a:\n :param b:\n :return:\n \"\"\"\n return (a['high'] >= b['high']) and (a['low'] <= b['low'])\n\n\ndef is_including(kdata1, kdata2):\n return a_include_b(kdata1, kdata2) or a_include_b(kdata2, kdata1)\n\n\ndef get_direction(kdata, pre_kdata, current=Direction.up) -> Direction:\n if is_up(kdata, pre_kdata):\n return Direction.up\n if is_down(kdata, pre_kdata):\n return Direction.down\n\n return current\n\n\ndef is_up(kdata, pre_kdata):\n return kdata['high'] > pre_kdata['high']\n\n\ndef is_down(kdata, pre_kdata):\n return kdata['low'] < pre_kdata['low']\n\n\ndef handle_first_fenxing(one_df, step=11):\n print(f\"gen first fenxing by step {step}\")\n df = one_df.iloc[:step]\n ding_kdata = df[df['high'].max() == df['high']]\n ding_index = ding_kdata.index[-1]\n\n di_kdata = df[df['low'].min() == df['low']]\n di_index = di_kdata.index[-1]\n\n # 确定第一个分型\n if abs(ding_index - di_index) >= 4:\n if ding_index > di_index:\n fenxing = 'bi_di'\n fenxing_index = di_index\n one_df.loc[di_index, 'bi_di'] = True\n # 确定第一个分型后,开始遍历的位置\n start_index = ding_index\n # 目前的笔的方向,up代表寻找 can_ding;down代表寻找can_di\n direction = Direction.up\n else:\n fenxing = 'bi_ding'\n fenxing_index = ding_index\n one_df.loc[ding_index, 'bi_ding'] = True\n start_index = di_index\n direction = Direction.down\n return Fenxing(state=fenxing, index=fenxing_index, kdata=one_df.loc[fenxing_index]), start_index, direction\n else:\n print(\"need add step\")\n return handle_first_fenxing(one_df, step=step + 1)\n\n\ndef handle_duan(fenxing_list: List[Fenxing], pre_duan_state='yi'):\n state = fenxing_list[0].state\n # 1笔区间\n bi1_start = fenxing_list[0].kdata\n bi1_end = fenxing_list[1].kdata\n # 3笔区间\n bi3_start = fenxing_list[2].kdata\n bi3_end = fenxing_list[3].kdata\n\n if state == 'bi_ding':\n # 向下段,下-上-下\n\n # 第一笔区间\n range1 = (bi1_end['low'], bi1_start['high'])\n # 第三笔区间\n range3 = (bi3_end['low'], bi3_start['high'])\n\n # 1,3有重叠,认为第一个段出现\n if intersect(range1, range3):\n return 'down'\n\n else:\n # 向上段,上-下-上\n\n # 第一笔区间\n range1 = (bi1_start['low'], bi1_end['high'])\n # 第三笔区间\n range3 = (bi3_start['low'], bi3_end['high'])\n\n # 1,3有重叠,认为第一个段出现\n if intersect(range1, range3):\n return 'up'\n\n return pre_duan_state\n\n\ndef handle_including(one_df, index, kdata, pre_index, pre_kdata, tmp_direction: Direction):\n # 改kdata\n if a_include_b(kdata, pre_kdata):\n # 
长的kdata变短\n if tmp_direction == Direction.up:\n one_df.loc[index, 'low'] = pre_kdata['low']\n else:\n one_df.loc[index, 'high'] = pre_kdata['high']\n # 改pre_kdata\n elif a_include_b(pre_kdata, kdata):\n # 长的pre_kdata变短\n if tmp_direction == Direction.down:\n one_df.loc[pre_index, 'low'] = kdata['low']\n else:\n one_df.loc[pre_index, 'high'] = kdata['high']\n\n\nclass ZenTransformer(Transformer):\n \"\"\"\n 算法和概念\n <实体> 某种状态的k线\n [实体] 连续实体排列\n\n 两k线的关系有三种: 上涨,下跌,包含\n 上涨: k线高点比之前高,低点比之前高\n 下跌: k线低点比之前低,高点比之前低\n 包含: k线高点比之前高,低点比之前低;反方向,即被包含\n 处理包含关系,长的k线缩短,上涨时,低点取max(low1,low2);下跌时,高点取min(high1,high2)\n\n 第一个顶(底)分型: 出现连续4根下跌(上涨)k线\n 之后开始寻找 候选底(顶)分型,寻找的过程中有以下状态\n\n <临时顶>: 中间k线比两边的高点高,是一条特定的k线\n <临时底>: 中间k线比两边的高点高,是一条特定的k线\n\n <候选顶分型>: 连续的<临时顶>取最大\n <候选底分型>: 连续的<临时底>取最小\n 任何时刻只能有一个候选,其之前是一个确定的分型\n\n <上升k线>:\n <下降k线>:\n <连接k线>: 分型之间的k线都可以认为是连接k线,以上为演化过程的中间态\n distance(<候选顶分型>, <连接k线>)>=4 则 <候选顶分型> 变成顶分型\n distance(<候选底分型>, <连接k线>)>=4 则 <候选底分型> 变成底分型\n\n <顶分型><连接k线><候选底分型>\n <底分型><连接k线><候选顶分型>\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self.entity_duan_intervals = {}\n\n def transform_one(self, entity_id, df: pd.DataFrame) -> pd.DataFrame:\n # 记录段区间\n if entity_id not in self.entity_duan_intervals:\n self.entity_duan_intervals[entity_id] = []\n\n df = df.reset_index(drop=True)\n # 笔的底\n df['bi_di'] = False\n # 笔的顶\n df['bi_ding'] = False\n\n # 记录临时分型,不变\n df['tmp_ding'] = False\n df['tmp_di'] = False\n\n df['duan_state'] = 'yi'\n\n # 段的底\n df['duan_di'] = False\n # 段的顶\n df['duan_ding'] = False\n\n fenxing_list: List[Fenxing] = []\n\n # 取前11条k线,至多出现一个顶分型+底分型\n # 注:只是一种方便的确定第一个分型的办法,有了第一个分型,后面的处理就比较统一\n # start_index 为遍历开始的位置\n # direction为一个确定分型后的方向,即顶分型后为:down,底分型后为:up\n fenxing, start_index, direction = handle_first_fenxing(df, step=11)\n fenxing_list.append(fenxing)\n # 临时方向\n tmp_direction = direction\n # 候选分型(candidate)\n can_fenxing = None\n can_fenxing_index = None\n # 正向count\n count = 0\n # 反方向count\n opposite_count = 0\n # 目前段的方向\n current_duan_state = 'yi'\n\n pre_kdata = df.iloc[start_index - 1]\n pre_index = start_index - 1\n for index, kdata in df.iloc[start_index:].iterrows():\n # print(f'timestamp: {kdata.timestamp}')\n # 临时方向\n tmp_direction = get_direction(kdata, pre_kdata, current=tmp_direction)\n\n # 处理包含关系\n handle_including(one_df=df, index=index, kdata=kdata, pre_index=pre_index, pre_kdata=pre_kdata,\n tmp_direction=tmp_direction)\n\n # 根据方向,寻找对应的分型 和 段\n if direction == Direction.up:\n tmp_fenxing_col = 'tmp_ding'\n fenxing_col = 'bi_ding'\n else:\n tmp_fenxing_col = 'tmp_di'\n fenxing_col = 'bi_di'\n\n # 方向一致,延续中\n if tmp_direction == direction:\n opposite_count = 0\n # 反向,寻找反 分型\n else:\n opposite_count = opposite_count + 1\n # 第一次反向\n if opposite_count == 1:\n df.loc[pre_index, tmp_fenxing_col] = True\n\n if pd_is_not_null(can_fenxing):\n # 候选底分型\n if tmp_direction == Direction.up:\n # 取小的\n if pre_kdata['low'] <= can_fenxing['low']:\n can_fenxing = pre_kdata\n can_fenxing_index = pre_index\n\n # 候选顶分型\n else:\n # 取大的\n if pre_kdata['high'] >= can_fenxing['high']:\n can_fenxing = pre_kdata\n can_fenxing_index = pre_index\n else:\n can_fenxing = pre_kdata\n can_fenxing_index = pre_index\n\n # 分型确立\n if pd_is_not_null(can_fenxing):\n if opposite_count >= 4 or (index - can_fenxing_index >= 8):\n df.loc[can_fenxing_index, fenxing_col] = True\n opposite_count = 0\n direction = direction.opposite()\n can_fenxing = None\n\n # 确定第一个段\n if fenxing_list != None:\n fenxing_list.append(Fenxing(state=fenxing_col, kdata=df.loc[can_fenxing_index],\n 
index=can_fenxing_index))\n\n if len(fenxing_list) == 4:\n duan_state = handle_duan(fenxing_list=fenxing_list,\n pre_duan_state=current_duan_state)\n\n change = duan_state != current_duan_state\n\n if change:\n current_duan_state = duan_state\n\n # 确定状态\n df.loc[fenxing_list[0].index:fenxing_list[-1].index,\n 'duan_state'] = current_duan_state\n\n if current_duan_state == 'up':\n df.loc[fenxing_list[0].index, 'duan_di'] = True\n else:\n df.loc[fenxing_list[0].index, 'duan_ding'] = True\n # 只留最后一个\n fenxing_list = fenxing_list[-1:]\n else:\n # 保持之前的状态并踢出候选\n df.loc[fenxing_list[0].index, 'duan_state'] = current_duan_state\n fenxing_list = fenxing_list[1:]\n\n pre_kdata = kdata\n pre_index = index\n\n return df\n\n\nclass ZenFactor(TechnicalFactor):\n\n def __init__(self, entity_schema: EntityMixin = Stock, provider: str = None, entity_provider: str = None,\n entity_ids: List[str] = None, exchanges: List[str] = None, codes: List[str] = None,\n the_timestamp: Union[str, pd.Timestamp] = None, start_timestamp: Union[str, pd.Timestamp] = None,\n end_timestamp: Union[str, pd.Timestamp] = None, columns: List = None, filters: List = None,\n order: object = None, limit: int = None, level: Union[str, IntervalLevel] = IntervalLevel.LEVEL_1DAY,\n category_field: str = 'entity_id', time_field: str = 'timestamp', computing_window: int = None,\n keep_all_timestamp: bool = False, fill_method: str = 'ffill', effective_number: int = None,\n transformer: Transformer = ZenTransformer(), accumulator: Accumulator = None,\n need_persist: bool = False, dry_run: bool = False, adjust_type: Union[AdjustType, str] = None) -> None:\n self.fenxing_value_df = None\n self.duan_value_df = None\n self.zhongshu_rects = None\n\n super().__init__(entity_schema, provider, entity_provider, entity_ids, exchanges, codes, the_timestamp,\n start_timestamp, end_timestamp, columns, filters, order, limit, level, category_field,\n time_field, computing_window, keep_all_timestamp, fill_method, effective_number, transformer,\n accumulator, need_persist, dry_run, adjust_type)\n\n def do_compute(self):\n super().do_compute()\n one_df = self.factor_df\n\n # annotation_df format:\n # value flag color\n # entity_id timestamp\n\n # 处理分型\n bi_ding = one_df[one_df.bi_ding][['timestamp', 'high']]\n bi_di = one_df[one_df.bi_di][['timestamp', 'low']]\n\n df1 = bi_ding.rename(columns={\"high\": \"value\"})\n df1['flag'] = '顶分型'\n\n df2 = bi_di.rename(columns={\"low\": \"value\"})\n df2['flag'] = '底分型'\n\n flag_df: pd.DataFrame = pd.concat([df1, df2])\n flag_df = flag_df.sort_values(by=['timestamp'])\n flag_df['entity_id'] = self.entity_ids[0]\n flag_df = flag_df.set_index(['entity_id', 'timestamp'])\n\n # 处理段\n up = one_df[one_df.duan_di][['timestamp', 'low']]\n down = one_df[one_df.duan_ding][['timestamp', 'high']]\n df1 = up.rename(columns={\"low\": \"value\"})\n df2 = down.rename(columns={\"high\": \"value\"})\n\n duan_df: pd.DataFrame = pd.concat([df1, df2])\n duan_df = duan_df.sort_values(by=['timestamp'])\n duan_df['entity_id'] = self.entity_ids[0]\n duan_df = duan_df.set_index(['entity_id', 'timestamp'])\n\n # 处理中枢\n rects: List[Rect] = []\n\n # list of (timestamp,value)\n duans = []\n for index, item in duan_df.iterrows():\n duans.append((index[1], item.value))\n if len(duans) == 4:\n x1 = duans[0][0]\n x2 = duans[3][0]\n if duans[0][1] < duans[1][1]:\n # 向下段\n range = intersect((duans[0][1], duans[1][1]), (duans[2][1], duans[3][1]))\n if range:\n y1, y2 = range\n else:\n duans = duans[1:]\n continue\n else:\n # 向上段\n range = 
intersect((duans[1][1], duans[0][1]), (duans[3][1], duans[2][1]))\n if range:\n y1, y2 = range\n else:\n duans = duans[1:]\n continue\n\n rects.append(Rect(x0=x1, x1=x2, y0=y1, y1=y2))\n duans = duans[-1:]\n\n self.fenxing_value_df = flag_df\n self.duan_value_df = duan_df\n self.zhongshu_rects = rects\n\n def drawer_factor_df_list(self) -> Optional[List[pd.DataFrame]]:\n return [self.fenxing_value_df[['value']], self.duan_value_df]\n\n def drawer_annotation_df(self) -> Optional[pd.DataFrame]:\n # return self.fenxing_value_df\n return None\n\n def drawer_rects(self) -> List[Rect]:\n return self.zhongshu_rects\n\n\nif __name__ == '__main__':\n zen = ZenFactor(entity_ids=['stock_sz_000338'], level='1wk')\n zen.draw(show=True)\n\n# the __all__ is generated\n__all__ = ['Direction', 'Fenxing', 'KState', 'DuanState', 'a_include_b', 'is_including', 'get_direction', 'is_up',\n 'is_down', 'handle_first_fenxing', 'handle_duan', 'handle_including', 'ZenTransformer', 'ZenFactor']\n" ]
[ [ "pandas.concat" ] ]
Elgyii/earthengine-api
[ "8650c1f58f3abc502ea5296d1f628b69bc295243" ]
[ "python/ee/cli/commands.py" ]
[ "#!/usr/bin/env python\n# Lint as: python2, python3\n\"\"\"Commands supported by the Earth Engine command line interface.\n\nEach command is implemented by extending the Command class. Each class\ndefines the supported positional and optional arguments, as well as\nthe actions to be taken when the command is executed.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# pylint: disable=g-bad-import-order\nfrom six.moves import range\nimport argparse\nimport calendar\nfrom collections import Counter\nimport datetime\nimport json\nimport os\nimport re\nimport six\nimport shutil\nimport sys\nimport tempfile\n\n# Prevent TensorFlow from logging anything at the native level.\n# pylint: disable=g-import-not-at-top\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nTENSORFLOW_INSTALLED = False\n# pylint: disable=g-import-not-at-top\ntry:\n import tensorflow.compat.v1 as tf\n from tensorflow.compat.v1.saved_model import utils as saved_model_utils\n from tensorflow.compat.v1.saved_model import signature_constants\n from tensorflow.compat.v1.saved_model import signature_def_utils\n tf.disable_v2_behavior()\n # Prevent TensorFlow from logging anything at the python level.\n tf.logging.set_verbosity(tf.logging.ERROR)\n TENSORFLOW_INSTALLED = True\nexcept ImportError:\n pass\n\nTENSORFLOW_ADDONS_INSTALLED = False\n# pylint: disable=g-import-not-at-top\nif TENSORFLOW_INSTALLED:\n try:\n if sys.version_info[0] >= 3:\n # This import is enough to register TFA ops though isn't directly used\n # (for now).\n # pylint: disable=unused-import\n import tensorflow_addons as tfa\n tfa.register_all(custom_kernels=False)\n TENSORFLOW_ADDONS_INSTALLED = True\n except ImportError:\n pass\n except AttributeError:\n # This can be thrown by \"tfa.register_all()\" which means the\n # tensorflow_addons version is registering ops the old way, i.e.\n # automatically at import time. If this is the case, we've actually\n # successfully registered TFA.\n TENSORFLOW_ADDONS_INSTALLED = True\n\n# pylint: disable=g-import-not-at-top\nimport ee\nfrom ee.cli import utils\n\n# Constants used in ACLs.\nALL_USERS = 'allUsers'\nALL_USERS_CAN_READ = 'all_users_can_read'\nREADERS = 'readers'\nWRITERS = 'writers'\n\n# Constants used in setting metadata properties.\nTYPE_DATE = 'date'\nTYPE_NUMBER = 'number'\nTYPE_STRING = 'string'\nSYSTEM_TIME_START = 'system:time_start'\nSYSTEM_TIME_END = 'system:time_end'\n\n# A regex that parses properties of the form \"[(type)]name=value\". 
The\n# second, third, and fourth group are type, name, and number, respectively.\nPROPERTY_RE = re.compile(r'(\\(([^\\)]*)\\))?([^=]+)=(.*)')\n\n# Translate internal task type identifiers to user-friendly strings that\n# are consistent with the language in the API and docs.\nTASK_TYPES = {\n 'EXPORT_FEATURES': 'Export.table',\n 'EXPORT_IMAGE': 'Export.image',\n 'EXPORT_TILES': 'Export.map',\n 'EXPORT_VIDEO': 'Export.video',\n 'INGEST': 'Upload',\n 'INGEST_IMAGE': 'Upload',\n 'INGEST_TABLE': 'Upload',\n}\n\nTF_RECORD_EXTENSIONS = ['.tfrecord', 'tfrecord.gz']\n\n# Maximum size of objects in a SavedModel directory that we're willing to\n# download from GCS.\nSAVED_MODEL_MAX_SIZE = 400 * 1024 * 1024\n\n# Default path to SavedModel variables.\nDEFAULT_VARIABLES_PREFIX = '/variables/variables'\n\n\ndef _add_wait_arg(parser):\n parser.add_argument(\n '--wait', '-w', nargs='?', default=-1, type=int, const=sys.maxsize,\n help=('Wait for the task to finish,'\n ' or timeout after the specified number of seconds.'\n ' Without this flag, the command just starts an export'\n ' task in the background, and returns immediately.'))\n\n\ndef _add_overwrite_arg(parser):\n parser.add_argument(\n '--force', '-f', action='store_true',\n help='Overwrite any existing version of the asset.')\n\n\ndef _upload(args, request, ingestion_function):\n if 0 <= args.wait < 10:\n raise ee.EEException('Wait time should be at least 10 seconds.')\n request_id = ee.data.newTaskId()[0]\n task_id = ingestion_function(request_id, request, args.force)['id']\n print('Started upload task with ID: %s' % task_id)\n if args.wait >= 0:\n print('Waiting for the upload task to complete...')\n utils.wait_for_task(task_id, args.wait)\n\n\n# Argument types\ndef _comma_separated_strings(string):\n \"\"\"Parses an input consisting of comma-separated strings.\"\"\"\n error_msg = 'Argument should be a comma-separated list of strings: {}'\n values = string.split(',')\n if not values:\n raise argparse.ArgumentTypeError(error_msg.format(string))\n return values\n\n\ndef _comma_separated_numbers(string):\n \"\"\"Parses an input consisting of comma-separated numbers.\"\"\"\n error_msg = 'Argument should be a comma-separated list of numbers: {}'\n values = string.split(',')\n if not values:\n raise argparse.ArgumentTypeError(error_msg.format(string))\n numbervalues = []\n for value in values:\n try:\n numbervalues.append(int(value))\n except ValueError:\n try:\n numbervalues.append(float(value))\n except ValueError:\n raise argparse.ArgumentTypeError(error_msg.format(string))\n return numbervalues\n\n\ndef _comma_separated_pyramiding_policies(string):\n \"\"\"Parses an input consisting of comma-separated pyramiding policies.\"\"\"\n error_msg = ('Argument should be a comma-separated list of: '\n '{{\"mean\", \"sample\", \"min\", \"max\", \"mode\"}}: {}')\n values = string.split(',')\n if not values:\n raise argparse.ArgumentTypeError(error_msg.format(string))\n redvalues = []\n for value in values:\n value = value.upper()\n if value not in {'MEAN', 'SAMPLE', 'MIN', 'MAX', 'MODE'}:\n raise argparse.ArgumentTypeError(error_msg.format(string))\n redvalues.append(value)\n return redvalues\n\n\ndef _decode_number(string):\n \"\"\"Decodes a number from a command line argument.\"\"\"\n try:\n return float(string)\n except ValueError:\n raise argparse.ArgumentTypeError(\n 'Invalid value for property of type \"number\": \"%s\".' 
% string)\n\n\ndef _timestamp_ms_for_datetime(datetime_obj):\n \"\"\"Returns time since the epoch in ms for the given UTC datetime object.\"\"\"\n return (\n int(calendar.timegm(datetime_obj.timetuple()) * 1000) +\n datetime_obj.microsecond / 1000)\n\n\ndef _cloud_timestamp_for_timestamp_ms(timestamp_ms):\n \"\"\"Returns a Cloud-formatted date for the given millisecond timestamp.\"\"\"\n # Desired format is like '2003-09-07T19:30:12.345Z'\n return datetime.datetime.utcfromtimestamp(\n timestamp_ms / 1000.0).isoformat() + 'Z'\n\n\ndef _parse_millis(millis):\n return datetime.datetime.fromtimestamp(millis / 1000)\n\n\ndef _decode_date(string):\n \"\"\"Decodes a date from a command line argument, returning msec since epoch\".\n\n Args:\n string: See AssetSetCommand class comment for the allowable\n date formats.\n\n Returns:\n long, ms since epoch, or '' if the input is empty.\n\n Raises:\n argparse.ArgumentTypeError: if string does not conform to a legal\n date format.\n \"\"\"\n if not string:\n return ''\n\n try:\n return int(string)\n except ValueError:\n date_formats = ['%Y-%m-%d',\n '%Y-%m-%dT%H:%M:%S',\n '%Y-%m-%dT%H:%M:%S.%f']\n for date_format in date_formats:\n try:\n dt = datetime.datetime.strptime(string, date_format)\n return _timestamp_ms_for_datetime(dt)\n except ValueError:\n continue\n raise argparse.ArgumentTypeError(\n 'Invalid value for property of type \"date\": \"%s\".' % string)\n\n\ndef _decode_property(string):\n \"\"\"Decodes a general key-value property from a command-line argument.\n\n Args:\n string: The string must have the form name=value or (type)name=value, where\n type is one of 'number', 'string', or 'date'. The value format for dates\n is YYYY-MM-DD[THH:MM:SS[.MS]]. The value 'null' is special: it evaluates\n to None unless it is cast to a string of 'null'.\n\n Returns:\n a tuple representing the property in the format (name, value)\n\n Raises:\n argparse.ArgumentTypeError: if the flag value could not be decoded or if\n the type is not recognized\n \"\"\"\n\n m = PROPERTY_RE.match(string)\n if not m:\n raise argparse.ArgumentTypeError(\n 'Invalid property: \"%s\". Must have the form \"name=value\" or '\n '\"(type)name=value\".', string)\n _, type_str, name, value_str = m.groups()\n if value_str == 'null' and type_str != TYPE_STRING:\n return (name, None)\n if type_str is None:\n # Guess numeric types automatically.\n try:\n value = _decode_number(value_str)\n except argparse.ArgumentTypeError:\n value = value_str\n elif type_str == TYPE_DATE:\n value = _decode_date(value_str)\n elif type_str == TYPE_NUMBER:\n value = _decode_number(value_str)\n elif type_str == TYPE_STRING:\n value = value_str\n else:\n raise argparse.ArgumentTypeError(\n 'Unrecognized property type name: \"%s\". Expected one of \"string\", '\n '\"number\", or \"date\".' % type_str)\n return (name, value)\n\n\ndef _add_property_flags(parser):\n \"\"\"Adds command line flags related to metadata properties to a parser.\"\"\"\n parser.add_argument(\n '--property', '-p',\n help='A property to set, in the form [(type)]name=value. If no type '\n 'is specified the type will be \"number\" if the value is numeric and '\n '\"string\" otherwise. 
May be provided multiple times.',\n action='append',\n type=_decode_property)\n parser.add_argument(\n '--time_start', '-ts',\n help='Sets the start time property to a number or date.',\n type=_decode_date)\n parser.add_argument(\n '--time_end', '-te',\n help='Sets the end time property to a number or date.',\n type=_decode_date)\n\n\ndef _decode_property_flags(args):\n \"\"\"Decodes metadata properties from args as a name->value dict.\"\"\"\n property_list = list(args.property or [])\n names = [name for name, _ in property_list]\n duplicates = [name for name, count in Counter(names).items() if count > 1]\n if duplicates:\n raise ee.EEException('Duplicate property name(s): %s.' % duplicates)\n return dict(property_list)\n\n\ndef _decode_timestamp_flags(args):\n \"\"\"Decodes timestamp properties from args as a name->value dict.\"\"\"\n result = {}\n if args.time_start is not None:\n result[SYSTEM_TIME_START] = args.time_start\n if args.time_end is not None:\n result[SYSTEM_TIME_END] = args.time_end\n return result\n\n\ndef _check_valid_files(filenames):\n \"\"\"Returns true if the given filenames are valid upload file URIs.\"\"\"\n for filename in filenames:\n if not filename.startswith('gs://'):\n raise ee.EEException('Invalid Cloud Storage URL: ' + filename)\n\n\ndef _pretty_print_json(json_obj):\n \"\"\"Pretty-prints a JSON object to stdandard output.\"\"\"\n print(json.dumps(json_obj, sort_keys=True, indent=2, separators=(',', ': ')))\n\n\nclass Dispatcher(object):\n \"\"\"Dispatches to a set of commands implemented as command classes.\"\"\"\n\n def __init__(self, parser):\n self.command_dict = {}\n self.dest = self.name + '_cmd'\n subparsers = parser.add_subparsers(title='Commands', dest=self.dest)\n subparsers.required = True # Needed for proper missing arg handling in 3.x\n for command in self.COMMANDS:\n command_help = None\n if command.__doc__ and command.__doc__.splitlines():\n command_help = command.__doc__.splitlines()[0]\n subparser = subparsers.add_parser(\n command.name,\n description=command.__doc__,\n help=command_help)\n self.command_dict[command.name] = command(subparser)\n\n def run(self, args, config):\n self.command_dict[vars(args)[self.dest]].run(args, config)\n\n\nclass AuthenticateCommand(object):\n \"\"\"Prompts the user to authorize access to Earth Engine via OAuth2.\n\n Note that running this command in the default interactive mode within\n JupyterLab with a bash magic command (i.e. \"!earthengine authenticate\") is\n problematic (see https://github.com/ipython/ipython/issues/10499). To avoid\n this issue, use the non-interactive mode\n (i.e. 
\"!earthengine authenticate --quiet\").\n \"\"\"\n\n name = 'authenticate'\n\n def __init__(self, parser):\n parser.add_argument(\n '--authorization-code',\n help='Use this specified authorization code.')\n parser.add_argument(\n '--quiet',\n action='store_true',\n help='Do not issue any interactive prompts.')\n parser.add_argument(\n '--code-verifier',\n help='PKCE verifier to prevent auth code stealing.')\n\n def run(self, args, unused_config):\n \"\"\"Prompts for an auth code, requests a token and saves it.\"\"\"\n\n # Filter for arguments relevant for ee.Authenticate()\n args_auth = {x: vars(args)[x] for x in (\n 'authorization_code', 'quiet', 'code_verifier')}\n ee.Authenticate(**args_auth)\n\n\nclass SetProjectCommand(object):\n \"\"\"Sets the default user project to be used for all API calls.\"\"\"\n\n name = 'set_project'\n\n def __init__(self, parser):\n parser.add_argument('project', help='project id or number to use.')\n\n def run(self, args, config):\n \"\"\"Saves the project to the config file.\"\"\"\n\n config_path = config.config_file\n with open(config_path) as config_file_json:\n config = json.load(config_file_json)\n\n config['project'] = args.project\n json.dump(config, open(config_path, 'w'))\n print('Successfully saved project id')\n\n\nclass UnSetProjectCommand(object):\n \"\"\"UnSets the default user project to be used for all API calls.\"\"\"\n\n name = 'unset_project'\n\n def __init__(self, unused_parser):\n pass\n\n def run(self, unused_args, config):\n \"\"\"Saves the project to the config file.\"\"\"\n\n config_path = config.config_file\n with open(config_path) as config_file_json:\n config = json.load(config_file_json)\n\n if 'project' in config:\n del config['project']\n json.dump(config, open(config_path, 'w'))\n print('Successfully unset project id')\n\n\nclass AclChCommand(object):\n \"\"\"Changes the access control list for an asset.\n\n Each change specifies the email address of a user or group and,\n for additions, one of R or W corresponding to the read or write\n permissions to be granted, as in \"[email protected]:R\". 
Use the\n special name \"allUsers\" to change whether all users can read the\n asset.\n \"\"\"\n\n name = 'ch'\n\n def __init__(self, parser):\n parser.add_argument('-u', action='append', metavar='user permission',\n help='Add or modify a user\\'s permission.')\n parser.add_argument('-d', action='append', metavar='remove user',\n help='Remove all permissions for a user.')\n parser.add_argument('-g', action='append', metavar='group permission',\n help='Add or modify a group\\'s permission.')\n parser.add_argument('-dg', action='append', metavar='remove group',\n help='Remove all permissions for a user.')\n parser.add_argument('asset_id', help='ID of the asset.')\n self._cloud_api_enabled = False\n\n def run(self, args, config):\n \"\"\"Performs an ACL update.\"\"\"\n config.ee_init()\n self._cloud_api_enabled = config.use_cloud_api\n permissions = self._parse_permissions(args)\n acl = ee.data.getAssetAcl(args.asset_id)\n self._apply_permissions(acl, permissions)\n if not config.use_cloud_api:\n # The original permissions will contain an 'owners' stanza, but the\n # non-Cloud EE API does not allow setting the owner ACL so we have to\n # remove it even though it has not changed.\n del acl['owners']\n ee.data.setAssetAcl(args.asset_id, json.dumps(acl))\n\n def _set_permission(self, permissions, grant, prefix):\n \"\"\"Sets the permission for a given user/group.\"\"\"\n parts = grant.rsplit(':', 1)\n if len(parts) != 2 or parts[1] not in ['R', 'W']:\n raise ee.EEException('Invalid permission \"%s\".' % grant)\n user, role = parts\n prefixed_user = user\n if self._cloud_api_enabled and not self._is_all_users(user):\n prefixed_user = prefix + user\n if prefixed_user in permissions:\n raise ee.EEException('Multiple permission settings for \"%s\".' % user)\n if self._is_all_users(user) and role == 'W':\n raise ee.EEException('Cannot grant write permissions to all users.')\n permissions[prefixed_user] = role\n\n def _remove_permission(self, permissions, user, prefix):\n \"\"\"Removes permissions for a given user/group.\"\"\"\n prefixed_user = user\n if self._cloud_api_enabled and not self._is_all_users(user):\n prefixed_user = prefix + user\n if prefixed_user in permissions:\n raise ee.EEException('Multiple permission settings for \"%s\".' 
% user)\n permissions[prefixed_user] = 'D'\n\n def _user_account_type(self, user):\n \"\"\"Returns the appropriate account type for a user email.\"\"\"\n\n # Here 'user' ends with ':R', ':W', or ':D', so we extract\n # just the username.\n if user.split(':')[0].endswith('.gserviceaccount.com'):\n return 'serviceAccount:'\n else:\n return 'user:'\n\n def _parse_permissions(self, args):\n \"\"\"Decodes and sanity-checks the permissions in the arguments.\"\"\"\n # A dictionary mapping from user ids to one of 'R', 'W', or 'D'.\n permissions = {}\n if args.u:\n for user in args.u:\n self._set_permission(permissions, user, self._user_account_type(user))\n if args.d:\n for user in args.d:\n self._remove_permission(\n permissions, user, self._user_account_type(user))\n if args.g:\n for group in args.g:\n self._set_permission(permissions, group, 'group:')\n if args.dg:\n for group in args.dg:\n self._remove_permission(permissions, group, 'group:')\n return permissions\n\n def _apply_permissions(self, acl, permissions):\n \"\"\"Applies the given permission edits to the given acl.\"\"\"\n for user, role in six.iteritems(permissions):\n if self._is_all_users(user):\n acl[ALL_USERS_CAN_READ] = (role == 'R')\n elif role == 'R':\n if user not in acl[READERS]:\n acl[READERS].append(user)\n if user in acl[WRITERS]:\n acl[WRITERS].remove(user)\n elif role == 'W':\n if user in acl[READERS]:\n acl[READERS].remove(user)\n if user not in acl[WRITERS]:\n acl[WRITERS].append(user)\n elif role == 'D':\n if user in acl[READERS]:\n acl[READERS].remove(user)\n if user in acl[WRITERS]:\n acl[WRITERS].remove(user)\n\n def _is_all_users(self, user):\n \"\"\"Determines if a user name represents the special \"all users\" entity.\"\"\"\n # We previously used \"AllUsers\" as the magic string to denote that we wanted\n # to apply some permission to everyone. However, Google Cloud convention for\n # this concept is \"allUsers\". Because some people might be using one and\n # some the other, we do a case-insentive comparison.\n return user.lower() == ALL_USERS.lower()\n\n\nclass AclGetCommand(object):\n \"\"\"Prints the access control list for an asset.\"\"\"\n\n name = 'get'\n\n def __init__(self, parser):\n parser.add_argument('asset_id', help='ID of the asset.')\n\n def run(self, args, config):\n config.ee_init()\n acl = ee.data.getAssetAcl(args.asset_id)\n _pretty_print_json(acl)\n\n\nclass AclSetCommand(object):\n \"\"\"Sets the access control list for an asset.\n\n The ACL may be the name of a canned ACL, or it may be the path to a\n file containing the output from \"acl get\". The recognized canned ACL\n names are \"private\", indicating that no users other than the owner\n have access, and \"public\", indicating that all users have read\n access. 
It is currently not possible to modify the owner ACL using\n this tool.\n \"\"\"\n\n name = 'set'\n\n CANNED_ACLS = {\n 'private': {\n READERS: [],\n WRITERS: [],\n ALL_USERS_CAN_READ: False,\n },\n 'public': {\n READERS: [],\n WRITERS: [],\n ALL_USERS_CAN_READ: True,\n },\n }\n\n def __init__(self, parser):\n parser.add_argument('file_or_acl_name',\n help='File path or canned ACL name.')\n parser.add_argument('asset_id', help='ID of the asset.')\n\n def run(self, args, config):\n \"\"\"Sets asset ACL to a canned ACL or one provided in a JSON file.\"\"\"\n config.ee_init()\n if args.file_or_acl_name in list(self.CANNED_ACLS.keys()):\n acl = self.CANNED_ACLS[args.file_or_acl_name]\n else:\n acl = json.load(open(args.file_or_acl_name))\n # In the expected usage the ACL file will have come from a previous\n # invocation of 'acl get', which means it will include an 'owners' stanza,\n # but the non-Cloud EE API does not allow setting the owner ACL, so we\n # have to remove it.\n if 'owners' in acl and not config.use_cloud_api:\n print('Warning: Not updating the owner ACL.')\n del acl['owners']\n ee.data.setAssetAcl(args.asset_id, json.dumps(acl))\n\n\nclass AclCommand(Dispatcher):\n \"\"\"Prints or updates the access control list of the specified asset.\"\"\"\n\n name = 'acl'\n\n COMMANDS = [\n AclChCommand,\n AclGetCommand,\n AclSetCommand,\n ]\n\n\nclass AssetInfoCommand(object):\n \"\"\"Prints metadata and other information about an Earth Engine asset.\"\"\"\n\n name = 'info'\n\n def __init__(self, parser):\n parser.add_argument('asset_id', help='ID of the asset to print.')\n\n def run(self, args, config):\n config.ee_init()\n info = ee.data.getInfo(args.asset_id)\n if info:\n _pretty_print_json(info)\n else:\n raise ee.EEException(\n 'Asset does not exist or is not accessible: %s' % args.asset_id)\n\n\nclass AssetSetCommand(object):\n \"\"\"Sets metadata properties of an Earth Engine asset.\n\n Properties may be of type \"string\", \"number\", or \"date\". Dates must\n be specified in the form YYYY-MM-DD[Thh:mm:ss[.ff]] in UTC and are\n stored as numbers representing the number of milliseconds since the\n Unix epoch (00:00:00 UTC on 1 January 1970).\n\n To delete a property, set it to null without a type:\n prop=null.\n To set a property to the string value 'null', use the assignment\n (string)prop4=null.\n \"\"\"\n\n name = 'set'\n\n def __init__(self, parser):\n parser.add_argument('asset_id', help='ID of the asset to update.')\n _add_property_flags(parser)\n\n def run(self, args, config):\n \"\"\"Runs the asset update.\"\"\"\n config.ee_init()\n properties = _decode_property_flags(args)\n if not properties and args.time_start is None and args.time_end is None:\n raise ee.EEException('No properties specified.')\n if config.use_cloud_api:\n update_mask = [\n 'properties.' 
+ property_name for property_name in properties\n ]\n asset = {}\n if properties:\n asset['properties'] = {\n k: v for k, v in six.iteritems(properties) if v is not None\n }\n # args.time_start and .time_end could have any of three falsy values, with\n # different meanings:\n # None: the --time_start flag was not provided at all\n # '': the --time_start flag was explicitly set to the empty string\n # 0: the --time_start flag was explicitly set to midnight 1 Jan 1970.\n # pylint:disable=g-explicit-bool-comparison\n if args.time_start is not None:\n update_mask.append('start_time')\n if args.time_start != '':\n asset['start_time'] = _cloud_timestamp_for_timestamp_ms(\n args.time_start)\n if args.time_end is not None:\n update_mask.append('end_time')\n if args.time_end != '':\n asset['end_time'] = _cloud_timestamp_for_timestamp_ms(args.time_end)\n # pylint:enable=g-explicit-bool-comparison\n ee.data.updateAsset(args.asset_id, asset, update_mask)\n return\n properties.update(_decode_timestamp_flags(args))\n ee.data.setAssetProperties(args.asset_id, properties)\n\n\nclass AssetCommand(Dispatcher):\n \"\"\"Prints or updates metadata associated with an Earth Engine asset.\"\"\"\n\n name = 'asset'\n\n COMMANDS = [\n AssetInfoCommand,\n AssetSetCommand,\n ]\n\n\n\n\nclass CopyCommand(object):\n \"\"\"Creates a new Earth Engine asset as a copy of another asset.\"\"\"\n\n name = 'cp'\n\n def __init__(self, parser):\n parser.add_argument(\n 'source', help='Full path of the source asset.')\n parser.add_argument(\n 'destination', help='Full path of the destination asset.')\n _add_overwrite_arg(parser)\n\n def run(self, args, config):\n \"\"\"Runs the asset copy.\"\"\"\n config.ee_init()\n ee.data.copyAsset(\n args.source,\n args.destination,\n args.force\n )\n\n\nclass CreateCommandBase(object):\n \"\"\"Base class for implementing Create subcommands.\"\"\"\n\n def __init__(self, parser, fragment, asset_type):\n parser.add_argument(\n 'asset_id', nargs='+',\n help='Full path of %s to create.' 
% fragment)\n parser.add_argument(\n '--parents', '-p', action='store_true',\n help='Make parent folders as needed.')\n self.asset_type = asset_type\n\n def run(self, args, config):\n config.ee_init()\n ee.data.create_assets(args.asset_id, self.asset_type, args.parents)\n\n\nclass CreateCollectionCommand(CreateCommandBase):\n \"\"\"Creates one or more image collections.\"\"\"\n\n name = 'collection'\n\n def __init__(self, parser):\n super(CreateCollectionCommand, self).__init__(\n parser, 'an image collection', ee.data.ASSET_TYPE_IMAGE_COLL)\n\n\nclass CreateFolderCommand(CreateCommandBase):\n \"\"\"Creates one or more folders.\"\"\"\n\n name = 'folder'\n\n def __init__(self, parser):\n super(CreateFolderCommand, self).__init__(\n parser, 'a folder', ee.data.ASSET_TYPE_FOLDER)\n\n\nclass CreateCommand(Dispatcher):\n \"\"\"Creates assets and folders.\"\"\"\n\n name = 'create'\n\n COMMANDS = [\n CreateCollectionCommand,\n CreateFolderCommand,\n ]\n\n\n\n\nclass ListCommand(object):\n \"\"\"Prints the contents of a folder or collection.\"\"\"\n\n name = 'ls'\n\n def __init__(self, parser):\n parser.add_argument(\n 'asset_id', nargs='*',\n help='A folder or image collection to be inspected.')\n parser.add_argument(\n '--long_format',\n '-l',\n action='store_true',\n help='Print output in long format.')\n parser.add_argument(\n '--max_items', '-m', default=-1, type=int,\n help='Maximum number of items to list for each collection.')\n parser.add_argument(\n '--recursive',\n '-r',\n action='store_true',\n help='List folders recursively.')\n parser.add_argument(\n '--filter', '-f', default='', type=str,\n help='Filter string to pass to ee.ImageCollection.filter().')\n\n def run(self, args, config):\n config.ee_init()\n if not args.asset_id:\n roots = ee.data.getAssetRoots()\n self._print_assets(roots, args.max_items, '', args.long_format,\n args.recursive)\n return\n assets = args.asset_id\n count = 0\n for asset in assets:\n if count > 0:\n print()\n self._list_asset_content(\n asset, args.max_items, len(assets), args.long_format,\n args.recursive, args.filter)\n count += 1\n\n def _print_assets(self, assets, max_items, indent, long_format, recursive):\n \"\"\"Prints the listing of given assets.\"\"\"\n if not assets:\n return\n\n max_type_length = max([len(asset['type']) for asset in assets])\n\n if recursive:\n # fallback to max to include the string 'ImageCollection'\n max_type_length = ee.data.MAX_TYPE_LENGTH\n\n format_str = '%s{:%ds}{:s}' % (indent, max_type_length + 4)\n for asset in assets:\n if long_format:\n # Example output:\n # [Image] user/test/my_img\n # [ImageCollection] user/test/my_coll\n print(format_str.format('['+asset['type']+']', asset['id']))\n\n else:\n print(asset['id'])\n\n if recursive and asset['type'] in (ee.data.ASSET_TYPE_FOLDER,\n ee.data.ASSET_TYPE_FOLDER_CLOUD):\n list_req = {'id': asset['id']}\n children = ee.data.getList(list_req)\n self._print_assets(children, max_items, indent, long_format, recursive)\n\n def _list_asset_content(self, asset, max_items, total_assets, long_format,\n recursive, filter_string):\n try:\n list_req = {'id': asset}\n if max_items >= 0:\n list_req['num'] = max_items\n if filter_string:\n list_req['filter'] = filter_string\n children = ee.data.getList(list_req)\n indent = ''\n if total_assets > 1:\n print('%s:' % asset)\n indent = ' '\n self._print_assets(children, max_items, indent, long_format, recursive)\n except ee.EEException as e:\n print(e)\n\n\nclass SizeCommand(object):\n \"\"\"Prints the size and names of all items in a given 
folder or collection.\"\"\"\n\n name = 'du'\n\n def __init__(self, parser):\n parser.add_argument(\n 'asset_id',\n nargs='*',\n help='A folder or image collection to be inspected.')\n parser.add_argument(\n '--summarize', '-s', action='store_true',\n help='Display only a total.')\n\n def run(self, args, config):\n \"\"\"Runs the du command.\"\"\"\n config.ee_init()\n\n # Select all available asset roots if no asset ids are given.\n if not args.asset_id:\n assets = ee.data.getAssetRoots()\n else:\n assets = [ee.data.getInfo(asset) for asset in args.asset_id]\n\n # If args.summarize is True, list size+name for every leaf child asset,\n # and show totals for non-leaf children.\n # If args.summarize is False, print sizes of all children.\n for asset in assets:\n is_parent = asset['type'] in (\n ee.data.ASSET_TYPE_FOLDER,\n ee.data.ASSET_TYPE_IMAGE_COLL,\n ee.data.ASSET_TYPE_FOLDER_CLOUD,\n ee.data.ASSET_TYPE_IMAGE_COLL_CLOUD,\n )\n if not is_parent or args.summarize:\n self._print_size(asset)\n else:\n children = ee.data.getList({'id': asset['id']})\n if not children:\n # A leaf asset\n children = [asset]\n for child in children:\n self._print_size(child)\n\n def _print_size(self, asset):\n size = self._get_size(asset)\n print('{:>16d} {}'.format(size, asset['id']))\n\n def _get_size(self, asset):\n \"\"\"Returns the size of the given asset in bytes.\"\"\"\n size_parsers = {\n 'Image': self._get_size_asset,\n 'Folder': self._get_size_folder,\n 'ImageCollection': self._get_size_image_collection,\n 'Table': self._get_size_asset,\n 'IMAGE': self._get_size_asset,\n 'FOLDER': self._get_size_folder,\n 'IMAGE_COLLECTION': self._get_size_image_collection,\n 'TABLE': self._get_size_asset,\n }\n\n if asset['type'] not in size_parsers:\n raise ee.EEException(\n 'Cannot get size for asset type \"%s\"' % asset['type'])\n\n return size_parsers[asset['type']](asset)\n\n def _get_size_asset(self, asset):\n info = ee.data.getInfo(asset['id'])\n\n if 'sizeBytes' in info:\n return int(info['sizeBytes'])\n return info['properties']['system:asset_size']\n\n def _get_size_folder(self, asset):\n children = ee.data.getList({'id': asset['id']})\n sizes = [self._get_size(child) for child in children]\n\n return sum(sizes)\n\n def _get_size_image_collection(self, asset):\n images = ee.ImageCollection(asset['id'])\n sizes = images.aggregate_array('system:asset_size')\n\n return sum(sizes.getInfo())\n\n\nclass MoveCommand(object):\n \"\"\"Moves or renames an Earth Engine asset.\"\"\"\n\n name = 'mv'\n\n def __init__(self, parser):\n parser.add_argument(\n 'source', help='Full path of the source asset.')\n parser.add_argument(\n 'destination', help='Full path of the destination asset.')\n\n def run(self, args, config):\n config.ee_init()\n ee.data.renameAsset(args.source, args.destination)\n\n\nclass RmCommand(object):\n \"\"\"Deletes the specified assets.\"\"\"\n\n name = 'rm'\n\n def __init__(self, parser):\n parser.add_argument(\n 'asset_id', nargs='+', help='Full path of an asset to delete.')\n parser.add_argument(\n '--recursive', '-r', action='store_true',\n help='Recursively delete child assets.')\n parser.add_argument(\n '--dry_run', action='store_true',\n help=('Perform a dry run of the delete operation. 
Does not '\n 'delete any assets.'))\n parser.add_argument(\n '--verbose', '-v', action='store_true',\n help='Print the progress of the operation to the console.')\n\n def run(self, args, config):\n config.ee_init()\n for asset in args.asset_id:\n self._delete_asset(asset, args.recursive, args.verbose, args.dry_run)\n\n def _delete_asset(self, asset_id, recursive, verbose, dry_run):\n \"\"\"Attempts to delete the specified asset or asset collection.\"\"\"\n info = ee.data.getInfo(asset_id)\n if info is None:\n print('Asset does not exist or is not accessible: %s' % asset_id)\n return\n if recursive:\n if info['type'] in (ee.data.ASSET_TYPE_FOLDER,\n ee.data.ASSET_TYPE_IMAGE_COLL,\n ee.data.ASSET_TYPE_FOLDER_CLOUD,\n ee.data.ASSET_TYPE_IMAGE_COLL_CLOUD):\n children = ee.data.getList({'id': asset_id})\n for child in children:\n self._delete_asset(child['id'], True, verbose, dry_run)\n if dry_run:\n print('[dry-run] Deleting asset: %s' % asset_id)\n else:\n if verbose:\n print('Deleting asset: %s' % asset_id)\n try:\n ee.data.deleteAsset(asset_id)\n except ee.EEException as e:\n print('Failed to delete %s. %s' % (asset_id, e))\n\n\nclass TaskCancelCommand(object):\n \"\"\"Cancels a running task.\"\"\"\n\n name = 'cancel'\n\n def __init__(self, parser):\n parser.add_argument(\n 'task_ids', nargs='+',\n help='IDs of one or more tasks to cancel,'\n ' or `all` to cancel all tasks.')\n\n def run(self, args, config):\n config.ee_init()\n cancel_all = args.task_ids == ['all']\n if cancel_all:\n statuses = ee.data.getTaskList()\n else:\n statuses = ee.data.getTaskStatus(args.task_ids)\n for status in statuses:\n state = status['state']\n task_id = status['id']\n if state == 'UNKNOWN':\n raise ee.EEException('Unknown task id \"%s\"' % task_id)\n elif state == 'READY' or state == 'RUNNING':\n print('Canceling task \"%s\"' % task_id)\n ee.data.cancelTask(task_id)\n elif not cancel_all:\n print('Task \"%s\" already in state \"%s\".' 
% (status['id'], state))\n\n\nclass TaskInfoCommand(object):\n \"\"\"Prints information about a task.\"\"\"\n\n name = 'info'\n\n def __init__(self, parser):\n parser.add_argument('task_id', nargs='*', help='ID of a task to get.')\n\n def run(self, args, config):\n config.ee_init()\n for i, status in enumerate(ee.data.getTaskStatus(args.task_id)):\n if i:\n print()\n print('%s:' % status['id'])\n print(' State: %s' % status['state'])\n if status['state'] == 'UNKNOWN':\n continue\n print(' Type: %s' % TASK_TYPES.get(status.get('task_type'), 'Unknown'))\n print(' Description: %s' % status.get('description'))\n print(' Created: %s' % _parse_millis(status['creation_timestamp_ms']))\n if 'start_timestamp_ms' in status:\n print(' Started: %s' % _parse_millis(status['start_timestamp_ms']))\n if 'update_timestamp_ms' in status:\n print(' Updated: %s' % _parse_millis(status['update_timestamp_ms']))\n if 'error_message' in status:\n print(' Error: %s' % status['error_message'])\n if 'destination_uris' in status:\n print(' Destination URIs: %s' % ', '.join(status['destination_uris']))\n\n\nclass TaskListCommand(object):\n \"\"\"Lists the tasks submitted recently.\"\"\"\n\n name = 'list'\n\n def __init__(self, parser):\n parser.add_argument(\n '--status', '-s', required=False, nargs='*',\n choices=['READY', 'RUNNING', 'COMPLETED', 'FAILED',\n 'CANCELLED', 'UNKNOWN'],\n help=('List tasks only with a given status'))\n parser.add_argument(\n '--long_format',\n '-l',\n action='store_true',\n help='Print output in long format.')\n\n def run(self, args, config):\n \"\"\"Lists tasks present for a user, maybe filtering by state.\"\"\"\n config.ee_init()\n status = args.status\n tasks = ee.data.getTaskList()\n descs = [utils.truncate(task.get('description', ''), 40) for task in tasks]\n desc_length = max(len(word) for word in descs)\n format_str = '{:25s} {:13s} {:%ds} {:10s} {:s}' % (desc_length + 1)\n for task in tasks:\n if status and task['state'] not in status:\n continue\n truncated_desc = utils.truncate(task.get('description', ''), 40)\n task_type = TASK_TYPES.get(task['task_type'], 'Unknown')\n extra = ''\n if args.long_format:\n show_date = lambda ms: _parse_millis(ms).strftime('%Y-%m-%d %H:%M:%S')\n extra = ' {:20s} {:20s} {:20s} {}'.format(\n show_date(task['creation_timestamp_ms']),\n show_date(task['start_timestamp_ms']),\n show_date(task['update_timestamp_ms']),\n ' '.join(task.get('destination_uris', [])))\n print(format_str.format(\n task['id'], task_type, truncated_desc,\n task['state'], task.get('error_message', '---')) + extra)\n\n\nclass TaskWaitCommand(object):\n \"\"\"Waits for the specified task or tasks to complete.\"\"\"\n\n name = 'wait'\n\n def __init__(self, parser):\n parser.add_argument(\n '--timeout', '-t', default=sys.maxsize, type=int,\n help=('Stop waiting for the task(s) to finish after the specified,'\n ' number of seconds. 
Without this flag, the command will wait'\n ' indefinitely.'))\n parser.add_argument('--verbose', '-v', action='store_true',\n help=('Print periodic status messages for each'\n ' incomplete task.'))\n parser.add_argument('task_ids', nargs='+',\n help=('Either a list of one or more currently-running'\n ' task ids to wait on; or \\'all\\' to wait on all'\n ' running tasks.'))\n\n def run(self, args, config):\n \"\"\"Waits on the given tasks to complete or for a timeout to pass.\"\"\"\n config.ee_init()\n task_ids = []\n if args.task_ids == ['all']:\n tasks = ee.data.getTaskList()\n for task in tasks:\n if task['state'] not in utils.TASK_FINISHED_STATES:\n task_ids.append(task['id'])\n else:\n statuses = ee.data.getTaskStatus(args.task_ids)\n for status in statuses:\n state = status['state']\n task_id = status['id']\n if state == 'UNKNOWN':\n raise ee.EEException('Unknown task id \"%s\"' % task_id)\n else:\n task_ids.append(task_id)\n\n utils.wait_for_tasks(task_ids, args.timeout, log_progress=args.verbose)\n\n\nclass TaskCommand(Dispatcher):\n \"\"\"Prints information about or manages long-running tasks.\"\"\"\n\n name = 'task'\n\n COMMANDS = [\n TaskCancelCommand,\n TaskInfoCommand,\n TaskListCommand,\n TaskWaitCommand,\n ]\n\n\n# TODO(user): in both upload tasks, check if the parent namespace\n# exists and is writeable first.\nclass UploadImageCommand(object):\n \"\"\"Uploads an image from Cloud Storage to Earth Engine.\n\n See docs for \"asset set\" for additional details on how to specify asset\n metadata properties.\n \"\"\"\n\n name = 'image'\n\n def __init__(self, parser):\n _add_wait_arg(parser)\n _add_overwrite_arg(parser)\n parser.add_argument(\n 'src_files',\n help=('Cloud Storage URL(s) of the file(s) to upload. '\n 'Must have the prefix \\'gs://\\'.'),\n nargs='*')\n parser.add_argument(\n '--asset_id',\n help='Destination asset ID for the uploaded file.')\n parser.add_argument(\n '--last_band_alpha',\n help='Use the last band as a masking channel for all bands. '\n 'Mutually exclusive with nodata_value.',\n action='store_true')\n parser.add_argument(\n '--nodata_value',\n help='Value for missing data. '\n 'Mutually exclusive with last_band_alpha.',\n type=_comma_separated_numbers)\n parser.add_argument(\n '--pyramiding_policy',\n help='The pyramid reduction policy to use',\n type=_comma_separated_pyramiding_policies)\n parser.add_argument(\n '--bands',\n help='Comma-separated list of names to use for the image bands.',\n type=_comma_separated_strings)\n parser.add_argument(\n '--crs',\n help='The coordinate reference system, to override the map projection '\n 'of the image. May be either a well-known authority code (e.g. '\n 'EPSG:4326) or a WKT string.')\n parser.add_argument(\n '--manifest',\n help='Local path to a JSON asset manifest file. 
No other flags are '\n 'used if this flag is set.')\n _add_property_flags(parser)\n\n def _check_num_bands(self, bands, num_bands, flag_name):\n \"\"\"Checks the number of bands, creating them if there are none yet.\"\"\"\n if bands:\n if len(bands) != num_bands:\n raise ValueError(\n 'Inconsistent number of bands in --{}: expected {} but found {}.'\n .format(flag_name, len(bands), num_bands))\n else:\n bands = ['b%d' % (i + 1) for i in range(num_bands)]\n return bands\n\n def run(self, args, config):\n \"\"\"Starts the upload task, and waits for completion if requested.\"\"\"\n config.ee_init()\n manifest = self.manifest_from_args(args, config)\n _upload(args, manifest, ee.data.startIngestion)\n\n def manifest_from_args(self, args, config):\n \"\"\"Constructs an upload manifest from the command-line flags.\"\"\"\n\n def is_tf_record(path):\n if any(path.lower().endswith(extension)\n for extension in TF_RECORD_EXTENSIONS):\n return True\n return False\n\n if args.manifest:\n with open(args.manifest) as fh:\n return json.loads(fh.read())\n\n if not args.asset_id:\n raise ValueError('Flag --asset_id must be set.')\n\n _check_valid_files(args.src_files)\n if args.last_band_alpha and args.nodata_value:\n raise ValueError(\n 'last_band_alpha and nodata_value are mutually exclusive.')\n\n properties = _decode_property_flags(args)\n source_files = list(utils.expand_gcs_wildcards(args.src_files))\n if not source_files:\n raise ValueError('At least one file must be specified.')\n\n bands = args.bands\n if args.pyramiding_policy and len(args.pyramiding_policy) != 1:\n bands = self._check_num_bands(bands, len(args.pyramiding_policy),\n 'pyramiding_policy')\n if args.nodata_value and len(args.nodata_value) != 1:\n bands = self._check_num_bands(bands, len(args.nodata_value),\n 'nodata_value')\n\n if config.use_cloud_api:\n args.asset_id = ee.data.convert_asset_id_to_asset_name(args.asset_id)\n # If we are ingesting a tfrecord, we actually treat the inputs as one\n # source and many uris.\n if any(is_tf_record(source) for source in source_files):\n tileset = {\n 'id': 'ts',\n 'sources': [{'uris': [source for source in source_files]}]\n }\n else:\n tileset = {\n 'id': 'ts',\n 'sources': [{'uris': [source]} for source in source_files]\n }\n manifest = {\n 'name': args.asset_id,\n 'properties': properties,\n 'tilesets': [tileset]\n }\n # pylint:disable=g-explicit-bool-comparison\n if args.time_start is not None and args.time_start != '':\n manifest['start_time'] = _cloud_timestamp_for_timestamp_ms(\n args.time_start)\n if args.time_end is not None and args.time_end != '':\n manifest['end_time'] = _cloud_timestamp_for_timestamp_ms(args.time_end)\n # pylint:enable=g-explicit-bool-comparison\n\n if bands:\n file_bands = []\n for i, band in enumerate(bands):\n file_bands.append({\n 'id': band,\n 'tilesetId': tileset['id'],\n 'tilesetBandIndex': i\n })\n manifest['bands'] = file_bands\n\n if args.pyramiding_policy:\n if len(args.pyramiding_policy) == 1:\n manifest['pyramidingPolicy'] = args.pyramiding_policy[0]\n else:\n for index, policy in enumerate(args.pyramiding_policy):\n file_bands[index]['pyramidingPolicy'] = policy\n\n if args.nodata_value:\n if len(args.nodata_value) == 1:\n manifest['missingData'] = {'values': [args.nodata_value[0]]}\n else:\n for index, value in enumerate(args.nodata_value):\n file_bands[index]['missingData'] = {'values': [value]}\n\n if args.last_band_alpha:\n manifest['maskBands'] = {'tilesetId': tileset['id']}\n\n return manifest\n\n # non-cloud API section\n 
properties.update(_decode_timestamp_flags(args))\n manifest = {\n 'id': args.asset_id,\n 'properties': properties\n }\n\n sources = [{'primaryPath': source} for source in source_files]\n tileset = {'sources': sources}\n if args.last_band_alpha:\n tileset['fileBands'] = [{'fileBandIndex': -1, 'maskForAllBands': True}]\n manifest['tilesets'] = [tileset]\n\n if bands:\n manifest['bands'] = [{'id': name} for name in bands]\n\n if args.pyramiding_policy:\n if len(args.pyramiding_policy) == 1:\n manifest['pyramidingPolicy'] = args.pyramiding_policy[0]\n else:\n for index, policy in enumerate(args.pyramiding_policy):\n manifest['bands'][index]['pyramidingPolicy'] = policy\n\n if args.nodata_value:\n if len(args.nodata_value) == 1:\n manifest['missingData'] = {'value': args.nodata_value[0]}\n else:\n for index, nodata in enumerate(args.nodata_value):\n manifest['bands'][index]['missingData'] = {'value': nodata}\n\n if args.crs:\n manifest['crs'] = args.crs\n\n return manifest\n\n\n# TODO(user): update src_files help string when secondary files\n# can be uploaded.\nclass UploadTableCommand(object):\n \"\"\"Uploads a table from Cloud Storage to Earth Engine.\"\"\"\n\n name = 'table'\n\n def __init__(self, parser):\n _add_wait_arg(parser)\n _add_overwrite_arg(parser)\n parser.add_argument(\n 'src_file',\n help=('Cloud Storage URL of the .csv, .tfrecord, .shp, or '\n '.zip file to upload. Must have the prefix \\'gs://\\'. For '\n '.shp files, related .dbf, .shx, and .prj files must be '\n 'present in the same location.'),\n nargs='*')\n parser.add_argument(\n '--asset_id',\n help='Destination asset ID for the uploaded file.')\n _add_property_flags(parser)\n parser.add_argument(\n '--max_error',\n help='Max allowed error in meters when transforming geometry '\n 'between coordinate systems.',\n type=float, nargs='?')\n parser.add_argument(\n '--max_vertices',\n help='Max number of vertices per geometry. If set, geometry will be '\n 'subdivided into spatially disjoint pieces each under this limit.',\n type=int, nargs='?')\n parser.add_argument(\n '--max_failed_features',\n help='The maximum number of failed features to allow during ingestion.',\n type=int, nargs='?')\n parser.add_argument(\n '--crs',\n help='The default CRS code or WKT string specifying the coordinate '\n 'reference system of any geometry without one. If unspecified, '\n 'the default will be EPSG:4326 (https://epsg.io/4326). For '\n 'CSV/TFRecord only.')\n parser.add_argument(\n '--geodesic',\n help='The default strategy for interpreting edges in geometries that '\n 'do not have one specified. If false, edges are '\n 'straight in the projection. If true, edges are curved to follow '\n 'the shortest path on the surface of the Earth. When '\n 'unspecified, defaults to false if \\'crs\\' is a projected '\n 'coordinate system. For CSV/TFRecord only.',\n action='store_true')\n parser.add_argument(\n '--primary_geometry_column',\n help='The geometry column to use as a row\\'s primary geometry when '\n 'there is more than one geometry column. If unspecified and more '\n 'than one geometry column exists, the first geometry column '\n 'is used. For CSV/TFRecord only.')\n parser.add_argument(\n '--x_column',\n help='The name of the numeric x coordinate column for constructing '\n 'point geometries. If the y_column is also specified, and both '\n 'columns contain numerical values, then a point geometry column '\n 'will be constructed with x,y values in the coordinate system '\n 'given in \\'--crs\\'. 
If unspecified and \\'--crs\\' does _not_ '\n 'specify a projected coordinate system, defaults to \"longitude\". '\n 'If unspecified and \\'--crs\\' _does_ specify a projected '\n 'coordinate system, defaults to \"\" and no point geometry is '\n 'generated. A generated point geometry column will be named '\n '{x_column}_{y_column}_N where N might be appended to '\n 'disambiguate the column name. For CSV/TFRecord only.')\n parser.add_argument(\n '--y_column',\n help='The name of the numeric y coordinate column for constructing '\n 'point geometries. If the x_column is also specified, and both '\n 'columns contain numerical values, then a point geometry column '\n 'will be constructed with x,y values in the coordinate system '\n 'given in \\'--crs\\'. If unspecified and \\'--crs\\' does _not_ '\n 'specify a projected coordinate system, defaults to \"latitude\". '\n 'If unspecified and \\'--crs\\' _does_ specify a projected '\n 'coordinate system, defaults to \"\" and no point geometry is '\n 'generated. A generated point geometry column will be named '\n '{x_column}_{y_column}_N where N might be appended to '\n 'disambiguate the column name. For CSV/TFRecord only.')\n parser.add_argument(\n '--date_format',\n help='A format used to parse dates. The format pattern must follow '\n 'http://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html. '\n 'If unspecified, dates will be imported as strings. For '\n 'CSV/TFRecord only.')\n parser.add_argument(\n '--csv_delimiter',\n help='A single character used as a delimiter between column values '\n 'in a row. If unspecified, defaults to \\',\\'. For CSV only.')\n parser.add_argument(\n '--csv_qualifier',\n help='A character that surrounds column values (a.k.a. '\n '\\'quote character\\'). If unspecified, defaults to \\'\"\\'. A '\n 'column value may include the qualifier as a literal character by '\n 'having 2 consecutive qualifier characters. For CSV only.')\n parser.add_argument(\n '--manifest',\n help='Local path to a JSON asset manifest file. 
No other flags are '\n 'used if this flag is set.')\n\n def run(self, args, config):\n \"\"\"Starts the upload task, and waits for completion if requested.\"\"\"\n config.ee_init()\n manifest = self.manifest_from_args(args, config)\n _upload(args, manifest, ee.data.startTableIngestion)\n\n def manifest_from_args(self, args, config):\n \"\"\"Constructs an upload manifest from the command-line flags.\"\"\"\n\n if args.manifest:\n with open(args.manifest) as fh:\n return json.loads(fh.read())\n\n if not args.asset_id:\n raise ValueError('Flag --asset_id must be set.')\n\n _check_valid_files(args.src_file)\n source_files = list(utils.expand_gcs_wildcards(args.src_file))\n if len(source_files) != 1:\n raise ValueError('Exactly one file must be specified.')\n\n if config.use_cloud_api:\n properties = _decode_property_flags(args)\n args.asset_id = ee.data.convert_asset_id_to_asset_name(args.asset_id)\n source = {'uris': source_files}\n if args.max_error:\n source['maxErrorMeters'] = args.max_error\n if args.max_vertices:\n source['maxVertices'] = args.max_vertices\n if args.max_failed_features:\n raise ee.EEException(\n '--max_failed_features is not supported with the Cloud API')\n if args.crs:\n source['crs'] = args.crs\n if args.geodesic:\n source['geodesic'] = args.geodesic\n if args.primary_geometry_column:\n source['primary_geometry_column'] = args.primary_geometry_column\n if args.x_column:\n source['x_column'] = args.x_column\n if args.y_column:\n source['y_column'] = args.y_column\n if args.date_format:\n source['date_format'] = args.date_format\n if args.csv_delimiter:\n source['csv_delimiter'] = args.csv_delimiter\n if args.csv_qualifier:\n source['csv_qualifier'] = args.csv_qualifier\n\n manifest = {\n 'name': args.asset_id,\n 'sources': [source],\n 'properties': properties\n }\n\n # pylint:disable=g-explicit-bool-comparison\n if args.time_start is not None and args.time_start != '':\n manifest['start_time'] = _cloud_timestamp_for_timestamp_ms(\n args.time_start)\n if args.time_end is not None and args.time_end != '':\n manifest['end_time'] = _cloud_timestamp_for_timestamp_ms(args.time_end)\n # pylint:enable=g-explicit-bool-comparison\n return manifest\n\n # non-cloud API section\n source = {'primaryPath': source_files[0]}\n if args.max_error:\n source['max_error'] = args.max_error\n if args.max_vertices:\n source['max_vertices'] = args.max_vertices\n if args.max_failed_features:\n source['max_failed_features'] = args.max_failed_features\n if args.crs:\n source['crs'] = args.crs\n if args.geodesic:\n source['geodesic'] = args.geodesic\n if args.primary_geometry_column:\n source['primary_geometry_column'] = args.primary_geometry_column\n if args.x_column:\n source['x_column'] = args.x_column\n if args.y_column:\n source['y_column'] = args.y_column\n if args.date_format:\n source['date_format'] = args.date_format\n if args.csv_delimiter:\n source['csv_delimiter'] = args.csv_delimiter\n if args.csv_qualifier:\n source['csv_qualifier'] = args.csv_qualifier\n\n return {\n 'id': args.asset_id,\n 'sources': [source]\n }\n\n\nclass UploadCommand(Dispatcher):\n \"\"\"Uploads assets to Earth Engine.\"\"\"\n\n name = 'upload'\n\n COMMANDS = [\n UploadImageCommand,\n UploadTableCommand,\n ]\n\n\nclass _UploadManifestBase(object):\n \"\"\"Uploads an asset to Earth Engine using the given manifest file.\"\"\"\n\n def __init__(self, parser):\n _add_wait_arg(parser)\n _add_overwrite_arg(parser)\n parser.add_argument(\n 'manifest',\n help=('Local path to a JSON asset manifest file.'))\n\n def run(self, 
args, config, ingestion_function):\n \"\"\"Starts the upload task, and waits for completion if requested.\"\"\"\n config.ee_init()\n with open(args.manifest) as fh:\n manifest = json.loads(fh.read())\n\n _upload(args, manifest, ingestion_function)\n\n\nclass UploadImageManifestCommand(_UploadManifestBase):\n \"\"\"Uploads an image to Earth Engine using the given manifest file.\"\"\"\n\n name = 'upload_manifest'\n\n def run(self, args, config):\n \"\"\"Starts the upload task, and waits for completion if requested.\"\"\"\n print(\n 'This command is deprecated. '\n 'Use \"earthengine upload image --manifest\".'\n )\n super(UploadImageManifestCommand, self).run(\n args, config, ee.data.startIngestion)\n\n\nclass UploadTableManifestCommand(_UploadManifestBase):\n \"\"\"Uploads a table to Earth Engine using the given manifest file.\"\"\"\n\n name = 'upload_table_manifest'\n\n def run(self, args, config):\n print(\n 'This command is deprecated. '\n 'Use \"earthengine upload table --manifest\".'\n )\n super(UploadTableManifestCommand, self).run(\n args, config, ee.data.startTableIngestion)\n\n\nclass LicensesCommand(object):\n \"\"\"Prints the name and license of all third party dependencies.\"\"\"\n\n name = 'licenses'\n\n def __init__(self, unused_parser):\n pass\n\n def run(self, unused_args, unused_config):\n print('The Earth Engine python client library uess the following opensource'\n ' libraries.\\n')\n license_path = os.path.join(os.path.dirname(__file__), 'licenses.txt')\n print(open(license_path).read())\n\n\nclass PrepareModelCommand(object):\n \"\"\"Prepares a TensorFlow/Keras SavedModel for inference with Earth Engine.\n\n This is required only if a model is manually uploaded to Cloud AI Platform\n (https://cloud.google.com/ai-platform/) for predictions.\n \"\"\"\n\n name = 'prepare'\n\n def __init__(self, parser):\n parser.add_argument(\n '--source_dir',\n help='The local or Cloud Storage path to directory containing the '\n 'SavedModel.')\n parser.add_argument(\n '--dest_dir',\n help='The name of the directory to be created locally or in Cloud '\n 'Storage that will contain the Earth Engine ready SavedModel.')\n parser.add_argument(\n '--input',\n help='A comma-delimited list of input node names that will map to '\n 'Earth Engine Feature columns or Image bands for prediction, or a JSON '\n 'dictionary specifying a remapping of input node names to names '\n 'mapping to Feature columns or Image bands etc... (e.x: '\n '\\'{\"Conv2D:0\":\"my_landsat_band\"}\\'). The names of model inputs will '\n 'be stripped of any trailing \\'<:prefix>\\'.')\n parser.add_argument(\n '--output',\n help='A comma-delimited list of output tensor names that will map to '\n 'Earth Engine Feature columns or Image bands for prediction, or a JSON '\n 'dictionary specifying a remapping of output node names to names '\n 'mapping to Feature columns or Image bands etc... (e.x: '\n '\\'{\"Sigmoid:0\":\"my_predicted_class\"}\\'). The names of model outputs '\n 'will be stripped of any trailing \\'<:prefix>\\'.')\n parser.add_argument(\n '--tag',\n help='An optional tag used to load a specific graph from the '\n 'SavedModel. Defaults to \\'serve\\'.')\n parser.add_argument(\n '--variables',\n help='An optional relative path from within the source directory to '\n 'the prefix of the model variables. (e.x: if the model variables are '\n 'stored under \\'model_dir/variables/x.*\\', set '\n '--variables=/variables/x). 
Defaults to \\'/variables/variables\\'.')\n\n @staticmethod\n def _validate_and_extract_nodes(args):\n \"\"\"Validate command line args and extract in/out node mappings.\"\"\"\n if not args.source_dir:\n raise ValueError('Flag --source_dir must be set.')\n if not args.dest_dir:\n raise ValueError('Flag --dest_dir must be set.')\n if not args.input:\n raise ValueError('Flag --input must be set.')\n if not args.output:\n raise ValueError('Flag --output must be set.')\n\n return (PrepareModelCommand._get_nodes(args.input, '--input'),\n PrepareModelCommand._get_nodes(args.output, '--output'))\n\n @staticmethod\n def _get_nodes(node_spec, source_flag_name):\n \"\"\"Extract a node mapping from a list or flag-specified JSON.\"\"\"\n try:\n spec = json.loads(node_spec)\n except ValueError:\n spec = [n.strip() for n in node_spec.split(',')]\n return {item: item for item in spec}\n\n if not isinstance(spec, dict):\n raise ValueError(\n 'If flag {} is JSON it must specify a dictionary.'.format(\n source_flag_name))\n\n for k, v in spec.items():\n if ((not isinstance(k, six.string_types)) or\n (not isinstance(v, six.string_types))):\n raise ValueError('All key/value pairs of the dictionary specified in '\n '{} must be strings.'.format(source_flag_name))\n\n return spec\n\n @staticmethod\n def _encode_op(output_tensor, name):\n return tf.identity(\n tf.map_fn(lambda x: tf.io.encode_base64(tf.serialize_tensor(x)),\n output_tensor, tf.string),\n name=name)\n\n @staticmethod\n def _decode_op(input_tensor, dtype):\n mapped = tf.map_fn(lambda x: tf.parse_tensor(tf.io.decode_base64(x), dtype),\n input_tensor, dtype)\n return mapped\n\n @staticmethod\n def _shape_from_proto(shape_proto):\n return [d.size for d in shape_proto.dim]\n\n @staticmethod\n def _strip_index(edge_name):\n colon_pos = edge_name.rfind(':')\n if colon_pos == -1:\n return edge_name\n else:\n return edge_name[:colon_pos]\n\n @staticmethod\n def _get_input_tensor_spec(graph_def, input_names_set):\n \"\"\"Extracts the types of the given node names from the GraphDef.\"\"\"\n\n # Get the op names stripped of the input index e.g: \"op:0\" becomes \"op\"\n input_names_missing_index = {\n PrepareModelCommand._strip_index(i): i for i in input_names_set\n }\n\n spec = {}\n for cur_node in graph_def.node:\n if cur_node.name in input_names_missing_index:\n if 'shape' not in cur_node.attr or 'dtype' not in cur_node.attr:\n raise ValueError(\n 'Specified input op is not a valid graph input: \\'{}\\'.'.format(\n cur_node.name))\n\n spec[input_names_missing_index[cur_node.name]] = tf.dtypes.DType(\n cur_node.attr['dtype'].type)\n\n if len(spec) != len(input_names_set):\n raise ValueError(\n 'Specified input ops were missing from graph: {}.'.format(\n list(set(input_names_set).difference(list(spec.keys())))))\n return spec\n\n @staticmethod\n def _make_rpc_friendly(model_dir, tag, in_map, out_map, vars_path):\n \"\"\"Wraps a SavedModel in EE RPC-friendly ops and saves a temporary copy.\"\"\"\n out_dir = tempfile.mkdtemp()\n builder = tf.saved_model.Builder(out_dir)\n\n # Get a GraphDef from the saved model\n with tf.Session() as sesh:\n meta_graph = tf.saved_model.load(sesh, [tag], model_dir)\n\n graph_def = meta_graph.graph_def\n\n # Purge the default graph immediately after: we want to remap parts of the\n # graph when we load it and we don't know what those parts are yet.\n tf.reset_default_graph()\n\n input_op_keys = list(in_map.keys())\n input_new_keys = list(in_map.values())\n\n # Get the shape and type of the input tensors\n in_op_types = 
PrepareModelCommand._get_input_tensor_spec(\n graph_def, input_op_keys)\n\n # Create new input placeholders to receive RPC TensorProto payloads\n in_op_map = {\n k: tf.placeholder(\n tf.string, shape=[None], name='earthengine_in_{}'.format(i))\n for (i, k) in enumerate(input_new_keys)\n }\n\n # Glue on decoding ops to remap to the imported graph.\n decoded_op_map = {\n k: PrepareModelCommand._decode_op(in_op_map[in_map[k]], in_op_types[k])\n for k in input_op_keys\n }\n\n # Okay now we're ready to import the graph again but remapped.\n saver = tf.train.import_meta_graph(\n meta_graph_or_file=meta_graph, input_map=decoded_op_map)\n\n # Boilerplate to build a signature def for our new graph\n sig_in = {\n PrepareModelCommand._strip_index(k):\n saved_model_utils.build_tensor_info(v) for (k, v) in in_op_map.items()\n }\n\n sig_out = {}\n for index, (k, v) in enumerate(out_map.items()):\n out_tensor = saved_model_utils.build_tensor_info(\n PrepareModelCommand._encode_op(\n tf.get_default_graph().get_tensor_by_name(k),\n name='earthengine_out_{}'.format(index)))\n\n sig_out[PrepareModelCommand._strip_index(v)] = out_tensor\n\n sig_def = signature_def_utils.build_signature_def(\n sig_in, sig_out, signature_constants.PREDICT_METHOD_NAME)\n\n # Open a new session to load the variables and add them to the builder.\n with tf.Session() as sesh:\n if saver:\n saver.restore(sesh, model_dir + vars_path)\n builder.add_meta_graph_and_variables(\n sesh,\n tags=[tf.saved_model.tag_constants.SERVING],\n signature_def_map={\n signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: sig_def\n },\n saver=saver)\n\n builder.save()\n return out_dir\n\n def run(self, args, config):\n \"\"\"Wraps a SavedModel in EE RPC-friendly ops and saves a copy of it.\"\"\"\n ModelCommand.check_tensorflow_installed()\n\n in_spec, out_spec = PrepareModelCommand._validate_and_extract_nodes(args)\n gcs_client = None\n\n if utils.is_gcs_path(args.source_dir):\n # If the model isn't locally available, we have to make it available...\n gcs_client = config.create_gcs_helper()\n gcs_client.check_gcs_dir_within_size(args.source_dir,\n SAVED_MODEL_MAX_SIZE)\n local_model_dir = gcs_client.download_dir_to_temp(args.source_dir)\n else:\n local_model_dir = args.source_dir\n\n tag = args.tag if args.tag else tf.saved_model.tag_constants.SERVING\n vars_path = args.variables if args.variables else DEFAULT_VARIABLES_PREFIX\n new_model_dir = PrepareModelCommand._make_rpc_friendly(\n local_model_dir, tag, in_spec, out_spec, vars_path)\n\n if utils.is_gcs_path(args.dest_dir):\n if not gcs_client:\n gcs_client = config.create_gcs_helper()\n gcs_client.upload_dir_to_bucket(new_model_dir, args.dest_dir)\n else:\n shutil.move(new_model_dir, args.dest_dir)\n\n print(\n 'Success: model at \\'{}\\' is ready to be hosted in AI Platform.'.format(\n args.dest_dir))\n\n\nclass ModelCommand(Dispatcher):\n \"\"\"TensorFlow model related commands.\"\"\"\n\n name = 'model'\n\n COMMANDS = [PrepareModelCommand]\n\n @staticmethod\n def check_tensorflow_installed():\n \"\"\"Checks the status of TensorFlow installations.\"\"\"\n if not TENSORFLOW_INSTALLED:\n raise ImportError(\n 'By default, TensorFlow is not installed with Earth Engine client '\n 'libraries. To use \\'model\\' commands, make sure at least TensorFlow '\n '1.14 is installed; you can do this by executing \\'pip install '\n 'tensorflow\\' in your shell.'\n )\n else:\n if not TENSORFLOW_ADDONS_INSTALLED:\n if sys.version_info[0] < 3:\n print(\n 'Warning: Python 3 required for TensorFlow Addons. 
Models that '\n 'use non-standard ops may not work.')\n else:\n print(\n 'Warning: TensorFlow Addons not found. Models that use '\n 'non-standard ops may not work.')\n\n\n\nEXTERNAL_COMMANDS = [\n AuthenticateCommand,\n AclCommand,\n AssetCommand,\n CopyCommand,\n CreateCommand,\n ListCommand,\n LicensesCommand,\n SizeCommand,\n MoveCommand,\n ModelCommand,\n RmCommand,\n SetProjectCommand,\n TaskCommand,\n UnSetProjectCommand,\n UploadCommand,\n UploadImageManifestCommand,\n UploadTableManifestCommand,\n]\n" ]
[ [ "tensorflow.compat.v1.train.import_meta_graph", "tensorflow.compat.v1.io.decode_base64", "tensorflow.compat.v1.saved_model.utils.build_tensor_info", "tensorflow.compat.v1.dtypes.DType", "tensorflow.compat.v1.disable_v2_behavior", "tensorflow.compat.v1.serialize_tensor", "tensorflow.compat.v1.get_default_graph", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.logging.set_verbosity", "tensorflow.compat.v1.saved_model.Builder", "tensorflow.compat.v1.reset_default_graph", "tensorflow.compat.v1.saved_model.load", "tensorflow.compat.v1.saved_model.signature_def_utils.build_signature_def" ] ]
colour-science/trimesh
[ "ee5db2ac81b2357886d854dfa1436b5e4ec5e8d8" ]
[ "trimesh/exchange/dae.py" ]
[ "import io\nimport copy\nimport uuid\n\nimport numpy as np\n\ntry:\n # pip install pycollada\n import collada\nexcept BaseException:\n collada = None\n\ntry:\n import PIL.Image\nexcept ImportError:\n pass\n\nfrom .. import util\nfrom .. import visual\n\nfrom ..constants import log\n\n\ndef load_collada(file_obj, resolver=None, **kwargs):\n \"\"\"\n Load a COLLADA (.dae) file into a list of trimesh kwargs.\n\n Parameters\n ----------\n file_obj : file object\n Containing a COLLADA file\n resolver : trimesh.visual.Resolver or None\n For loading referenced files, like texture images\n kwargs : **\n Passed to trimesh.Trimesh.__init__\n\n Returns\n -------\n loaded : list of dict\n kwargs for Trimesh constructor\n \"\"\"\n # load scene using pycollada\n c = collada.Collada(file_obj)\n\n # Create material map from Material ID to trimesh material\n material_map = {}\n for m in c.materials:\n effect = m.effect\n material_map[m.id] = _parse_material(effect, resolver)\n\n # name : kwargs\n meshes = {}\n # list of dict\n graph = []\n\n for node in c.scene.nodes:\n _parse_node(node=node,\n parent_matrix=np.eye(4),\n material_map=material_map,\n meshes=meshes,\n graph=graph,\n resolver=resolver)\n\n # create kwargs for load_kwargs\n result = {'class': 'Scene',\n 'graph': graph,\n 'geometry': meshes}\n\n return result\n\n\ndef export_collada(mesh, **kwargs):\n \"\"\"\n Export a mesh or a list of meshes as a COLLADA .dae file.\n\n Parameters\n -----------\n mesh: Trimesh object or list of Trimesh objects\n The mesh(es) to export.\n\n Returns\n -----------\n export: str, string of COLLADA format output\n \"\"\"\n meshes = mesh\n if not isinstance(mesh, (list, tuple, set, np.ndarray)):\n meshes = [mesh]\n\n c = collada.Collada()\n nodes = []\n for i, m in enumerate(meshes):\n\n # Load uv, colors, materials\n uv = None\n colors = None\n mat = _unparse_material(None)\n if m.visual.defined:\n if m.visual.kind == 'texture':\n mat = _unparse_material(m.visual.material)\n uv = m.visual.uv\n elif m.visual.kind == 'vertex':\n colors = (m.visual.vertex_colors / 255.0)[:, :3]\n c.effects.append(mat.effect)\n c.materials.append(mat)\n\n # Create geometry object\n vertices = collada.source.FloatSource(\n 'verts-array', m.vertices.flatten(), ('X', 'Y', 'Z'))\n normals = collada.source.FloatSource(\n 'normals-array', m.vertex_normals.flatten(), ('X', 'Y', 'Z'))\n input_list = collada.source.InputList()\n input_list.addInput(0, 'VERTEX', '#verts-array')\n input_list.addInput(1, 'NORMAL', '#normals-array')\n arrays = [vertices, normals]\n if uv is not None:\n texcoords = collada.source.FloatSource(\n 'texcoords-array', uv.flatten(), ('U', 'V'))\n input_list.addInput(2, 'TEXCOORD', '#texcoords-array')\n arrays.append(texcoords)\n if colors is not None:\n idx = 2\n if uv:\n idx = 3\n colors = collada.source.FloatSource('colors-array',\n colors.flatten(), ('R', 'G', 'B'))\n input_list.addInput(idx, 'COLOR', '#colors-array')\n arrays.append(colors)\n geom = collada.geometry.Geometry(\n c, uuid.uuid4().hex, uuid.uuid4().hex, arrays\n )\n indices = np.repeat(m.faces.flatten(), len(arrays))\n\n matref = 'material{}'.format(i)\n triset = geom.createTriangleSet(indices, input_list, matref)\n geom.primitives.append(triset)\n c.geometries.append(geom)\n\n matnode = collada.scene.MaterialNode(matref, mat, inputs=[])\n geomnode = collada.scene.GeometryNode(geom, [matnode])\n node = collada.scene.Node('node{}'.format(i), children=[geomnode])\n nodes.append(node)\n scene = collada.scene.Scene('scene', nodes)\n c.scenes.append(scene)\n 
c.scene = scene\n\n b = io.BytesIO()\n c.write(b)\n b.seek(0)\n return b.read()\n\n\ndef _parse_node(node,\n parent_matrix,\n material_map,\n meshes,\n graph,\n resolver=None):\n \"\"\"\n Recursively parse COLLADA scene nodes.\n \"\"\"\n\n # Parse mesh node\n if isinstance(node, collada.scene.GeometryNode):\n geometry = node.geometry\n\n # Create local material map from material symbol to actual material\n local_material_map = {}\n for mn in node.materials:\n symbol = mn.symbol\n m = mn.target\n if m.id in material_map:\n local_material_map[symbol] = material_map[m.id]\n else:\n local_material_map[symbol] = _parse_material(m, resolver)\n\n # Iterate over primitives of geometry\n for i, primitive in enumerate(geometry.primitives):\n if isinstance(primitive, collada.polylist.Polylist):\n primitive = primitive.triangleset()\n if isinstance(primitive, collada.triangleset.TriangleSet):\n vertex = primitive.vertex\n vertex_index = primitive.vertex_index\n vertices = vertex[vertex_index].reshape(\n len(vertex_index) * 3, 3)\n\n # Get normals if present\n normals = None\n if primitive.normal is not None:\n normal = primitive.normal\n normal_index = primitive.normal_index\n normals = normal[normal_index].reshape(\n len(normal_index) * 3, 3)\n\n # Get colors if present\n colors = None\n s = primitive.sources\n if ('COLOR' in s and len(s['COLOR'])\n > 0 and len(primitive.index) > 0):\n color = s['COLOR'][0][4].data\n color_index = primitive.index[:, :, s['COLOR'][0][0]]\n colors = color[color_index].reshape(\n len(color_index) * 3, 3)\n\n faces = np.arange(\n vertices.shape[0]).reshape(\n vertices.shape[0] // 3, 3)\n\n # Get UV coordinates if possible\n vis = None\n if primitive.material in local_material_map:\n material = copy.copy(\n local_material_map[primitive.material])\n uv = None\n if len(primitive.texcoordset) > 0:\n texcoord = primitive.texcoordset[0]\n texcoord_index = primitive.texcoord_indexset[0]\n uv = texcoord[texcoord_index].reshape(\n (len(texcoord_index) * 3, 2))\n vis = visual.texture.TextureVisuals(\n uv=uv, material=material)\n\n primid = '{}.{}'.format(geometry.id, i)\n meshes[primid] = {\n 'vertices': vertices,\n 'faces': faces,\n 'vertex_normals': normals,\n 'vertex_colors': colors,\n 'visual': vis}\n\n graph.append({'frame_to': primid,\n 'matrix': parent_matrix,\n 'geometry': primid})\n\n # recurse down tree for nodes with children\n elif isinstance(node, collada.scene.Node):\n if node.children is not None:\n for child in node.children:\n # create the new matrix\n matrix = np.dot(parent_matrix, node.matrix)\n # parse the child node\n _parse_node(\n node=child,\n parent_matrix=matrix,\n material_map=material_map,\n meshes=meshes,\n graph=graph,\n resolver=resolver)\n\n elif isinstance(node, collada.scene.CameraNode):\n # TODO: convert collada cameras to trimesh cameras\n pass\n elif isinstance(node, collada.scene.LightNode):\n # TODO: convert collada lights to trimesh lights\n pass\n\n\ndef _load_texture(file_name, resolver):\n \"\"\"\n Load a texture from a file into a PIL image.\n \"\"\"\n file_data = resolver.get(file_name)\n image = PIL.Image.open(util.wrap_as_stream(file_data))\n return image\n\n\ndef _parse_material(effect, resolver):\n \"\"\"\n Turn a COLLADA effect into a trimesh material.\n \"\"\"\n\n # Compute base color\n baseColorFactor = np.ones(4)\n baseColorTexture = None\n if isinstance(effect.diffuse, collada.material.Map):\n try:\n baseColorTexture = _load_texture(\n effect.diffuse.sampler.surface.image.path, resolver)\n except BaseException:\n 
log.warning('unable to load base texture',\n exc_info=True)\n elif effect.diffuse is not None:\n baseColorFactor = effect.diffuse\n\n # Compute emission color\n emissiveFactor = np.zeros(3)\n emissiveTexture = None\n if isinstance(effect.emission, collada.material.Map):\n try:\n emissiveTexture = _load_texture(\n effect.diffuse.sampler.surface.image.path, resolver)\n except BaseException:\n log.warning('unable to load emissive texture',\n exc_info=True)\n elif effect.emission is not None:\n emissiveFactor = effect.emission[:3]\n\n # Compute roughness\n roughnessFactor = 1.0\n if (not isinstance(effect.shininess, collada.material.Map)\n and effect.shininess is not None):\n roughnessFactor = np.sqrt(2.0 / (2.0 + effect.shininess))\n\n # Compute metallic factor\n metallicFactor = 0.0\n\n # Compute normal texture\n normalTexture = None\n if effect.bumpmap is not None:\n try:\n normalTexture = _load_texture(\n effect.bumpmap.sampler.surface.image.path, resolver)\n except BaseException:\n log.warning('unable to load bumpmap',\n exc_info=True)\n\n return visual.material.PBRMaterial(\n emissiveFactor=emissiveFactor,\n emissiveTexture=emissiveTexture,\n normalTexture=normalTexture,\n baseColorTexture=baseColorTexture,\n baseColorFactor=baseColorFactor,\n metallicFactor=metallicFactor,\n roughnessFactor=roughnessFactor\n )\n\n\ndef _unparse_material(material):\n \"\"\"\n Turn a trimesh material into a COLLADA material.\n \"\"\"\n # TODO EXPORT TEXTURES\n if isinstance(material, visual.material.PBRMaterial):\n diffuse = material.baseColorFactor\n if diffuse is not None:\n diffuse = list(diffuse)\n\n emission = material.emissiveFactor\n if emission is not None:\n emission = [float(emission[0]), float(emission[1]),\n float(emission[2]), 1.0]\n\n shininess = material.roughnessFactor\n if shininess is not None:\n shininess = 2.0 / shininess**2 - 2.0\n\n effect = collada.material.Effect(\n uuid.uuid4().hex, params=[], shadingtype='phong',\n diffuse=diffuse, emission=emission,\n specular=[1.0, 1.0, 1.0, 1.0], shininess=float(shininess)\n )\n material = collada.material.Material(\n uuid.uuid4().hex, 'pbrmaterial', effect\n )\n else:\n effect = collada.material.Effect(\n uuid.uuid4().hex, params=[], shadingtype='phong'\n )\n material = collada.material.Material(\n uuid.uuid4().hex, 'defaultmaterial', effect\n )\n return material\n\n\ndef load_zae(file_obj, resolver=None, **kwargs):\n \"\"\"\n Load a ZAE file, which is just a zipped DAE file.\n\n Parameters\n -------------\n file_obj : file object\n Contains ZAE data\n resolver : trimesh.visual.Resolver\n Resolver to load additional assets\n kwargs : dict\n Passed to load_collada\n\n Returns\n ------------\n loaded : dict\n Results of loading\n \"\"\"\n\n # a dict, {file name : file object}\n archive = util.decompress(file_obj,\n file_type='zip')\n\n # load the first file with a .dae extension\n file_name = next(i for i in archive.keys()\n if i.lower().endswith('.dae'))\n\n # a resolver so the loader can load textures / etc\n resolver = visual.resolvers.ZipResolver(archive)\n\n # run the regular collada loader\n loaded = load_collada(archive[file_name],\n resolver=resolver,\n **kwargs)\n return loaded\n\n\n# only provide loaders if `pycollada` is installed\n_collada_loaders = {}\n_collada_exporters = {}\nif collada is not None:\n _collada_loaders['dae'] = load_collada\n _collada_loaders['zae'] = load_zae\n _collada_exporters['dae'] = export_collada\n" ]
[ [ "numpy.dot", "numpy.zeros", "numpy.ones", "numpy.eye", "numpy.arange", "numpy.sqrt" ] ]
rovany706/interpret
[ "5ecf05aa894f1e778a3f0b7fa40af9075afe1b8a" ]
[ "python/interpret-core/interpret/glassbox/ebm/ebm.py" ]
[ "# Copyright (c) 2019 Microsoft Corporation\n# Distributed under the MIT software license\n\n\nfrom ...utils import gen_perf_dicts\nfrom .utils import EBMUtils\nfrom .internal import NativeHelper\nfrom .postprocessing import multiclass_postprocess\nfrom ...utils import unify_data, autogen_schema\nfrom ...api.base import ExplainerMixin\nfrom ...api.templates import FeatureValueExplanation\nfrom ...provider.compute import JobLibProvider\nfrom ...utils import gen_name_from_class, gen_global_selector, gen_local_selector\n\nimport numpy as np\nfrom warnings import warn\n\nfrom sklearn.base import is_classifier\nfrom sklearn.utils.validation import check_is_fitted\nfrom sklearn.metrics import log_loss, mean_squared_error\nfrom collections import Counter\n\nfrom sklearn.base import (\n BaseEstimator,\n TransformerMixin,\n ClassifierMixin,\n RegressorMixin,\n)\nfrom itertools import combinations\n\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\nclass EBMExplanation(FeatureValueExplanation):\n \"\"\" Visualizes specifically for EBM. \"\"\"\n\n explanation_type = None\n\n def __init__(\n self,\n explanation_type,\n internal_obj,\n feature_names=None,\n feature_types=None,\n name=None,\n selector=None,\n ):\n \"\"\" Initializes class.\n\n Args:\n explanation_type: Type of explanation.\n internal_obj: A jsonable object that backs the explanation.\n feature_names: List of feature names.\n feature_types: List of feature types.\n name: User-defined name of explanation.\n selector: A dataframe whose indices correspond to explanation entries.\n \"\"\"\n super(EBMExplanation, self).__init__(\n explanation_type,\n internal_obj,\n feature_names=feature_names,\n feature_types=feature_types,\n name=name,\n selector=selector,\n )\n\n def visualize(self, key=None):\n \"\"\" Provides interactive visualizations.\n\n Args:\n key: Either a scalar or list\n that indexes the internal object for sub-plotting.\n If an overall visualization is requested, pass None.\n\n Returns:\n A Plotly figure.\n \"\"\"\n from ...visual.plot import (\n plot_continuous_bar,\n plot_horizontal_bar,\n sort_take,\n is_multiclass_global_data_dict,\n )\n\n data_dict = self.data(key)\n if data_dict is None:\n return None\n\n # Overall graph\n if self.explanation_type == \"global\" and key is None:\n data_dict = sort_take(\n data_dict, sort_fn=lambda x: -abs(x), top_n=15, reverse_results=True\n )\n figure = plot_horizontal_bar(\n data_dict,\n title=\"Overall Importance:<br>Mean Absolute Score\",\n start_zero=True,\n )\n\n return figure\n\n # Continuous feature graph\n if (\n self.explanation_type == \"global\"\n and self.feature_types[key] == \"continuous\"\n ):\n title = self.feature_names[key]\n if is_multiclass_global_data_dict(data_dict):\n figure = plot_continuous_bar(\n data_dict, multiclass=True, show_error=False, title=title\n )\n else:\n figure = plot_continuous_bar(data_dict, title=title)\n\n return figure\n\n return super().visualize(key)\n\n\n# TODO: More documentation in binning process to be explicit.\n# TODO: Consider stripping this down to the bare minimum.\nclass EBMPreprocessor(BaseEstimator, TransformerMixin):\n \"\"\" Transformer that preprocesses data to be ready before EBM. 
\"\"\"\n\n def __init__(\n self, feature_names=None, feature_types=None, max_bins=255, binning=\"quantile\",\n ):\n \"\"\" Initializes EBM preprocessor.\n\n Args:\n feature_names: Feature names as list.\n feature_types: Feature types as list, for example \"continuous\" or \"categorical\".\n max_bins: Max number of bins to process numeric features.\n binning: Strategy to compute bins according to density if \"quantile\" or equidistant if \"uniform\".\n \"\"\"\n self.feature_names = feature_names\n self.feature_types = feature_types\n self.max_bins = max_bins\n self.binning = binning\n\n def fit(self, X):\n \"\"\" Fits transformer to provided samples.\n\n Args:\n X: Numpy array for training samples.\n\n Returns:\n Itself.\n \"\"\"\n self.col_bin_counts_ = {}\n self.col_bin_edges_ = {}\n\n self.hist_counts_ = {}\n self.hist_edges_ = {}\n\n self.col_mapping_ = {}\n self.col_mapping_counts_ = {}\n\n self.col_n_bins_ = {}\n\n self.col_names_ = []\n self.col_types_ = []\n\n self.has_fitted_ = False\n\n schema = autogen_schema(\n X, feature_names=self.feature_names, feature_types=self.feature_types\n )\n\n for col_idx in range(X.shape[1]):\n col_name = list(schema.keys())[col_idx]\n self.col_names_.append(col_name)\n\n col_info = schema[col_name]\n assert col_info[\"column_number\"] == col_idx\n col_data = X[:, col_idx]\n\n self.col_types_.append(col_info[\"type\"])\n if col_info[\"type\"] == \"continuous\":\n col_data = col_data.astype(float)\n\n uniq_vals = set(col_data[~np.isnan(col_data)])\n if len(uniq_vals) < self.max_bins:\n bins = list(sorted(uniq_vals))\n else:\n if self.binning == \"uniform\":\n bins = self.max_bins\n elif self.binning == \"quantile\":\n bins = np.unique(\n np.quantile(\n col_data, q=np.linspace(0, 1, self.max_bins + 1)\n )\n )\n else: # pragma: no cover\n raise ValueError(\"Unknown binning: '{}'.\".format(self.binning))\n\n bin_counts, bin_edges = np.histogram(col_data, bins=bins)\n\n hist_counts, hist_edges = np.histogram(col_data, bins=\"doane\")\n self.col_bin_counts_[col_idx] = bin_counts\n self.col_bin_edges_[col_idx] = bin_edges\n\n self.hist_edges_[col_idx] = hist_edges\n self.hist_counts_[col_idx] = hist_counts\n self.col_n_bins_[col_idx] = len(bin_edges)\n elif col_info[\"type\"] == \"ordinal\":\n mapping = {val: indx for indx, val in enumerate(col_info[\"order\"])}\n self.col_mapping_[col_idx] = mapping\n self.col_n_bins_[col_idx] = len(col_info[\"order\"])\n elif col_info[\"type\"] == \"categorical\":\n uniq_vals, counts = np.unique(col_data, return_counts=True)\n\n non_nan_index = ~np.isnan(counts)\n uniq_vals = uniq_vals[non_nan_index]\n counts = counts[non_nan_index]\n\n mapping = {val: indx for indx, val in enumerate(uniq_vals)}\n self.col_mapping_counts_[col_idx] = counts\n self.col_mapping_[col_idx] = mapping\n\n # TODO: Review NA as we don't support it yet.\n self.col_n_bins_[col_idx] = len(uniq_vals)\n\n self.has_fitted_ = True\n return self\n\n def transform(self, X):\n \"\"\" Transform on provided samples.\n\n Args:\n X: Numpy array for samples.\n\n Returns:\n Transformed numpy array.\n \"\"\"\n check_is_fitted(self, \"has_fitted_\")\n\n missing_constant = -1\n unknown_constant = -2\n\n X_new = np.copy(X)\n for col_idx in range(X.shape[1]):\n col_type = self.col_types_[col_idx]\n col_data = X[:, col_idx]\n\n if col_type == \"continuous\":\n col_data = col_data.astype(float)\n bin_edges = self.col_bin_edges_[col_idx].copy()\n\n digitized = np.digitize(col_data, bin_edges, right=False)\n digitized[digitized == 0] = 1\n digitized -= 1\n\n # NOTE: NA 
handling done later.\n # digitized[np.isnan(col_data)] = missing_constant\n X_new[:, col_idx] = digitized\n elif col_type == \"ordinal\":\n mapping = self.col_mapping_[col_idx]\n mapping[np.nan] = missing_constant\n vec_map = np.vectorize(\n lambda x: mapping[x] if x in mapping else unknown_constant\n )\n X_new[:, col_idx] = vec_map(col_data)\n elif col_type == \"categorical\":\n mapping = self.col_mapping_[col_idx]\n mapping[np.nan] = missing_constant\n vec_map = np.vectorize(\n lambda x: mapping[x] if x in mapping else unknown_constant\n )\n X_new[:, col_idx] = vec_map(col_data)\n\n return X_new.astype(np.int64)\n\n def get_hist_counts(self, feature_index):\n col_type = self.col_types_[feature_index]\n if col_type == \"continuous\":\n return list(self.hist_counts_[feature_index])\n elif col_type == \"categorical\":\n return list(self.col_mapping_counts_[feature_index])\n else: # pragma: no cover\n raise Exception(\"Cannot get counts for type: {0}\".format(col_type))\n\n def get_hist_edges(self, feature_index):\n col_type = self.col_types_[feature_index]\n if col_type == \"continuous\":\n return list(self.hist_edges_[feature_index])\n elif col_type == \"categorical\":\n map = self.col_mapping_[feature_index]\n return list(map.keys())\n else: # pragma: no cover\n raise Exception(\"Cannot get counts for type: {0}\".format(col_type))\n\n def get_bin_counts(self, feature_index):\n col_type = self.col_types_[feature_index]\n if col_type == 'continuous':\n return list(self.col_bin_counts_[feature_index])\n elif col_type == 'categorical':\n return list(self.col_mapping_counts_[feature_index])\n else:\n raise Exception(\"Cannot get counts for type: {0}\".format(col_type))\n\n def get_bin_labels(self, feature_index):\n \"\"\" Returns bin labels for a given feature index.\n\n Args:\n feature_index: An integer for feature index.\n\n Returns:\n List of labels for bins.\n \"\"\"\n\n col_type = self.col_types_[feature_index]\n if col_type == \"continuous\":\n return list(self.col_bin_edges_[feature_index])\n elif col_type == \"ordinal\":\n map = self.col_mapping_[feature_index]\n return list(map.keys())\n elif col_type == \"categorical\":\n map = self.col_mapping_[feature_index]\n return list(map.keys())\n else: # pragma: no cover\n raise Exception(\"Unknown column type\")\n\n\nclass BaseCoreEBM:\n \"\"\"Internal use EBM.\"\"\"\n\n def __init__(\n self,\n model_type,\n # Data\n col_types,\n col_n_bins,\n # Core\n main_features,\n interactions,\n validation_size,\n max_rounds,\n early_stopping_tolerance,\n early_stopping_rounds,\n # Native\n inner_bags,\n learning_rate,\n max_leaves,\n min_samples_leaf,\n # Overall\n random_state,\n ):\n\n self.model_type = model_type\n\n # Arguments for data\n self.col_types = col_types\n self.col_n_bins = col_n_bins\n\n # Arguments for EBM beyond training a feature-step.\n self.main_features = main_features\n self.interactions = interactions\n self.validation_size = validation_size\n self.max_rounds = max_rounds\n self.early_stopping_tolerance = early_stopping_tolerance\n self.early_stopping_rounds = early_stopping_rounds\n\n # Arguments for internal EBM.\n self.inner_bags = inner_bags\n self.learning_rate = learning_rate\n self.max_leaves = max_leaves\n self.min_samples_leaf = min_samples_leaf\n\n # Arguments for overall\n self.random_state = random_state\n\n def fit_parallel(self, X, y, n_classes):\n self.n_classes_ = n_classes\n\n # Split data into train/val\n\n X_train, X_val, y_train, y_val = EBMUtils.ebm_train_test_split(\n X,\n y,\n 
test_size=self.validation_size,\n random_state=self.random_state,\n is_classification=self.model_type == \"classification\",\n )\n\n # Define features\n self.features_ = EBMUtils.gen_features(self.col_types, self.col_n_bins)\n # Build EBM allocation code\n\n # scikit-learn returns an np.array for classification and\n # a single np.float64 for regression, so we do the same\n if self.model_type == \"classification\":\n self.intercept_ = np.zeros(\n NativeHelper.get_count_scores_c(self.n_classes_),\n dtype=np.float64,\n order=\"C\",\n )\n else:\n self.intercept_ = np.float64(0)\n\n if isinstance(self.main_features, str) and self.main_features == \"all\":\n main_feature_indices = [[x] for x in range(len(self.features_))]\n elif isinstance(self.main_features, list) and all(\n isinstance(x, int) for x in self.main_features\n ):\n main_feature_indices = [[x] for x in self.main_features]\n else: # pragma: no cover\n raise RuntimeError(\"Argument 'mains' has invalid value\")\n\n self.feature_groups_ = []\n self.model_ = []\n\n # Train main effects\n self._fit_main(main_feature_indices, X_train, y_train, X_val, y_val)\n\n # Build interaction terms, if required\n self.inter_indices_, self.inter_scores_ = self._build_interactions(\n X_train, y_train\n )\n\n self.inter_episode_idx_ = 0\n if len(self.inter_indices_) != 0:\n self._staged_fit_interactions(\n X_train, y_train, X_val, y_val, self.inter_indices_\n )\n\n return self\n\n def _fit_main(self, main_feature_groups, X_train, y_train, X_val, y_val):\n log.info(\"Train main effects\")\n\n (\n self.model_,\n self.current_metric_,\n self.main_episode_idx_,\n ) = NativeHelper.cyclic_gradient_boost(\n model_type=self.model_type,\n n_classes=self.n_classes_,\n features=self.features_,\n feature_groups=main_feature_groups,\n X_train=X_train,\n y_train=y_train,\n scores_train=None,\n X_val=X_val,\n y_val=y_val,\n scores_val=None,\n n_inner_bags=self.inner_bags,\n random_state=self.random_state,\n learning_rate=self.learning_rate,\n max_leaves=self.max_leaves,\n min_samples_leaf=self.min_samples_leaf,\n max_rounds=self.max_rounds,\n early_stopping_tolerance=self.early_stopping_tolerance,\n early_stopping_rounds=self.early_stopping_rounds,\n name=\"Main\",\n )\n\n self.feature_groups_ = main_feature_groups\n\n return\n\n def _build_interactions(self, X_train, y_train):\n if isinstance(self.interactions, int) and self.interactions != 0:\n log.info(\"Estimating with FAST\")\n\n scores_train = EBMUtils.decision_function(\n X_train, self.feature_groups_, self.model_, self.intercept_\n )\n\n iter_feature_groups = combinations(range(len(self.col_types)), 2)\n\n final_indices, final_scores = NativeHelper.get_interactions(\n n_interactions=self.interactions,\n iter_feature_groups=iter_feature_groups,\n model_type=self.model_type,\n n_classes=self.n_classes_,\n features=self.features_,\n X=X_train,\n y=y_train,\n scores=scores_train,\n min_samples_leaf=self.min_samples_leaf,\n )\n elif isinstance(self.interactions, int) and self.interactions == 0:\n final_indices = []\n final_scores = []\n elif isinstance(self.interactions, list):\n final_indices = self.interactions\n final_scores = [None for _ in range(len(self.interactions))]\n else: # pragma: no cover\n raise RuntimeError(\"Argument 'interaction' has invalid value\")\n\n return final_indices, final_scores\n\n def _staged_fit_interactions(\n self, X_train, y_train, X_val, y_val, inter_indices=[]\n ):\n\n log.info(\"Training interactions\")\n\n scores_train = EBMUtils.decision_function(\n X_train, 
self.feature_groups_, self.model_, self.intercept_\n )\n scores_val = EBMUtils.decision_function(\n X_val, self.feature_groups_, self.model_, self.intercept_\n )\n\n (\n model_update,\n self.current_metric_,\n self.inter_episode_idx_,\n ) = NativeHelper.cyclic_gradient_boost(\n model_type=self.model_type,\n n_classes=self.n_classes_,\n features=self.features_,\n feature_groups=inter_indices,\n X_train=X_train,\n y_train=y_train,\n scores_train=scores_train,\n X_val=X_val,\n y_val=y_val,\n scores_val=scores_val,\n n_inner_bags=self.inner_bags,\n random_state=self.random_state,\n learning_rate=self.learning_rate,\n max_leaves=self.max_leaves,\n min_samples_leaf=self.min_samples_leaf,\n max_rounds=self.max_rounds,\n early_stopping_tolerance=self.early_stopping_tolerance,\n early_stopping_rounds=self.early_stopping_rounds,\n name=\"Pair\",\n )\n\n self.model_.extend(model_update)\n self.feature_groups_.extend(inter_indices)\n\n return\n\n def staged_fit_interactions_parallel(self, X, y, inter_indices=[]):\n\n log.info(\"Splitting train/test for interactions\")\n\n # Split data into train/val\n # NOTE: ideally we would store the train/validation split in the\n # remote processes, but joblib doesn't have a concept\n # of keeping remote state, so we re-split our sets\n X_train, X_val, y_train, y_val = EBMUtils.ebm_train_test_split(\n X,\n y,\n test_size=self.validation_size,\n random_state=self.random_state,\n is_classification=self.model_type == \"classification\",\n )\n\n self._staged_fit_interactions(X_train, y_train, X_val, y_val, inter_indices)\n return self\n\n\nclass BaseEBM(BaseEstimator):\n \"\"\"Client facing SK EBM.\"\"\"\n\n # Interface modeled after:\n # https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.HistGradientBoostingClassifier.html\n # https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html\n # https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html\n # https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html\n # https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html\n # https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn\n # https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMClassifier.html\n\n def __init__(\n self,\n # Explainer\n #\n # feature_names in scikit-learn convention should probably be passed in via the fit function. Also,\n # we can get feature_names via pandas dataframes, and those would only be known at fit time, so\n # we need a version of feature_names_out_ with the underscore to indicate items set at fit time.\n # Despite this, we need to recieve a list of feature_names here to be compatible with blackbox explainations\n # where we still need to have feature_names, but we do not have a fit function since we explain existing\n # models without fitting them ourselves. To conform to a common explaination API we get the feature_names\n # here.\n feature_names,\n # other packages LightGBM, CatBoost, Scikit-Learn (future) are using categorical specific ways to indicate\n # feature_types. The benefit to them is that they can accept multiple ways of specifying categoricals like:\n # categorical = [true, false, true, true] OR categorical = [1, 4, 8] OR categorical = 'all'/'auto'/'none'\n # We're choosing a different route because for visualization we want to be able to express multiple\n # different types of data. 
For example, if the user has data with strings of \"low\", \"medium\", \"high\"\n # We want to keep both the ordinal nature of this feature and we wish to preserve the text for visualization\n # scikit-learn callers can pre-convert these things to [0, 1, 2] in the correct order because they don't\n # need to worry about visualizing the data afterwards, but for us we need a way to specify the strings\n # back anyways. So we need some way to express both the categorical nature of features and the order\n # mapping. We can do this and more complicated conversions via:\n # feature_types = [\"categorical\", [\"low\", \"medium\", \"high\"], \"continuous\", \"time\", \"bool\"]\n feature_types,\n # Data\n #\n # Ensemble\n outer_bags,\n inner_bags,\n # Core\n # TODO PK v.3 replace mains in favor of a \"boosting stage plan\"\n mains,\n interactions,\n validation_size,\n max_rounds,\n early_stopping_tolerance,\n early_stopping_rounds,\n # Native\n learning_rate,\n max_leaves,\n # Holte, R. C. (1993) \"Very simple classification rules perform well on most commonly used datasets\"\n # says use 6 as the minimum samples https://link.springer.com/content/pdf/10.1023/A:1022631118932.pdf\n # TODO PK try setting this (not here, but in our caller) to 6 and run tests to verify the best value.\n min_samples_leaf,\n # Overall\n n_jobs,\n random_state,\n # Preprocessor\n binning,\n max_bins,\n ):\n # NOTE: Per scikit-learn convention, we shouldn't attempt to sanity check these inputs here. We just\n # Store these values for future use. Validate inputs in the fit or other functions. More details in:\n # https://scikit-learn.org/stable/developers/develop.html\n\n # Arguments for explainer\n self.feature_names = feature_names\n self.feature_types = feature_types\n\n # Arguments for ensemble\n self.outer_bags = outer_bags\n self.inner_bags = inner_bags\n\n # Arguments for EBM beyond training a feature-step.\n self.mains = mains\n self.interactions = interactions\n self.validation_size = validation_size\n self.max_rounds = max_rounds\n self.early_stopping_tolerance = early_stopping_tolerance\n self.early_stopping_rounds = early_stopping_rounds\n\n # Arguments for internal EBM.\n self.learning_rate = learning_rate\n self.max_leaves = max_leaves\n self.min_samples_leaf = min_samples_leaf\n\n # Arguments for overall\n self.n_jobs = n_jobs\n self.random_state = random_state\n\n # Arguments for preprocessor\n self.binning = binning\n self.max_bins = max_bins\n\n def fit(self, X, y): # noqa: C901\n \"\"\" Fits model to provided samples.\n\n Args:\n X: Numpy array for training samples.\n y: Numpy array as training labels.\n\n Returns:\n Itself.\n \"\"\"\n\n # NOTE: Generally, we want to keep parameters in the __init__ function, since scikit-learn\n # doesn't like parameters in the fit function, other than ones like weights that have\n # the same length as the number of samples. See:\n # https://scikit-learn.org/stable/developers/develop.html\n # https://github.com/microsoft/LightGBM/issues/2628#issue-536116395\n #\n\n\n # TODO PK sanity check all our inputs from the __init__ function, and this fit fuction\n\n # TODO PK we shouldn't expose our internal state until we are 100% sure that we succeeded\n # so move everything to local variables until the end when we assign them to self.*\n\n # TODO PK we should do some basic checks here that X and y have the same dimensions and that\n # they are well formed (look for NaNs, etc)\n\n # TODO PK handle calls where X.dim == 1. 
This could occur if there was only 1 feature, or if\n # there was only 1 sample? We can differentiate either condition via y.dim and reshape\n # AND add some tests for the X.dim == 1 scenario\n\n # TODO PK write an efficient striping converter for X that replaces unify_data for EBMs\n # algorithm: grap N columns and convert them to rows then process those by sending them to C\n X, y, self.feature_names, _ = unify_data(\n X, y, self.feature_names, self.feature_types\n )\n\n # Build preprocessor\n self.preprocessor_ = EBMPreprocessor(\n feature_names=self.feature_names,\n feature_types=self.feature_types,\n max_bins=self.max_bins,\n binning=self.binning,\n )\n self.preprocessor_.fit(X)\n\n X_orig = X\n X = self.preprocessor_.transform(X)\n\n estimators = []\n seed = EBMUtils.normalize_initial_random_seed(self.random_state)\n\n if is_classifier(self):\n self.classes_, y = np.unique(y, return_inverse=True)\n self._class_idx_ = {x: index for index, x in enumerate(self.classes_)}\n\n y = y.astype(np.int64, casting=\"unsafe\", copy=False)\n n_classes = len(self.classes_)\n if n_classes > 2: # pragma: no cover\n warn(\"Multiclass is still experimental. Subject to change per release.\")\n if n_classes > 2 and self.interactions != 0: # pragma: no cover\n raise RuntimeError(\n \"Multiclass with interactions currently not supported.\"\n )\n for i in range(self.outer_bags):\n seed=NativeHelper.generate_random_number(seed, 1416147523)\n estimator = BaseCoreEBM(\n # Data\n model_type=\"classification\",\n col_types=self.preprocessor_.col_types_,\n col_n_bins=self.preprocessor_.col_n_bins_,\n # Core\n main_features=self.mains,\n interactions=self.interactions,\n validation_size=self.validation_size,\n max_rounds=self.max_rounds,\n early_stopping_tolerance=self.early_stopping_tolerance,\n early_stopping_rounds=self.early_stopping_rounds,\n # Native\n inner_bags=self.inner_bags,\n learning_rate=self.learning_rate,\n max_leaves=self.max_leaves,\n min_samples_leaf=self.min_samples_leaf,\n # Overall\n random_state=seed\n )\n estimators.append(estimator)\n else:\n n_classes = -1\n y = y.astype(np.float64, casting=\"unsafe\", copy=False)\n for i in range(self.outer_bags):\n seed=NativeHelper.generate_random_number(seed, 1416147523)\n estimator = BaseCoreEBM(\n # Data\n model_type=\"regression\",\n col_types=self.preprocessor_.col_types_,\n col_n_bins=self.preprocessor_.col_n_bins_,\n # Core\n main_features=self.mains,\n interactions=self.interactions,\n validation_size=self.validation_size,\n max_rounds=self.max_rounds,\n early_stopping_tolerance=self.early_stopping_tolerance,\n early_stopping_rounds=self.early_stopping_rounds,\n # Native\n inner_bags=self.inner_bags,\n learning_rate=self.learning_rate,\n max_leaves=self.max_leaves,\n min_samples_leaf=self.min_samples_leaf,\n # Overall\n random_state=seed,\n )\n estimators.append(estimator)\n\n # Train base models for main effects, pair detection.\n\n # scikit-learn returns an np.array for classification and\n # a single float64 for regression, so we do the same\n if is_classifier(self):\n self.intercept_ = np.zeros(\n NativeHelper.get_count_scores_c(n_classes), dtype=np.float64, order=\"C\",\n )\n else:\n self.intercept_ = np.float64(0)\n\n provider = JobLibProvider(n_jobs=self.n_jobs)\n\n def train_model(estimator, X, y, n_classes):\n return estimator.fit_parallel(X, y, n_classes)\n\n train_model_args_iter = (\n (estimators[i], X, y, n_classes) for i in range(self.outer_bags)\n )\n\n estimators = provider.parallel(train_model, train_model_args_iter)\n\n if 
isinstance(self.interactions, int) and self.interactions > 0:\n # Select merged pairs\n pair_indices = self._select_merged_pairs(estimators, X, y)\n\n for estimator in estimators:\n # Discard initial interactions\n new_model = []\n new_feature_groups = []\n for i, feature_group in enumerate(estimator.feature_groups_):\n if len(feature_group) != 1:\n continue\n new_model.append(estimator.model_[i])\n new_feature_groups.append(estimator.feature_groups_[i])\n estimator.model_ = new_model\n estimator.feature_groups_ = new_feature_groups\n estimator.inter_episode_idx_ = 0\n\n if len(pair_indices) != 0:\n # Retrain interactions for base models\n def staged_fit_fn(estimator, X, y, inter_indices=[]):\n return estimator.staged_fit_interactions_parallel(\n X, y, inter_indices\n )\n\n staged_fit_args_iter = (\n (estimators[i], X, y, pair_indices) for i in range(self.outer_bags)\n )\n\n estimators = provider.parallel(staged_fit_fn, staged_fit_args_iter)\n elif isinstance(self.interactions, int) and self.interactions == 0:\n pair_indices = []\n elif isinstance(self.interactions, list):\n pair_indices = self.interactions\n else: # pragma: no cover\n raise RuntimeError(\"Argument 'interaction' has invalid value\")\n\n X = np.ascontiguousarray(X.T)\n\n if isinstance(self.mains, str) and self.mains == \"all\":\n main_indices = [[x] for x in range(X.shape[0])]\n elif isinstance(self.mains, list) and all(\n isinstance(x, int) for x in self.mains\n ):\n main_indices = [[x] for x in self.mains]\n else: # pragma: no cover\n msg = \"Argument 'mains' has invalid value (valid values are 'all'|list<int>): {}\".format(\n self.mains\n )\n raise RuntimeError(msg)\n\n self.feature_groups_ = main_indices + pair_indices\n\n # Merge estimators into one.\n self.additive_terms_ = []\n self.term_standard_deviations_ = []\n for index, _ in enumerate(self.feature_groups_):\n log_odds_tensors = []\n for estimator in estimators:\n log_odds_tensors.append(estimator.model_[index])\n\n averaged_model = np.average(np.array(log_odds_tensors), axis=0)\n model_errors = np.std(np.array(log_odds_tensors), axis=0)\n\n self.additive_terms_.append(averaged_model)\n self.term_standard_deviations_.append(model_errors)\n\n # Get episode indexes for base estimators.\n main_episode_idxs = []\n inter_episode_idxs = []\n for estimator in estimators:\n main_episode_idxs.append(estimator.main_episode_idx_)\n inter_episode_idxs.append(estimator.inter_episode_idx_)\n\n self.breakpoint_iteration_ = [main_episode_idxs]\n if len(pair_indices) != 0:\n self.breakpoint_iteration_.append(inter_episode_idxs)\n\n # Extract feature names and feature types.\n # TODO PK v.3 don't overwrite feature_names and feature_types. 
Create new fields called feature_names_out and\n # feature_types_out_\n self.feature_names = []\n self.feature_types = []\n for index, feature_indices in enumerate(self.feature_groups_):\n feature_name = EBMUtils.gen_feature_name(\n feature_indices, self.preprocessor_.col_names_\n )\n feature_type = EBMUtils.gen_feature_type(\n feature_indices, self.preprocessor_.col_types_\n )\n self.feature_types.append(feature_type)\n self.feature_names.append(feature_name)\n\n if n_classes <= 2:\n # Mean center graphs - only for binary classification and regression\n scores_gen = EBMUtils.scores_by_feature_group(\n X, self.feature_groups_, self.additive_terms_\n )\n self._original_term_means_ = []\n\n for set_idx, _, scores in scores_gen:\n score_mean = np.mean(scores)\n\n self.additive_terms_[set_idx] = (\n self.additive_terms_[set_idx] - score_mean\n )\n\n # Add mean center adjustment back to intercept\n self.intercept_ += score_mean\n self._original_term_means_.append(score_mean)\n else:\n # Postprocess model graphs for multiclass\n binned_predict_proba = lambda x: EBMUtils.classifier_predict_proba(\n x, self.feature_groups_, self.additive_terms_, self.intercept_\n )\n\n postprocessed = multiclass_postprocess(\n X, self.additive_terms_, binned_predict_proba, self.feature_types\n )\n self.additive_terms_ = postprocessed[\"feature_graphs\"]\n self.intercept_ = postprocessed[\"intercepts\"]\n\n # Generate overall importance\n scores_gen = EBMUtils.scores_by_feature_group(\n X, self.feature_groups_, self.additive_terms_\n )\n self.feature_importances_ = []\n for set_idx, _, scores in scores_gen:\n mean_abs_score = np.mean(np.abs(scores))\n self.feature_importances_.append(mean_abs_score)\n\n # Generate selector\n # TODO PK v.3 shouldn't this be self._global_selector_ ??\n self.global_selector = gen_global_selector(\n X_orig, self.feature_names, self.feature_types, None\n )\n\n self.has_fitted_ = True\n return self\n\n def _select_merged_pairs(self, estimators, X, y):\n # TODO PK we really need to use purification before here because it's not really legal to elminate\n # a feature group unless it's average contribution value is zero, and for a pair that\n # would mean that the intercepts for both features in the group were zero, hense purified\n\n # Select pairs from base models\n def score_fn(model_type, X, y, feature_groups, model, intercept):\n if model_type == \"classification\":\n prob = EBMUtils.classifier_predict_proba(\n X, feature_groups, model, intercept\n )\n return (\n 0 if len(y) == 0 else log_loss(y, prob)\n ) # use logloss to conform consistnetly and for multiclass\n elif model_type == \"regression\":\n pred = EBMUtils.regressor_predict(\n X, feature_groups, model, intercept\n )\n return 0 if len(y) == 0 else mean_squared_error(y, pred)\n else: # pragma: no cover\n msg = \"Unknown model_type: '{}'.\".format(model_type)\n raise ValueError(msg)\n\n # TODO PK rename the \"pair\" variables in this function to \"interaction\" since that's more generalized\n\n # TODO PK sort the interaction tuples so that they have a unique ordering, otherwise\n # when they get inserted into pair_cum_rank and pair_freq they could potentially have\n # reversed ordering and then be duplicates\n # ordering by increasing indexes is probably the most meaningful representation to the user\n\n pair_cum_rank = Counter()\n pair_freq = Counter()\n\n for index, estimator in enumerate(estimators):\n # TODO PK move the work done inside this loop to the original parallel threads so that this part can be done in parallel\n\n # 
TODO PK this algorithm in O(N^2) by the number of interactions. Alternatively\n # there is an O(N) algorithm where we generate the logits for the base forward and base backwards\n # predictions, then we copy that entire array AND add or substract the one feature under consideration\n\n backward_impacts = []\n forward_impacts = []\n\n # TODO PK we can remove the is_train input to ebm_train_test_split once we've moved the pair scoring stuff\n # to a background thread because we'll already have the validation split without re-splitting it\n _, X_val, _, y_val = EBMUtils.ebm_train_test_split(\n X,\n y,\n test_size=self.validation_size,\n random_state=estimator.random_state,\n is_classification=is_classifier(self),\n is_train=False,\n )\n\n n_base_feature_groups = len(estimator.feature_groups_) - len(\n estimator.inter_indices_\n )\n\n base_forward_score = score_fn(\n estimator.model_type,\n X_val,\n y_val,\n estimator.feature_groups_[:n_base_feature_groups],\n estimator.model_[:n_base_feature_groups],\n estimator.intercept_,\n )\n base_backward_score = score_fn(\n estimator.model_type,\n X_val,\n y_val,\n estimator.feature_groups_,\n estimator.model_,\n estimator.intercept_,\n )\n for pair_idx, pair in enumerate(estimator.inter_indices_):\n n_full_idx = n_base_feature_groups + pair_idx\n\n pair_freq[pair] += 1\n\n backward_score = score_fn(\n estimator.model_type,\n X_val,\n y_val,\n estimator.feature_groups_[:n_full_idx]\n + estimator.feature_groups_[n_full_idx + 1 :],\n estimator.model_[:n_full_idx] + estimator.model_[n_full_idx + 1 :],\n estimator.intercept_,\n )\n forward_score = score_fn(\n estimator.model_type,\n X_val,\n y_val,\n estimator.feature_groups_[:n_base_feature_groups]\n + estimator.feature_groups_[n_full_idx : n_full_idx + 1],\n estimator.model_[:n_base_feature_groups]\n + estimator.model_[n_full_idx : n_full_idx + 1],\n estimator.intercept_,\n )\n # for both regression (mean square error) and classification (log loss), higher values are bad, so\n # interactions with high positive values for backward_impact and forward_impact are good\n backward_impact = backward_score - base_backward_score\n forward_impact = base_forward_score - forward_score\n\n backward_impacts.append(backward_impact)\n forward_impacts.append(forward_impact)\n\n # Average ranks\n backward_ranks = np.argsort(backward_impacts)[::-1]\n forward_ranks = np.argsort(forward_impacts)[::-1]\n pair_ranks = np.mean(np.array([backward_ranks, forward_ranks]), axis=0)\n\n # Add to cumulative rank for a pair across all models\n for pair_idx, pair in enumerate(estimator.inter_indices_):\n pair_cum_rank[pair] += pair_ranks[pair_idx]\n\n # Calculate pair importance ranks\n # TODO PK this copy isn't required\n pair_weighted_ranks = pair_cum_rank.copy()\n for pair, freq in pair_freq.items():\n # Calculate average rank\n pair_weighted_ranks[pair] /= freq\n # Reweight by frequency\n pair_weighted_ranks[pair] /= np.sqrt(freq)\n pair_weighted_ranks = sorted(pair_weighted_ranks.items(), key=lambda x: x[1])\n\n # Retrieve top K pairs\n pair_indices = [list(x[0]) for x in pair_weighted_ranks[: self.interactions]]\n\n return pair_indices\n\n def decision_function(self, X):\n \"\"\" Predict scores from model before calling the link function.\n\n Args:\n X: Numpy array for samples.\n\n Returns:\n The sum of the additive term contributions.\n \"\"\"\n check_is_fitted(self, \"has_fitted_\")\n X, _, _, _ = unify_data(X, None, self.feature_names, self.feature_types)\n X = self.preprocessor_.transform(X)\n\n # TODO PK add a test to see if 
we handle X.ndim == 1 (or should we throw ValueError)\n\n X = np.ascontiguousarray(X.T)\n\n decision_scores = EBMUtils.decision_function(\n X, self.feature_groups_, self.additive_terms_, self.intercept_\n )\n\n return decision_scores\n\n def explain_global(self, name=None):\n \"\"\" Provides global explanation for model.\n\n Args:\n name: User-defined explanation name.\n\n Returns:\n An explanation object,\n visualizing feature-value pairs as horizontal bar chart.\n \"\"\"\n if name is None:\n name = gen_name_from_class(self)\n\n check_is_fitted(self, \"has_fitted_\")\n\n # Obtain min/max for model scores\n lower_bound = np.inf\n upper_bound = -np.inf\n for feature_group_index, _ in enumerate(self.feature_groups_):\n errors = self.term_standard_deviations_[feature_group_index]\n scores = self.additive_terms_[feature_group_index]\n\n lower_bound = min(lower_bound, np.min(scores - errors))\n upper_bound = max(upper_bound, np.max(scores + errors))\n\n bounds = (lower_bound, upper_bound)\n\n # Add per feature graph\n data_dicts = []\n feature_list = []\n density_list = []\n for feature_group_index, feature_indexes in enumerate(\n self.feature_groups_\n ):\n model_graph = self.additive_terms_[feature_group_index]\n\n # NOTE: This uses stddev. for bounds, consider issue warnings.\n errors = self.term_standard_deviations_[feature_group_index]\n\n if len(feature_indexes) == 1:\n bin_labels = self.preprocessor_.get_bin_labels(feature_indexes[0])\n # bin_counts = self.preprocessor_.get_bin_counts(\n # feature_indexes[0]\n # )\n scores = list(model_graph)\n upper_bounds = list(model_graph + errors)\n lower_bounds = list(model_graph - errors)\n density_dict = {\n \"names\": self.preprocessor_.get_hist_edges(feature_indexes[0]),\n \"scores\": self.preprocessor_.get_hist_counts(feature_indexes[0]),\n }\n\n feature_dict = {\n \"type\": \"univariate\",\n \"names\": bin_labels,\n \"scores\": scores,\n \"scores_range\": bounds,\n \"upper_bounds\": upper_bounds,\n \"lower_bounds\": lower_bounds,\n }\n feature_list.append(feature_dict)\n density_list.append(density_dict)\n\n data_dict = {\n \"type\": \"univariate\",\n \"names\": bin_labels,\n \"scores\": model_graph,\n \"scores_range\": bounds,\n \"upper_bounds\": model_graph + errors,\n \"lower_bounds\": model_graph - errors,\n \"density\": {\n \"names\": self.preprocessor_.get_hist_edges(feature_indexes[0]),\n \"scores\": self.preprocessor_.get_hist_counts(\n feature_indexes[0]\n ),\n },\n }\n if is_classifier(self):\n data_dict[\"meta\"] = {\n \"label_names\": self.classes_.tolist() # Classes should be numpy array, convert to list.\n }\n\n data_dicts.append(data_dict)\n elif len(feature_indexes) == 2:\n bin_labels_left = self.preprocessor_.get_bin_labels(feature_indexes[0])\n bin_labels_right = self.preprocessor_.get_bin_labels(feature_indexes[1])\n\n feature_dict = {\n \"type\": \"pairwise\",\n \"left_names\": bin_labels_left,\n \"right_names\": bin_labels_right,\n \"scores\": model_graph,\n \"scores_range\": bounds,\n }\n feature_list.append(feature_dict)\n density_list.append({})\n\n data_dict = {\n \"type\": \"pairwise\",\n \"left_names\": bin_labels_left,\n \"right_names\": bin_labels_right,\n \"scores\": model_graph,\n \"scores_range\": bounds,\n }\n data_dicts.append(data_dict)\n else: # pragma: no cover\n raise Exception(\"Interactions greater than 2 not supported.\")\n\n overall_dict = {\n \"type\": \"univariate\",\n \"names\": self.feature_names,\n \"scores\": self.feature_importances_,\n }\n internal_obj = {\n \"overall\": overall_dict,\n 
\"specific\": data_dicts,\n \"mli\": [\n {\n \"explanation_type\": \"ebm_global\",\n \"value\": {\"feature_list\": feature_list},\n },\n {\"explanation_type\": \"density\", \"value\": {\"density\": density_list}},\n ],\n }\n\n return EBMExplanation(\n \"global\",\n internal_obj,\n feature_names=self.feature_names,\n feature_types=self.feature_types,\n name=name,\n selector=self.global_selector,\n )\n\n def explain_local(self, X, y=None, name=None):\n \"\"\" Provides local explanations for provided samples.\n\n Args:\n X: Numpy array for X to explain.\n y: Numpy vector for y to explain.\n name: User-defined explanation name.\n\n Returns:\n An explanation object, visualizing feature-value pairs\n for each sample as horizontal bar charts.\n \"\"\"\n\n # Produce feature value pairs for each sample.\n # Values are the model graph score per respective feature group.\n if name is None:\n name = gen_name_from_class(self)\n\n check_is_fitted(self, \"has_fitted_\")\n\n X, y, _, _ = unify_data(X, y, self.feature_names, self.feature_types)\n\n # Transform y if classifier\n if is_classifier(self) and y is not None:\n y = np.array([self._class_idx_[el] for el in y])\n\n samples = self.preprocessor_.transform(X)\n\n samples = np.ascontiguousarray(samples.T)\n\n scores_gen = EBMUtils.scores_by_feature_group(\n samples, self.feature_groups_, self.additive_terms_\n )\n\n # TODO PK add a test to see if we handle X.ndim == 1 (or should we throw ValueError)\n\n n_rows = samples.shape[1]\n data_dicts = []\n intercept = self.intercept_\n if not is_classifier(self) or len(self.classes_) <= 2:\n if isinstance(self.intercept_, np.ndarray) or isinstance(\n self.intercept_, list\n ):\n intercept = intercept[0]\n\n for _ in range(n_rows):\n data_dict = {\n \"type\": \"univariate\",\n \"names\": [],\n \"scores\": [],\n \"values\": [],\n \"extra\": {\"names\": [\"Intercept\"], \"scores\": [intercept], \"values\": [1]},\n }\n if is_classifier(self):\n data_dict[\"meta\"] = {\n \"label_names\": self.classes_.tolist() # Classes should be numpy array, convert to list.\n }\n data_dicts.append(data_dict)\n\n for set_idx, feature_group, scores in scores_gen:\n for row_idx in range(n_rows):\n feature_name = self.feature_names[set_idx]\n data_dicts[row_idx][\"names\"].append(feature_name)\n data_dicts[row_idx][\"scores\"].append(scores[row_idx])\n if len(feature_group) == 1:\n data_dicts[row_idx][\"values\"].append(\n X[row_idx, feature_group[0]]\n )\n else:\n data_dicts[row_idx][\"values\"].append(\"\")\n\n is_classification = is_classifier(self)\n if is_classification:\n scores = EBMUtils.classifier_predict_proba(\n samples, self.feature_groups_, self.additive_terms_, self.intercept_,\n )\n else:\n scores = EBMUtils.regressor_predict(\n samples, self.feature_groups_, self.additive_terms_, self.intercept_,\n )\n\n perf_list = []\n perf_dicts = gen_perf_dicts(scores, y, is_classification)\n for row_idx in range(n_rows):\n perf = None if perf_dicts is None else perf_dicts[row_idx]\n perf_list.append(perf)\n data_dicts[row_idx][\"perf\"] = perf\n\n selector = gen_local_selector(data_dicts, is_classification=is_classification)\n\n internal_obj = {\n \"overall\": None,\n \"specific\": data_dicts,\n \"mli\": [\n {\n \"explanation_type\": \"ebm_local\",\n \"value\": {\n \"scores\": self.additive_terms_,\n \"intercept\": self.intercept_,\n \"perf\": perf_list,\n },\n }\n ],\n }\n internal_obj[\"mli\"].append(\n {\n \"explanation_type\": \"evaluation_dataset\",\n \"value\": {\"dataset_x\": X, \"dataset_y\": y},\n }\n )\n\n return 
EBMExplanation(\n \"local\",\n internal_obj,\n feature_names=self.feature_names,\n feature_types=self.feature_types,\n name=name,\n selector=selector,\n )\n\n\nclass ExplainableBoostingClassifier(BaseEBM, ClassifierMixin, ExplainerMixin):\n \"\"\" Explainable Boosting Classifier. The arguments will change in a future release, watch the changelog. \"\"\"\n\n # TODO PK v.3 use underscores here like ClassifierMixin._estimator_type?\n available_explanations = [\"global\", \"local\"]\n explainer_type = \"model\"\n\n \"\"\" Public facing EBM classifier.\"\"\"\n\n def __init__(\n self,\n # Explainer\n feature_names=None,\n feature_types=None,\n # Preprocessor\n max_bins=255,\n binning=\"quantile\",\n # Stages\n mains=\"all\",\n interactions=0,\n # Ensemble\n outer_bags=16,\n inner_bags=0,\n # Boosting\n learning_rate=0.01,\n validation_size=0.15,\n early_stopping_rounds=50,\n early_stopping_tolerance=1e-4,\n max_rounds=5000,\n # Trees\n max_leaves=3,\n min_samples_leaf=2,\n # Overall\n n_jobs=-2,\n random_state=42,\n ):\n \"\"\" Explainable Boosting Classifier. The arguments will change in a future release, watch the changelog.\n\n Args:\n feature_names: List of feature names.\n feature_types: List of feature types.\n max_bins: Max number of bins per feature for pre-processing stage.\n binning: Method to bin values for pre-processing. Choose \"uniform\" or \"quantile\".\n mains: Features to be trained on in main effects stage. Either \"all\" or a list of feature indexes.\n interactions: Interactions to be trained on.\n Either a list of lists of feature indices, or an integer for number of automatically detected interactions.\n outer_bags: Number of outer bags.\n inner_bags: Number of inner bags.\n learning_rate: Learning rate for boosting.\n validation_size: Validation set size for boosting.\n early_stopping_rounds: Number of rounds of no improvement to trigger early stopping.\n early_stopping_tolerance: Tolerance that dictates the smallest delta required to be considered an improvement.\n max_rounds: Number of rounds for boosting.\n max_leaves: Maximum leaf nodes used in boosting.\n min_samples_leaf: Minimum number of cases for tree splits used in boosting.\n n_jobs: Number of jobs to run in parallel.\n random_state: Random state.\n \"\"\"\n super(ExplainableBoostingClassifier, self).__init__(\n # Explainer\n feature_names=feature_names,\n feature_types=feature_types,\n # Preprocessor\n max_bins=max_bins,\n binning=binning,\n # Stages\n mains=mains,\n interactions=interactions,\n # Ensemble\n outer_bags=outer_bags,\n inner_bags=inner_bags,\n # Boosting\n learning_rate=learning_rate,\n validation_size=validation_size,\n early_stopping_rounds=early_stopping_rounds,\n early_stopping_tolerance=early_stopping_tolerance,\n max_rounds=max_rounds,\n # Trees\n max_leaves=max_leaves,\n min_samples_leaf=min_samples_leaf,\n # Overall\n n_jobs=n_jobs,\n random_state=random_state,\n )\n\n # TODO: Throw ValueError like scikit for 1d instead of 2d arrays\n def predict_proba(self, X):\n \"\"\" Probability estimates on provided samples.\n\n Args:\n X: Numpy array for samples.\n\n Returns:\n Probability estimate of sample for each class.\n \"\"\"\n check_is_fitted(self, \"has_fitted_\")\n X, _, _, _ = unify_data(X, None, self.feature_names, self.feature_types)\n X = self.preprocessor_.transform(X)\n\n # TODO PK add a test to see if we handle X.ndim == 1 (or should we throw ValueError)\n\n X = np.ascontiguousarray(X.T)\n\n prob = EBMUtils.classifier_predict_proba(\n X, self.feature_groups_, self.additive_terms_, 
self.intercept_\n )\n return prob\n\n def predict(self, X):\n \"\"\" Predicts on provided samples.\n\n Args:\n X: Numpy array for samples.\n\n Returns:\n Predicted class label per sample.\n \"\"\"\n check_is_fitted(self, \"has_fitted_\")\n X, _, _, _ = unify_data(X, None, self.feature_names, self.feature_types)\n X = self.preprocessor_.transform(X)\n\n # TODO PK add a test to see if we handle X.ndim == 1 (or should we throw ValueError)\n\n X = np.ascontiguousarray(X.T)\n\n return EBMUtils.classifier_predict(\n X,\n self.feature_groups_,\n self.additive_terms_,\n self.intercept_,\n self.classes_,\n )\n\n\nclass ExplainableBoostingRegressor(BaseEBM, RegressorMixin, ExplainerMixin):\n \"\"\" Explainable Boosting Regressor. The arguments will change in a future release, watch the changelog. \"\"\"\n\n # TODO PK v.3 use underscores here like RegressorMixin._estimator_type?\n available_explanations = [\"global\", \"local\"]\n explainer_type = \"model\"\n\n \"\"\" Public facing EBM regressor.\"\"\"\n\n def __init__(\n self,\n # Explainer\n feature_names=None,\n feature_types=None,\n # Preprocessor\n max_bins=255,\n binning=\"quantile\",\n # Stages\n mains=\"all\",\n interactions=0,\n # Ensemble\n outer_bags=16,\n inner_bags=0,\n # Boosting\n learning_rate=0.01,\n validation_size=0.15,\n early_stopping_rounds=50,\n early_stopping_tolerance=1e-4,\n max_rounds=5000,\n # Trees\n max_leaves=3,\n min_samples_leaf=2,\n # Overall\n n_jobs=-2,\n random_state=42,\n ):\n \"\"\" Explainable Boosting Regressor. The arguments will change in a future release, watch the changelog.\n\n Args:\n feature_names: List of feature names.\n feature_types: List of feature types.\n max_bins: Max number of bins per feature for pre-processing stage.\n binning: Method to bin values for pre-processing. Choose \"uniform\" or \"quantile\".\n mains: Features to be trained on in main effects stage. 
Either \"all\" or a list of feature indexes.\n interactions: Interactions to be trained on.\n Either a list of lists of feature indices, or an integer for number of automatically detected interactions.\n outer_bags: Number of outer bags.\n inner_bags: Number of inner bags.\n learning_rate: Learning rate for boosting.\n validation_size: Validation set size for boosting.\n early_stopping_rounds: Number of rounds of no improvement to trigger early stopping.\n early_stopping_tolerance: Tolerance that dictates the smallest delta required to be considered an improvement.\n max_rounds: Number of rounds for boosting.\n max_leaves: Maximum leaf nodes used in boosting.\n min_samples_leaf: Minimum number of cases for tree splits used in boosting.\n n_jobs: Number of jobs to run in parallel.\n random_state: Random state.\n \"\"\"\n super(ExplainableBoostingRegressor, self).__init__(\n # Explainer\n feature_names=feature_names,\n feature_types=feature_types,\n # Preprocessor\n max_bins=max_bins,\n binning=binning,\n # Stages\n mains=mains,\n interactions=interactions,\n # Ensemble\n outer_bags=outer_bags,\n inner_bags=inner_bags,\n # Boosting\n learning_rate=learning_rate,\n validation_size=validation_size,\n early_stopping_rounds=early_stopping_rounds,\n early_stopping_tolerance=early_stopping_tolerance,\n max_rounds=max_rounds,\n # Trees\n max_leaves=max_leaves,\n min_samples_leaf=min_samples_leaf,\n # Overall\n n_jobs=n_jobs,\n random_state=random_state,\n )\n\n def predict(self, X):\n \"\"\" Predicts on provided samples.\n\n Args:\n X: Numpy array for samples.\n\n Returns:\n Predicted class label per sample.\n \"\"\"\n check_is_fitted(self, \"has_fitted_\")\n X, _, _, _ = unify_data(X, None, self.feature_names, self.feature_types)\n X = self.preprocessor_.transform(X)\n\n # TODO PK add a test to see if we handle X.ndim == 1 (or should we throw ValueError)\n\n X = np.ascontiguousarray(X.T)\n\n return EBMUtils.regressor_predict(\n X, self.feature_groups_, self.additive_terms_, self.intercept_\n )\n" ]
[ [ "numpy.copy", "numpy.min", "numpy.mean", "sklearn.base.is_classifier", "numpy.max", "numpy.histogram", "numpy.vectorize", "numpy.sqrt", "numpy.array", "sklearn.utils.validation.check_is_fitted", "numpy.float64", "numpy.argsort", "sklearn.metrics.mean_squared_error", "numpy.isnan", "numpy.ascontiguousarray", "numpy.digitize", "sklearn.metrics.log_loss", "numpy.abs", "numpy.linspace", "numpy.unique" ] ]
godblessforhimself/nmt
[ "1d71bbe4d69932fbe92998abc6c23443c75ebbf9" ]
[ "nmt/utils/misc_utils.py" ]
[ "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Generally useful utility functions.\"\"\"\nfrom __future__ import print_function\n\nimport codecs\nimport collections\nimport json\nimport math\nimport os\nimport sys\nimport time\nfrom distutils import version\n\nimport numpy as np\nimport six\nimport tensorflow as tf\n\n\ndef check_tensorflow_version():\n # LINT.IfChange\n min_tf_version = \"1.12.0\"\n # LINT.ThenChange(<pwd>/nmt/copy.bara.sky)\n if (version.LooseVersion(tf.__version__) <\n version.LooseVersion(min_tf_version)):\n raise EnvironmentError(\"Tensorflow version must >= %s\" % min_tf_version)\n\n\ndef safe_exp(value):\n \"\"\"Exponentiation with catching of overflow error.\"\"\"\n try:\n ans = math.exp(value)\n except OverflowError:\n ans = float(\"inf\")\n return ans\n\n\ndef print_time(s, start_time):\n \"\"\"Take a start time, print elapsed duration, and return a new time.\"\"\"\n print(\"%s, time %ds, %s.\" % (s, (time.time() - start_time), time.ctime()))\n sys.stdout.flush()\n return time.time()\n\n\ndef print_out(s, f=None, new_line=True):\n \"\"\"Similar to print but with support to flush and output to a file.\"\"\"\n if isinstance(s, bytes):\n s = s.decode(\"utf-8\")\n\n if f:\n f.write(s.encode(\"utf-8\"))\n if new_line:\n f.write(b\"\\n\")\n\n # stdout\n if six.PY2:\n sys.stdout.write(s.encode(\"utf-8\"))\n else:\n sys.stdout.buffer.write(s.encode(\"utf-8\"))\n\n if new_line:\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()\n\n\ndef print_hparams(hparams, skip_patterns=None, header=None):\n \"\"\"Print hparams, can skip keys based on pattern.\"\"\"\n if header: print_out(\"%s\" % header)\n values = hparams.values()\n for key in sorted(values.keys()):\n if not skip_patterns or all(\n [skip_pattern not in key for skip_pattern in skip_patterns]):\n print_out(\" %s=%s\" % (key, str(values[key])))\n\n\ndef load_hparams(model_dir):\n \"\"\"Load hparams from an existing model directory.\"\"\"\n hparams_file = os.path.join(model_dir, \"hparams\")\n if tf.io.gfile.exists(hparams_file):\n print_out(\"# Loading hparams from %s\" % hparams_file)\n with codecs.getreader(\"utf-8\")( tf.io.gfile.GFile(hparams_file, \"rb\")) as f:\n try:\n hparams_values = json.load(f)\n hparams = tf.contrib.training.HParams(**hparams_values)\n except ValueError:\n print_out(\" can't load hparams file\")\n return None\n return hparams\n else:\n return None\n\n\ndef maybe_parse_standard_hparams(hparams, hparams_path):\n \"\"\"Override hparams values with existing standard hparams config.\"\"\"\n if hparams_path and tf.io.gfile.exists(hparams_path):\n print_out(\"# Loading standard hparams from %s\" % hparams_path)\n with codecs.getreader(\"utf-8\")( tf.io.gfile.GFile(hparams_path, \"rb\")) as f:\n hparams.parse_json(f.read())\n return hparams\n\n\ndef save_hparams(out_dir, hparams):\n \"\"\"Save hparams.\"\"\"\n hparams_file = os.path.join(out_dir, 
\"hparams\")\n print_out(\" saving hparams to %s\" % hparams_file)\n with codecs.getwriter(\"utf-8\")( tf.io.gfile.GFile(hparams_file, \"wb\")) as f:\n f.write(hparams.to_json(indent=4, sort_keys=True))\n\n\ndef debug_tensor(s, msg=None, summarize=10):\n \"\"\"Print the shape and value of a tensor at test time. Return a new tensor.\"\"\"\n if not msg:\n msg = s.name\n return tf.Print(s, [tf.shape(s), s], msg + \" \", summarize=summarize)\n\n\ndef add_summary(summary_writer, global_step, tag, value):\n \"\"\"Add a new summary to the current summary_writer.\n Useful to log things that are not part of the training graph, e.g., tag=BLEU.\n \"\"\"\n summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])\n summary_writer.add_summary(summary, global_step)\n\n\ndef get_config_proto(log_device_placement=False, allow_soft_placement=True,\n num_intra_threads=0, num_inter_threads=0):\n # GPU options:\n # https://www.tensorflow.org/versions/r0.10/how_tos/using_gpu/index.html\n config_proto = tf.ConfigProto(\n log_device_placement=log_device_placement,\n allow_soft_placement=allow_soft_placement)\n config_proto.gpu_options.allow_growth = True\n\n # CPU threads options\n if num_intra_threads:\n config_proto.intra_op_parallelism_threads = num_intra_threads\n if num_inter_threads:\n config_proto.inter_op_parallelism_threads = num_inter_threads\n\n return config_proto\n\n\ndef format_text(words):\n \"\"\"Convert a sequence words into sentence.\"\"\"\n if (not hasattr(words, \"__len__\") and # for numpy array\n not isinstance(words, collections.Iterable)):\n words = [words]\n return b\" \".join(words)\n\n\ndef format_bpe_text(symbols, delimiter=b\"@@\"):\n \"\"\"Convert a sequence of bpe words into sentence.\"\"\"\n words = []\n word = b\"\"\n if isinstance(symbols, str):\n symbols = symbols.encode()\n delimiter_len = len(delimiter)\n for symbol in symbols:\n if len(symbol) >= delimiter_len and symbol[-delimiter_len:] == delimiter:\n word += symbol[:-delimiter_len]\n else: # end of a word\n word += symbol\n words.append(word)\n word = b\"\"\n return b\" \".join(words)\n\n\ndef format_spm_text(symbols):\n \"\"\"Decode a text in SPM (https://github.com/google/sentencepiece) format.\"\"\"\n return u\"\".join(format_text(symbols).decode(\"utf-8\").split()).replace(\n u\"\\u2581\", u\" \").strip().encode(\"utf-8\")\n" ]
[ [ "tensorflow.io.gfile.GFile", "tensorflow.shape", "tensorflow.io.gfile.exists", "tensorflow.ConfigProto", "tensorflow.Summary.Value", "tensorflow.contrib.training.HParams" ] ]
scan33scan33/nts2020
[ "aeda4c5668b9ac62ba74752e3cc9aa2ab56c2fe8" ]
[ "xtreme/bert_singletask.py" ]
[ "import os\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\n\nimport tensorflow_hub as hub\n\nos.environ[\"TFHUB_CACHE_DIR\"] = \"gs://nts2020-tpu\"\n\nfrom official import nlp\nfrom official.modeling import tf_utils\nfrom official.nlp import bert\n\n# Load the required submodules\nimport official.nlp.optimization\nimport official.nlp.bert.bert_models\nimport official.nlp.bert.configs\nimport official.nlp.bert.run_classifier\nimport official.nlp.bert.tokenization\nimport official.nlp.data.classifier_data_lib\nimport official.nlp.modeling.losses\nimport official.nlp.modeling.models\nimport official.nlp.modeling.networks\n\nimport json\n\ndef decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.io.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.cast(t, tf.int32)\n example[name] = t\n\n return example\n\ndef single_file_dataset(input_file, name_to_features, num_samples=None):\n \"\"\"Creates a single-file dataset to be passed for BERT custom training.\"\"\"\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if num_samples:\n d = d.take(num_samples)\n d = d.map(\n lambda record: decode_record(record, name_to_features),\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n # When `input_file` is a path to a single file or a list\n # containing a single path, disable auto sharding so that\n # same input file is sent to all workers.\n if isinstance(input_file, str) or len(input_file) == 1:\n options = tf.data.Options()\n options.experimental_distribute.auto_shard_policy = (\n tf.data.experimental.AutoShardPolicy.OFF)\n d = d.with_options(options)\n return d\n\ndef create_classifier_dataset(file_path,\n seq_length,\n batch_size,\n is_training=True,\n input_pipeline_context=None,\n label_type=tf.int64,\n include_sample_weights=False,\n num_samples=None):\n \"\"\"Creates input dataset from (tf)records files for train/eval.\"\"\"\n name_to_features = {\n 'input_ids': tf.io.FixedLenFeature([seq_length], tf.int64),\n 'input_mask': tf.io.FixedLenFeature([seq_length], tf.int64),\n 'segment_ids': tf.io.FixedLenFeature([seq_length], tf.int64),\n 'label_ids': tf.io.FixedLenFeature([], label_type),\n }\n if include_sample_weights:\n name_to_features['weight'] = tf.io.FixedLenFeature([], tf.float32)\n dataset = single_file_dataset(file_path, name_to_features,\n num_samples=num_samples)\n\n # The dataset is always sharded by number of hosts.\n # num_input_pipelines is the number of hosts rather than number of cores.\n if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1:\n dataset = dataset.shard(input_pipeline_context.num_input_pipelines,\n input_pipeline_context.input_pipeline_id)\n\n def _select_data_from_record(record):\n x = {\n 'input_word_ids': record['input_ids'],\n 'input_mask': record['input_mask'],\n 'input_type_ids': record['segment_ids']\n }\n y = record['label_ids']\n if include_sample_weights:\n w = record['weight']\n return (x, y, w)\n return (x, y)\n\n if is_training:\n dataset = dataset.shuffle(100)\n dataset = dataset.repeat()\n\n dataset = dataset.map(\n _select_data_from_record,\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n dataset = 
dataset.batch(batch_size, drop_remainder=is_training)\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n return dataset\n\n\nconfig_dict = {\n\"attention_probs_dropout_prob\": 0.1,\n\"directionality\": \"bidi\",\n\"hidden_act\": \"gelu\",\n\"hidden_dropout_prob\": 0.1,\n\"hidden_size\": 768,\n\"initializer_range\": 0.02,\n\"intermediate_size\": 3072,\n\"max_position_embeddings\": 512,\n\"num_attention_heads\": 12,\n\"num_hidden_layers\": 12,\n\"pooler_fc_size\": 768,\n\"pooler_num_attention_heads\": 12,\n\"pooler_num_fc_layers\": 3,\n\"pooler_size_per_head\": 128,\n\"pooler_type\": \"first_token_transform\",\n\"type_vocab_size\": 2,\n\"vocab_size\": 119547\n}\n\nbert_config = bert.configs.BertConfig.from_dict(config_dict)\n\nresolver = tf.distribute.cluster_resolver.TPUClusterResolver.connect(tpu='tpu-quickstart')\ntf.config.experimental_connect_to_cluster(resolver)\ntf.tpu.experimental.initialize_tpu_system(resolver)\nstrategy = tf.distribute.TPUStrategy(resolver)\nwith strategy.scope():\n max_seq_length = 128\n initializer = tf.keras.initializers.TruncatedNormal(\n stddev=bert_config.initializer_range)\n bert_encoder = bert.bert_models.get_transformer_encoder(\n bert_config, max_seq_length)\n\n input_word_ids = tf.keras.layers.Input(\n shape=(max_seq_length,), dtype=tf.int32, name='input_word_ids')\n input_mask = tf.keras.layers.Input(\n shape=(max_seq_length,), dtype=tf.int32, name='input_mask')\n input_type_ids = tf.keras.layers.Input(\n shape=(max_seq_length,), dtype=tf.int32, name='input_type_ids')\n bert_model = hub.KerasLayer(\"https://tfhub.dev/tensorflow/bert_multi_cased_L-12_H-768_A-12/2\", trainable=True)\n outputs= bert_model([input_word_ids, input_mask, input_type_ids])\n output1 = tf.keras.layers.Dropout(rate=bert_config.hidden_dropout_prob)(outputs[0])\n\n output1 = tf.keras.layers.Dense(3, kernel_initializer=initializer, name='output1')(output1)\n\n model = tf.keras.Model(inputs={\n 'input_word_ids': input_word_ids,\n 'input_mask': input_mask,\n 'input_type_ids': input_type_ids},outputs=[output1])\n\n # Set up epochs and steps\n epochs = 3\n batch_size = 32\n eval_batch_size = 32\n\n # get train_data_size from metadata\n train_data_size = 392702\n steps_per_epoch = int(train_data_size / batch_size)\n num_train_steps = steps_per_epoch * epochs\n warmup_steps = int(epochs * train_data_size * 0.1 / batch_size)\n\n # creates an optimizer with learning rate schedule\n optimizer = nlp.optimization.create_optimizer(\n 2e-5, num_train_steps=num_train_steps, num_warmup_steps=warmup_steps)\n\n xnli_training_dataset = create_classifier_dataset(\n \"gs://nts2020/xtereme/xnli/train.en.tfrecords\",\n 128,\n batch_size,\n is_training=True)\n\n xnli_eval_dataset = create_classifier_dataset(\n \"gs://nts2020/xtereme/xnli/eval.en.tfrecords\",\n 128,\n batch_size,\n is_training=False)\n\n metrics = [tf.keras.metrics.SparseCategoricalAccuracy('accuracy', dtype=tf.float32)]\n loss2 = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n\n model.compile(optimizer = optimizer, loss = [loss2], metrics = metrics)\n model.fit(xnli_training_dataset, batch_size = batch_size, epochs= 3, steps_per_epoch = 1000, validation_data=xnli_eval_dataset)\n\n" ]
[ [ "tensorflow.data.TFRecordDataset", "tensorflow.keras.layers.Input", "tensorflow.data.Options", "tensorflow.io.FixedLenFeature", "tensorflow.config.experimental_connect_to_cluster", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Dropout", "tensorflow.io.parse_single_example", "tensorflow.keras.Model", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.distribute.cluster_resolver.TPUClusterResolver.connect", "tensorflow.keras.metrics.SparseCategoricalAccuracy", "tensorflow.tpu.experimental.initialize_tpu_system", "tensorflow.keras.initializers.TruncatedNormal", "tensorflow.distribute.TPUStrategy", "tensorflow.cast" ] ]
XiaoyuanGuo/TEND_MedicalNoveltyDetection
[ "5c2144f0592373d814540cc0fa8e60197ea51756" ]
[ "train.py" ]
[ "import os\nimport time\nimport copy\nimport torch\nimport logging\nimport numpy as np\n\ndef load_ckpt(checkpoint_fpath, model, optimizer):\n checkpoint = torch.load(checkpoint_fpath)\n model.load_state_dict(checkpoint['model_state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n return model\n\ndef save_checkpoint(state, filename):\n \"\"\"Save checkpoint if a new best is achieved\"\"\"\n print (\"=> Saving a new best\")\n torch.save(state, filename) # save checkpoint\n \n\ndef train_embnet(embnet, recon_loss, ae_optimizer, ae_num_epochs, ae_dataloaders, ae_dataset_sizes, dataset_name, device):\n logger = logging.getLogger()\n since = time.time()\n best_loss = np.inf\n trainloss = []\n valloss = []\n logger.info(\"---------Stage-1 AE training----------\")\n for epoch in range(ae_num_epochs):\n logger.info('Epoch {}/{}'.format(epoch, ae_num_epochs))\n \n for phase in ['train','val']:\n if phase == 'train':\n embnet.train()\n else:\n embnet.eval()\n \n running_loss= 0.0\n \n for idx, inputs in enumerate(ae_dataloaders[phase]):\n ae_optimizer.zero_grad()\n with torch.set_grad_enabled(phase =='train'):\n images, _ = inputs\n images = images.to(device) \n recon_imgs = embnet(images)\n \n loss = recon_loss(recon_imgs, images)\n \n if phase == 'train':\n loss.backward()\n ae_optimizer.step()\n running_loss += loss.item() * images.size(0)\n epoch_loss = running_loss / ae_dataset_sizes[phase]\n \n logger.info('{} Loss: {:.4f}'.format(phase, epoch_loss))\n if phase == 'train':\n trainloss.append(epoch_loss)\n else:\n valloss.append(epoch_loss)\n \n if not os.path.exists('./weights/'):\n os.mkdir('./weights/')\n if not os.path.exists('./weights/'+dataset_name):\n os.mkdir('./weights/'+dataset_name)\n \n if phase == 'val' and epoch_loss < best_loss:\n best_loss = epoch_loss\n best_model_wts = copy.deepcopy(embnet.state_dict())\n save_checkpoint(state={'epoch': epoch, \n 'model_state_dict': embnet.state_dict(),\n 'best_loss':best_loss,\n 'optimizer_state_dict': ae_optimizer.state_dict()},\n filename = './weights/'+dataset_name+\"/ae.pt\")\n print()\n time_elapsed = time.time()-since\n logger.info('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed//60, time_elapsed%60))\n logger.info('Best val loss:{:4f}'.format(best_loss))\n \n embnet.load_state_dict(best_model_wts)\n \n return embnet\n\n\ndef init_center_c(train_loader, dataset_name, net, device, eps=0.1,):\n \"\"\"Initialize hypersphere center c as the mean from an initial forward pass on the data.\"\"\"\n n_samples = 0\n \n if dataset_name == \"mnist\":\n tnum = 32\n elif dataset_name == \"cifar10\":\n tnum = 96*2*2\n elif dataset_name == \"ivc-filter\" or dataset_name == \"rsna\":\n tnum = 512\n else:\n tnum = 0\n c = torch.zeros(tnum, device=device)\n\n net.eval()\n with torch.no_grad():\n for idx, inputs in enumerate(train_loader):\n # get the inputs of the batch\n data, _ = inputs\n org_imgs, _ = data\n org_imgs = org_imgs.to(device)\n outputs = net.get_embedding(org_imgs)\n n_samples += outputs.shape[0]\n c += torch.sum(outputs, dim=0)\n\n c /= n_samples\n\n # If c_i is too close to 0, set to +-eps. 
Reason: a zero unit can be trivially matched with zero weights.\n c[(abs(c) < eps) & (c < 0)] = -eps\n c[(abs(c) < eps) & (c > 0)] = eps\n\n return c\n\ndef train_cls_model(cls_model, cls_loss, cls_optimizer, cls_num_epochs, warmup_epochs, cls_dataloaders, cls_dataset_sizes, dataset_name, device, R=150, c=None): \n assert dataset_name in ['mnist', 'cifar10', 'rsna', 'ivc-filter']\n logger = logging.getLogger()\n logger.info(\"---------Stage-2 TEND training----------\")\n since = time.time()\n best_loss = np.inf\n best_fpr = np.inf\n trainloss = []\n valloss = []\n\n for epoch in range(cls_num_epochs):\n logger.info('Epoch {}/{}'.format(epoch, cls_num_epochs))\n for phase in ['train','val']:\n if phase == 'train':\n cls_model.train()\n if epoch == warmup_epochs and c == None:\n c = init_center_c(cls_dataloaders[phase], dataset_name, cls_model, device)\n else:\n cls_model.eval()\n running_loss= 0.0\n \n for idx, inputs in enumerate(cls_dataloaders[phase]):\n \n cls_optimizer.zero_grad()\n with torch.set_grad_enabled(phase =='train'):\n if phase == 'train':\n data, targets = inputs\n org_imgs, tfm_imgs = data\n org_imgs, tfm_imgs = org_imgs.to(device), tfm_imgs.to(device)\n \n org_targets, tfm_targets = targets\n org_targets, tfm_targets = org_targets.to(device), tfm_targets.to(device)\n \n all_imgs = torch.cat([org_imgs, tfm_imgs], dim=0)\n all_targets = torch.cat([org_targets, tfm_targets], dim=0)\n all_targets = torch.unsqueeze(all_targets, dim=1).float()\n \n preds = cls_model(all_imgs)\n loss = cls_loss(preds, all_targets) \n \n if isinstance(cls_model,torch.nn.DataParallel):\n cls_model = cls_model.module\n\n if epoch >= warmup_epochs:\n outputs = cls_model.get_embedding(org_imgs)\n dist = torch.sum((outputs - c) ** 2, dim=1)\n loss += torch.mean(dist)\n \n toutputs = cls_model.get_embedding(tfm_imgs)\n tdist = torch.sum((toutputs - c) ** 2, dim=1)\n loss += torch.mean(torch.nn.functional.relu(R-tdist))\n \n loss.backward()\n cls_optimizer.step()\n else:\n data, targets = inputs\n all_imgs = data.to(device)\n all_targets = targets.to(device)\n all_targets = torch.unsqueeze(all_targets, dim=1).float()\n \n preds = cls_model(all_imgs)\n loss = cls_loss(preds, all_targets) \n if isinstance(cls_model,torch.nn.DataParallel):\n cls_model = cls_model.module\n if epoch >= warmup_epochs:\n nrm_indices = torch.nonzero((targets == 0))\n nrm_imgs = torch.index_select(data, 0, nrm_indices.squeeze(1))\n \n ood_indices = torch.nonzero((targets == 1))\n ood_imgs = torch.index_select(data, 0, ood_indices.squeeze(1))\n \n nrm_imgs, ood_imgs = nrm_imgs.to(device), ood_imgs.to(device)\n \n if nrm_indices.shape[0] != 0:\n nrm_outputs = cls_model.get_embedding(nrm_imgs)\n nrm_dist = torch.sum((nrm_outputs - c) ** 2, dim=1)\n loss += torch.mean(nrm_dist)\n \n if ood_indices.shape[0] != 0:\n ood_outputs = cls_model.get_embedding(ood_imgs)\n ood_dist = torch.sum((ood_outputs - c) ** 2, dim=1)\n loss += torch.mean(torch.nn.functional.relu(R-tdist))\n \n running_loss += loss.item() * all_imgs.shape[0]\n epoch_loss = running_loss / cls_dataset_sizes[phase]\n \n logger.info('{} Loss: {:.4f}'.format(phase, epoch_loss))\n if phase == 'train':\n trainloss.append(epoch_loss)\n elif phase == 'val' and epoch >= warmup_epochs: \n if epoch_loss < best_loss:\n best_loss = epoch_loss\n best_cls_model_wts = copy.deepcopy(cls_model.state_dict())\n save_checkpoint(state={'epoch': epoch, \n 'model_state_dict': cls_model.state_dict(),\n 'best_loss':best_loss,\n 'optimizer_state_dict': cls_optimizer.state_dict()},\n filename = 
'./weights/'+dataset_name+\"/tend.pt\")\n print()\n time_elapsed = time.time()-since\n logger.info('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed//60, time_elapsed%60))\n logger.info('Best val loss:{:4f}'.format(best_loss))\n \n cls_model.load_state_dict(best_cls_model_wts)\n return cls_model, c\n" ]
[ [ "torch.zeros", "torch.nonzero", "torch.cat", "torch.save", "torch.no_grad", "torch.unsqueeze", "torch.nn.functional.relu", "torch.load", "torch.mean", "torch.set_grad_enabled", "torch.sum" ] ]
Eggiverse/FAE
[ "1b953ba6dfcced83e5929eeaa8f525ec4acde5ed" ]
[ "FAE/FeatureAnalysis/DimensionReduction.py" ]
[ "from abc import abstractmethod\nimport numpy as np\nimport os\nfrom copy import deepcopy\nimport pandas as pd\nfrom scipy.stats import pearsonr\n\nfrom FAE.DataContainer.DataContainer import DataContainer\nfrom sklearn.decomposition import PCA\n\nclass DimensionReduction:\n def __init__(self, model=None, number=0, is_transform=False):\n self.__model = model\n self.__remained_number = number\n self.__is_transform=is_transform\n\n def SetModel(self, model):\n self.__model = model\n\n def GetModel(self):\n return self.__model\n\n def SetRemainedNumber(self, number):\n self.__remained_number = number\n\n def GetRemainedNumber(self):\n return self.__remained_number\n\n def SetTransform(self, is_transform):\n self.__is_transform = is_transform\n\n def GetTransform(self):\n return self.__is_transform\n\n def GetDescription(self):\n text = \"Since the dimension of feature space is low enough, we did not used any dimension reduction method \" \\\n \"here to reduce the dimension of feature space. \"\n return text\n\nclass DimensionReductionByPCA(DimensionReduction):\n def __init__(self, number=0):\n super(DimensionReductionByPCA, self).__init__(number=number, is_transform=True)\n super(DimensionReductionByPCA, self).SetModel(PCA(n_components=super(DimensionReductionByPCA, self).GetRemainedNumber()))\n\n def GetName(self):\n return 'PCA'\n\n def SetRemainedNumber(self, number):\n super(DimensionReductionByPCA, self).SetRemainedNumber(number)\n super(DimensionReductionByPCA, self).SetModel(PCA(n_components=super(DimensionReductionByPCA, self).GetRemainedNumber()))\n\n def Transform(self, data_container):\n data = data_container.GetArray()\n if data.shape[1] != self.GetModel().components_.shape[1]:\n print('Data can not be transformed by existed PCA')\n sub_data = self.GetModel().transform(data)\n\n sub_feature_name = ['PCA_feature_' + str(index) for index in\n range(1, super(DimensionReductionByPCA, self).GetRemainedNumber() + 1)]\n\n new_data_container = deepcopy(data_container)\n new_data_container.SetArray(sub_data)\n new_data_container.SetFeatureName(sub_feature_name)\n new_data_container.UpdateFrameByData()\n\n return new_data_container\n\n def GetDescription(self):\n text = \"Since the dimension of feature space was high, we applied principle component analysis (PCA) on the feature matrix. \" \\\n \"The feature vector of the transformed feature matrix was independent to each other. 
\"\n return text\n\n\n def Run(self, data_container, store_folder=''):\n data = data_container.GetArray()\n self.SetRemainedNumber(np.min(data.shape))\n\n self.GetModel().fit(data)\n sub_data = self.GetModel().transform(data)\n\n sub_feature_name = ['PCA_feature_'+str(index) for index in range(1, super(DimensionReductionByPCA, self).GetRemainedNumber() + 1 )]\n\n new_data_container = deepcopy(data_container)\n new_data_container.SetArray(sub_data)\n new_data_container.SetFeatureName(sub_feature_name)\n new_data_container.UpdateFrameByData()\n if store_folder and os.path.isdir(store_folder):\n container_store_path = os.path.join(store_folder, 'pca_train_feature.csv')\n new_data_container.Save(container_store_path)\n\n pca_sort_path = os.path.join(store_folder, 'pca_sort.csv')\n df = pd.DataFrame(data=self.GetModel().components_, index=new_data_container.GetFeatureName(),\n columns=data_container.GetFeatureName())\n df.to_csv(pca_sort_path)\n\n return new_data_container\n\nclass DimensionReductionByPCC(DimensionReduction):\n def __init__(self, threshold=0.999):\n super(DimensionReductionByPCC, self).__init__()\n self.__threshold = threshold\n self.__selected_index = []\n\n def GetName(self):\n return 'PCC'\n\n def __PCCSimilarity(self, data1, data2):\n return np.abs(pearsonr(data1, data2)[0])\n\n def GetSelectedFeatureIndex(self, data_container):\n data = data_container.GetArray()\n data /= np.linalg.norm(data, ord=2, axis=0)\n\n for feature_index in range(data.shape[1]):\n is_similar = False\n for save_index in self.__selected_index:\n if self.__PCCSimilarity(data[:, save_index], data[:, feature_index]) > self.__threshold:\n is_similar = True\n break\n if not is_similar:\n self.__selected_index.append(feature_index)\n\n def Transform(self, data_container):\n new_data = data_container.GetArray()[:, self.__selected_index]\n new_feature = [data_container.GetFeatureName()[t] for t in self.__selected_index]\n\n new_data_container = deepcopy(data_container)\n new_data_container.SetArray(new_data)\n new_data_container.SetFeatureName(new_feature)\n new_data_container.UpdateFrameByData()\n\n return new_data_container\n\n def Run(self, data_container, store_folder=''):\n self.GetSelectedFeatureIndex(data_container)\n\n new_data = data_container.GetArray()[:, self.__selected_index]\n new_feature = [data_container.GetFeatureName()[t] for t in self.__selected_index]\n\n new_data_container = deepcopy(data_container)\n new_data_container.SetArray(new_data)\n new_data_container.SetFeatureName(new_feature)\n new_data_container.UpdateFrameByData()\n\n if store_folder and os.path.isdir(store_folder):\n container_store_path = os.path.join(store_folder, 'PCC_feature.csv')\n new_data_container.Save(container_store_path)\n\n pca_sort_path = os.path.join(store_folder, 'PCC_sort.csv')\n df = pd.DataFrame(data=new_feature)\n df.to_csv(pca_sort_path)\n\n return new_data_container\n\n def GetDescription(self):\n text = \"Since the dimension of feature space was high, we compared the similarity of each feature pair. \" \\\n \"If the PCC value of the feature pair was larger than 0.86, we removed one of them. After this \" \\\n \"process, the dimension of the feature space was reduced and each feature was independent to each other. \"\n return text" ]
[ [ "pandas.DataFrame", "numpy.linalg.norm", "numpy.min", "scipy.stats.pearsonr" ] ]
Metri-Co/AnfisTensorflow2.0
[ "574f643d38119ee4cc38e3b1d6908eddc484f99d" ]
[ "Datagenerator/markov_process.py" ]
[ "\"\"\"\nThis module simulates a Markov Regime-Switching process\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass MRS:\n def __init__(self,\n P = np.array([ [0.989, 0.01, 0.001], ## Transition Matrix\n [0.03, 0.969, 0.001], \n [0.00, 0.03, 0.97] ]),\n \n mu_params = np.array( [0.07,0.0,-0.55]) ,\n \n sigma_params = np.array( [.1,.25,.60]),\n \n AR_params = np.array([ [0.4, -0.2],\n [0.5, -0.3],\n [0.8, -.4]]) \n ):\n \n \"\"\"\n P = Transition Matrix\n mu_params = Expected return for each state\n sigma_params = Expected volatility for each state\n AR_params = Autoregressive parameters for each state\n \"\"\"\n self.P, self.mu_params, self.sigma_params, self.AR_params = P, mu_params, sigma_params, AR_params \n \n check = P.shape[0] == P.shape[1] == len(mu_params) == len(sigma_params) == AR_params.shape[0]\n if check == False:\n raise ValueError('Dimensions of parameters does not fit!')\n \n self.markovchain = []\n self.k = len(mu_params)\n self.r = None\n self.y = None\n \n def roulettewheel(self, prob):\n \"Update state\"\n cp = np.cumsum(prob) / np.sum(prob)\n u = np.random.rand(1)\n i = 0\n while u > cp[i]:\n i += 1\n return i\n \n def sim(self, N, state_0 = 0):\n \"Simulate a Markov Regime Switching time series of length N\"\n dt = 1/250\n e = np.random.randn(N) # sim shocks\n state = state_0\n e[0:2] = e[0:2] * self.sigma_params[state] * np.sqrt(dt)\n self.r = e.copy()\n self.r[0:2] = self.r[0:2] + self.mu_params[state] * dt\n self.markovchain = np.repeat(state,2).astype(int)\n \n # Simulate:\n for t in np.arange(2, N):\n # determine state\n state = self.roulettewheel(self.P[state])\n self.markovchain = np.append(self.markovchain, state)\n # calc returns for given state\n e[t] = e[t] * self.sigma_params[state] * np.sqrt(dt)\n mu = self.mu_params[state] * dt #+ e[t]\n self.r[t] = mu + e[t] + self.AR_params[state,0]*self.r[t-1] + self.AR_params[state,1]*self.r[t-2]\n self.y = 10*np.exp(np.cumsum(self.r))\n \n def plot(self, colored=True):\n \"Plot generated data\"\n plt.style.use('ggplot')\n r = self.r\n mc = self.markovchain\n y = self.y\n fig, axes = plt.subplots(2, figsize=(10,6))\n \n ax = axes[0]\n ax.plot(r, 'k', linewidth = .7)\n ax.margins(x=0)\n if colored == True:\n ax.fill_between(np.arange(len(r)), min(r), max(r), where=mc>=0,facecolor='green', alpha = .3)\n ax.fill_between(np.arange(len(r)), min(r), max(r), where=mc>=1, facecolor='yellow', alpha = .3)\n ax.fill_between(np.arange(len(r)), min(r), max(r), where = mc>=2, facecolor='red', alpha = .3)\n ax.set(title='Simulated Returns')\n \n ax = axes[1]\n ax.plot(y, 'k', linewidth = .7)\n ax.margins(x=0)\n if colored == True:\n ax.fill_between(np.arange(len(r)), min(y), max(y), where = mc>=0, \n facecolor='green', alpha = .3)\n ax.fill_between(np.arange(len(r)), min(y), max(y), where = mc>=1, \n facecolor='yellow', alpha = .3)\n ax.fill_between(np.arange(len(r)), min(y), max(y), where = mc>=2, \n facecolor='red', alpha = .3)\n ax.set(title='Simulated Prices')\n ax.set_yscale('log')\n plt.show()\n\n\n\n" ]
[ [ "numpy.repeat", "numpy.append", "numpy.array", "numpy.random.rand", "numpy.sum", "numpy.random.randn", "matplotlib.pyplot.subplots", "numpy.arange", "matplotlib.pyplot.style.use", "numpy.cumsum", "numpy.sqrt", "matplotlib.pyplot.show" ] ]
baby636/devdocs
[ "406a0989ab7a31f11e0b0da3e50503c0ad6193cd" ]
[ "reference/random/generated/numpy-random-Generator-standard_t-1.py" ]
[ "# From Dalgaard page 83 [Rb7c952f3992e-1]_, suppose the daily energy intake for 11\n# women in kilojoules (kJ) is:\n\nintake = np.array([5260., 5470, 5640, 6180, 6390, 6515, 6805, 7515, \\\n 7515, 8230, 8770])\n\n# Does their energy intake deviate systematically from the recommended\n# value of 7725 kJ? Our null hypothesis will be the absence of deviation,\n# and the alternate hypothesis will be the presence of an effect that could be\n# either positive or negative, hence making our test 2-tailed.\n\n# Because we are estimating the mean and we have N=11 values in our sample,\n# we have N-1=10 degrees of freedom. We set our significance level to 95% and\n# compute the t statistic using the empirical mean and empirical standard\n# deviation of our intake. We use a ddof of 1 to base the computation of our\n# empirical standard deviation on an unbiased estimate of the variance (note:\n# the final estimate is not unbiased due to the concave nature of the square\n# root).\n\nnp.mean(intake)\n# 6753.636363636364\nintake.std(ddof=1)\n# 1142.1232221373727\nt = (np.mean(intake)-7725)/(intake.std(ddof=1)/np.sqrt(len(intake)))\nt\n# -2.8207540608310198\n\n# We draw 1000000 samples from Student's t distribution with the adequate\n# degrees of freedom.\n\nimport matplotlib.pyplot as plt\ns = np.random.default_rng().standard_t(10, size=1000000)\nh = plt.hist(s, bins=100, density=True)\n\n# Does our t statistic land in one of the two critical regions found at\n# both tails of the distribution?\n\nnp.sum(np.abs(t) < np.abs(s)) / float(len(s))\n# 0.018318 #random < 0.05, statistic is in critical region\n\n# The probability value for this 2-tailed test is about 1.83%, which is\n# lower than the 5% pre-determined significance threshold.\n\n# Therefore, the probability of observing values as extreme as our intake\n# conditionally on the null hypothesis being true is too low, and we reject\n# the null hypothesis of no deviation.\n" ]
[ [ "matplotlib.pyplot.hist" ] ]
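The Monte Carlo p-value in the entry above can be cross-checked against the closed-form Student's t survival function; a minimal sketch assuming SciPy is available.

import numpy as np
from scipy import stats

intake = np.array([5260., 5470, 5640, 6180, 6390, 6515, 6805, 7515, 7515, 8230, 8770])
t = (intake.mean() - 7725) / (intake.std(ddof=1) / np.sqrt(len(intake)))
# Two-sided p-value with N - 1 = 10 degrees of freedom.
p = 2 * stats.t.sf(abs(t), df=len(intake) - 1)
print(t, p)   # about -2.82 and 0.018, consistent with the sampled estimate above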
tomresearcher/roberta_fine_tuning
[ "411029dbe938c117494bbab74714cc28b8e8dd3d" ]
[ "src/scripts/predict_stance_model.py" ]
[ "import argparse\nfrom common.loadData import load_data\nfrom model.roberta.roberta_model import predict_task\nimport pandas as pd\nfrom common.score import scorePredict\n\n\ndef main(parser):\n args = parser.parse_args()\n test_set = args.test_set\n use_cuda = args.use_cuda\n model_dir_1_stage = args.model_dir_1_stage\n model_dir_2_stage = args.model_dir_2_stage\n features_1_stage = args.features_1_stage\n\n\n label_map = {'unrelated': 3, 'agree': 0, 'disagree': 1, 'discuss': 2}\n df_test = load_data(test_set, features_1_stage, label_map, 'test', '')\n\n if model_dir_1_stage != '':\n y_predict_1 = predict_task(df_test, use_cuda, model_dir_1_stage, len(features_1_stage))\n df_result = df_test\n df_result['predict'] = y_predict_1\n if model_dir_2_stage != '':\n df_y_1 = pd.DataFrame(y_predict_1, columns=['predict'])\n df_y_1_0 = df_y_1[df_y_1['predict'] == 0]\n df_y_1_1 = df_y_1[df_y_1['predict'] == 1]\n\n p_test_1 = df_test.loc[df_y_1_0.index]\n p_test_1['predict'] = df_y_1_0['predict'].values\n p_test_1['predict'] = p_test_1['predict'].replace(0, 3)\n\n df_test_2 = df_test.loc[df_y_1_1.index]\n y_predict_2 = predict_task(df_test_2, use_cuda, model_dir_2_stage, 0)\n df_test_2['predict'] = y_predict_2\n df_result = pd.concat([p_test_1, df_test_2], axis=0)\n\n labels = list(df_test['labels'].unique())\n labels.sort()\n result, f1 = scorePredict(df_result['predict'].values, df_result['labels'].values, labels)\n print(result)\n\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n ## Required parameters\n\n parser.add_argument(\"--use_cuda\",\n default=False,\n action='store_true',\n help=\"This parameter should be True if cuda is present.\")\n\n parser.add_argument(\"--test_set\",\n default=\"/data/FNC_PLM_originDataset_test_all_summary_v2.json\",\n type=str,\n help=\"This parameter is the relative dir of test set.\")\n\n parser.add_argument(\"--model_dir_1_stage\",\n default=\"\",\n type=str,\n help=\"This parameter is the relative dir of the model first stage to predict.\")\n\n parser.add_argument(\"--model_dir_2_stage\",\n default=\"\",\n type=str,\n help=\"This parameter is the relative dir of the model second stage to predict.\")\n\n parser.add_argument(\"--features_1_stage\",\n default=[],\n nargs='+',\n help=\"This parameter is features of model first stage for predict.\")\n\n main(parser)" ]
[ [ "pandas.DataFrame", "pandas.concat" ] ]
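A self-contained sketch of the two-stage recombination performed in the script above, with dummy predictions standing in for the RoBERTa models; it shows how stage-1 "unrelated" rows are mapped to label 3 while the remaining rows are re-labelled by stage 2 and merged back by index.

import pandas as pd

df_test = pd.DataFrame({"labels": [3, 0, 1, 3, 2, 0]})          # dummy gold labels

y1 = pd.DataFrame([0, 1, 1, 0, 1, 1], columns=["predict"])      # dummy stage-1 output
unrelated = df_test.loc[y1[y1["predict"] == 0].index].copy()
unrelated["predict"] = 3                                        # stage-1 "0" becomes class 3 (unrelated)

related = df_test.loc[y1[y1["predict"] == 1].index].copy()
related["predict"] = [0, 2, 0, 1]                               # dummy stage-2 stance labels

df_result = pd.concat([unrelated, related]).sort_index()
print(df_result)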
QAMCAS/Clustering_Vehicle_data
[ "e4fc977d6c31608e3dad7e945d49b5ccc8433701" ]
[ "Clustering_approach/Data_Clustering.py" ]
[ "import traceback\nimport weka.core.jvm as jvm\nimport os\nimport json\n\nfrom weka.core.dataset import Instances\nfrom weka.clusterers import Clusterer\nfrom weka.clusterers import ClusterEvaluation\nfrom weka.core.converters import Loader\nfrom weka.filters import Filter, MultiFilter\nimport weka.plot as plot\n\nif plot.matplotlib_available:\n import matplotlib.pyplot as plt\nfrom matplotlib import pyplot\nimport logging\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# logging setup\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n \"\"\"\n The available locations are \"Gaimersheim\", \"Munich\" and \"Ingolstadt\"\n \"\"\"\n\n location = \"Ingolstadt\"\n loader = Loader(\"weka.core.converters.ArffLoader\")\n path_to_clean_data = os.path.join(\"Clustering_input_data/\", location, \"test_data_cleaned.arff\")\n path_to_filtered_data = os.path.join(\"Clustering_input_data/\", location, \"test_data_Filtered.arff\")\n clean_data = loader.load_file(path_to_clean_data)\n input_data = loader.load_file(path_to_filtered_data)\n\n List_attributes = []\n for att in range(0, clean_data.num_attributes):\n List_attributes.append(clean_data.attribute(att))\n print(List_attributes)\n\n List_of_clusterers = ['SimpleKMeans', 'EM', 'Canopy',\n 'SelfOrganizingMap'] # for 'SelfOrganizingMap' you need to remove nb_clusters\n range_n_clusters = [4, 5, 6, 7, 8]\n\n for nb_clusters in range_n_clusters:\n\n for algorithm in List_of_clusterers:\n\n if algorithm != 'SelfOrganizingMap':\n clusterer = Clusterer(classname=\"weka.clusterers.\" + str(algorithm), options=[\"-N\", str(nb_clusters)])\n clusterer.build_clusterer(input_data)\n print(clusterer)\n evaluation = ClusterEvaluation()\n evaluation.set_model(clusterer)\n evaluation.test_model(input_data)\n print(\"eva;\" + str(evaluation.cluster_results))\n print(\"# clusters: \" + str(evaluation.num_clusters))\n print(\"log likelihood: \" + str(evaluation.log_likelihood))\n print(\"cluster assignments:\\n\" + str(evaluation.cluster_assignments))\n PCORR_file_name = \"PCORR_\" + str(clusterer.classname) + \"_K_\" + str(nb_clusters)\n AVG_Percentage_Pearson_correlation(location, input_data, clean_data, clusterer, evaluation,\n PCORR_file_name)\n\n if algorithm == 'SelfOrganizingMap' and nb_clusters == 4:\n clusterer = Clusterer(classname=\"weka.clusterers.\" + str(algorithm))\n clusterer.build_clusterer(input_data)\n print(clusterer)\n evaluation = ClusterEvaluation()\n evaluation.set_model(clusterer)\n evaluation.test_model(input_data)\n print(\"eva;\" + str(evaluation.cluster_results))\n print(\"# clusters: \" + str(evaluation.num_clusters))\n print(\"log likelihood: \" + str(evaluation.log_likelihood))\n print(\"cluster assignments:\\n\" + str(evaluation.cluster_assignments))\n PCORR_file_name = \"PCORR_\" + str(clusterer.classname) + \"_K_\" + str(nb_clusters)\n AVG_Percentage_Pearson_correlation(location, input_data, clean_data, clusterer, evaluation,\n PCORR_file_name)\n\n # Using autoencoder\n print(\"**** Clustering using auto-encoder ****\")\n Autoencoder = Filter(classname=\"weka.filters.unsupervised.attribute.MLPAutoencoder\")\n Autoencoder.inputformat(input_data)\n filtered = Autoencoder.filter(input_data) # data filtered with autoencoder\n clusterer = Clusterer(classname=\"weka.clusterers.SimpleKMeans\", options=[\"-N\", str(nb_clusters)])\n clusterer.build_clusterer(filtered)\n print(clusterer)\n evaluation = ClusterEvaluation()\n evaluation.set_model(clusterer)\n 
evaluation.test_model(filtered)\n\n PCORR_file_name = \"PCORR_\" + str(clusterer.classname) + \"withAutoencoder_K_\" + str(nb_clusters)\n AVG_Percentage_Pearson_correlation(location, input_data, clean_data, clusterer, evaluation, PCORR_file_name)\n\n\n# write extracted driving scenarios in json files\ndef create_JSON_files(location, data_filtered, clusterer, evaluation):\n cluster_data_path = 'Results/driving_scenarios/'\n for cluster in range(clusterer.number_of_clusters):\n\n with open(os.path.join(cluster_data_path, location, 'json_files',\n str(clusterer.classname) + '_scenario_' + str(cluster) + '.json'),\n 'w') as f:\n\n episodes_list = []\n for att in data_filtered.attribute_names():\n val_dict[att] = {'values': []}\n\n for inst in range(0, len(evaluation.cluster_assignments) - 1):\n if evaluation.cluster_assignments[inst] == cluster:\n for i in range(len(data_filtered.attribute_names())):\n att_name = data_filtered.attribute_names()[i]\n inst_att_value = data_filtered.get_instance(inst).get_value(i)\n val_dict[att_name]['values'].append(inst_att_value)\n if evaluation.cluster_assignments[inst + 1] != cluster and len(\n val_dict['timestamps']['values']) >= 150:\n episodes_list.append(val_dict)\n val_dict = {}\n for att in data_filtered.attribute_names():\n val_dict[att] = {'values': []}\n\n ep_dict = {'Episodes': []}\n for j in range(0, len(episodes_list)):\n ep_dict['Episodes'].append({'id_{}'.format(j): {'Sensors': episodes_list[j]}})\n\n json.dump(ep_dict, f, sort_keys=True, indent=4)\n\n print(\"Writing json files for each cluster is done\")\n\n\n# compute percentage of Pearson correlation in each cluster save PCorr results in a txt file\ndef AVG_Percentage_Pearson_correlation(location, data_filtered, clean_data, clusterer, evaluation, PCORR_file_name):\n file_correlation = open(\n \"Results/\" + location + \"/\" + PCORR_file_name + \".txt\",\n \"w\")\n delimiter = \";\"\n file_correlation.write(\n \"Cluster\" + delimiter + \"Nb of total episodes\" + delimiter + \"Percentage of Pearson correlation\")\n file_correlation.write(\"\\n\")\n sum_pcorr_cluster = 0\n for cluster in range(0, clusterer.number_of_clusters):\n fig = plt.figure(figsize=(8, 6))\n Sum_pcorr = 0\n file_correlation.write(str(cluster) + delimiter)\n for (att, num) in zip(range(0, data_filtered.num_attributes), range(1, 5)):\n List_episodes = []\n List_timestamps = []\n val_list = []\n for inst in range(len(evaluation.cluster_assignments) - 1):\n if evaluation.cluster_assignments[inst] == cluster:\n # print(\"ok3\")\n val_list.append(data_filtered.get_instance(inst).get_value(att))\n List_timestamps.append(clean_data.get_instance(inst).get_value(att))\n if evaluation.cluster_assignments[inst + 1] != cluster and len(val_list) > 20:\n List_episodes.append(val_list)\n val_list = []\n print(\" number of episodes in cluster \", cluster, \"for \", str(data_filtered.attribute(att)), \" \",\n len(List_episodes))\n\n if len(List_episodes) > 0:\n df = pd.DataFrame(List_episodes)\n df_t = df.T\n df_t.dropna()\n ax = fig.add_subplot(2, 2, num)\n fig.suptitle(clusterer.classname + \"/Cluster \" + str(cluster))\n ax.set_title(str(data_filtered.attribute_names()[att]))\n ax.plot(df_t)\n\n corr = df_t.corr(method='pearson', min_periods=1)\n List_i = []\n for i in range(0, len(corr)):\n nbr_correlated_ep_perLine = 0\n for j in range(0, len(corr[i])):\n if i < j:\n if corr[i][j] >= 0.5:\n if j not in List_i:\n List_i.append(j)\n nbr_correlated_ep_perLine += 1\n\n total_correlated_ep_per_att = len(List_i)\n print(\"total of 
correlated episodes per attribute\", total_correlated_ep_per_att)\n pcorr_per_att = (total_correlated_ep_per_att / len(List_episodes)) * 100\n pcorr_per_att = round(pcorr_per_att, 2)\n print(\"percentage of pearson correlation_per_att\", pcorr_per_att)\n Sum_pcorr = Sum_pcorr + pcorr_per_att\n\n avg_pcorr_cluster = round(Sum_pcorr / data_filtered.num_attributes, 2)\n sum_pcorr_cluster = sum_pcorr_cluster + avg_pcorr_cluster\n file_correlation.write(\n str(len(List_episodes)) + delimiter + str(avg_pcorr_cluster))\n file_correlation.write(\"\\n\")\n plt.tight_layout\n plt.show()\n avg_pcorr_final = round(sum_pcorr_cluster / clusterer.number_of_clusters, 2)\n\n print(\"Episodes evaluation is done\")\n file_correlation.write(\"\\n\")\n file_correlation.write(str(avg_pcorr_final))\n file_correlation.close()\n\n\n# extract episodes\ndef ExtractEpisodes(data_filtered, cluster, evaluation, att):\n List_episodes = []\n val_list = []\n for inst in range(len(evaluation.cluster_assignments) - 1):\n if evaluation.cluster_assignments[inst] == cluster:\n val_list.append(data_filtered.get_instance(inst).get_value(att))\n if evaluation.cluster_assignments[inst + 1] != cluster and len(val_list) > 20:\n List_episodes.append(val_list)\n val_list = []\n return List_episodes\n\n\n# plots graphs of extracted episodes, probability distribution and Pearson correlation matrix for each attribute and\n# save their files\ndef Plot_Episodes_ProbabilityDist_PearsonCorrMatrix(location, input_data, clean_data, clusterer, evaluation):\n for cluster in range(0, clusterer.number_of_clusters):\n # np.os.mkdir(\"Results/\" + location + \"/Cluster_\" + str(cluster))\n path_to_file = \"Results/\" + location + \"/Cluster_\" + str(cluster) + \"/\"\n\n for (att, num) in zip(range(0, input_data.num_attributes), range(1, 5)):\n List_episodes = []\n List_timestamps = []\n val_list = []\n for inst in range(len(evaluation.cluster_assignments) - 1):\n if evaluation.cluster_assignments[inst] == cluster:\n # print(\"ok3\")\n val_list.append(input_data.get_instance(inst).get_value(att))\n List_timestamps.append(clean_data.get_instance(inst).get_value(att))\n if evaluation.cluster_assignments[inst + 1] != cluster and len(val_list) > 20:\n List_episodes.append(val_list)\n val_list = []\n\n if len(List_episodes) > 0:\n df = pd.DataFrame(List_episodes)\n df_t = df.T\n df_t.dropna()\n\n # Plot extracted episodes for each attribute\n plt.figure(figsize=(10, 20))\n df_t.plot(legend=True)\n plt.title(\"Episodes \" + str(input_data.attribute_names()[att]) + \" in cluster \" + str(cluster))\n plt.savefig(path_to_file + \"Episodes \" + str(input_data.attribute(att)) + \"_cluster_\" + str(cluster))\n plt.show()\n\n # Plot episodes probability distribution for each attribute\n df_t.plot.hist(density=True, legend=False, grid=False, bins=20, rwidth=0.9)\n plt.xlabel('Values')\n plt.ylabel('Probability density distribution')\n plt.grid(axis='x', alpha=0.75)\n plt.title(str(input_data.attribute_names()[att]) + \" in cluster \" + str(cluster))\n plt.savefig(\n path_to_file + \"Probability_Distribution_\" + str(input_data.attribute(att)) + \"_cluster_\" + str(\n cluster))\n plt.show()\n\n # Plot Pearson coefficient matrix for each attribute\n corr = df_t.corr(method='pearson', min_periods=1)\n mask = np.zeros_like(corr, dtype='bool')\n f, ax = pyplot.subplots(figsize=(12, 10))\n cmap = sns.diverging_palette(220, 10, as_cmap=True)\n sns.heatmap(corr, mask=mask, cmap=cmap,\n square=True, annot=True, linewidths=.5, ax=ax)\n\n pyplot.title(\"Pearson Correlation 
matrix of the episodes \" + str(\n input_data.attribute_names()[att]) + \" in cluster \" + str(cluster))\n plt.savefig(path_to_file + \"Pearson Correlation matrix of the episodes_\" + str(\n input_data.attribute(att)) + \"_cluster_\" + str(cluster))\n\n\ndef Create_arff_outputfiles(data_cleaned, location, clusterer, evaluation, List_attributes):\n # Writing extracted driving scenarios in arff file for weka use\n for cluster in range(0, clusterer.number_of_clusters):\n dataset_scenario = Instances.create_instances(\"scenario_\" + str(cluster), List_attributes, 0)\n for att in range(0, data_cleaned.num_attributes):\n timestamps_list = []\n val_list = []\n stop = False\n for inst in range(len(evaluation.cluster_assignments)):\n if evaluation.cluster_assignments[inst] == cluster and stop == False:\n val_list.append(data_cleaned.get_instance(inst).get_value(att))\n timestamps_list.append(data_cleaned.get_instance(inst).get_value(0))\n dataset_scenario.add_instance(data_cleaned.get_instance(inst))\n\n if evaluation.cluster_assignments[inst + 1] != cluster and len(val_list) > 20:\n stop = True\n\n file_scenario = open(\n \"Results/driving_scenarios/\" + location + \"/arff_files/\" + str(clusterer.classname) + \"_Scenario_\" + str(\n cluster) + \".arff\", \"w\")\n file_scenario.write(str(dataset_scenario))\n\n file_scenario = open(\n \"Results/dataset_scenarios/\" + location + \"/arff_files/\" + str(clusterer.classname) + \"_Scenario_\" + str(\n cluster) + \".arff\", \"w\")\n file_scenario.write(str(dataset_scenario))\n\n file_scenario.close()\n print(\"Writing results in txt file and arff file is done\")\n\n\nif __name__ == \"__main__\":\n try:\n jvm.start(system_cp=True, packages=True, max_heap_size=\"512m\")\n main()\n except Exception as e:\n print(traceback.format_exc())\n finally:\n jvm.stop()\n" ]
[ [ "numpy.zeros_like", "pandas.DataFrame", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.grid", "matplotlib.pyplot.subplots", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show" ] ]
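The per-attribute score computed by AVG_Percentage_Pearson_correlation above needs a running Weka JVM; this is a pandas-only sketch of the same "percentage of correlated episodes" calculation on synthetic episode data.

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
# Three synthetic episodes of one attribute, equal length for simplicity.
episodes = [np.sin(np.linspace(0, 3, 60)) + 0.05 * rng.standard_normal(60) for _ in range(3)]

df_t = pd.DataFrame(episodes).T                      # one column per episode, as in the script above
corr = df_t.corr(method="pearson", min_periods=1)

correlated = set()
for i in range(len(corr)):
    for j in range(len(corr)):
        if i < j and corr.iloc[i, j] >= 0.5:
            correlated.add(j)                        # episode j correlates with an earlier episode

pcorr = round(100 * len(correlated) / len(episodes), 2)
print(f"{pcorr}% of episodes are Pearson-correlated for this attribute")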
reinforcementdriving/JS3C-Net
[ "40326fdbebc688c10a6247f46ed08463de0db206" ]
[ "lib/sparseconvnet/inputBatch.py" ]
[ "# Copyright 2016-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nfrom .metadata import Metadata\nfrom .utils import toLongTensor\nfrom .sparseConvNetTensor import SparseConvNetTensor\n\n\nclass InputBatch(SparseConvNetTensor):\n def __init__(self, dimension, spatial_size):\n SparseConvNetTensor.__init__(self, None, None, spatial_size)\n self.dimension = dimension\n self.spatial_size = toLongTensor(dimension, spatial_size)\n self.features = torch.FloatTensor()\n self.metadata = Metadata(dimension)\n self.metadata.setInputSpatialSize(self.spatial_size)\n\n def add_sample(self):\n self.metadata.batchAddSample()\n\n def set_location(self, location, vector, overwrite=False):\n assert location.min() >= 0 and (self.spatial_size - location).min() > 0\n self.metadata.setInputSpatialLocation(\n self.features, location.contiguous(), vector.contiguous(), overwrite)\n\n def set_location_(self, location, vector, overwrite=False):\n self.metadata.setInputSpatialLocation(\n self.features, location, vector, overwrite)\n\n def set_locations(self, locations, vectors, overwrite=False):\n \"\"\"\n To set n locations in d dimensions, locations can be\n - A size (n,d) LongTensor, giving d-dimensional coordinates -- points\n are added to the current sample, or\n - A size (n,d+1) LongTensor; the extra column specifies the sample\n number (within the minibatch of samples).\n\n Example with d==3 and n==2:\n Set\n locations = LongTensor([[1,2,3],\n [4,5,6]])\n to add points to the current sample at (1,2,3) and (4,5,6).\n Set\n locations = LongTensor([[1,2,3,7],\n [4,5,6,9]])\n to add point (1,2,3) to sample 7, and (4,5,6) to sample 9 (0-indexed).\n\n \"\"\"\n l = locations[:, :self.dimension]\n assert l.min() >= 0 and (self.spatial_size.expand_as(l) - l).min() > 0\n self.metadata.setInputSpatialLocations(\n self.features, locations.contiguous(), vectors.contiguous(), overwrite)\n\n def set_locations_(self, locations, vectors, overwrite=False):\n self.metadata.setInputSpatialLocations(\n self.features, locations, vectors, overwrite)\n\n def add_sample_from_tensor(self, tensor, offset, threshold=0):\n self.metadata.addSampleFromThresholdedTensor(\n self.features,\n tensor,\n offset,\n self.spatial_size,\n threshold)\n\n def precompute_metadata(self, size):\n \"\"\"\n Optional.\n Allows precomputation of 'rulebooks' in data loading threads.\n Use size == 2 if downsizing with size-2 stride-2 operations\n Use size == 3 if downsizing with size-3 stride-2 operations\n \"\"\"\n if size == 2:\n self.metadata.generateRuleBooks2s2()\n if size == 3 :\n self.metadata.generateRuleBooks3s2()\n\n \"Deprecated method names.\"\n def addSample(self):\n self.metadata.batchAddSample()\n\n def setLocation(self, location, vector, overwrite=False):\n assert location.min() >= 0 and (self.spatial_size - location).min() > 0\n self.metadata.setInputSpatialLocation(\n self.features, location, vector, overwrite)\n\n def setLocation_(self, location, vector, overwrite=False):\n self.metadata.setInputSpatialLocation(\n self.features, location, vector, overwrite)\n\n def setLocations(self, locations, vectors, overwrite=False):\n l = locations[:, :self.dimension]\n assert l.min() >= 0 and (self.spatial_size.expand_as(l) - l).min() > 0\n self.metadata.setInputSpatialLocations(\n self.features, locations, vectors, overwrite)\n\n def setLocations_(self, locations, vector, overwrite=False):\n 
self.metadata.setInputSpatialLocations(\n self.features, locations, vector, overwrite)\n\n def addSampleFromTensor(self, tensor, offset, threshold=0):\n self.metadata.addSampleFromThresholdedTensor(\n self.features,\n tensor,\n offset,\n self.spatial_size,\n threshold)\n\n def precomputeMetadata(self, size):\n \"\"\"\n Optional.\n Allows precomputation of 'rulebooks' in data loading threads.\n Use size == 2 if downsizing with size-2 stride-2 operations\n Use size == 3 if downsizing with size-3 stride-2 operations\n \"\"\"\n if size == 2:\n self.metadata.generateRuleBooks2s2()\n if size == 3 :\n self.metadata.generateRuleBooks3s2()\n" ]
[ [ "torch.FloatTensor" ] ]
zer01ike/HoleFilling
[ "b1591485f37975c0793839880dbb6185a132d3f9" ]
[ "Refine/TextureRefine.py" ]
[ "import numpy as np\nimport cv2\nimport math\n\nclass TextureRefine():\n def __init__(self,TextureImage,IniatialImage,DepthImage,HoleList):\n self.TextureImage = cv2.imread(TextureImage)\n self.InitialImage = cv2.imread(IniatialImage)\n self.DepthImage = cv2.imread(DepthImage,0)\n self.HoleList = HoleList\n self.height,self.width,self.channels= self.TextureImage.shape\n\n def setGradientMap(self):\n\n sobelx = cv2.Sobel(cv2.cvtColor(self.TextureImage,cv2.COLOR_RGB2GRAY),cv2.CV_64F,1,0,ksize=-1)\n sobely = cv2.Sobel(cv2.cvtColor(self.TextureImage,cv2.COLOR_RGB2GRAY),cv2.CV_64F,0,1,ksize=-1)\n sobelx_2 = 0\n sobely_2 = 0\n\n sobelx = cv2.pow(sobelx,2)\n sobely = cv2.pow(sobely,2)\n\n sum = np.add(sobelx,sobely)\n self.TextureGradientMap = np.sqrt(sum)\n #print(self.TextureGradientMap)\n\n sobelx = cv2.Sobel(cv2.cvtColor(self.InitialImage,cv2.COLOR_RGB2GRAY), cv2.CV_64F, 1, 0, ksize=-1)\n sobely = cv2.Sobel(cv2.cvtColor(self.InitialImage,cv2.COLOR_RGB2GRAY), cv2.CV_64F, 0, 1, ksize=-1)\n sum = cv2.pow(sobelx, 2) + cv2.pow(sobely, 2)\n self.InitialGradientMap = np.sqrt(sum)\n\n\n def LossFunction(self,patchsize,c_i,c_j,x_i,x_j):\n #c_center comes from the HoleList\n #x_center comes from the the 5* patchsize around the c_center\n\n #k = pathcsize * patchsize\n #Ko = intialed number\n x = self.get_kernel_content(x_i, x_j,patchsize,self.TextureImage)\n c = self.get_kernel_content(c_i, c_j,patchsize,self.TextureImage)\n\n x_initial = self.get_kernel_content(x_i,x_j,patchsize,self.InitialImage)\n c_initial = self.get_kernel_content(c_i,c_j,patchsize,self.InitialImage)\n\n gx = self.get_gradient_content(x_i,x_j,patchsize,self.TextureGradientMap)\n gc = self.get_gradient_content(c_i,c_j,patchsize,self.TextureGradientMap)\n\n gx_initial = self.get_gradient_content(x_i,x_j,patchsize,self.InitialGradientMap)\n gc_initial = self.get_gradient_content(c_i,c_j,patchsize,self.InitialGradientMap)\n #part 1\n\n E = 0\n P1 = 0\n for i in range(0,patchsize*patchsize):\n P1 += np.square(x[i]-c[i])\n #part 2\n P2 = 0\n for i in range(0,patchsize*patchsize):\n # if it's initial\n P2 += np.square(x_initial[i] - c_initial[i])\n pass\n #part 3\n P3 = 0\n for i in range(0,patchsize*patchsize):\n P3 += np.square(gx[i] - gc[i])\n\n P4 = 0\n for i in range(0,patchsize*patchsize):\n P4 += np.square(gx_initial[i] - gc_initial[i])\n\n E = P1+0.6*P2 +0.6*P3+0.6*0.6*P4\n\n return E\n\n def findPatchLoc(self, stepsize, x,y):\n E_List = []\n\n # get the region of X\n for x_s in range ((0 if int(x-2.5*stepsize)<0 else int(x-2.5*stepsize)) , (self.height if int(x+2.5*stepsize)>self.height else int(x+2.5*stepsize))):\n for y_s in range((0 if int(y-2.5*stepsize)<0 else int(y-2.5*stepsize)),(self.width if int(y+2.5*stepsize)>self.width else int(y+2.5*stepsize))):\n if x_s ==x and y_s == y :continue\n if self.DepthImage[x_s,y_s] >= self.DepthImage[x,y]+15 : continue\n if [x_s,y_s] in self.HoleList : continue\n E_temp = self.LossFunction(stepsize,x,y,x_s,y_s)\n E_List.append([E_temp,x_s,y_s])\n\n #E_List arrange\n first_ele = lambda s:s[0]\n\n list = sorted(E_List, key = first_ele)\n\n return list[0]\n\n def updateImage(self,stepsize):\n self.setGradientMap()\n total = len(self.HoleList)\n index =0\n #######################test\n # x, y = self.HoleList[13050]\n #\n # loc = self.findPatchLoc(stepsize,x,y)\n # x_i = loc[1]\n # x_j = loc[2]\n # # #using (x_i,x_j) to replace (x,y)\n # self.replace(stepsize,x,y,x_i,x_j)\n # index +=1\n # print(str(index)+'/'+str(x)+','+str(y)+'/'+str(total)+\"::processed!\")\n # for i in range(0,3):\n # 
self.InitialImage[x,y][i] = 100\n # self.InitialImage[x_i,x_j][i] = 100\n # print(str(x_i)+\" \"+str(x_j))\n\n ########################\n\n for i in self.HoleList:\n x,y = i\n loc = self.findPatchLoc(stepsize,x,y)\n if len(loc) == 0 :\n x_i = x\n x_j = y\n else :\n x_i = loc[1]\n x_j = loc[2]\n #using (x_i,x_j) to replace (x,y)\n self.replace(stepsize,x,y,x_i,x_j)\n index +=1\n print(str(index)+'/'+str(i)+'/'+str(total)+\"::processed!\")\n return self.InitialImage\n\n def replace(self,stepsize,x,y,x_i,y_j):\n for i in range(0,int(stepsize/2)+1):\n for j in range(0,int(stepsize/2)+1):\n if x-i < 0 or x_i -i< 0 : continue\n if y-j < 0 or y_j - j<0: continue\n if x+i >=self.height or x_i +i >=self.height: continue\n if y+j >=self.width or y_j + j >=self.width: continue\n self.InitialImage[x - i, y - j] = self.InitialImage[x_i - i, y_j - j]\n self.InitialImage[x - i, y + j] = self.InitialImage[x_i - i, y_j + j]\n self.InitialImage[x + i, y + j] = self.InitialImage[x_i + i, y_j + j]\n self.InitialImage[x + i, y - j] = self.InitialImage[x_i + i, y_j - j]\n\n\n def get_kernel_content(self,i,j,kernel_size,Image):\n kernel_content = np.zeros(kernel_size * kernel_size)\n half = int(kernel_size / 2)\n index = 0\n for kernel_v in range((0 if i - half <0 else i-half), (self.height if i + half + 1>self.height else i+half+1)):\n for kernel_h in range((0 if j - half<0 else j-half), (self.width if j + half + 1>self.width else j+half+1)):\n kernel_content[index] = self.getluminance(kernel_v,kernel_h,Image)\n index += 1\n return kernel_content\n\n def get_gradient_content(self,i,j,kernel_size,Image):\n gradient_content = np.zeros(kernel_size * kernel_size)\n half = int(kernel_size / 2)\n index = 0\n for kernel_v in range((0 if i - half <0 else i-half), (self.height if i + half + 1>self.height else i+half+1)):\n for kernel_h in range((0 if j - half<0 else j-half), (self.width if j + half + 1>self.width else j+half+1)):\n gradient_content[index] = Image[kernel_v][kernel_h]\n index += 1\n return gradient_content\n\n def getluminance(self,i,j,Image):\n\n if i<0 :return 0\n if j<0 :return 0\n if i>=self.height :return 0\n if j>=self.width : return 0\n\n return 0.1*Image[i,j][0] + 0.6 * Image[i,j][1] + 0.3 * Image[i,j][2]" ]
[ [ "numpy.square", "numpy.add", "numpy.zeros", "numpy.sqrt" ] ]
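A small sketch of the gradient-magnitude map built by setGradientMap above, run on a synthetic image so it needs only OpenCV and NumPy (none of the hole-filling state).

import cv2
import numpy as np

img = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)   # stand-in for the texture image
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

# ksize=-1 selects the 3x3 Scharr kernel, as in the class above.
gx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=-1)
gy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=-1)
gradient_map = np.sqrt(cv2.pow(gx, 2) + cv2.pow(gy, 2))
print(gradient_map.shape, float(gradient_map.max()))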
ashkanalinejad/SFU-Fairseq
[ "e96301bf49dce7ba23dc4c46dddfdd1ae0f95e10" ]
[ "train.py" ]
[ "#!/usr/bin/env python3 -u\n# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\"\"\"\nTrain a new model on one or across multiple GPUs.\n\"\"\"\n\nimport collections\nimport itertools\nimport os\nimport math\nimport torch\n\nfrom fairseq import distributed_utils, options, progress_bar, tasks, utils\nfrom fairseq.data import iterators\nfrom fairseq.trainer import Trainer\nfrom fairseq.meters import AverageMeter, StopwatchMeter\n\n\ndef main(args):\n if args.max_tokens is None:\n args.max_tokens = 6000\n print(args)\n\n if not torch.cuda.is_available():\n raise NotImplementedError('Training on CPU is not supported')\n torch.cuda.set_device(args.device_id)\n torch.manual_seed(args.seed)\n\n # Setup task, e.g., translation, language modeling, etc.\n task = tasks.setup_task(args)\n\n # Load dataset splits\n load_dataset_splits(task, ['train', 'valid'])\n\n # Build model and criterion\n model = task.build_model(args)\n criterion = task.build_criterion(args, model)\n '''\n print('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))\n print('| num. model params: {}'.format(sum(p.numel() for p in model.parameters())))\n'''\n # Make a dummy batch to (i) warm the caching allocator and (ii) as a\n # placeholder DistributedDataParallel when there's an uneven number of\n # batches per worker.\n max_positions = utils.resolve_max_positions(\n task.max_positions(),\n model.max_positions(),\n )\n dummy_batch = task.dataset('valid').get_dummy_batch(args.max_tokens, max_positions)\n\n # Build trainer\n trainer = Trainer(args, task, model, criterion, dummy_batch)\n print('| training on {} GPUs'.format(args.distributed_world_size))\n print('| max tokens per GPU = {} and max sentences per GPU = {}'.format(\n args.max_tokens,\n args.max_sentences,\n ))\n\n # Initialize dataloader\n epoch_itr = task.get_batch_iterator(\n dataset=task.dataset(args.train_subset),\n max_tokens=args.max_tokens,\n max_sentences=args.max_sentences,\n max_positions=max_positions,\n ignore_invalid_inputs=True,\n required_batch_size_multiple=8,\n seed=args.seed,\n num_shards=args.distributed_world_size,\n shard_id=args.distributed_rank,\n )\n\n # Load the latest checkpoint if one is available\n if not load_checkpoint(args, trainer, epoch_itr):\n trainer.dummy_train_step([dummy_batch])\n\n #Freeze encoder weights if requested\n if args.freeze_encoder:\n for p in model.encoder.parameters():\n p.requires_grad = False\n\n # Train until the learning rate gets too small\n max_epoch = args.task1_max_epoch or math.inf\n max_update = args.task1_max_update or math.inf\n lr = trainer.get_lr()\n train_meter = StopwatchMeter()\n train_meter.start()\n valid_losses = [None]\n valid_subsets = args.valid_subset.split(',')\n while lr[model.keys[0]] > args.task1_min_lr and epoch_itr.epoch < max_epoch and trainer.get_num_updates() < max_update:\n # train for one epoch\n train(args, trainer, task, epoch_itr)\n\n if epoch_itr.epoch % args.validate_interval == 0:\n valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)\n\n # only use first validation loss to update the learning rate\n lr = trainer.lr_step(epoch_itr.epoch, valid_losses)\n\n # save checkpoint\n if epoch_itr.epoch % args.save_interval == 0:\n save_checkpoint(args, trainer, epoch_itr, valid_losses)\n train_meter.stop()\n 
print('| done training in {:.1f} seconds'.format(train_meter.sum))\n\n\ndef train(args, trainer, task, epoch_itr):\n \"\"\"Train the model for one epoch.\"\"\"\n\n # Update parameters every N batches\n if epoch_itr.epoch <= len(args.task1_update_freq):\n update_freq = args.task1_update_freq[epoch_itr.epoch - 1]\n else:\n update_freq = args.task1_update_freq[-1]\n\n\n # Initialize data iterator\n itr = epoch_itr.next_epoch_itr(fix_batches_to_gpus=args.fix_batches_to_gpus)\n itr = iterators.GroupedIterator(itr, update_freq)\n progress = progress_bar.build_progress_bar(\n args, itr, epoch_itr.epoch, no_progress_bar='simple',\n )\n\n extra_meters = collections.defaultdict(lambda: AverageMeter())\n first_valid = args.valid_subset.split(',')[0]\n max_update = args.task1_max_update or math.inf\n for i, samples in enumerate(progress, start=epoch_itr.iterations_in_epoch):\n log_output = trainer.train_step(samples)\n if log_output is None:\n continue\n\n stats = collections.OrderedDict()\n for lang_pair in trainer.model.keys:\n # log mid-epoch stats\n stats = get_training_stats(stats, trainer, lang_pair)\n if i == 0:\n trainer.get_meter('wps')[lang_pair].reset()\n for k, v in log_output.items():\n if k in ['{}:loss'.format(lang_pair)]: #, '{}:nll_loss'.format(lang_pair)]:\n continue # these are already logged above\n if 'loss' in k:\n extra_meters[k].update(v, log_output['sample_size'])\n else:\n extra_meters[k].update(v)\n# stats[k] = extra_meters[k].avg\n# stats['disc_loss'] = log_output['disc:loss']\n# stats['neg_disc_loss'] = log_output['neg_disc:loss']\n progress.log(stats)\n\n # ignore the first mini-batch in words-per-second calculation\n\n num_updates = trainer.get_num_updates(lang_pair)\n if args.save_interval_updates > 0 and num_updates % args.save_interval_updates == 0 and num_updates > 0:\n valid_losses = validate(args, trainer, task, epoch_itr, [first_valid])\n save_checkpoint(args, trainer, epoch_itr, valid_losses)\n\n if num_updates >= max_update:\n break\n\n # log end-of-epoch stats\n stats = collections.OrderedDict()\n for lang_pair in trainer.model.keys:\n stats = get_training_stats(stats, trainer, lang_pair)\n# for k, meter in extra_meters.items():\n# stats[k] = meter.avg\n progress.print(stats)\n\n # reset training meters\n for k in [\n 'train_loss', 'train_nll_loss', 'wps', 'ups', 'wpb', 'bsz', 'gnorm', 'clip',\n ]:\n for _, meter in trainer.get_meter(k).items():\n if meter is not None:\n meter.reset()\n\n\ndef get_training_stats(stats, trainer, lang_pair=None):\n stats['{}:loss'.format(lang_pair)] = '{:.3f}'.format(trainer.get_meter('train_loss')[lang_pair].avg)\n if trainer.get_meter('train_nll_loss')[lang_pair].count > 0:\n nll_loss = trainer.get_meter('train_nll_loss')[lang_pair].avg\n # stats['{}:nll_loss'.format(lang_pair)] = '{:.3f}'.format(nll_loss)\n else:\n nll_loss = trainer.get_meter('train_loss')[lang_pair].avg\n stats['{}:ppl'.format(lang_pair)] = get_perplexity(nll_loss)\n# stats['wps'] = round(trainer.get_meter('wps')[lang_pair].avg)\n# stats['ups'] = '{:.1f}'.format(trainer.get_meter('ups')[lang_pair].avg)\n# stats['wpb'] = round(trainer.get_meter('wpb')[lang_pair].avg)\n# stats['bsz'] = round(trainer.get_meter('bsz')[lang_pair].avg)\n stats['num_updates'] = trainer.get_num_updates(model_name=lang_pair)\n stats['{}:lr'.format(lang_pair)] = trainer.get_lr()[lang_pair]\n# stats['gnorm'] = '{:.3f}'.format(trainer.get_meter('gnorm')[lang_pair].avg)\n# stats['clip'] = '{:.0%}'.format(trainer.get_meter('clip')[lang_pair].avg)\n# stats['oom'] = 
trainer.get_meter('oom')[lang_pair].avg\n# if trainer.get_meter('loss_scale') is not None:\n# stats['loss_scale'] = '{:.3f}'.format(trainer.get_meter('loss_scale')[lang_pair].avg)\n# stats['wall'] = round(trainer.get_meter('wall')[lang_pair].elapsed_time)\n# stats['train_wall'] = round(trainer.get_meter('train_wall')[lang_pair].sum)\n stats['disc:loss'] = '{:.5f}'.format(trainer.get_meter('discriminator_loss')[lang_pair])\n stats['neg_disc:loss'] = '{:.5f}'.format(trainer.get_meter('negative_disc_loss')[lang_pair])\n return stats\n\n\ndef validate(args, trainer, task, epoch_itr, subsets):\n \"\"\"Evaluate the model on the validation set(s) and return the losses.\"\"\"\n valid_losses = {}\n for subset in subsets:\n # Initialize data iterator\n itr = task.get_batch_iterator(\n dataset=task.dataset(subset),\n max_tokens=args.max_tokens,\n max_sentences=args.max_sentences_valid,\n max_positions=utils.resolve_max_positions(\n task.max_positions(),\n trainer.get_model().max_positions(),\n ),\n ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,\n required_batch_size_multiple=8,\n seed=args.seed,\n num_shards=args.distributed_world_size,\n shard_id=args.distributed_rank,\n ).next_epoch_itr(shuffle=False)\n progress = progress_bar.build_progress_bar(\n args, itr, epoch_itr.epoch,\n prefix='valid on \\'{}\\' subset'.format(subset),\n no_progress_bar='simple'\n )\n\n for lang_pair in trainer.model.keys:\n\n # reset validation loss meters\n for k in ['valid_loss', 'valid_nll_loss']:\n meter = trainer.get_meter(k)[lang_pair]\n if meter is not None:\n meter.reset()\n extra_meters = collections.defaultdict(lambda: AverageMeter())\n\n for sample in progress:\n log_output = trainer.valid_step(sample)\n\n stats = collections.OrderedDict()\n for lang_pair in trainer.model.keys:\n for k, v in log_output.items():\n if k in ['{}:loss'.format(lang_pair), '{}:nll_loss'.format(lang_pair)]:\n continue\n if 'loss' in k:\n extra_meters[k].update(v, log_output['sample_size'])\n else:\n extra_meters[k].update(v)\n\n # log validation stats\n stats = get_valid_stats(stats, trainer, lang_pair)\n valid_losses[lang_pair] = stats['{}:valid_loss'.format(lang_pair)]\n\n progress.print(stats)\n\n return valid_losses\n\n\ndef get_valid_stats(stats, trainer, lang_pair=None):\n if lang_pair != 'discriminator':\n stats['{}:valid_loss'.format(lang_pair)] = trainer.get_meter('valid_loss')[lang_pair].avg\n if trainer.get_meter('valid_nll_loss')[lang_pair].count > 0:\n nll_loss = trainer.get_meter('valid_nll_loss')[lang_pair].avg\n stats['{}:valid_nll_loss'.format(lang_pair)] = nll_loss\n else:\n nll_loss = trainer.get_meter('valid_loss')[lang_pair].avg\n stats['{}:valid_ppl'.format(lang_pair)] = get_perplexity(nll_loss)\n stats['num_updates'] = trainer.get_num_updates(model_name=lang_pair)\n if hasattr(save_checkpoint, 'best'):\n stats['{}:best'.format(lang_pair)] = min(save_checkpoint.best[lang_pair], stats['{}:valid_loss'.format(lang_pair)])\n\n return stats\n\n\ndef get_perplexity(loss):\n try:\n return '{:.2f}'.format(math.pow(2, loss))\n except OverflowError:\n return float('inf')\n\n\ndef save_checkpoint(args, trainer, epoch_itr, val_loss):\n if args.no_save or not distributed_utils.is_master(args):\n return\n epoch = epoch_itr.epoch\n end_of_epoch = epoch_itr.end_of_epoch()\n updates = trainer.get_num_updates()\n\n checkpoint_conds = collections.OrderedDict()\n checkpoint_conds['checkpoint{}.pt'.format(epoch)] = (\n end_of_epoch and not args.no_epoch_checkpoints and\n epoch % args.save_interval == 0\n )\n 
checkpoint_conds['checkpoint_{}_{}.pt'.format(epoch, updates)] = (\n not end_of_epoch and args.save_interval_updates > 0 and\n updates % args.save_interval_updates == 0\n )\n for lang_pair in trainer.model.models.keys():\n if lang_pair != 'discriminator':\n checkpoint_conds['checkpoint_{}_best.pt'.format(lang_pair)] = (\n val_loss is not None and\n (not hasattr(save_checkpoint, 'best') or val_loss[lang_pair] < save_checkpoint.best[lang_pair])\n )\n checkpoint_conds['checkpoint_last.pt'] = True # keep this last so that it's a symlink\n\n prev_best = getattr(save_checkpoint, 'best', val_loss)\n save_checkpoint.best = {}\n for k, v in val_loss.items():\n if val_loss is not None:\n save_checkpoint.best[k] = min(v, prev_best[k])\n extra_state = {\n 'train_iterator': epoch_itr.state_dict(),\n 'val_loss': val_loss,\n }\n if hasattr(save_checkpoint, 'best'):\n extra_state.update({'best': save_checkpoint.best})\n\n checkpoints = [os.path.join(args.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond]\n if len(checkpoints) > 0:\n for cp in checkpoints:\n trainer.save_checkpoint(cp, extra_state)\n\n if not end_of_epoch and args.keep_interval_updates > 0:\n # remove old checkpoints; checkpoints are sorted in descending order\n checkpoints = utils.checkpoint_paths(args.save_dir, pattern=r'checkpoint_\\d+_(\\d+)\\.pt')\n for old_chk in checkpoints[args.keep_interval_updates:]:\n os.remove(old_chk)\n\n\ndef load_checkpoint(args, trainer, epoch_itr):\n \"\"\"Load a checkpoint and replay dataloader to match.\"\"\"\n os.makedirs(args.save_dir, exist_ok=True)\n checkpoint_path = os.path.join(args.save_dir, args.restore_file)\n if os.path.isfile(checkpoint_path):\n extra_state = trainer.load_checkpoint(checkpoint_path, args.reset_optimizer, args.reset_lr_scheduler,\n eval(args.optimizer_overrides))\n if extra_state is not None:\n # replay train iterator to match checkpoint\n epoch_itr.load_state_dict(extra_state['train_iterator'])\n\n print('| loaded checkpoint {} (epoch {} @ {} updates)'.format(\n checkpoint_path, epoch_itr.epoch, trainer.get_num_updates()))\n\n trainer.lr_step(epoch_itr.epoch)\n trainer.lr_step_update(trainer.get_num_updates())\n if 'best' in extra_state:\n save_checkpoint.best = extra_state['best']\n return True\n return False\n\n\ndef load_dataset_splits(task, splits):\n for split in splits:\n if split == 'train':\n task.load_dataset(split, combine=True)\n else:\n for k in itertools.count():\n split_k = split + (str(k) if k > 0 else '')\n try:\n task.load_dataset(split_k, combine=False)\n except FileNotFoundError as e:\n if k > 0:\n break\n raise e\n\n\nif __name__ == '__main__':\n parser = options.get_training_parser(default_task='multimodal_pretraining')\n args = options.parse_args_and_arch(parser)\n\n if args.distributed_port > 0 or args.distributed_init_method is not None:\n from distributed_train import main as distributed_main\n\n distributed_main(args)\n elif args.distributed_world_size > 1:\n from multiprocessing_train import main as multiprocessing_main\n\n multiprocessing_main(args)\n else:\n main(args)\n" ]
[ [ "torch.manual_seed", "torch.cuda.set_device", "torch.cuda.is_available" ] ]
Yuricst/trajplotlib
[ "30c06d922cb4d7bf2b57e3cf11b84fe510f00e1f" ]
[ "examples/test_3d_traj.py" ]
[ "\"\"\"\nExample 3D trajectory\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nsys.path.append(\"../\") # provide path to library\nimport trajplotlib\n\n\nif __name__==\"__main__\":\n\t# load data\n\tdata = np.loadtxt('data_traj.csv', delimiter=',')\n\txs = data[:, 0]\n\tys = data[:, 1]\n\tzs = data[:, 2]\n\n\t# create plot\n\tfig, ax = trajplotlib.quickplot3(xs,ys,zs, radius=184.0)\n\n\t# labels\n\tax.set_xlabel('x, km')\n\tax.set_ylabel('y, km')\n\tax.set_zlabel('z, km')\n\tax.set_title(\"My trajectory\")\n\n\tplt.show()\n\n\tprint(type(ax))\n" ]
[ [ "matplotlib.pyplot.show", "numpy.loadtxt" ] ]
yutiansut/Quantitative_Finance
[ "7097ed399fd31b7e15dba8fc816626884aeba8de" ]
[ "Binomial Trees/BinomialAmericanOption.py" ]
[ "\"\"\" C++ For Quantitative Finance \"\"\" \n\"\"\" Binomial American Option \"\"\"\n\"\"\" David Li \"\"\"\n\nimport math\nimport numpy as np\n\nclass StockOption(object):\n def __init__(self, S0, K, r, T, N, params):\n self.S0 = S0\n self.K = K\n self.r = r\n self.T = T\n self.N = max(1, N) # Ensure N have at least 1 time step\n self.STs = None # Declare the stock prices tree\n \"\"\" Optional parameters used by derived classes \"\"\"\n self.pu = params.get(\"pu\", 0) # Probability of up state\n self.pd = params.get(\"pd\", 0) # Probability of down state\n\n self.div = params.get(\"div\", 0) # Dividend yield\n self.sigma = params.get(\"sigma\", 0) # Volatility\n self.is_call = params.get(\"is_call\", True) # Call or put\n self.is_european = params.get(\"is_eu\", True) # Eu or Am\n \"\"\" Computed values \"\"\"\n self.dt = T/float(N) # Single time step, in years\n self.df = math.exp(\n -(r-self.div) * self.dt) # Discount factor\n\nclass BinomialTreeOption(StockOption):\n\n def _setup_parameters_(self):\n self.u = 1 + self.pu # Expected value in the up state\n self.d = 1 - self.pd # Expected value in the down state\n self.qu = (math.exp((self.r-self.div)*self.dt) - self.d)/(self.u-self.d)\n self.qd = 1-self.qu\n\n def _initialize_stock_price_tree_(self):\n # Initialize a 2D tree at T=0\n self.STs = [np.array([self.S0])]\n\n # Simulate the possible stock prices path\n for i in range(self.N):\n prev_branches = self.STs[-1]\n st = np.concatenate((prev_branches*self.u,\n [prev_branches[-1]*self.d]))\n self.STs.append(st) # Add nodes at each time step\n\n def _initialize_payoffs_tree_(self):\n # The payoffs when option expires\n return np.maximum(\n 0, (self.STs[self.N]-self.K) if self.is_call\n else (self.K-self.STs[self.N]))\n\n def __check_early_exercise__(self, payoffs, node):\n early_ex_payoff = \\\n (self.STs[node] - self.K) if self.is_call \\\n else (self.K - self.STs[node])\n\n return np.maximum(payoffs, early_ex_payoff)\n\n def _traverse_tree_(self, payoffs):\n for i in reversed(range(self.N)):\n # The payoffs from NOT exercising the option\n payoffs = (payoffs[:-1] * self.qu +\n payoffs[1:] * self.qd) * self.df\n\n # Payoffs from exercising, for American options\n if not self.is_european:\n payoffs = self.__check_early_exercise__(payoffs,i)\n\n return payoffs\n\n def __begin_tree_traversal__(self):\n payoffs = self._initialize_payoffs_tree_()\n return self._traverse_tree_(payoffs)\n\n def price(self):\n self._setup_parameters_()\n self._initialize_stock_price_tree_()\n payoffs = self.__begin_tree_traversal__()\n\n return payoffs[0]\n\nam_option = BinomialTreeOption(\n 50, 50, 0.05, 0.5, 2,\n {\"pu\": 0.2, \"pd\": 0.2, \"is_call\": False, \"is_eu\": False})\nprint(am_option.price())\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.maximum" ] ]
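A usage sketch for the BinomialTreeOption class above: the same two-step put priced once as European and once as American (the class definitions are assumed to be importable; the module name is hypothetical).

from binomial_american_option import BinomialTreeOption   # hypothetical module name

eu_put = BinomialTreeOption(
    50, 50, 0.05, 0.5, 2,
    {"pu": 0.2, "pd": 0.2, "is_call": False, "is_eu": True})
am_put = BinomialTreeOption(
    50, 50, 0.05, 0.5, 2,
    {"pu": 0.2, "pd": 0.2, "is_call": False, "is_eu": False})
print(eu_put.price(), am_put.price())   # the American put is worth at least the European one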
Magixxxxxx/detectron2
[ "c1ee8cf73777c96cc8a89463d0dca6e0ffe148f4" ]
[ "detectron2/solver/build.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom enum import Enum\nfrom typing import Any, Callable, Dict, Iterable, List, Set, Type, Union\nimport torch\n\nfrom detectron2.config import CfgNode\n\nfrom .lr_scheduler import WarmupCosineLR, WarmupMultiStepLR\n\n_GradientClipperInput = Union[torch.Tensor, Iterable[torch.Tensor]]\n_GradientClipper = Callable[[_GradientClipperInput], None]\n\n\nclass GradientClipType(Enum):\n VALUE = \"value\"\n NORM = \"norm\"\n\n\ndef _create_gradient_clipper(cfg: CfgNode) -> _GradientClipper:\n \"\"\"\n Creates gradient clipping closure to clip by value or by norm,\n according to the provided config.\n \"\"\"\n cfg = cfg.clone()\n\n def clip_grad_norm(p: _GradientClipperInput):\n torch.nn.utils.clip_grad_norm_(p, cfg.CLIP_VALUE, cfg.NORM_TYPE)\n\n def clip_grad_value(p: _GradientClipperInput):\n torch.nn.utils.clip_grad_value_(p, cfg.CLIP_VALUE)\n\n _GRADIENT_CLIP_TYPE_TO_CLIPPER = {\n GradientClipType.VALUE: clip_grad_value,\n GradientClipType.NORM: clip_grad_norm,\n }\n return _GRADIENT_CLIP_TYPE_TO_CLIPPER[GradientClipType(cfg.CLIP_TYPE)]\n\n\ndef _generate_optimizer_class_with_gradient_clipping(\n optimizer_type: Type[torch.optim.Optimizer], gradient_clipper: _GradientClipper\n) -> Type[torch.optim.Optimizer]:\n \"\"\"\n Dynamically creates a new type that inherits the type of a given instance\n and overrides the `step` method to add gradient clipping\n \"\"\"\n\n def optimizer_wgc_step(self, closure=None):\n for group in self.param_groups:\n for p in group[\"params\"]:\n gradient_clipper(p)\n super(type(self), self).step(closure)\n\n OptimizerWithGradientClip = type(\n optimizer_type.__name__ + \"WithGradientClip\",\n (optimizer_type,),\n {\"step\": optimizer_wgc_step},\n )\n return OptimizerWithGradientClip\n\n\ndef maybe_add_gradient_clipping(\n cfg: CfgNode, optimizer: torch.optim.Optimizer\n) -> torch.optim.Optimizer:\n \"\"\"\n If gradient clipping is enabled through config options, wraps the existing\n optimizer instance of some type OptimizerType to become an instance\n of the new dynamically created class OptimizerTypeWithGradientClip\n that inherits OptimizerType and overrides the `step` method to\n include gradient clipping.\n\n Args:\n cfg: CfgNode\n configuration options\n optimizer: torch.optim.Optimizer\n existing optimizer instance\n\n Return:\n optimizer: torch.optim.Optimizer\n either the unmodified optimizer instance (if gradient clipping is\n disabled), or the same instance with adjusted __class__ to override\n the `step` method and include gradient clipping\n \"\"\"\n if not cfg.SOLVER.CLIP_GRADIENTS.ENABLED:\n return optimizer\n grad_clipper = _create_gradient_clipper(cfg.SOLVER.CLIP_GRADIENTS)\n OptimizerWithGradientClip = _generate_optimizer_class_with_gradient_clipping(\n type(optimizer), grad_clipper\n )\n optimizer.__class__ = OptimizerWithGradientClip\n return optimizer\n\n\ndef build_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer:\n \"\"\"\n Build an optimizer from config.\n \"\"\"\n norm_module_types = (\n torch.nn.BatchNorm1d,\n torch.nn.BatchNorm2d,\n torch.nn.BatchNorm3d,\n torch.nn.SyncBatchNorm,\n # NaiveSyncBatchNorm inherits from BatchNorm2d\n torch.nn.GroupNorm,\n torch.nn.InstanceNorm1d,\n torch.nn.InstanceNorm2d,\n torch.nn.InstanceNorm3d,\n torch.nn.LayerNorm,\n torch.nn.LocalResponseNorm,\n )\n params: List[Dict[str, Any]] = []\n memo: Set[torch.nn.parameter.Parameter] = set()\n for module in model.modules():\n for key, value in 
module.named_parameters(recurse=False):\n if not value.requires_grad:\n continue\n # Avoid duplicating parameters\n if value in memo:\n continue\n memo.add(value)\n lr = cfg.SOLVER.BASE_LR\n weight_decay = cfg.SOLVER.WEIGHT_DECAY\n if isinstance(module, norm_module_types):\n weight_decay = cfg.SOLVER.WEIGHT_DECAY_NORM\n elif key == \"bias\":\n # NOTE: unlike Detectron v1, we now default BIAS_LR_FACTOR to 1.0\n # and WEIGHT_DECAY_BIAS to WEIGHT_DECAY so that bias optimizer\n # hyperparameters are by default exactly the same as for regular\n # weights.\n lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR\n weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS\n params += [{\"params\": [value], \"lr\": lr, \"weight_decay\": weight_decay}]\n\n optimizer = torch.optim.SGD(\n params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM, nesterov=cfg.SOLVER.NESTEROV\n )\n\n # zjw TODO:Adam\n if cfg.SOLVER.OPTIM == \"Adam\":\n masks = [p for n, p in model.named_parameters() if 'mask' in n]\n nomask_params = [p for n, p in model.named_parameters() if 'mask' not in n]\n\n optimizer = torch.optim.Adam([\n {'params': masks, 'lr': 1e-5, 'weight_decay':cfg.SOLVER.WEIGHT_DECAY_BIAS},\n {'params': nomask_params, 'lr': 1e-4, 'weight_decay':cfg.SOLVER.WEIGHT_DECAY_BIAS}\n ])\n # zjw TODO END\n\n optimizer = maybe_add_gradient_clipping(cfg, optimizer)\n print('\\nAdam lr m:{} w:{}'.format(1e-5,1e-4))\n print(optimizer)\n return optimizer\n\n\ndef build_lr_scheduler(\n cfg: CfgNode, optimizer: torch.optim.Optimizer\n) -> torch.optim.lr_scheduler._LRScheduler:\n \"\"\"\n Build a LR scheduler from config.\n \"\"\"\n name = cfg.SOLVER.LR_SCHEDULER_NAME\n if name == \"WarmupMultiStepLR\":\n return WarmupMultiStepLR(\n optimizer,\n cfg.SOLVER.STEPS,\n cfg.SOLVER.GAMMA,\n warmup_factor=cfg.SOLVER.WARMUP_FACTOR,\n warmup_iters=cfg.SOLVER.WARMUP_ITERS,\n warmup_method=cfg.SOLVER.WARMUP_METHOD,\n )\n elif name == \"WarmupCosineLR\":\n return WarmupCosineLR(\n optimizer,\n cfg.SOLVER.MAX_ITER,\n warmup_factor=cfg.SOLVER.WARMUP_FACTOR,\n warmup_iters=cfg.SOLVER.WARMUP_ITERS,\n warmup_method=cfg.SOLVER.WARMUP_METHOD,\n )\n else:\n raise ValueError(\"Unknown LR scheduler: {}\".format(name))\n" ]
[ [ "torch.optim.Adam", "torch.optim.SGD", "torch.nn.utils.clip_grad_value_", "torch.nn.utils.clip_grad_norm_" ] ]
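The dynamic subclassing used by _generate_optimizer_class_with_gradient_clipping above can be exercised without a detectron2 config; a minimal plain-PyTorch sketch of the same pattern with value clipping.

import torch

def make_clipped_optimizer(optimizer_type, clip_value):
    # Same idea as above: derive a new optimizer type whose step() clips each gradient first.
    def step(self, closure=None):
        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is not None:
                    torch.nn.utils.clip_grad_value_(p, clip_value)
        super(type(self), self).step(closure)
    return type(optimizer_type.__name__ + "WithGradientClip", (optimizer_type,), {"step": step})

model = torch.nn.Linear(4, 2)
SGDWithClip = make_clipped_optimizer(torch.optim.SGD, clip_value=0.1)
opt = SGDWithClip(model.parameters(), lr=0.01, momentum=0.9)

loss = model(torch.randn(8, 4)).pow(2).sum()
loss.backward()
opt.step()
opt.zero_grad()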
bvegaus/electric
[ "bfb7d5644e30faad8a791489a7525252fba0972d" ]
[ "experiments/obtain_metrics_predictions.py" ]
[ "import openpyxl\nimport os\nimport pandas as pd\nimport numpy as np\nfrom metrics import METRICS\n\n\ndef get_models(datasets):\n \"\"\"It obtains the models used into the experiments\"\"\"\n dataframe = pd.read_csv('../results/' + datasets[0] + '/results.csv', sep=';')\n models = dataframe['MODEL'].unique()\n return models.tolist()\n\n\ndef get_best_prediction(results, metric, model, dataset):\n \"\"\"It calculates the best prediction of one model in one dataset\"\"\"\n model_rows = results.loc[results['MODEL'] == model, :]\n best_value = 9999999999\n best_model = None\n for index, row in model_rows.iterrows():\n\n path_y_test_denorm = '../data/' + dataset + '/' + np.str(row['NORMALIZATION']) + '/' + \\\n np.str(row['PAST_HISTORY_FACTOR']) + '/'\n\n path_preds = '../results/' + dataset + '/' + np.str(row['NORMALIZATION']) + '/' + np.str(\n row['PAST_HISTORY_FACTOR']) + '/' + np.str(int(row['EPOCHS'])) + '/' + np.str(int(row['BATCH_SIZE'])) + \\\n '/' + np.str(row['LEARNING_RATE']) + '/' + model + '/' + np.str(row['MODEL_INDEX']) + '.npy'\n\n y_test_denorm = np.load(path_y_test_denorm + 'y_test_denorm.np.npy').flatten()\n preds = np.load(path_preds).flatten()\n\n value = METRICS[metric](y_test_denorm, preds)\n\n if value < best_value:\n best_value = value\n best_model = 'normalization: ' + np.str(row['NORMALIZATION']) + ', past_history_factor: ' + \\\n np.str(row['PAST_HISTORY_FACTOR']) + ', epochs: ' + np.str(row['EPOCHS']) + ', batch_size: ' \\\n + np.str(row['BATCH_SIZE']) + ', lr: ' + np.str(row['LEARNING_RATE']) + ', index: ' + \\\n np.str(row['MODEL_INDEX']) + ', description: ' + row['MODEL_DESCRIPTION']\n\n return best_value, best_model\n\n\ndef create_excels():\n \"\"\"It create the excels where the results are going to be saved\"\"\"\n if not os.path.exists('../results_best/'):\n os.mkdir('../results_best/')\n\n excel_metrics = pd.ExcelWriter('../results_best/metrics_by_predictions.xlsx', engine='openpyxl')\n excel_metrics.book = openpyxl.Workbook()\n\n excel_models = pd.ExcelWriter('../results_best/metrics_by_predictions_models.xlsx', engine='openpyxl')\n excel_models.book = openpyxl.Workbook()\n return excel_metrics, excel_models\n\n\ndef calculate_metrics(datasets, models, metrics, excel_metrics, excel_models):\n \"\"\"It calculate the metrics, of each model in each dataset, and save them into the excel\"\"\"\n columns_names = ['dataset'] + models\n\n for metric in metrics:\n res_metric = pd.DataFrame(columns=columns_names).set_index('dataset')\n res_model = pd.DataFrame(columns=columns_names).set_index('dataset')\n for dataset in datasets:\n\n results = pd.read_csv('../results/' + dataset + '/results.csv', sep=';', index_col='Unnamed: 0')\n row_metric = []\n row_model = []\n\n for model in models:\n value, model_value = get_best_prediction(results, metric, model, dataset)\n row_metric.append(value)\n row_model.append(model_value)\n\n res_metric.loc[dataset, :] = row_metric\n res_model.loc[dataset, :] = row_model\n\n res_metric.to_excel(excel_metrics, sheet_name=metric)\n res_model.to_excel(excel_models, sheet_name=metric)\n\n return excel_metrics, excel_models\n\n\ndef save_excels(excel_metrics, excel_models):\n \"\"\"It saves the excels with the information\"\"\"\n default_sheet_metrics = excel_metrics.book[excel_metrics.book.sheetnames[0]]\n excel_metrics.book.remove(default_sheet_metrics)\n excel_metrics.save()\n excel_metrics.close()\n\n default_sheet_models = excel_models.book[excel_models.book.sheetnames[0]]\n excel_models.book.remove(default_sheet_models)\n 
excel_models.save()\n excel_models.close()\n\n\ndef get_metrics():\n \"\"\"Calculate the best values for a metrics of each model of each dataset, and saves the results into the sheets\n of an excel\"\"\"\n metrics = ['mse', 'rmse', 'mae', 'wape', 'mase']\n datasets = os.listdir('../results/')\n models = get_models(datasets)\n\n excel_metrics, excel_models = create_excels()\n excel_metrics, excel_models = calculate_metrics(datasets, models, metrics, excel_metrics, excel_models)\n save_excels(excel_metrics, excel_models)\n\n print('[INFO] Values of the metrics by predictions saved into \"./results_best/metrics_by_predictions.xlsx\"')\n print('[INFO] Models description of the best models saved into \"./results_best/metrics_by_predictions_models.xlsx\"')\n\n\nif __name__ == '__main__':\n get_metrics()\n" ]
[ [ "pandas.DataFrame", "numpy.load", "numpy.str", "pandas.ExcelWriter", "pandas.read_csv" ] ]
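get_best_prediction above re-reads saved predictions from disk for every result row; a tiny pandas-only sketch of the same "best configuration per model" selection, with dummy metric values in place of the recomputed errors.

import pandas as pd

results = pd.DataFrame({
    "MODEL": ["lstm", "lstm", "tcn", "tcn"],
    "PAST_HISTORY_FACTOR": [1.25, 2.0, 1.25, 2.0],
    "mse": [10.4, 8.7, 9.9, 11.2],        # dummy values; the script recomputes these from .npy files
})

best = results.loc[results.groupby("MODEL")["mse"].idxmin()]
print(best[["MODEL", "PAST_HISTORY_FACTOR", "mse"]])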
GavinHuttley/c3test
[ "c5bf7f8252b4f7b75a851e28275536a8c378897a" ]
[ "src/cogent3/maths/stats/distribution.py" ]
[ "#!/usr/bin/env python\n\"\"\"Translations of functions from Release 2.3 of the Cephes Math Library,\nwhich is (c) Stephen L. Moshier 1984, 1995.\n\"\"\"\n\nfrom numpy import arctan as atan\nfrom numpy import exp, sqrt\n\nfrom cogent3.maths.stats.special import (\n MACHEP,\n MAXNUM,\n PI,\n SQRTH,\n betai,\n erf,\n erfc,\n expm1,\n fix_rounding_error,\n igam,\n igamc,\n igami,\n incbi,\n ln_binomial,\n log1p,\n ndtri,\n)\n\n\n# ndtri import b/c it should be available via this module\n\n\n__author__ = \"Rob Knight\"\n__copyright__ = \"Copyright 2007-2020, The Cogent Project\"\n__credits__ = [\"Rob Knight\", \"Sandra Smit\", \"Gavin Huttley\", \"Daniel McDonald\"]\n__license__ = \"BSD-3\"\n__version__ = \"2020.2.7a\"\n__maintainer__ = \"Rob Knight\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\nincbet = betai # shouldn't have renamed it...\n\n# Probability integrals: low gives left-hand tail, high gives right-hand tail.\n\n\ndef z_low(x):\n \"\"\"Returns left-hand tail of z distribution (0 to x).\n\n x ranges from -infinity to +infinity; result ranges from 0 to 1\n\n See Cephes docs for details.\"\"\"\n y = x * SQRTH\n z = abs(y) # distribution is symmetric\n if z < SQRTH:\n return 0.5 + 0.5 * erf(y)\n else:\n if y > 0:\n return 1 - 0.5 * erfc(z)\n else:\n return 0.5 * erfc(z)\n\n\ndef z_high(x):\n \"\"\"Returns right-hand tail of z distribution (0 to x).\n\n x ranges from -infinity to +infinity; result ranges from 0 to 1\n\n See Cephes docs for details.\"\"\"\n y = x * SQRTH\n z = abs(y)\n if z < SQRTH:\n return 0.5 - 0.5 * erf(y)\n else:\n if x < 0:\n return 1 - 0.5 * erfc(z)\n else:\n return 0.5 * erfc(z)\n\n\ndef zprob(x):\n \"\"\"Returns both tails of z distribution (-inf to -x, inf to x).\"\"\"\n return 2 * z_high(abs(x))\n\n\ndef chi_low(x, df):\n \"\"\"Returns left-hand tail of chi-square distribution (0 to x), given df.\n\n x ranges from 0 to infinity.\n\n df, the degrees of freedom, ranges from 1 to infinity (assume integers).\n Typically, df is (r-1)*(c-1) for a r by c table.\n\n Result ranges from 0 to 1.\n\n See Cephes docs for details.\n \"\"\"\n x = fix_rounding_error(x)\n if x < 0:\n raise ValueError(\"chi_low: x must be >= 0 (got %s).\" % x)\n if df < 1:\n raise ValueError(\"chi_low: df must be >= 1 (got %s).\" % df)\n return igam(df / 2, x / 2)\n\n\ndef chi_high(x, df):\n \"\"\"Returns right-hand tail of chi-square distribution (x to infinity).\n\n df, the degrees of freedom, ranges from 1 to infinity (assume integers).\n Typically, df is (r-1)*(c-1) for a r by c table.\n\n Result ranges from 0 to 1.\n\n See Cephes docs for details.\n \"\"\"\n x = fix_rounding_error(x)\n\n if x < 0:\n raise ValueError(\"chi_high: x must be >= 0 (got %s).\" % x)\n if df < 1:\n raise ValueError(\"chi_high: df must be >= 1 (got %s).\" % df)\n return igamc(df / 2, x / 2)\n\n\ndef t_low(t, df):\n \"\"\"Returns left-hand tail of Student's t distribution (-infinity to x).\n\n df, the degrees of freedom, ranges from 1 to infinity.\n Typically, df is (n-1) for a sample size of n.\n\n Result ranges from 0 to 1.\n\n See Cephes docs for details.\n \"\"\"\n if df < 1:\n raise ValueError(\"t_low: df must be >= 1 (got %s).\" % df)\n return stdtr(df, t)\n\n\ndef t_high(t, df):\n \"\"\"Returns right-hand tail of Student's t distribution (x to infinity).\n\n df, the degrees of freedom, ranges from 1 to infinity.\n Typically, df is (n-1) for a sample size of n.\n\n Result ranges from 0 to 1.\n\n See Cephes docs for details.\n \"\"\"\n if df < 1:\n raise ValueError(\"t_high: df must be 
>= 1 (got %s).\" % df)\n return stdtr(df, -t) # distribution is symmetric\n\n\ndef tprob(t, df):\n \"\"\"Returns both tails of t distribution (-infinity to -x, infinity to x)\"\"\"\n return 2 * t_high(abs(t), df)\n\n\ndef poisson_high(successes, mean):\n \"\"\"Returns right tail of Poission distribution, Pr(X > x).\n\n successes ranges from 0 to infinity. mean must be positive.\n \"\"\"\n return pdtrc(successes, mean)\n\n\ndef poisson_low(successes, mean):\n \"\"\"Returns left tail of Poisson distribution, Pr(X <= x).\n\n successes ranges from 0 to infinity. mean must be positive.\n \"\"\"\n return pdtr(successes, mean)\n\n\ndef poisson_exact(successes, mean):\n \"\"\"Returns Poisson probablity for exactly Pr(X=successes).\n\n Formula is e^-(mean) * mean^(successes) / (successes)!\n \"\"\"\n if successes == 0:\n return pdtr(0, mean)\n elif successes < mean: # use left tail\n return pdtr(successes, mean) - pdtr(successes - 1, mean)\n else: # successes > mean: use right tail\n return pdtrc(successes - 1, mean) - pdtrc(successes, mean)\n\n\ndef binomial_high(successes, trials, prob):\n \"\"\"Returns right-hand binomial tail (X > successes) given prob(success).\"\"\"\n if -1 <= successes < 0:\n return 1\n return bdtrc(successes, trials, prob)\n\n\ndef binomial_low(successes, trials, prob):\n \"\"\"Returns left-hand binomial tail (X <= successes) given prob(success).\"\"\"\n return bdtr(successes, trials, prob)\n\n\ndef binomial_exact(successes, trials, prob):\n \"\"\"Returns binomial probability of exactly X successes.\n\n Works for integer and floating point values.\n\n Note: this function is only a probability mass function for integer\n values of 'trials' and 'successes', i.e. if you sum up non-integer\n values you probably won't get a sum of 1.\n \"\"\"\n if (prob < 0) or (prob > 1):\n raise ValueError(\"Binomial prob must be between 0 and 1.\")\n if (successes < 0) or (trials < successes):\n raise ValueError(\"Binomial successes must be between 0 and trials.\")\n return exp(ln_binomial(successes, trials, prob))\n\n\ndef f_low(df1, df2, x):\n \"\"\"Returns left-hand tail of f distribution (0 to x).\n\n x ranges from 0 to infinity.\n\n Result ranges from 0 to 1.\n\n See Cephes docs for details.\n \"\"\"\n return fdtr(df1, df2, x)\n\n\ndef f_high(df1, df2, x):\n \"\"\"Returns right-hand tail of f distribution (x to infinity).\n\n Result ranges from 0 to 1.\n\n See Cephes docs for details.\n \"\"\"\n return fdtrc(df1, df2, x)\n\n\ndef fprob(dfn, dfd, F, side=\"right\"):\n \"\"\"Returns both tails of F distribution (-inf to F and F to inf)\n\n Use in case of two-tailed test. 
Usually this method is called by\n f_two_sample, so you don't have to worry about choosing the right side.\n\n side: right means return twice the right-hand tail of the F-distribution.\n Use in case var(a) > var (b)\n left means return twice the left-hand tail of the F-distribution.\n Use in case var(a) < var(b)\n \"\"\"\n if F < 0:\n raise ValueError(\"fprob: F must be >= 0 (got %s).\" % F)\n if side == \"right\":\n return 2 * f_high(dfn, dfd, F)\n elif side == \"left\":\n return 2 * f_low(dfn, dfd, F)\n else:\n raise ValueError(\"Not a valid value for side %s\" % (side))\n\n\ndef stdtr(k, t):\n \"\"\"Student's t distribution, -infinity to t.\n\n See Cephes docs for details.\n \"\"\"\n if k <= 0:\n raise ValueError(\"stdtr: df must be > 0.\")\n if t == 0:\n return 0.5\n if t < -2:\n rk = k\n z = rk / (rk + t * t)\n return 0.5 * betai(0.5 * rk, 0.5, z)\n # compute integral from -t to + t\n if t < 0:\n x = -t\n else:\n x = t\n\n rk = k # degrees of freedom\n z = 1 + (x * x) / rk\n # test if k is odd or even\n if (k & 1) != 0:\n # odd k\n xsqk = x / sqrt(rk)\n p = atan(xsqk)\n if k > 1:\n f = 1\n tz = 1\n j = 3\n while (j <= (k - 2)) and ((tz / f) > MACHEP):\n tz *= (j - 1) / (z * j)\n f += tz\n j += 2\n p += f * xsqk / z\n p *= 2 / PI\n else:\n # even k\n f = 1\n tz = 1\n j = 2\n while (j <= (k - 2)) and ((tz / f) > MACHEP):\n tz *= (j - 1) / (z * j)\n f += tz\n j += 2\n p = f * x / sqrt(z * rk)\n # common exit\n if t < 0:\n p = -p # note destruction of relative accuracy\n p = 0.5 + 0.5 * p\n return p\n\n\ndef bdtr(k, n, p):\n \"\"\"Binomial distribution, 0 through k.\n\n Uses formula bdtr(k, n, p) = betai(n-k, k+1, 1-p)\n\n See Cephes docs for details.\n \"\"\"\n p = fix_rounding_error(p)\n if (p < 0) or (p > 1):\n raise ValueError(\"Binomial p must be between 0 and 1.\")\n if (k < 0) or (n < k):\n raise ValueError(\"Binomial k must be between 0 and n.\")\n if k == n:\n return 1\n dn = n - k\n if k == 0:\n return pow(1 - p, dn)\n else:\n return betai(dn, k + 1, 1 - p)\n\n\ndef bdtrc(k, n, p):\n \"\"\"Complement of binomial distribution, k+1 through n.\n\n Uses formula bdtrc(k, n, p) = betai(k+1, n-k, p)\n\n See Cephes docs for details.\n \"\"\"\n p = fix_rounding_error(p)\n if (p < 0) or (p > 1):\n raise ValueError(\"Binomial p must be between 0 and 1.\")\n if (k < 0) or (n < k):\n raise ValueError(\"Binomial k must be between 0 and n.\")\n if k == n:\n return 0\n dn = n - k\n if k == 0:\n if p < 0.01:\n dk = -expm1(dn * log1p(-p))\n else:\n dk = 1 - pow(1.0 - p, dn)\n else:\n dk = k + 1\n dk = betai(dk, dn, p)\n return dk\n\n\ndef pdtr(k, m):\n \"\"\"Returns sum of left tail of Poisson distribution, 0 through k.\n\n See Cephes docs for details.\n \"\"\"\n if k < 0:\n raise ValueError(\"Poisson k must be >= 0.\")\n if m < 0:\n raise ValueError(\"Poisson m must be >= 0.\")\n return igamc(k + 1, m)\n\n\ndef pdtrc(k, m):\n \"\"\"Returns sum of right tail of Poisson distribution, k+1 through infinity.\n\n See Cephes docs for details.\n \"\"\"\n if k < 0:\n raise ValueError(\"Poisson k must be >= 0.\")\n if m < 0:\n raise ValueError(\"Poisson m must be >= 0.\")\n return igam(k + 1, m)\n\n\ndef fdtr(a, b, x):\n \"\"\"Returns left tail of F distribution, 0 to x.\n\n See Cephes docs for details.\n \"\"\"\n if min(a, b) < 1:\n raise ValueError(\"F a and b (degrees of freedom) must both be >= 1.\")\n if x < 0:\n raise ValueError(\"F distribution value of f must be >= 0.\")\n w = a * x\n w /= float(b + w)\n return betai(0.5 * a, 0.5 * b, w)\n\n\ndef fdtrc(a, b, x):\n \"\"\"Returns right tail of F 
distribution, x to infinity.\n\n See Cephes docs for details.\n \"\"\"\n if min(a, b) < 1:\n raise ValueError(\"F a and b (degrees of freedom) must both be >= 1.\")\n if x < 0:\n raise ValueError(\"F distribution value of f must be >= 0.\")\n w = float(b) / (b + a * x)\n return betai(0.5 * b, 0.5 * a, w)\n\n\ndef gdtr(a, b, x):\n \"\"\"Returns integral from 0 to x of Gamma distribution with params a and b.\n \"\"\"\n if x < 0.0:\n raise ZeroDivisionError(\"x must be at least 0.\")\n return igam(b, a * x)\n\n\ndef gdtrc(a, b, x):\n \"\"\"Returns integral from x to inf of Gamma distribution with params a and b.\n \"\"\"\n if x < 0.0:\n raise ZeroDivisionError(\"x must be at least 0.\")\n return igamc(b, a * x)\n\n\n# note: ndtri for the normal distribution is already imported\n\n\ndef chdtri(df, y):\n \"\"\"Returns inverse of chi-squared distribution.\"\"\"\n y = fix_rounding_error(y)\n if y < 0.0 or y > 1.0 or df < 1.0:\n raise ZeroDivisionError(\"y must be between 0 and 1; df >= 1\")\n return 2 * igami(0.5 * df, y)\n\n\ndef stdtri(k, p):\n \"\"\"Returns inverse of Student's t distribution. k = df.\"\"\"\n p = fix_rounding_error(p)\n # handle easy cases\n if k <= 0 or p < 0.0 or p > 1.0:\n raise ZeroDivisionError(\"k must be >= 1, p between 1 and 0.\")\n rk = k\n # handle intermediate values\n if p > 0.25 and p < 0.75:\n if p == 0.5:\n return 0.0\n z = 1.0 - 2.0 * p\n z = incbi(0.5, 0.5 * rk, abs(z))\n t = sqrt(rk * z / (1.0 - z))\n if p < 0.5:\n t = -t\n return t\n # handle extreme values\n rflg = -1\n if p >= 0.5:\n p = 1.0 - p\n rflg = 1\n z = incbi(0.5 * rk, 0.5, 2.0 * p)\n\n if MAXNUM * z < rk:\n return rflg * MAXNUM\n t = sqrt(rk / z - rk)\n return rflg * t\n\n\ndef pdtri(k, p):\n \"\"\"Inverse of Poisson distribution.\n\n Finds Poission mean such that integral from 0 to k is p.\n \"\"\"\n p = fix_rounding_error(p)\n if k < 0 or p < 0.0 or p >= 1.0:\n raise ZeroDivisionError(\"k must be >=0, p between 1 and 0.\")\n v = k + 1\n return igami(v, p)\n\n\ndef bdtri(k, n, y):\n \"\"\"Inverse of binomial distribution.\n\n Finds binomial p such that sum of terms 0-k reaches cum probability y.\n \"\"\"\n y = fix_rounding_error(y)\n if y < 0.0 or y > 1.0:\n raise ZeroDivisionError(\"y must be between 1 and 0.\")\n if k < 0 or n <= k:\n raise ZeroDivisionError(\"k must be between 0 and n\")\n dn = n - k\n if k == 0:\n if y > 0.8:\n p = -expm1(log1p(y - 1.0) / dn)\n else:\n p = 1.0 - y ** (1.0 / dn)\n else:\n dk = k + 1\n p = incbet(dn, dk, 0.5)\n if p > 0.5:\n p = incbi(dk, dn, 1.0 - y)\n else:\n p = 1.0 - incbi(dn, dk, y)\n return p\n\n\ndef gdtri(a, b, y):\n \"\"\"Returns Gamma such that y is the probability in the integral.\n\n WARNING: if 1-y == 1, gives incorrect result. The scipy implementation\n gets around this by using cdflib, which is in Fortran. 
Until someone\n gets around to translating that, only use this function for values of\n p greater than 1e-15 or so!\n \"\"\"\n y = fix_rounding_error(y)\n if y < 0.0 or y > 1.0 or a <= 0.0 or b < 0.0:\n raise ZeroDivisionError(\"a and b must be non-negative, y from 0 to 1.\")\n return igami(b, 1.0 - y) / a\n\n\ndef fdtri(a, b, y):\n \"\"\"Returns inverse of F distribution.\"\"\"\n y = fix_rounding_error(y)\n if a < 1.0 or b < 1.0 or y <= 0.0 or y > 1.0:\n raise ZeroDivisionError(\"y must be between 0 and 1; a and b >= 1\")\n y = 1.0 - y\n # Compute probability for x = 0.5\n w = incbet(0.5 * b, 0.5 * a, 0.5)\n # If that is greater than y, then the solution w < .5.\n # Otherwise, solve at 1-y to remove cancellation in (b - b*w).\n if w > y or y < 0.001:\n w = incbi(0.5 * b, 0.5 * a, y)\n x = (b - b * w) / (a * w)\n else:\n w = incbi(0.5 * a, 0.5 * b, 1.0 - y)\n x = b * w / (a * (1.0 - w))\n return x\n" ]
[ [ "numpy.arctan", "numpy.sqrt" ] ]
knaaptime/healthacc
[ "0ffae2a7a223e9b319bff4920ca3a29a63dd0494" ]
[ "healthacc/network.py" ]
[ "import osmnet\nimport pandas as pd\nimport pandana as pdna\nimport urbanaccess as ua\nfrom . import feeds_from_bbox\n\n\ndef multimodal_from_bbox(\n bbox,\n gtfs_dir=None,\n save_osm=None,\n save_gtfs=None,\n excluded_feeds=None,\n transit_net_kwargs=None,\n headways=False,\n additional_feeds=None\n):\n \"\"\"Generate a combined walk/transit pandana Network from a bounding box of latitudes and longitudes\n\n Parameters\n ----------\n bbox : tuple\n A bounding box formatted as (lng_max, lat_min, lng_min, lat_max). e.g. For a geodataframe\n stored in epsg 4326, this can be obtained with geodataframe.total_bounds\n gtfs_dir : str, optional\n path to directory for storing downloaded GTFS data. If None, the current directory will be used\n save_osm : str, optional\n Path to store the intermediate OSM Network as an h5 file\n save_gtfs : str, optional\n Path to store the intermediate GTFS Network as an h5 file\n excluded_feeds : list, optional\n list of feed names to exclude from the GTFS downloaded\n transit_net_kwargs : dict, optional\n additional keyword arguments to be passed to the urbanaccess GTFS network instantiator.\n defaults to {'day':\"monday\", 'timerange':[\"07:00:00\", \"10:00:00\"]}\n headways : bool, optional\n Whether to include headway calculations for the combined network\n additional_feeds : dict, optional\n Dictionary of additional feed locations in case they are not hosted on transitland.\n Should be specified as {transitagencyname: url} \n\n Returns\n -------\n pandana.Network\n a multimodal (walk/transit) Network object built from OSM and GTFS data that lie within the bounding box\n \"\"\" \n assert bbox is not None, \"You must provide a bounding box to collect network data\"\n if not gtfs_dir:\n gtfs_dir=\".\"\n\n if not transit_net_kwargs:\n transit_net_kwargs = dict(\n day=\"monday\", timerange=[\"07:00:00\", \"10:00:00\"], calendar_dates_lookup=None\n )\n\n\n # Get gtfs data\n feeds = feeds_from_bbox(bbox)\n\n if excluded_feeds: # remove problematic feeds if necessary\n for feed in list(feeds.keys()):\n if feed in excluded_feeds:\n feeds.pop(feed)\n \n if len(ua.gtfsfeeds.feeds.to_dict()[\"gtfs_feeds\"]) > 0:\n ua.gtfsfeeds.feeds.remove_feed(\n remove_all=True\n ) # feeds object is global so reset it if there's anything leftover\n \n ua.gtfsfeeds.feeds.add_feed(feeds)\n if additional_feeds:\n ua.gtfsfeeds.feeds.add_feed(additional_feeds)\n\n ua.gtfsfeeds.download()\n\n loaded_feeds = ua.gtfs.load.gtfsfeed_to_df(\n f\"{gtfs_dir}/data/gtfsfeed_text/\", bbox=bbox, remove_stops_outsidebbox=True\n )\n if save_gtfs:\n ua_to_h5(loaded_feeds, f\"{gtfs_dir}/{save_gtfs}\")\n\n\n # Get OSM data\n nodes, edges = osmnet.network_from_bbox(bbox=bbox)\n osm_network = pdna.Network(\n nodes[\"x\"], nodes[\"y\"], edges[\"from\"], edges[\"to\"], edges[[\"distance\"]]\n )\n if save_osm:\n osm_network.save_hdf5(save_osm)\n\n\n # Create the transit network\n ua.create_transit_net(gtfsfeeds_dfs=loaded_feeds, **transit_net_kwargs)\n osm_network.nodes_df['id'] = osm_network.nodes_df.index\n\n ua.create_osm_net(\n osm_edges=osm_network.edges_df,\n osm_nodes=osm_network.nodes_df,\n travel_speed_mph=3,\n )\n if headways:\n ua.gtfs.headways.headways(\n gtfsfeeds_df=loaded_feeds, headway_timerange=transit_net_kwargs[\"timerange\"]\n )\n ua.network.integrate_network(\n urbanaccess_network=ua.ua_network,\n headways=True,\n urbanaccess_gtfsfeeds_df=loaded_feeds,\n headway_statistic=\"mean\",\n )\n else:\n ua.integrate_network(urbanaccess_network=ua.ua_network, headways=False)\n\n combined_net = 
pdna.Network(\n ua.ua_network.net_nodes[\"x\"],\n ua.ua_network.net_nodes[\"y\"],\n ua.ua_network.net_edges[\"from_int\"],\n ua.ua_network.net_edges[\"to_int\"],\n ua.ua_network.net_edges[[\"weight\"]],\n )\n\n return combined_net\n\n\ndef ua_to_h5(loaded_feeds, path):\n\n hdf = pd.HDFStore(path)\n hdf[\"calendar\"] = loaded_feeds.calendar\n hdf[\"calendar_dates\"] = loaded_feeds.calendar_dates\n hdf[\"headways\"] = loaded_feeds.headways\n hdf[\"routes\"] = loaded_feeds.routes\n hdf[\"stop_times\"] = loaded_feeds.stop_times\n hdf[\"stop_times_int\"] = loaded_feeds.stops\n hdf[\"stops\"] = loaded_feeds.stops\n hdf[\"trips\"] = loaded_feeds.trips\n hdf.close()\n" ]
[ [ "pandas.HDFStore" ] ]
TIFOSI528/icefall
[ "6f7860a0a60b53026216fa4ba19048955951333e" ]
[ "egs/librispeech/ASR/pruned_transducer_stateless3/train.py" ]
[ "#!/usr/bin/env python3\n# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang,\n# Wei Kang\n# Mingshuang Luo)\n#\n# See ../../../../LICENSE for clarification regarding multiple authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nUsage:\n\nexport CUDA_VISIBLE_DEVICES=\"0,1,2,3\"\n\ncd egs/librispeech/ASR/\n./prepare.sh\n./prepare_giga_speech.sh\n\n./pruned_transducer_stateless3/train.py \\\n --world-size 4 \\\n --num-epochs 30 \\\n --start-epoch 0 \\\n --exp-dir pruned_transducer_stateless3/exp \\\n --full-libri 1 \\\n --max-duration 300\n\n# For mix precision training:\n\n./pruned_transducer_stateless3/train.py \\\n --world-size 4 \\\n --num-epochs 30 \\\n --start-epoch 0 \\\n --use_fp16 1 \\\n --exp-dir pruned_transducer_stateless3/exp \\\n --full-libri 1 \\\n --max-duration 550\n\n\"\"\"\n\n\nimport argparse\nimport logging\nimport random\nimport warnings\nfrom pathlib import Path\nfrom shutil import copyfile\nfrom typing import Any, Dict, Optional, Tuple, Union\n\nimport k2\nimport optim\nimport sentencepiece as spm\nimport torch\nimport torch.multiprocessing as mp\nimport torch.nn as nn\nfrom asr_datamodule import AsrDataModule\nfrom conformer import Conformer\nfrom decoder import Decoder\nfrom gigaspeech import GigaSpeech\nfrom joiner import Joiner\nfrom lhotse import CutSet, load_manifest\nfrom lhotse.cut import Cut\nfrom lhotse.dataset.sampling.base import CutSampler\nfrom lhotse.utils import fix_random_seed\nfrom librispeech import LibriSpeech\nfrom model import Transducer\nfrom optim import Eden, Eve\nfrom torch import Tensor\nfrom torch.cuda.amp import GradScaler\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom icefall import diagnostics\nfrom icefall.checkpoint import load_checkpoint, remove_checkpoints\nfrom icefall.checkpoint import save_checkpoint as save_checkpoint_impl\nfrom icefall.checkpoint import save_checkpoint_with_global_batch_idx\nfrom icefall.dist import cleanup_dist, setup_dist\nfrom icefall.env import get_env_info\nfrom icefall.utils import AttributeDict, MetricsTracker, setup_logger, str2bool\n\nLRSchedulerType = Union[\n torch.optim.lr_scheduler._LRScheduler, optim.LRScheduler\n]\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n\n parser.add_argument(\n \"--world-size\",\n type=int,\n default=1,\n help=\"Number of GPUs for DDP training.\",\n )\n\n parser.add_argument(\n \"--master-port\",\n type=int,\n default=12354,\n help=\"Master port to use for DDP training.\",\n )\n\n parser.add_argument(\n \"--tensorboard\",\n type=str2bool,\n default=True,\n help=\"Should various information be logged in tensorboard.\",\n )\n\n parser.add_argument(\n \"--full-libri\",\n type=str2bool,\n default=True,\n help=\"When enabled, use 960h LibriSpeech. 
\"\n \"Otherwise, use 100h subset.\",\n )\n\n parser.add_argument(\n \"--num-epochs\",\n type=int,\n default=30,\n help=\"Number of epochs to train.\",\n )\n\n parser.add_argument(\n \"--start-epoch\",\n type=int,\n default=0,\n help=\"\"\"Resume training from from this epoch.\n If it is positive, it will load checkpoint from\n transducer_stateless3/exp/epoch-{start_epoch-1}.pt\n \"\"\",\n )\n\n parser.add_argument(\n \"--start-batch\",\n type=int,\n default=0,\n help=\"\"\"If positive, --start-epoch is ignored and\n it loads the checkpoint from exp-dir/checkpoint-{start_batch}.pt\n \"\"\",\n )\n\n parser.add_argument(\n \"--exp-dir\",\n type=str,\n default=\"pruned_transducer_stateless3/exp\",\n help=\"\"\"The experiment dir.\n It specifies the directory where all training related\n files, e.g., checkpoints, log, etc, are saved\n \"\"\",\n )\n\n parser.add_argument(\n \"--bpe-model\",\n type=str,\n default=\"data/lang_bpe_500/bpe.model\",\n help=\"Path to the BPE model\",\n )\n\n parser.add_argument(\n \"--initial-lr\",\n type=float,\n default=0.003,\n help=\"The initial learning rate. This value should not need \"\n \"to be changed.\",\n )\n\n parser.add_argument(\n \"--lr-batches\",\n type=float,\n default=5000,\n help=\"\"\"Number of steps that affects how rapidly the learning rate decreases.\n We suggest not to change this.\"\"\",\n )\n\n parser.add_argument(\n \"--lr-epochs\",\n type=float,\n default=4,\n help=\"\"\"Number of epochs that affects how rapidly the learning rate decreases.\n \"\"\",\n )\n\n parser.add_argument(\n \"--context-size\",\n type=int,\n default=2,\n help=\"The context size in the decoder. 1 means bigram; \"\n \"2 means tri-gram\",\n )\n\n parser.add_argument(\n \"--prune-range\",\n type=int,\n default=5,\n help=\"The prune range for rnnt loss, it means how many symbols(context)\"\n \"we are using to compute the loss\",\n )\n\n parser.add_argument(\n \"--lm-scale\",\n type=float,\n default=0.25,\n help=\"The scale to smooth the loss with lm \"\n \"(output of prediction network) part.\",\n )\n\n parser.add_argument(\n \"--am-scale\",\n type=float,\n default=0.0,\n help=\"The scale to smooth the loss with am (output of encoder network)\"\n \"part.\",\n )\n\n parser.add_argument(\n \"--simple-loss-scale\",\n type=float,\n default=0.5,\n help=\"To get pruning ranges, we will calculate a simple version\"\n \"loss(joiner is just addition), this simple loss also uses for\"\n \"training (as a regularization item). We will scale the simple loss\"\n \"with this parameter before adding to the final loss.\",\n )\n\n parser.add_argument(\n \"--seed\",\n type=int,\n default=42,\n help=\"The seed for random generators intended for reproducibility\",\n )\n\n parser.add_argument(\n \"--print-diagnostics\",\n type=str2bool,\n default=False,\n help=\"Accumulate stats on activations, print them and exit.\",\n )\n\n parser.add_argument(\n \"--save-every-n\",\n type=int,\n default=8000,\n help=\"\"\"Save checkpoint after processing this number of batches\"\n periodically. We save checkpoint to exp-dir/ whenever\n params.batch_idx_train % save_every_n == 0. 
The checkpoint filename\n has the form: f'exp-dir/checkpoint-{params.batch_idx_train}.pt'\n Note: It also saves checkpoint to `exp-dir/epoch-xxx.pt` at the\n end of each epoch where `xxx` is the epoch number counting from 0.\n \"\"\",\n )\n\n parser.add_argument(\n \"--keep-last-k\",\n type=int,\n default=20,\n help=\"\"\"Only keep this number of checkpoints on disk.\n For instance, if it is 3, there are only 3 checkpoints\n in the exp-dir with filenames `checkpoint-xxx.pt`.\n It does not affect checkpoints with name `epoch-xxx.pt`.\n \"\"\",\n )\n\n parser.add_argument(\n \"--use-fp16\",\n type=str2bool,\n default=False,\n help=\"Whether to use half precision training.\",\n )\n\n parser.add_argument(\n \"--giga-prob\",\n type=float,\n default=0.5,\n help=\"The probability to select a batch from the GigaSpeech dataset\",\n )\n\n return parser\n\n\ndef get_params() -> AttributeDict:\n \"\"\"Return a dict containing training parameters.\n\n All training related parameters that are not passed from the commandline\n are saved in the variable `params`.\n\n Commandline options are merged into `params` after they are parsed, so\n you can also access them via `params`.\n\n Explanation of options saved in `params`:\n\n - best_train_loss: Best training loss so far. It is used to select\n the model that has the lowest training loss. It is\n updated during the training.\n\n - best_valid_loss: Best validation loss so far. It is used to select\n the model that has the lowest validation loss. It is\n updated during the training.\n\n - best_train_epoch: It is the epoch that has the best training loss.\n\n - best_valid_epoch: It is the epoch that has the best validation loss.\n\n - batch_idx_train: Used to writing statistics to tensorboard. It\n contains number of batches trained so far across\n epochs.\n\n - log_interval: Print training loss if batch_idx % log_interval` is 0\n\n - reset_interval: Reset statistics if batch_idx % reset_interval is 0\n\n - valid_interval: Run validation if batch_idx % valid_interval is 0\n\n - feature_dim: The model input dim. 
It has to match the one used\n in computing features.\n\n - subsampling_factor: The subsampling factor for the model.\n\n - encoder_dim: Hidden dim for multi-head attention model.\n\n - num_decoder_layers: Number of decoder layer of transformer decoder.\n\n - warm_step: The warm_step for Noam optimizer.\n \"\"\"\n params = AttributeDict(\n {\n \"best_train_loss\": float(\"inf\"),\n \"best_valid_loss\": float(\"inf\"),\n \"best_train_epoch\": -1,\n \"best_valid_epoch\": -1,\n \"batch_idx_train\": 0,\n \"log_interval\": 50,\n \"reset_interval\": 200,\n \"valid_interval\": 3000, # For the 100h subset, use 800\n # parameters for conformer\n \"feature_dim\": 80,\n \"subsampling_factor\": 4,\n \"encoder_dim\": 512,\n \"nhead\": 8,\n \"dim_feedforward\": 2048,\n \"num_encoder_layers\": 12,\n # parameters for decoder\n \"decoder_dim\": 512,\n # parameters for joiner\n \"joiner_dim\": 512,\n # parameters for Noam\n \"model_warm_step\": 3000, # arg given to model, not for lrate\n \"env_info\": get_env_info(),\n }\n )\n\n return params\n\n\ndef get_encoder_model(params: AttributeDict) -> nn.Module:\n # TODO: We can add an option to switch between Conformer and Transformer\n encoder = Conformer(\n num_features=params.feature_dim,\n subsampling_factor=params.subsampling_factor,\n d_model=params.encoder_dim,\n nhead=params.nhead,\n dim_feedforward=params.dim_feedforward,\n num_encoder_layers=params.num_encoder_layers,\n )\n return encoder\n\n\ndef get_decoder_model(params: AttributeDict) -> nn.Module:\n decoder = Decoder(\n vocab_size=params.vocab_size,\n decoder_dim=params.decoder_dim,\n blank_id=params.blank_id,\n context_size=params.context_size,\n )\n return decoder\n\n\ndef get_joiner_model(params: AttributeDict) -> nn.Module:\n joiner = Joiner(\n encoder_dim=params.encoder_dim,\n decoder_dim=params.decoder_dim,\n joiner_dim=params.joiner_dim,\n vocab_size=params.vocab_size,\n )\n return joiner\n\n\ndef get_transducer_model(params: AttributeDict) -> nn.Module:\n encoder = get_encoder_model(params)\n decoder = get_decoder_model(params)\n joiner = get_joiner_model(params)\n\n decoder_giga = get_decoder_model(params)\n joiner_giga = get_joiner_model(params)\n\n model = Transducer(\n encoder=encoder,\n decoder=decoder,\n joiner=joiner,\n decoder_giga=decoder_giga,\n joiner_giga=joiner_giga,\n encoder_dim=params.encoder_dim,\n decoder_dim=params.decoder_dim,\n joiner_dim=params.joiner_dim,\n vocab_size=params.vocab_size,\n )\n return model\n\n\ndef load_checkpoint_if_available(\n params: AttributeDict,\n model: nn.Module,\n optimizer: Optional[torch.optim.Optimizer] = None,\n scheduler: Optional[LRSchedulerType] = None,\n) -> Optional[Dict[str, Any]]:\n \"\"\"Load checkpoint from file.\n\n If params.start_batch is positive, it will load the checkpoint from\n `params.exp_dir/checkpoint-{params.start_batch}.pt`. 
Otherwise, if\n params.start_epoch is positive, it will load the checkpoint from\n `params.start_epoch - 1`.\n\n Apart from loading state dict for `model` and `optimizer` it also updates\n `best_train_epoch`, `best_train_loss`, `best_valid_epoch`,\n and `best_valid_loss` in `params`.\n\n Args:\n params:\n The return value of :func:`get_params`.\n model:\n The training model.\n optimizer:\n The optimizer that we are using.\n scheduler:\n The scheduler that we are using.\n Returns:\n Return a dict containing previously saved training info.\n \"\"\"\n if params.start_batch > 0:\n filename = params.exp_dir / f\"checkpoint-{params.start_batch}.pt\"\n elif params.start_epoch > 0:\n filename = params.exp_dir / f\"epoch-{params.start_epoch-1}.pt\"\n else:\n return None\n\n assert filename.is_file(), f\"{filename} does not exist!\"\n\n saved_params = load_checkpoint(\n filename,\n model=model,\n optimizer=optimizer,\n scheduler=scheduler,\n )\n\n keys = [\n \"best_train_epoch\",\n \"best_valid_epoch\",\n \"batch_idx_train\",\n \"best_train_loss\",\n \"best_valid_loss\",\n ]\n for k in keys:\n params[k] = saved_params[k]\n\n if params.start_batch > 0:\n if \"cur_epoch\" in saved_params:\n params[\"start_epoch\"] = saved_params[\"cur_epoch\"]\n\n return saved_params\n\n\ndef save_checkpoint(\n params: AttributeDict,\n model: nn.Module,\n optimizer: Optional[torch.optim.Optimizer] = None,\n scheduler: Optional[LRSchedulerType] = None,\n sampler: Optional[CutSampler] = None,\n scaler: Optional[GradScaler] = None,\n rank: int = 0,\n) -> None:\n \"\"\"Save model, optimizer, scheduler and training stats to file.\n\n Args:\n params:\n It is returned by :func:`get_params`.\n model:\n The training model.\n optimizer:\n The optimizer used in the training.\n sampler:\n The sampler for the training dataset.\n scaler:\n The scaler used for mix precision training.\n \"\"\"\n if rank != 0:\n return\n filename = params.exp_dir / f\"epoch-{params.cur_epoch}.pt\"\n save_checkpoint_impl(\n filename=filename,\n model=model,\n params=params,\n optimizer=optimizer,\n scheduler=scheduler,\n sampler=sampler,\n scaler=scaler,\n rank=rank,\n )\n\n if params.best_train_epoch == params.cur_epoch:\n best_train_filename = params.exp_dir / \"best-train-loss.pt\"\n copyfile(src=filename, dst=best_train_filename)\n\n if params.best_valid_epoch == params.cur_epoch:\n best_valid_filename = params.exp_dir / \"best-valid-loss.pt\"\n copyfile(src=filename, dst=best_valid_filename)\n\n\ndef is_libri(c: Cut) -> bool:\n \"\"\"Return True if this cut is from the LibriSpeech dataset.\n\n Note:\n During data preparation, we set the custom field in\n the supervision segment of GigaSpeech to dict(origin='giga')\n See ../local/preprocess_gigaspeech.py.\n \"\"\"\n return c.supervisions[0].custom is None\n\n\ndef compute_loss(\n params: AttributeDict,\n model: nn.Module,\n sp: spm.SentencePieceProcessor,\n batch: dict,\n is_training: bool,\n warmup: float = 1.0,\n) -> Tuple[Tensor, MetricsTracker]:\n \"\"\"\n Compute CTC loss given the model and its inputs.\n\n Args:\n params:\n Parameters for training. See :func:`get_params`.\n model:\n The model for training. It is an instance of Conformer in our case.\n batch:\n A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()`\n for the content in it.\n is_training:\n True for training. False for validation. 
When it is True, this\n function enables autograd during computation; when it is False, it\n disables autograd.\n warmup: a floating point value which increases throughout training;\n values >= 1.0 are fully warmed up and have all modules present.\n \"\"\"\n device = model.device\n feature = batch[\"inputs\"]\n # at entry, feature is (N, T, C)\n assert feature.ndim == 3\n feature = feature.to(device)\n\n supervisions = batch[\"supervisions\"]\n feature_lens = supervisions[\"num_frames\"].to(device)\n\n libri = is_libri(supervisions[\"cut\"][0])\n\n texts = batch[\"supervisions\"][\"text\"]\n y = sp.encode(texts, out_type=int)\n y = k2.RaggedTensor(y).to(device)\n\n with torch.set_grad_enabled(is_training):\n simple_loss, pruned_loss = model(\n x=feature,\n x_lens=feature_lens,\n y=y,\n libri=libri,\n prune_range=params.prune_range,\n am_scale=params.am_scale,\n lm_scale=params.lm_scale,\n warmup=warmup,\n )\n # after the main warmup step, we keep pruned_loss_scale small\n # for the same amount of time (model_warm_step), to avoid\n # overwhelming the simple_loss and causing it to diverge,\n # in case it had not fully learned the alignment yet.\n pruned_loss_scale = (\n 0.0\n if warmup < 1.0\n else (0.1 if warmup > 1.0 and warmup < 2.0 else 1.0)\n )\n loss = (\n params.simple_loss_scale * simple_loss\n + pruned_loss_scale * pruned_loss\n )\n\n assert loss.requires_grad == is_training\n\n info = MetricsTracker()\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n info[\"frames\"] = (\n (feature_lens // params.subsampling_factor).sum().item()\n )\n\n # Note: We use reduction=sum while computing the loss.\n info[\"loss\"] = loss.detach().cpu().item()\n info[\"simple_loss\"] = simple_loss.detach().cpu().item()\n info[\"pruned_loss\"] = pruned_loss.detach().cpu().item()\n\n return loss, info\n\n\ndef compute_validation_loss(\n params: AttributeDict,\n model: nn.Module,\n sp: spm.SentencePieceProcessor,\n valid_dl: torch.utils.data.DataLoader,\n world_size: int = 1,\n) -> MetricsTracker:\n \"\"\"Run the validation process.\"\"\"\n model.eval()\n\n tot_loss = MetricsTracker()\n\n for batch_idx, batch in enumerate(valid_dl):\n loss, loss_info = compute_loss(\n params=params,\n model=model,\n sp=sp,\n batch=batch,\n is_training=False,\n )\n assert loss.requires_grad is False\n tot_loss = tot_loss + loss_info\n\n if world_size > 1:\n tot_loss.reduce(loss.device)\n\n loss_value = tot_loss[\"loss\"] / tot_loss[\"frames\"]\n if loss_value < params.best_valid_loss:\n params.best_valid_epoch = params.cur_epoch\n params.best_valid_loss = loss_value\n\n return tot_loss\n\n\ndef train_one_epoch(\n params: AttributeDict,\n model: nn.Module,\n optimizer: torch.optim.Optimizer,\n scheduler: LRSchedulerType,\n sp: spm.SentencePieceProcessor,\n train_dl: torch.utils.data.DataLoader,\n giga_train_dl: torch.utils.data.DataLoader,\n valid_dl: torch.utils.data.DataLoader,\n rng: random.Random,\n scaler: GradScaler,\n tb_writer: Optional[SummaryWriter] = None,\n world_size: int = 1,\n rank: int = 0,\n) -> None:\n \"\"\"Train the model for one epoch.\n\n The training loss from the mean of all frames is saved in\n `params.train_loss`. 
It runs the validation process every\n `params.valid_interval` batches.\n\n Args:\n params:\n It is returned by :func:`get_params`.\n model:\n The model for training.\n optimizer:\n The optimizer we are using.\n scheduler:\n The learning rate scheduler, we call step() every step.\n train_dl:\n Dataloader for the training dataset.\n giga_train_dl:\n Dataloader for the GigaSpeech training dataset.\n valid_dl:\n Dataloader for the validation dataset.\n rng:\n For selecting which dataset to use.\n scaler:\n The scaler used for mix precision training.\n tb_writer:\n Writer to write log messages to tensorboard.\n world_size:\n Number of nodes in DDP training. If it is 1, DDP is disabled.\n rank:\n The rank of the node in DDP training. If no DDP is used, it should\n be set to 0.\n \"\"\"\n model.train()\n\n libri_tot_loss = MetricsTracker()\n giga_tot_loss = MetricsTracker()\n tot_loss = MetricsTracker()\n\n # index 0: for LibriSpeech\n # index 1: for GigaSpeech\n # This sets the probabilities for choosing which datasets\n dl_weights = [1 - params.giga_prob, params.giga_prob]\n\n iter_libri = iter(train_dl)\n iter_giga = iter(giga_train_dl)\n\n batch_idx = 0\n\n while True:\n idx = rng.choices((0, 1), weights=dl_weights, k=1)[0]\n dl = iter_libri if idx == 0 else iter_giga\n\n try:\n batch = next(dl)\n except StopIteration:\n name = \"libri\" if idx == 0 else \"giga\"\n logging.info(f\"{name} reaches end of dataloader\")\n break\n\n batch_idx += 1\n\n params.batch_idx_train += 1\n batch_size = len(batch[\"supervisions\"][\"text\"])\n\n libri = is_libri(batch[\"supervisions\"][\"cut\"][0])\n\n with torch.cuda.amp.autocast(enabled=params.use_fp16):\n loss, loss_info = compute_loss(\n params=params,\n model=model,\n sp=sp,\n batch=batch,\n is_training=True,\n warmup=(params.batch_idx_train / params.model_warm_step),\n )\n # summary stats\n tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info\n\n if libri:\n libri_tot_loss = (\n libri_tot_loss * (1 - 1 / params.reset_interval)\n ) + loss_info\n prefix = \"libri\" # for logging only\n else:\n giga_tot_loss = (\n giga_tot_loss * (1 - 1 / params.reset_interval)\n ) + loss_info\n prefix = \"giga\"\n\n # NOTE: We use reduction==sum and loss is computed over utterances\n # in the batch and there is no normalization to it so far.\n scaler.scale(loss).backward()\n scheduler.step_batch(params.batch_idx_train)\n scaler.step(optimizer)\n scaler.update()\n optimizer.zero_grad()\n\n if params.print_diagnostics and batch_idx == 5:\n return\n\n if (\n params.batch_idx_train > 0\n and params.batch_idx_train % params.save_every_n == 0\n ):\n save_checkpoint_with_global_batch_idx(\n out_dir=params.exp_dir,\n global_batch_idx=params.batch_idx_train,\n model=model,\n params=params,\n optimizer=optimizer,\n scheduler=scheduler,\n sampler=train_dl.sampler,\n scaler=scaler,\n rank=rank,\n )\n remove_checkpoints(\n out_dir=params.exp_dir,\n topk=params.keep_last_k,\n rank=rank,\n )\n\n if batch_idx % params.log_interval == 0:\n cur_lr = scheduler.get_last_lr()[0]\n logging.info(\n f\"Epoch {params.cur_epoch}, \"\n f\"batch {batch_idx}, {prefix}_loss[{loss_info}], \"\n f\"tot_loss[{tot_loss}], \"\n f\"libri_tot_loss[{libri_tot_loss}], \"\n f\"giga_tot_loss[{giga_tot_loss}], \"\n f\"batch size: {batch_size}\"\n f\"lr: {cur_lr:.2e}\"\n )\n\n if tb_writer is not None:\n tb_writer.add_scalar(\n \"train/learning_rate\", cur_lr, params.batch_idx_train\n )\n\n loss_info.write_summary(\n tb_writer,\n f\"train/current_{prefix}_\",\n params.batch_idx_train,\n )\n 
tot_loss.write_summary(\n tb_writer, \"train/tot_\", params.batch_idx_train\n )\n libri_tot_loss.write_summary(\n tb_writer, \"train/libri_tot_\", params.batch_idx_train\n )\n giga_tot_loss.write_summary(\n tb_writer, \"train/giga_tot_\", params.batch_idx_train\n )\n\n if batch_idx > 0 and batch_idx % params.valid_interval == 0:\n logging.info(\"Computing validation loss\")\n valid_info = compute_validation_loss(\n params=params,\n model=model,\n sp=sp,\n valid_dl=valid_dl,\n world_size=world_size,\n )\n model.train()\n logging.info(f\"Epoch {params.cur_epoch}, validation: {valid_info}\")\n if tb_writer is not None:\n valid_info.write_summary(\n tb_writer, \"train/valid_\", params.batch_idx_train\n )\n\n loss_value = tot_loss[\"loss\"] / tot_loss[\"frames\"]\n params.train_loss = loss_value\n if params.train_loss < params.best_train_loss:\n params.best_train_epoch = params.cur_epoch\n params.best_train_loss = params.train_loss\n\n\ndef filter_short_and_long_utterances(cuts: CutSet) -> CutSet:\n def remove_short_and_long_utt(c: Cut):\n # Keep only utterances with duration between 1 second and 20 seconds\n #\n # Caution: There is a reason to select 20.0 here. Please see\n # ../local/display_manifest_statistics.py\n #\n # You should use ../local/display_manifest_statistics.py to get\n # an utterance duration distribution for your dataset to select\n # the threshold\n return 1.0 <= c.duration <= 20.0\n\n cuts = cuts.filter(remove_short_and_long_utt)\n\n return cuts\n\n\ndef run(rank, world_size, args):\n \"\"\"\n Args:\n rank:\n It is a value between 0 and `world_size-1`, which is\n passed automatically by `mp.spawn()` in :func:`main`.\n The node with rank 0 is responsible for saving checkpoint.\n world_size:\n Number of GPUs for DDP training.\n args:\n The return value of get_parser().parse_args()\n \"\"\"\n params = get_params()\n params.update(vars(args))\n if params.full_libri is False:\n params.valid_interval = 1600\n\n fix_random_seed(params.seed)\n rng = random.Random(params.seed)\n if world_size > 1:\n setup_dist(rank, world_size, params.master_port)\n\n setup_logger(f\"{params.exp_dir}/log/log-train\")\n logging.info(\"Training started\")\n\n if args.tensorboard and rank == 0:\n tb_writer = SummaryWriter(log_dir=f\"{params.exp_dir}/tensorboard\")\n else:\n tb_writer = None\n\n device = torch.device(\"cpu\")\n if torch.cuda.is_available():\n device = torch.device(\"cuda\", rank)\n logging.info(f\"Device: {device}\")\n\n sp = spm.SentencePieceProcessor()\n sp.load(params.bpe_model)\n\n # <blk> is defined in local/train_bpe_model.py\n params.blank_id = sp.piece_to_id(\"<blk>\")\n params.vocab_size = sp.get_piece_size()\n\n logging.info(params)\n\n logging.info(\"About to create model\")\n model = get_transducer_model(params)\n\n num_param = sum([p.numel() for p in model.parameters()])\n logging.info(f\"Number of model parameters: {num_param}\")\n\n checkpoints = load_checkpoint_if_available(params=params, model=model)\n\n model.to(device)\n if world_size > 1:\n logging.info(\"Using DDP\")\n model = DDP(model, device_ids=[rank], find_unused_parameters=True)\n model.device = device\n\n optimizer = Eve(model.parameters(), lr=params.initial_lr)\n\n scheduler = Eden(optimizer, params.lr_batches, params.lr_epochs)\n\n if checkpoints and \"optimizer\" in checkpoints:\n logging.info(\"Loading optimizer state dict\")\n optimizer.load_state_dict(checkpoints[\"optimizer\"])\n\n if (\n checkpoints\n and \"scheduler\" in checkpoints\n and checkpoints[\"scheduler\"] is not None\n ):\n 
logging.info(\"Loading scheduler state dict\")\n scheduler.load_state_dict(checkpoints[\"scheduler\"])\n\n if params.print_diagnostics:\n opts = diagnostics.TensorDiagnosticOptions(\n 2 ** 22\n ) # allow 4 megabytes per sub-module\n diagnostic = diagnostics.attach_diagnostics(model, opts)\n\n librispeech = LibriSpeech(manifest_dir=args.manifest_dir)\n\n train_cuts = librispeech.train_clean_100_cuts()\n if params.full_libri:\n train_cuts += librispeech.train_clean_360_cuts()\n train_cuts += librispeech.train_other_500_cuts()\n\n train_cuts = filter_short_and_long_utterances(train_cuts)\n\n gigaspeech = GigaSpeech(manifest_dir=args.manifest_dir)\n # XL 10k hours\n # L 2.5k hours\n # M 1k hours\n # S 250 hours\n # XS 10 hours\n # DEV 12 hours\n # Test 40 hours\n if params.full_libri:\n logging.info(\"Using the XL subset of GigaSpeech (10k hours)\")\n train_giga_cuts = gigaspeech.train_XL_cuts()\n else:\n logging.info(\"Using the S subset of GigaSpeech (250 hours)\")\n train_giga_cuts = gigaspeech.train_S_cuts()\n\n train_giga_cuts = filter_short_and_long_utterances(train_giga_cuts)\n train_giga_cuts = train_giga_cuts.repeat(times=None)\n\n if args.enable_musan:\n cuts_musan = load_manifest(\n Path(args.manifest_dir) / \"cuts_musan.json.gz\"\n )\n else:\n cuts_musan = None\n\n asr_datamodule = AsrDataModule(args)\n\n train_dl = asr_datamodule.train_dataloaders(\n train_cuts,\n dynamic_bucketing=False,\n on_the_fly_feats=False,\n cuts_musan=cuts_musan,\n )\n\n giga_train_dl = asr_datamodule.train_dataloaders(\n train_giga_cuts,\n dynamic_bucketing=True,\n on_the_fly_feats=False,\n cuts_musan=cuts_musan,\n )\n\n valid_cuts = librispeech.dev_clean_cuts()\n valid_cuts += librispeech.dev_other_cuts()\n valid_dl = asr_datamodule.valid_dataloaders(valid_cuts)\n\n # It's time consuming to include `giga_train_dl` here\n # for dl in [train_dl, giga_train_dl]:\n for dl in [train_dl]:\n scan_pessimistic_batches_for_oom(\n model=model,\n train_dl=dl,\n optimizer=optimizer,\n sp=sp,\n params=params,\n )\n\n scaler = GradScaler(enabled=params.use_fp16)\n if checkpoints and \"grad_scaler\" in checkpoints:\n logging.info(\"Loading grad scaler state dict\")\n scaler.load_state_dict(checkpoints[\"grad_scaler\"])\n\n for epoch in range(params.start_epoch, params.num_epochs):\n scheduler.step_epoch(epoch)\n fix_random_seed(params.seed + epoch)\n train_dl.sampler.set_epoch(epoch)\n\n if tb_writer is not None:\n tb_writer.add_scalar(\"train/epoch\", epoch, params.batch_idx_train)\n\n params.cur_epoch = epoch\n\n train_one_epoch(\n params=params,\n model=model,\n optimizer=optimizer,\n scheduler=scheduler,\n sp=sp,\n train_dl=train_dl,\n giga_train_dl=giga_train_dl,\n valid_dl=valid_dl,\n rng=rng,\n scaler=scaler,\n tb_writer=tb_writer,\n world_size=world_size,\n rank=rank,\n )\n\n if params.print_diagnostics:\n diagnostic.print_diagnostics()\n break\n\n save_checkpoint(\n params=params,\n model=model,\n optimizer=optimizer,\n scheduler=scheduler,\n sampler=train_dl.sampler,\n scaler=scaler,\n rank=rank,\n )\n\n logging.info(\"Done!\")\n\n if world_size > 1:\n torch.distributed.barrier()\n cleanup_dist()\n\n\ndef scan_pessimistic_batches_for_oom(\n model: nn.Module,\n train_dl: torch.utils.data.DataLoader,\n optimizer: torch.optim.Optimizer,\n sp: spm.SentencePieceProcessor,\n params: AttributeDict,\n):\n from lhotse.dataset import find_pessimistic_batches\n\n logging.info(\n \"Sanity check -- see if any of the batches in epoch 0 would cause OOM.\"\n )\n batches, crit_values = 
find_pessimistic_batches(train_dl.sampler)\n for criterion, cuts in batches.items():\n batch = train_dl.dataset[cuts]\n try:\n # warmup = 0.0 is so that the derivs for the pruned loss stay zero\n # (i.e. are not remembered by the decaying-average in adam), because\n # we want to avoid these params being subject to shrinkage in adam.\n with torch.cuda.amp.autocast(enabled=params.use_fp16):\n loss, _ = compute_loss(\n params=params,\n model=model,\n sp=sp,\n batch=batch,\n is_training=True,\n warmup=0.0,\n )\n loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n except RuntimeError as e:\n if \"CUDA out of memory\" in str(e):\n logging.error(\n \"Your GPU ran out of memory with the current \"\n \"max_duration setting. We recommend decreasing \"\n \"max_duration and trying again.\\n\"\n f\"Failing criterion: {criterion} \"\n f\"(={crit_values[criterion]}) ...\"\n )\n raise\n\n\ndef main():\n parser = get_parser()\n AsrDataModule.add_arguments(parser)\n args = parser.parse_args()\n args.exp_dir = Path(args.exp_dir)\n\n assert 0 <= args.giga_prob < 1, args.giga_prob\n\n world_size = args.world_size\n assert world_size >= 1\n if world_size > 1:\n mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True)\n else:\n run(rank=0, world_size=1, args=args)\n\n\ntorch.set_num_threads(1)\ntorch.set_num_interop_threads(1)\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.device", "torch.utils.tensorboard.SummaryWriter", "torch.cuda.amp.autocast", "torch.multiprocessing.spawn", "torch.set_num_interop_threads", "torch.nn.parallel.DistributedDataParallel", "torch.cuda.is_available", "torch.cuda.amp.GradScaler", "torch.distributed.barrier", "torch.set_grad_enabled", "torch.set_num_threads" ] ]
SumeetBatra/crazyswarm-nn
[ "027a9de353c39ffa702c915acdf0cdbfda0b0eca" ]
[ "ros_ws/src/crazyswarm/scripts/logPositions.py" ]
[ "#!/usr/bin/env python\n\nimport numpy as np\nfrom pycrazyswarm import *\nimport rospy\nfrom crazyflie_driver.msg import GenericLogData\nimport uav_trajectory\nimport argparse\n\nfile = None\nHEIGHT = 1.0 #0.75\n\n\n# \"ctrltarget.x\", \"ctrltarget.y\", \"ctrltarget.z\", \"stateEstimate.x\", \"stateEstimate.y\", \"stateEstimate.z\"\ndef logData(data):\n global file\n\n print(data.values)\n file.write(\",\".join([str(i) for i in data.values]) + \"\\n\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"speed\", type=str, help=\"speed of trajectory\")\n parser.add_argument(\"output\", type=str, help=\"output file name\")\n args = parser.parse_args()\n\n swarm = Crazyswarm(parse_args = False)\n timeHelper = swarm.timeHelper\n allcfs = swarm.allcfs\n\n file = open(args.output, \"w\")\n\n rospy.Subscriber(\"/cf{}/log1/\".format(allcfs.crazyflies[0].id), GenericLogData, logData)\n\n # swarm = Crazyswarm()\n # timeHelper = swarm.timeHelper\n # allcfs = swarm.allcfs\n\n traj1 = uav_trajectory.Trajectory()\n traj1.loadcsv(\"figure8.csv\", swapxy=True)\n\n TIMESCALE = float(args.speed) #0.75\n for cf in allcfs.crazyflies:\n cf.uploadTrajectory(0, 0, traj1)\n\n allcfs.takeoff(targetHeight=HEIGHT, duration=3.0)\n timeHelper.sleep(2.5)\n for cf in allcfs.crazyflies:\n # pos = np.array(cf.initialPosition) + np.array([0, 0, 1.0])\n pos = np.array([0, 0, HEIGHT])\n cf.goTo(pos, 0, 2.0)\n timeHelper.sleep(2.5)\n\n allcfs.startTrajectory(0, timescale=TIMESCALE)\n timeHelper.sleep(traj1.duration * TIMESCALE + 2.0)\n # allcfs.startTrajectory(0, timescale=TIMESCALE, reverse=True)\n # timeHelper.sleep(traj1.duration * TIMESCALE + 2.0)\n\n allcfs.land(targetHeight=0.06, duration=2.0)\n timeHelper.sleep(2.0)\n" ]
[ [ "numpy.array" ] ]
Team-Project-OKG/kge
[ "d09e41ccc30b54c1de983e8c4f6f95b92983bcb3" ]
[ "data/olpbench_small/olpbench-small_dataset_creation/reset_ids.py" ]
[ "import os\r\nimport pandas as pd\r\n\r\nfiles_directory = './'\r\nreset_id_map_directory = './resetted_mapped_to_ids/'\r\n\r\ntoken_id_map_header = pd.read_csv(\"./token_id_map_header.txt\", delimiter=\"\\t\", names=[\"token\", \"token_id\"])\r\nentity_id_map = pd.read_csv(\"./entity_id_map.del\", delimiter=\"\\t\", names=[\"entity\", \"entity_id\"])\r\nentity_id_tokens_ids_map = pd.read_csv(\"./entity_id_tokens_ids_map.del\", delimiter=\"\\t\", names=[\"entity_id\", \"token_ids\"])\r\nentity_token_id_map = pd.concat([token_id_map_header, pd.read_csv(\"./entity_token_id_map.del\", delimiter=\"\\t\", names=[\"token\", \"token_id\"])], ignore_index=True)\r\nrelation_id_map = pd.read_csv(\"./relation_id_map.del\", delimiter=\"\\t\", names=[\"relation\", \"relation_id\"])\r\nrelation_id_tokens_ids_map = pd.read_csv(\"./relation_id_tokens_ids_map.del\", delimiter=\"\\t\", names=[\"relation_id\", \"token_ids\"])\r\nrelation_token_id_map = pd.concat([token_id_map_header, pd.read_csv(\"./relation_token_id_map.del\", delimiter=\"\\t\", names=[\"token\", \"token_id\"])], ignore_index=True)\r\n\r\n\r\nnew_entity_id_map = entity_id_map.reset_index().rename(columns={\"entity_id\": \"old_entity_id\", \"index\": \"entity_id\"}).set_index(\"old_entity_id\")\r\nnew_relation_id_map = relation_id_map.reset_index().rename(columns={\"relation_id\": \"old_relation_id\", \"index\": \"relation_id\"}).set_index(\"old_relation_id\")\r\n\r\nnew_entity_id_map_dict = new_entity_id_map[\"entity_id\"].to_dict()\r\nnew_relation_id_map_dict = new_relation_id_map[\"relation_id\"].to_dict()\r\n\r\n\r\nnew_entity_token_id_map = entity_token_id_map.reset_index().rename(columns={\"token_id\": \"old_token_id\", \"index\": \"token_id\"}).set_index(\"old_token_id\")\r\nnew_relation_token_id_map = relation_token_id_map.reset_index().rename(columns={\"token_id\": \"old_token_id\", \"index\": \"token_id\"}).set_index(\"old_token_id\")\r\n\r\nnew_entity_token_id_map_dict = new_entity_token_id_map[\"token_id\"].to_dict()\r\nnew_relation_token_id_map_dict = new_relation_token_id_map[\"token_id\"].to_dict()\r\n\r\n\r\n\r\nnew_entity_id_tokens_ids_map = entity_id_tokens_ids_map.copy()\r\n\r\nnew_entity_id_tokens_ids_map[\"entity_id\"] = entity_id_tokens_ids_map[\"entity_id\"].replace(new_entity_id_map_dict)\r\n\r\nnew_entity_id_tokens_ids_map[\"token_ids\"] = new_entity_id_tokens_ids_map[\"token_ids\"].apply(lambda token_ids: \" \".join([str(new_entity_token_id_map_dict.get(int(splitted), None)) for splitted in token_ids.split(\" \")]))\r\n\r\n\r\nnew_relation_id_tokens_ids_map = relation_id_tokens_ids_map.copy()\r\n\r\nnew_relation_id_tokens_ids_map[\"relation_id\"] = relation_id_tokens_ids_map[\"relation_id\"].replace(new_relation_id_map_dict)\r\n\r\nnew_relation_id_tokens_ids_map[\"token_ids\"] = new_relation_id_tokens_ids_map[\"token_ids\"].apply(lambda token_ids: \" \".join([str(new_relation_token_id_map_dict.get(int(splitted), None)) for splitted in token_ids.split(\" \")]))\r\n\r\n\r\n\r\nmapping_files = [f for f in os.listdir(files_directory) if any([x in f for x in [\"test\", \"train\", \"validation\"]])]\r\n\r\nfor file_name in mapping_files:\r\n file_df = pd.read_csv(files_directory+file_name, delimiter=\"\\t\", names=[\"subject_id\", \"relation_id\", \"object_id\", \"alternative_subjects\", \"alternative_objects\"])\r\n file_df[\"subject_id\"] = file_df[\"subject_id\"].replace(new_entity_id_map_dict)\r\n file_df[\"object_id\"] = file_df[\"object_id\"].replace(new_entity_id_map_dict)\r\n file_df[\"relation_id\"] = 
file_df[\"relation_id\"].replace(new_relation_id_map_dict)\r\n file_df[\"alternative_subjects\"] = file_df[\"alternative_subjects\"].apply(lambda token_ids: \" \".join([str(new_entity_id_map_dict.get(int(splitted), None)) for splitted in str(token_ids).split(\" \")]))\r\n file_df[\"alternative_objects\"] = file_df[\"alternative_objects\"].apply(lambda token_ids: \" \".join([str(new_entity_id_map_dict.get(int(splitted), None)) for splitted in str(token_ids).split(\" \")]))\r\n file_df.to_csv(reset_id_map_directory+file_name, sep=\"\\t\", header=False, index=False)\r\n print(file_name + \" done\")\r\n\r\n\r\nnew_entity_id_map.to_csv(reset_id_map_directory+\"entity_id_map.del\", sep=\"\\t\", header=False, index=False)\r\nnew_entity_id_tokens_ids_map.to_csv(reset_id_map_directory+\"entity_id_tokens_ids_map.del\", sep=\"\\t\", header=False, index=False)\r\nnew_entity_token_id_map.to_csv(reset_id_map_directory+\"entity_token_id_map.del\", sep=\"\\t\", header=False, index=False)\r\nnew_relation_id_map.to_csv(reset_id_map_directory+\"relation_id_map.del\", sep=\"\\t\", header=False, index=False)\r\nnew_relation_id_tokens_ids_map.to_csv(reset_id_map_directory+\"relation_id_tokens_ids_map.del\", sep=\"\\t\", header=False, index=False)\r\nnew_relation_token_id_map.to_csv(reset_id_map_directory+\"relation_token_id_map.del\", sep=\"\\t\", header=False, index=False)\r\n\r\n\r\nprint(\"done\")" ]
[ [ "pandas.read_csv" ] ]
Bleuje/pytorch-CycleGAN-and-pix2pix
[ "20070a7df7c6549a96461970c809405a8acd5c1f" ]
[ "models/base_model.py" ]
[ "import os\nimport torch\nfrom collections import OrderedDict\nfrom . import networks\n\nimport numpy as np\n\nclass BaseModel():\n\n # modify parser to add command line options,\n # and also change the default values if needed\n @staticmethod\n def modify_commandline_options(parser, is_train):\n return parser\n\n def name(self):\n return 'BaseModel'\n\n def initialize(self, opt):\n self.opt = opt\n self.gpu_ids = opt.gpu_ids\n self.isTrain = opt.isTrain\n self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')\n self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)\n if opt.resize_or_crop != 'scale_width':\n torch.backends.cudnn.benchmark = True\n self.loss_names = []\n self.model_names = []\n self.visual_names = []\n self.image_paths = []\n\n def set_input(self, input):\n pass\n\n def forward(self):\n pass\n\n # load and print networks; create schedulers\n def setup(self, opt, parser=None):\n if self.isTrain:\n self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]\n if not self.isTrain or opt.continue_train:\n load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch\n self.load_networks(load_suffix)\n self.print_networks(opt.verbose)\n\n # make models eval mode during test time\n def eval(self):\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, 'net' + name)\n net.eval()\n\n # used in test time, wrapping `forward` in no_grad() so we don't save\n # intermediate steps for backprop\n def test(self):\n with torch.no_grad():\n self.forward(b=True,seed=np.random.randint(1000),length=200)\n self.compute_visuals()\n\n # compute additional output images for visualization\n def compute_visuals(self):\n pass\n\n # get image paths\n def get_image_paths(self):\n return self.image_paths\n\n def optimize_parameters(self):\n pass\n\n # update learning rate (called once every epoch)\n def update_learning_rate(self):\n for scheduler in self.schedulers:\n scheduler.step()\n lr = self.optimizers[0].param_groups[0]['lr']\n print('learning rate = %.7f' % lr)\n\n # return visualization images. train.py will display these images, and save the images to a html\n def get_current_visuals(self):\n visual_ret = OrderedDict()\n for name in self.visual_names:\n if isinstance(name, str):\n visual_ret[name] = getattr(self, name)\n return visual_ret\n\n # return traning losses/errors. train.py will print out these errors as debugging information\n def get_current_losses(self):\n errors_ret = OrderedDict()\n for name in self.loss_names:\n if isinstance(name, str):\n # float(...) 
works for both scalar tensor and float number\n errors_ret[name] = float(getattr(self, 'loss_' + name))\n return errors_ret\n\n # save models to the disk\n def save_networks(self, epoch):\n for name in self.model_names:\n if isinstance(name, str):\n save_filename = '%s_net_%s.pth' % (epoch, name)\n save_path = os.path.join(self.save_dir, save_filename)\n net = getattr(self, 'net' + name)\n\n if len(self.gpu_ids) > 0 and torch.cuda.is_available():\n torch.save(net.module.cpu().state_dict(), save_path)\n net.cuda(self.gpu_ids[0])\n else:\n torch.save(net.cpu().state_dict(), save_path)\n\n def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):\n key = keys[i]\n if i + 1 == len(keys): # at the end, pointing to a parameter/buffer\n if module.__class__.__name__.startswith('InstanceNorm') and \\\n (key == 'running_mean' or key == 'running_var'):\n if getattr(module, key) is None:\n state_dict.pop('.'.join(keys))\n if module.__class__.__name__.startswith('InstanceNorm') and \\\n (key == 'num_batches_tracked'):\n state_dict.pop('.'.join(keys))\n else:\n self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)\n\n # load models from the disk\n def load_networks(self, epoch):\n for name in self.model_names:\n if isinstance(name, str):\n load_filename = '%s_net_%s.pth' % (epoch, name)\n load_path = os.path.join(self.save_dir, load_filename)\n net = getattr(self, 'net' + name)\n if isinstance(net, torch.nn.DataParallel):\n net = net.module\n print('loading the model from %s' % load_path)\n # if you are using PyTorch newer than 0.4 (e.g., built from\n # GitHub source), you can remove str() on self.device\n state_dict = torch.load(load_path, map_location=str(self.device))\n if hasattr(state_dict, '_metadata'):\n del state_dict._metadata\n\n # patch InstanceNorm checkpoints prior to 0.4\n for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop\n self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))\n net.load_state_dict(state_dict)\n\n # print network information\n def print_networks(self, verbose):\n print('---------- Networks initialized -------------')\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, 'net' + name)\n num_params = 0\n for param in net.parameters():\n num_params += param.numel()\n if verbose:\n print(net)\n print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))\n print('-----------------------------------------------')\n\n # set requies_grad=Fasle to avoid computation\n def set_requires_grad(self, nets, requires_grad=False):\n if not isinstance(nets, list):\n nets = [nets]\n for net in nets:\n if net is not None:\n for param in net.parameters():\n param.requires_grad = requires_grad\n" ]
[ [ "torch.device", "torch.no_grad", "torch.cuda.is_available", "numpy.random.randint" ] ]
FunmiKesa/JLA
[ "4fcd6a0a382d451a54703e432e476c3a16166232" ]
[ "src/gen_labels_15.py" ]
[ "import os.path as osp\nimport os\nimport numpy as np\n\n\ndef mkdirs(d):\n if not osp.exists(d):\n os.makedirs(d)\n\n\nseq_root = '/media2/funmi/MOT/MOT15/images/train'\nlabel_root = '/media2/funmi/MOT/MOT15/labels_with_ids/train'\nmkdirs(label_root)\n#seqs = [s for s in os.listdir(seq_root)]\nseqs = ['ADL-Rundle-6', 'ETH-Bahnhof', 'KITTI-13', 'PETS09-S2L1', 'TUD-Stadtmitte', 'ADL-Rundle-8', 'KITTI-17',\n 'ETH-Pedcross2', 'ETH-Sunnyday', 'TUD-Campus', 'Venice-2']\n\ntid_curr = 0\ntid_last = -1\nfor seq in seqs:\n with open(osp.join(seq_root, seq, 'seqinfo.ini'), 'r') as file:\n seq_info = file.read()\n seq_width = int(seq_info[seq_info.find('imWidth=') + 8:seq_info.find('\\nimHeight')])\n seq_height = int(seq_info[seq_info.find('imHeight=') + 9:seq_info.find('\\nimExt')])\n\n gt_txt = osp.join(seq_root, seq, 'gt', 'gt.txt')\n gt = np.loadtxt(gt_txt, dtype=np.float64, delimiter=',')\n idx = np.lexsort(gt.T[:2, :])\n gt = gt[idx, :]\n\n seq_label_root = osp.join(label_root, seq, 'img1')\n mkdirs(seq_label_root)\n\n for fid, tid, x, y, w, h, mark, _, _, _ in gt:\n if mark == 0:\n continue\n fid = int(fid)\n tid = int(tid)\n if not tid == tid_last:\n tid_curr += 1\n tid_last = tid\n x += w / 2\n y += h / 2\n label_fpath = osp.join(seq_label_root, '{:06d}.txt'.format(fid))\n label_str = '0 {:d} {:.6f} {:.6f} {:.6f} {:.6f}\\n'.format(\n tid_curr, x / seq_width, y / seq_height, w / seq_width, h / seq_height)\n with open(label_fpath, 'a') as f:\n f.write(label_str)\n" ]
[ [ "numpy.loadtxt", "numpy.lexsort" ] ]
n3urovirtual/EyeTracking_Experiment
[ "00a534540a524db8606d54e33ebc43de49a959ac" ]
[ "Memory Scripts/Mem_collation.py" ]
[ "''' MEMORY DATA COLLATION (VARIABLES OF INTEREST FOR EACH PARTICIPANT/TRIAL)'''\n\nimport os\nimport itertools\nimport pandas as pd\nimport numpy as np\nfrom helper import *\n\ncollation=pd.DataFrame()\n\n##TO DO: check why it omits subject 1, image 1.\n\nfor i,k in itertools.product(sub_id, img_id):\n file='Sub_'+str(i)+'_Image_'+str(k)+'.csv'\n events=pd.read_csv(os.path.join(EVENTS_PATH,file),low_memory=False)\n dir_route=pd.read_csv(os.path.join(DIR_ROUTE_PATH,file),low_memory=False)\n collation['Subject_ID']=str(i)\n collation['Image_ID']=str(k)\n collation['Clutter']=events['Clutter'].iloc[0]\n collation['Total_num_fixations']=events['Event_ID'].count()\n collation['Mean_fixation_dur']=events['FPOG_DUR'].mean()\n collation['First_saccade_latency']=events['SAC_LATENCY'].iloc[0]\n collation['Mean_saccade_length']=events['SAC_AMPLITUDE'].mean()\n collation['Scanpath_length']=events['SAC_AMPLITUDE'].sum()\n path_ratio=collation['Scanpath_length']/dir_route['Direct_path_in_pixels']\n collation['Scanpath_ratio']=path_ratio\n file_to_write='Mem_collate.csv'\n output_path=os.path.join(COLLATION_PATH,file_to_write)\n head=not os.path.exists(output_path)\n collation.to_csv(output_path, index=False,mode='a',header=head)\n " ]
[ [ "pandas.DataFrame" ] ]
JerryMa90/whitenoise-system
[ "66c99c8108e7a73c1b5a3fa5cb3729a5d8f95e20" ]
[ "sdk/opendp/whitenoise/evaluation/_dp_verification.py" ]
[ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport copy\nimport os\nfrom scipy import stats\nimport opendp.whitenoise.evaluation._aggregation as agg\nimport opendp.whitenoise.evaluation._exploration as exp\nfrom opendp.whitenoise.metadata.collection import *\n\nclass DPVerification:\n \"\"\" This class contains a list of methods that can be passed DP analysis \n for stochastic verification. It tries to use a set of neighboring datasets \n D1 and D2 that differ by single individual. On these neighboring datasets, \n it applies the DP analysis repeatedly. \n \n It tests the DP condition to let the DP implementer know whether repeated analysis\n results are not enough to re-identify D1 or D2 which differ by single individual \n i.e. passing epsilon-DP condition. \n \n If the DP condition is not passed, there is a bug and analysis is not \n differentially private. Similarly, it has methods to evaluate accuracy, \n utility and bias of DP analysis. \n \"\"\"\n def __init__(self, epsilon=1.0, dataset_size=10000, csv_path=\".\"):\n \"\"\"\n Instantiates DP Verification class initializing privacy parameters\n Creates a simulation dataset for use in verification testing\n \"\"\"\n self.epsilon = epsilon\n self.dataset_size = dataset_size\n self.file_dir = os.path.dirname(os.path.abspath(__file__))\n self.csv_path = csv_path\n self.df, self.dataset_path, self.file_name, self.metadata = self.create_simulated_dataset()\n self.N = len(self.df)\n self.delta = 1/(self.N * math.sqrt(self.N))\n\n def create_simulated_dataset(self, file_name = \"simulation\"):\n \"\"\"\n Returns a simulated dataset of configurable size and following\n geometric distribution. Adds a couple of dimension columns for \n analysis related to GROUP BY queries. 
\n \"\"\"\n np.random.seed(1)\n userids = list(range(1, self.dataset_size+1))\n userids = [\"A\" + str(user) for user in userids]\n segment = ['A', 'B', 'C']\n role = ['R1', 'R2']\n roles = np.random.choice(role, size=self.dataset_size, p=[0.7, 0.3]).tolist()\n segments = np.random.choice(segment, size=self.dataset_size, p=[0.5, 0.3, 0.2]).tolist()\n usage = np.random.geometric(p=0.5, size=self.dataset_size).tolist()\n df = pd.DataFrame(list(zip(userids, segments, roles, usage)), columns=['UserId', 'Segment', 'Role', 'Usage'])\n\n # Storing the data as a CSV\n file_path = os.path.join(self.file_dir, self.csv_path, file_name + \".csv\")\n df.to_csv(file_path, sep=',', encoding='utf-8', index=False)\n metadata = Table(file_name, file_name, self.dataset_size, \\\n [\\\n String(\"UserId\", self.dataset_size, True), \\\n String(\"Segment\", 3, False), \\\n String(\"Role\", 2, False), \\\n Int(\"Usage\", 0, 25)\n ])\n\n return df, file_path, file_name, metadata\n\n def generate_neighbors(self, load_csv = False):\n \"\"\"\n Generate dataframes that differ by a single record that is randomly chosen\n Returns the neighboring datasets and their corresponding metadata\n \"\"\"\n if(load_csv):\n self.df = pd.read_csv(self.dataset_path)\n\n if(self.N == 0):\n print(\"No records in dataframe to run the test\")\n return None, None\n\n d1 = self.df\n drop_idx = np.random.choice(self.df.index, 1, replace=False)\n d2 = self.df.drop(drop_idx)\n\n if(load_csv):\n # Storing the data as a CSV for applying queries via Burdock querying system\n d1_file_path = os.path.join(self.file_dir, self.csv_path , \"d1.csv\")\n d2_file_path = os.path.join(self.file_dir, self.csv_path , \"d2.csv\")\n\n d1.to_csv(d1_file_path, sep=',', encoding='utf-8', index=False)\n d2.to_csv(d2_file_path, sep=',', encoding='utf-8', index=False)\n\n d1_table = self.metadata\n d2_table = copy.copy(d1_table)\n d1_table.schema, d2_table.schema = \"d1\", \"d2\"\n d1_table.name, d2_table.name = \"d1\", \"d2\"\n d2_table.rowcount = d1_table.rowcount - 1\n d1_metadata, d2_metadata = CollectionMetadata([d1_table], \"csv\"), CollectionMetadata([d2_table], \"csv\")\n\n return d1, d2, d1_metadata, d2_metadata\n\n def apply_aggregation_neighbors(self, f, args1, args2):\n \"\"\"\n If there is an aggregation function that we need to test, \n we need to apply it on neighboring datasets. This function applies \n the aggregation repeatedly to log results in two vectors that are \n then used for generating histogram. The histogram is then passed \n through the DP test.\n \"\"\"\n fD1 = f(*args1)\n fD2 = f(*args2)\n return fD1, fD2\n\n def generate_histogram_neighbors(self, fD1, fD2, numbins=0, binsize=\"auto\", exact=False):\n \"\"\"\n Generate histograms given the vectors of repeated aggregation results\n applied on neighboring datasets\n \"\"\"\n d1 = fD1\n d2 = fD2\n d = np.concatenate((d1, d2), axis=None)\n n = d.size\n binlist = []\n minval = min(min(d1), min(d2))\n maxval = max(max(d1), max(d2))\n if(exact):\n binlist = np.linspace(minval, maxval, 2)\n elif(numbins > 0):\n binlist = np.linspace(minval, maxval, numbins)\n elif(binsize == \"auto\"):\n iqr = np.subtract(*np.percentile(d, [75, 25]))\n numerator = 2 * iqr if iqr > 0 else maxval - minval\n denominator = n ** (1. 
/ 3)\n binwidth = numerator / denominator # Freedman–Diaconis' choice\n numbins = int(math.ceil((maxval - minval) / binwidth)) if maxval > minval else 20\n binlist = np.linspace(minval, maxval, numbins)\n else:\n # Choose bin size of unity\n binlist = np.arange(np.floor(minval),np.ceil(maxval))\n\n # Calculating histograms of fD1 and fD2\n d1hist, bin_edges = np.histogram(d1, bins = binlist, density = False)\n d2hist, bin_edges = np.histogram(d2, bins = binlist, density = False)\n\n return d1hist, d2hist, bin_edges\n\n def plot_histogram_neighbors(self, fD1, fD2, d1histupperbound, d2histupperbound, d1hist, d2hist, d1lower, d2lower, binlist, bound=True, exact=False):\n \"\"\"\n Plot histograms given the vectors of repeated aggregation results \n applied on neighboring datasets\n \"\"\"\n plt.figure(figsize=(15,5))\n if(exact):\n ax = plt.subplot(1, 1, 1)\n ax.ticklabel_format(useOffset=False)\n plt.xlabel('Bin')\n plt.ylabel('Probability')\n plt.hist(fD1, width=0.2, alpha=0.5, ec=\"k\", align = \"right\", bins = 1)\n plt.hist(fD2, width=0.2, alpha=0.5, ec=\"k\", align = \"right\", bins = 1)\n ax.legend(['D1', 'D2'], loc=\"upper right\")\n return\n\n ax = plt.subplot(1, 2, 1)\n ax.ticklabel_format(useOffset=False)\n plt.xlabel('Bin')\n plt.ylabel('Probability')\n if(bound):\n plt.bar(binlist[:-1], d2histupperbound, alpha=0.5, width=np.diff(binlist), ec=\"k\", align=\"edge\")\n plt.bar(binlist[:-1], d1lower, alpha=0.5, width=np.diff(binlist), ec=\"k\", align=\"edge\")\n plt.legend(['D1', 'D2'], loc=\"upper right\")\n else:\n plt.bar(binlist[:-1], d1hist, alpha=0.5, width=np.diff(binlist), ec=\"k\", align=\"edge\")\n plt.bar(binlist[:-1], d2hist, alpha=0.5, width=np.diff(binlist), ec=\"k\", align=\"edge\")\n plt.legend(['D1', 'D2'], loc=\"upper right\")\n\n ax = plt.subplot(1, 2, 2)\n ax.ticklabel_format(useOffset=False)\n plt.xlabel('Bin')\n plt.ylabel('Probability')\n if(bound):\n plt.bar(binlist[:-1], d1histupperbound, alpha=0.5, width=np.diff(binlist), ec=\"k\", align=\"edge\")\n plt.bar(binlist[:-1], d2lower, alpha=0.5, width=np.diff(binlist), ec=\"k\", align=\"edge\")\n plt.legend(['D2', 'D1'], loc=\"upper right\")\n else:\n plt.bar(binlist[:-1], d2hist, alpha=0.5, width=np.diff(binlist), ec=\"k\", align=\"edge\")\n plt.bar(binlist[:-1], d1hist, alpha=0.5, width=np.diff(binlist), ec=\"k\", align=\"edge\")\n plt.legend(['D2', 'D1'], loc=\"upper right\")\n plt.show()\n\n def get_bounded_histogram(self, d1hist, d2hist, binlist, d1size, d2size, exact, alpha=0.05):\n \"\"\"\n Check if histogram of fD1 values multiplied by e^epsilon and \n summed by delta is bounding fD2 and vice versa\n Use the histogram results and create bounded histograms \n to compare in DP test\n \"\"\"\n d1_error_interval = 0.0\n d2_error_interval = 0.0\n # Lower and Upper bound\n if(not exact):\n num_buckets = binlist.size - 1\n critical_value = stats.norm.ppf(1-(alpha / 2 / num_buckets), loc=0.0, scale=1.0)\n d1_error_interval = critical_value * math.sqrt(num_buckets / d1size) / 2\n d2_error_interval = critical_value * math.sqrt(num_buckets / d2size) / 2\n\n num_buckets = binlist.size - 1\n px = np.divide(d1hist, d1size)\n py = np.divide(d2hist, d2size)\n\n d1histbound = px * math.exp(self.epsilon) + self.delta\n d2histbound = py * math.exp(self.epsilon) + self.delta\n\n d1upper = np.power(np.sqrt(px * num_buckets) + d1_error_interval, 2) / num_buckets\n d2upper = np.power(np.sqrt(py * num_buckets) + d2_error_interval, 2) / num_buckets\n d1lower = np.power(np.sqrt(px * num_buckets) - d1_error_interval, 2) / 
num_buckets\n d2lower = np.power(np.sqrt(py * num_buckets) - d2_error_interval, 2) / num_buckets\n\n np.maximum(d1lower, 0.0, d1lower)\n np.maximum(d2lower, 0.0, d1lower)\n\n d1histupperbound = d1upper * math.exp(self.epsilon) + self.delta\n d2histupperbound = d2upper * math.exp(self.epsilon) + self.delta\n\n return px, py, d1histupperbound, d2histupperbound, d1histbound, d2histbound, d1lower, d2lower\n\n def dp_test(self, d1hist, d2hist, binlist, d1size, d2size, debug=False, exact=False):\n \"\"\"\n Differentially Private Predicate Test\n \"\"\"\n px, py, d1histupperbound, d2histupperbound, d1histbound, d2histbound, d1lower, d2lower = \\\n self.get_bounded_histogram(d1hist, d2hist, binlist, d1size, d2size, exact)\n if(debug):\n print(\"Parameters\")\n print(\"epsilon: \", self.epsilon, \" delta: \", self.delta)\n print(\"Bins\\n\", binlist)\n print(\"Original D1 Histogram\\n\", d1hist)\n print(\"Probability of D1 Histogram\\n\", px)\n print(\"D1 Lower\\n\", d1lower)\n print(\"D1 Upper\\n\", d1histupperbound)\n print(\"D1 Histogram to bound D2\\n\", d1histbound)\n print(\"Original D2 Histogram\\n\", d2hist)\n print(\"Probability of D2 Histogram\\n\", py)\n print(\"D2 Lower\\n\", d2lower)\n print(\"D2 Upper\\n\", d2histupperbound)\n print(\"D2 Histogram to bound D1\\n\", d2histbound)\n print(\"Comparison - D2 bound to D1\\n\", np.greater(d1hist, np.zeros(d1hist.size)), np.logical_and(np.greater(d1hist, np.zeros(d1hist.size)), np.greater(d1lower, d2histupperbound)))\n print(\"Comparison - D1 bound to D2\\n\", np.greater(d2hist, np.zeros(d2hist.size)), np.logical_and(np.greater(d2hist, np.zeros(d2hist.size)), np.greater(d2lower, d1histupperbound)))\n\n # Check if any of the bounds across the bins violate the relaxed DP condition\n bound_exceeded = np.any(np.logical_and(np.greater(d1hist, np.zeros(d1hist.size)), np.greater(d1lower, d2histupperbound))) or \\\n np.any(np.logical_and(np.greater(d2hist, np.zeros(d2hist.size)), np.greater(d2lower, d1histupperbound)))\n return not bound_exceeded, d1histupperbound, d2histupperbound, d1lower, d2lower\n\n def ks_test(self, fD1, fD2):\n \"\"\"\n K-S Two sample test between the repeated query results on neighboring datasets\n \"\"\"\n return stats.ks_2samp(fD1, fD2)\n\n def anderson_ksamp(self, fD1, fD2):\n \"\"\"\n Anderson Darling Test\n \"\"\"\n return stats.anderson_ksamp([fD1, fD2])\n\n def kl_divergence(self, p, q):\n \"\"\"\n Kullback-Leibler divergence D(P || Q) for discrete distributions\n \"\"\"\n return np.sum(np.where(p != 0, p * np.log(p / q), 0))\n\n def wasserstein_distance(self, d1hist, d2hist):\n \"\"\"\n Wasserstein Distance between histograms of repeated analysis on neighboring datasets\n \"\"\"\n return stats.wasserstein_distance(d1hist, d2hist)\n\n def aggtest(self, f, colname, numbins=0, binsize=\"auto\", debug=False, plot=True, bound=True, exact=False):\n \"\"\"\n Verification of SQL aggregation mechanisms\n Returns statistical distance measures between repeated analysis \n responses on neighboring datasets\n \"\"\"\n d1, d2, d1_metadata, d2_metadata = self.generate_neighbors()\n fD1, fD2 = self.apply_aggregation_neighbors(f, (d1, colname), (d2, colname))\n d1size, d2size = fD1.size, fD2.size\n ks_res = self.ks_test(fD1, fD2)\n d1hist, d2hist, bin_edges = \\\n self.generate_histogram_neighbors(fD1, fD2, numbins, binsize, exact=exact)\n dp_res, d1histupperbound, d2histupperbound, d1lower, d2lower = self.dp_test(d1hist, d2hist, bin_edges, d1size, d2size, debug, exact=exact)\n ws_res = 0.0\n if(exact):\n return False, 0.0, 0.0\n 
else:\n ws_res = self.wasserstein_distance(d1hist, d2hist)\n\n if(plot):\n self.plot_histogram_neighbors(fD1, fD2, d1histupperbound, d2histupperbound, d1hist, d2hist, d1lower, d2lower, bin_edges, bound, exact)\n return dp_res, ks_res, ws_res\n\n def accuracy_test(self, actual, low, high, confidence=0.95):\n \"\"\"\n Performs accuracy and utility tests given lower and upper bounds.\n 95% of times actual response (without DP noise) should fall within the error bounds\n Utility Test finds whether 5% of times, actual response falls outside the bounds \n Else error bounds are too large and noisy responses are low utility\n \"\"\"\n n = len(low)\n actual = [actual] * n\n error_interval = 0.05*confidence\n relaxed_low = confidence - error_interval\n relaxed_high = 1 - (confidence + error_interval)\n within_bounds = np.sum(np.logical_and(np.greater_equal(actual, low), np.greater_equal(high, actual)))\n outside_bounds = n - within_bounds\n acc_res = (within_bounds / n >= relaxed_low)\n utility_res = (outside_bounds / n >= relaxed_high)\n return acc_res, utility_res, float('%.2f'%((within_bounds / n) * 100))\n\n def bias_test(self, actual, fD, sig_level = 0.05):\n \"\"\"\n Given actual response, calculates mean signed deviation of noisy responses\n Also, performs 1-sample two tailed t-test to find whether \n the difference between actual response and repeated noisy responses \n is statistically significant i.e. biased result\n \"\"\"\n n = len(fD)\n actual = [actual] * n\n diff = fD - actual\n msd = (np.sum(diff) / n) / actual[0]\n tset, pval = stats.ttest_1samp(diff, 0.0)\n return (pval >= sig_level), msd\n\n def dp_query_test(self, d1_query, d2_query, debug=False, plot=True, bound=True, exact=False, repeat_count=10000, confidence=0.95, get_exact=True):\n \"\"\"\n Applying singleton queries repeatedly against DP SQL-92 implementation \n by WhiteNoise-System\n \"\"\"\n ag = agg.Aggregation(t=1, repeat_count=repeat_count)\n d1, d2, d1_metadata, d2_metadata = self.generate_neighbors(load_csv=True)\n\n fD1, fD1_actual, fD1_low, fD1_high = ag.run_agg_query(d1, d1_metadata, d1_query, confidence, get_exact)\n fD2, fD2_actual, fD2_low, fD2_high = ag.run_agg_query(d2, d2_metadata, d2_query, confidence, get_exact)\n d1hist, d2hist, bin_edges = self.generate_histogram_neighbors(fD1, fD2, binsize=\"auto\")\n d1size, d2size = fD1.size, fD2.size\n dp_res, d1histupperbound, d2histupperbound, d1lower, d2lower = self.dp_test(d1hist, d2hist, bin_edges, d1size, d2size, debug)\n #acc_res, utility_res, within_bounds = self.accuracy_test(fD1_actual, fD1_low, fD1_high, confidence)\n acc_res, utility_res = None, None\n bias_res, msd = self.bias_test(fD1_actual, fD1)\n if(plot):\n self.plot_histogram_neighbors(fD1, fD2, d1histupperbound, d2histupperbound, d1hist, d2hist, d1lower, d2lower, bin_edges, bound, exact)\n return dp_res, acc_res, utility_res, bias_res\n\n def dp_groupby_query_test(self, d1_query, d2_query, debug=False, plot=True, bound=True, exact=False, repeat_count=10000, confidence=0.95):\n \"\"\"\n Allows DP Predicate test on both singleton and GROUP BY SQL queries\n \"\"\"\n ag = agg.Aggregation(t=1, repeat_count=repeat_count)\n d1, d2, d1_metadata, d2_metadata = self.generate_neighbors(load_csv=True)\n\n d1_res, d1_exact, dim_cols, num_cols = ag.run_agg_query_df(d1, d1_metadata, d1_query, confidence, file_name = \"d1\")\n d2_res, d2_exact, dim_cols, num_cols = ag.run_agg_query_df(d2, d2_metadata, d2_query, confidence, file_name = \"d2\")\n\n res_list = []\n for col in num_cols:\n d1_gp = 
d1_res.groupby(dim_cols)[col].apply(list).reset_index(name=col)\n d2_gp = d2_res.groupby(dim_cols)[col].apply(list).reset_index(name=col)\n exact_gp = d1_exact.groupby(dim_cols)[col].apply(list).reset_index(name=col)\n # Both D1 and D2 should have dimension key for histograms to be created\n d1_d2 = d1_gp.merge(d2_gp, on=dim_cols, how='inner')\n d1_d2 = d1_d2.merge(exact_gp, on=dim_cols, how='left')\n n_cols = len(d1_d2.columns)\n for index, row in d1_d2.iterrows():\n # fD1 and fD2 will have the results of the K repeated query results that can be passed through histogram test\n # These results are for that particular numerical column and the specific dimension key of d1_d2\n fD1 = np.array([val[0] for val in d1_d2.iloc[index, n_cols - 3]])\n fD2 = np.array([val[0] for val in d1_d2.iloc[index, n_cols - 2]])\n exact_val = d1_d2.iloc[index, n_cols - 1][0]\n d1hist, d2hist, bin_edges = self.generate_histogram_neighbors(fD1, fD2, binsize=\"auto\")\n d1size, d2size = fD1.size, fD2.size\n dp_res, d1histupperbound, d2histupperbound, d1lower, d2lower = self.dp_test(d1hist, d2hist, bin_edges, d1size, d2size, debug)\n\n # Accuracy Test\n #low = np.array([val[1] for val in d1_d2.iloc[index, n_cols - 2]])\n #high = np.array([val[2] for val in d1_d2.iloc[index, n_cols - 2]])\n #acc_res, utility_res, within_bounds = self.accuracy_test(exact_val, low, high, confidence)\n acc_res, utility_res = None, None\n bias_res, msd = self.bias_test(exact_val, fD1)\n res_list.append([dp_res, acc_res, utility_res, bias_res, msd])\n if(plot):\n self.plot_histogram_neighbors(fD1, fD2, d1histupperbound, d2histupperbound, d1hist, d2hist, d1lower, d2lower, bin_edges, bound, exact)\n\n res_list = res_list.values() if hasattr(res_list, \"values\") else res_list # TODO why is this needed?\n dp_res = np.all(np.array([res[0] for res in res_list]))\n #acc_res = np.all(np.array([res[1] for res in res_list]))\n #utility_res = np.all(np.array([res[2] for res in res_list]))\n acc_res, utility_res = None, None\n bias_res = np.all(np.array([res[3] for res in res_list]))\n return dp_res, acc_res, utility_res, bias_res\n\n def dp_powerset_test(self, query_str, debug=False, plot=True, bound=True, exact=False, repeat_count=10000, confidence=0.95, test_cases=5):\n \"\"\"\n Use the powerset based neighboring datasets to scan through \n all edges of database search graph\n \"\"\"\n ag = agg.Aggregation(t=1, repeat_count=repeat_count)\n ex = exp.Exploration()\n res_list = {}\n halton_samples = ex.generate_halton_samples(bounds = ex.corners, dims = ex.N, n_sample=test_cases)\n # Iterate through each sample generated by halton sequence\n for sample in halton_samples:\n df, metadata = ex.create_small_dataset(sample)\n ex.generate_powerset(df)\n print(\"Test case: \", list(sample))\n for filename in ex.visited:\n print(\"Testing: \", filename)\n d1_query = query_str + \"d1_\" + filename + \".\" + \"d1_\" + filename\n d2_query = query_str + \"d2_\" + filename + \".\" + \"d2_\" + filename\n [d1, d2, d1_metadata, d2_metadata] = ex.neighbor_pair[filename]\n fD1, fD1_actual, fD1_low, fD1_high = ag.run_agg_query(d1, d1_metadata, d1_query, confidence)\n fD2, fD2_actual, fD2_low, fD2_high = ag.run_agg_query(d2, d2_metadata, d2_query, confidence)\n\n #acc_res, utility_res, within_bounds = self.accuracy_test(fD1_actual, fD1_low, fD1_high, confidence)\n acc_res, utility_res, within_bounds = None, None, None\n bias_res, msd = self.bias_test(fD1_actual, fD1)\n d1hist, d2hist, bin_edges = self.generate_histogram_neighbors(fD1, fD2, binsize=\"auto\")\n d1size, 
d2size = fD1.size, fD2.size\n dp_res, d1histupperbound, d2histupperbound, d1lower, d2lower = self.dp_test(d1hist, d2hist, bin_edges, d1size, d2size, debug)\n if(plot):\n self.plot_histogram_neighbors(fD1, fD2, d1histupperbound, d2histupperbound, d1hist, d2hist, d1lower, d2lower, bin_edges, bound, exact)\n key = \"[\" + ','.join(str(e) for e in list(sample)) + \"] - \" + filename\n res_list[key] = [dp_res, acc_res, utility_res, within_bounds, bias_res, msd]\n\n print(\"Halton sequence based Powerset Test Result\")\n for data, res in res_list.items():\n print(data, \"-\", res)\n\n dp_res = np.all(np.array([res[0] for data, res in res_list.items()]))\n #acc_res = np.all(np.array([res[1] for res in res_list]))\n #utility_res = np.all(np.array([res[2] for res in res_list]))\n acc_res, utility_res = None, None\n bias_res = np.all(np.array([res[4] for data, res in res_list.items()]))\n return dp_res, acc_res, utility_res, bias_res\n" ]
[ [ "numpy.random.choice", "scipy.stats.ttest_1samp", "scipy.stats.anderson_ksamp", "pandas.read_csv", "numpy.concatenate", "numpy.histogram", "numpy.divide", "numpy.log", "numpy.sqrt", "numpy.greater", "matplotlib.pyplot.subplot", "numpy.array", "numpy.zeros", "numpy.percentile", "matplotlib.pyplot.figure", "numpy.diff", "matplotlib.pyplot.hist", "matplotlib.pyplot.show", "numpy.floor", "numpy.ceil", "scipy.stats.norm.ppf", "numpy.random.seed", "matplotlib.pyplot.xlabel", "numpy.sum", "matplotlib.pyplot.legend", "scipy.stats.ks_2samp", "numpy.greater_equal", "matplotlib.pyplot.ylabel", "scipy.stats.wasserstein_distance", "numpy.linspace", "numpy.random.geometric", "numpy.maximum" ] ]
SKKU-ESLAB/Latency-Predictor
[ "c09ee81606a2f7263568a8704d9bd3e4e0721b86" ]
[ "incubator-tvm/topi/tests/python/test_topi_broadcast.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Test code for broadcasting operators.\"\"\"\nfrom common import get_all_backend\nimport numpy as np\nimport tvm\nimport topi\n\n\ndef verify_broadcast_to_ele(in_shape, out_shape, fbcast):\n # Build the logic and compile the function\n A = tvm.placeholder(shape=in_shape, name=\"A\")\n B = fbcast(A, out_shape)\n\n def check_device(device):\n ctx = tvm.context(device, 0)\n if not ctx.exist:\n print(\"Skip because %s is not enabled\" % device)\n return\n print(\"Running on target: %s\" % device)\n with tvm.target.create(device):\n s = topi.generic.schedule_broadcast(B)\n foo = tvm.build(s, [A, B], device, name=\"broadcast_to\")\n data_npy = np.random.uniform(size=in_shape).astype(A.dtype)\n out_npy = np.broadcast_to(data_npy, out_shape)\n data_nd = tvm.nd.array(data_npy, ctx)\n out_nd = tvm.nd.array(np.empty(out_shape).astype(B.dtype), ctx)\n foo(data_nd, out_nd)\n tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy)\n\n for target in get_all_backend():\n check_device(target)\n check_device(\"sdaccel\")\n\n\ndef verify_broadcast_binary_ele(lhs_shape, rhs_shape,\n ftopi, fnumpy,\n lhs_min=-100, lhs_max=100,\n rhs_min=-100, rhs_max=100,\n dtype=\"float32\"):\n # Build the logic and compile the function\n A = (tvm.var(\"A\", dtype=dtype) if lhs_shape is None\n else tvm.placeholder(shape=lhs_shape, name=\"A\", dtype=dtype))\n B = (tvm.var(\"B\", dtype=dtype) if rhs_shape is None\n else tvm.placeholder(shape=rhs_shape, name=\"B\", dtype=dtype))\n C = ftopi(A, B)\n if isinstance(A, tvm.expr.Expr) and isinstance(B, tvm.expr.Expr):\n assert(isinstance(C, tvm.expr.Expr))\n return\n\n def check_device(device):\n ctx = tvm.context(device, 0)\n if not ctx.exist:\n print(\"Skip because %s is not enabled\" % device)\n return\n print(\"Running on target: %s\" % device)\n with tvm.target.create(device):\n s = topi.generic.schedule_broadcast(C)\n foo = tvm.build(s, [A, B, C], device, name=\"broadcast_binary\" + \"_\" + ftopi.__name__)\n if lhs_shape is None:\n lhs_npy = float(np.random.uniform(low=lhs_min, high=lhs_max))\n if dtype.startswith('int'):\n lhs_npy = int(lhs_npy)\n lhs_nd = lhs_npy\n else:\n lhs_npy = np.random.uniform(low=lhs_min, high=lhs_max,\n size=lhs_shape).astype(A.dtype)\n lhs_nd = tvm.nd.array(lhs_npy, ctx)\n\n if rhs_shape is None:\n rhs_npy = float(np.random.uniform(low=rhs_min, high=rhs_max))\n if dtype.startswith('int'):\n rhs_npy = int(rhs_npy)\n rhs_nd = rhs_npy\n else:\n rhs_npy = np.random.uniform(low=rhs_min, high=rhs_max,\n size=rhs_shape).astype(A.dtype)\n rhs_nd = tvm.nd.array(rhs_npy, ctx)\n\n out_npy = fnumpy(lhs_npy, rhs_npy)\n out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(C.dtype), ctx)\n foo(lhs_nd, rhs_nd, out_nd)\n tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy, 
rtol=1E-4, atol=1E-4)\n\n for target in get_all_backend():\n check_device(target)\n check_device(\"sdaccel\")\n\n\ndef test_broadcast_to():\n verify_broadcast_to_ele((1,), (10,), topi.broadcast_to)\n verify_broadcast_to_ele((), (10,), topi.broadcast_to)\n verify_broadcast_to_ele((1, 1, 5, 4), (3, 4, 4, 4, 5, 4), topi.broadcast_to)\n verify_broadcast_to_ele((1, 128, 1, 32), (64, 128, 64, 32), topi.broadcast_to)\n\n\ndef test_add():\n verify_broadcast_binary_ele(\n (), (), topi.add, np.add)\n verify_broadcast_binary_ele(\n (5, 2, 3), (2, 1), topi.add, np.add)\n\n\ndef test_subtract():\n verify_broadcast_binary_ele(\n (5, 2, 3), (), topi.subtract, np.subtract)\n verify_broadcast_binary_ele(\n (5, 2, 3), None, topi.subtract, np.subtract)\n verify_broadcast_binary_ele(\n None, None, topi.subtract, np.subtract)\n verify_broadcast_binary_ele(\n (1, 32), (64, 32), topi.subtract, np.subtract)\n\n\ndef test_multiply():\n verify_broadcast_binary_ele(\n (5, 64, 128), (2, 5, 64, 1), topi.multiply, np.multiply)\n\n\ndef test_divide():\n verify_broadcast_binary_ele(\n None, (10,), topi.divide, np.divide, rhs_min=0.0001)\n verify_broadcast_binary_ele(\n (), None, topi.divide, np.divide, rhs_min=0.0001)\n verify_broadcast_binary_ele(\n (2, 3, 1, 32), (64, 32), topi.divide, np.divide, rhs_min=0.0001)\n\ndef test_floor_divide():\n verify_broadcast_binary_ele(\n None, (10,), topi.floor_divide, np.floor_divide, rhs_min=0.0001)\n verify_broadcast_binary_ele(\n (), None, topi.floor_divide, np.floor_divide, rhs_min=0.0001)\n verify_broadcast_binary_ele(\n (2, 3, 1, 32), (64, 32), topi.floor_divide, np.floor_divide, rhs_min=0.0001)\n\ndef test_maximum_minmum():\n verify_broadcast_binary_ele(\n (32,), (64, 32), topi.maximum, np.maximum)\n verify_broadcast_binary_ele(\n (1, 2, 2, 1, 32), (64, 32), topi.minimum, np.minimum)\n\n\ndef test_power():\n verify_broadcast_binary_ele(\n (1, 2, 2), (2,), topi.power, np.power, lhs_min=0.001, rhs_min=0.001, rhs_max=2)\n\n\ndef test_mod():\n verify_broadcast_binary_ele(\n (1, 2, 2), (2,), topi.mod, np.mod, lhs_min=0.001, rhs_min=1, dtype=\"int32\")\n\ndef test_floor_mod():\n verify_broadcast_binary_ele(\n (1, 2, 2), (2,), topi.floor_mod, np.fmod, lhs_min=0.001, rhs_min=1, dtype=\"int32\")\n verify_broadcast_binary_ele(\n (3, 4, 5), (3, 4, 5), topi.floor_mod, np.fmod, lhs_min=0.001, rhs_min=1, dtype=\"float32\")\n\ndef test_cmp():\n # explicit specify the output type\n def greater(x, y):\n return topi.greater(x, y).astype(\"int8\")\n\n def less(x, y):\n return topi.less(x, y).astype(\"int8\")\n\n def equal(x, y):\n return topi.equal(x, y).astype(\"int8\")\n\n def not_equal(x, y):\n return topi.not_equal(x, y).astype(\"int8\")\n\n def greater_equal(x, y):\n return topi.greater_equal(x, y).astype(\"int8\")\n\n def less_equal(x, y):\n return topi.less_equal(x, y).astype(\"int8\")\n verify_broadcast_binary_ele(\n (1, 2, 2), (2,), greater, np.greater)\n verify_broadcast_binary_ele(\n (2, 1, 2), (2, 3, 1), less, np.less)\n verify_broadcast_binary_ele(\n (2, 1, 2), (2, 3, 1), equal, np.equal,\n lhs_min=-2, lhs_max=2, rhs_min=-2, rhs_max=2, dtype='int32')\n verify_broadcast_binary_ele(\n (2, 1, 2), (2, 3, 1), not_equal, np.not_equal,\n lhs_min=-2, lhs_max=2, rhs_min=-2, rhs_max=2, dtype='int32')\n verify_broadcast_binary_ele(\n (7, 1, 5), (7, 3, 1), greater_equal, np.greater_equal,\n lhs_min=-3, lhs_max=3, rhs_min=-3, rhs_max=3, dtype='int32')\n verify_broadcast_binary_ele(\n (7, 1, 5), (7, 3, 1), less_equal, np.less_equal,\n lhs_min=-3, lhs_max=3, rhs_min=-3, rhs_max=3, 
dtype='int32')\n\n\ndef test_shift():\n # explicit specify the output type\n verify_broadcast_binary_ele(\n (2, 1, 2), None, topi.right_shift, np.right_shift,\n dtype=\"int32\", rhs_min=0, rhs_max=32)\n\n verify_broadcast_binary_ele(\n (1, 2, 2), (2,), topi.left_shift, np.left_shift,\n dtype=\"int32\", rhs_min=0, rhs_max=32)\n\n verify_broadcast_binary_ele(\n (1, 2, 2), (2,), topi.left_shift, np.left_shift,\n dtype=\"int8\", rhs_min=0, rhs_max=32)\n\n\ndef test_logical_single_ele():\n def test_apply(\n func,\n name,\n f_numpy,\n indata,\n dtype=\"bool\",\n ):\n # Build the logic and compile the function\n A = tvm.placeholder(shape=indata.shape, name=\"A\", dtype=dtype)\n B = func(A)\n if isinstance(A, tvm.expr.Expr):\n assert (isinstance(B, tvm.expr.Expr))\n return\n\n def check_device(device):\n ctx = tvm.context(device, 0)\n if not ctx.exist:\n print(\"Skip because %s is not enabled\" % device)\n return\n print(\"Running on target: %s\" % device)\n with tvm.target.create(device):\n s = topi.generic.schedule_broadcast(B)\n foo = tvm.build(s, [A, B], device, name=name)\n\n data_npy = indata.astype(A.dtype)\n data_nd = tvm.nd.array(data_npy, ctx)\n\n out_npy = f_numpy(indata)\n out_nd = tvm.nd.array(np.empty(data_npy.shape).astype(B.dtype), ctx)\n foo(data_nd, out_nd)\n tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy)\n\n for device in get_all_backend():\n check_device(device)\n\n test_apply(topi.logical_not, \"logical_not\", np.logical_not, np.array([True, False, 0, 1]))\n test_apply(topi.logical_not, \"logical_not\", np.logical_not, np.array(np.arange(5) < 3))\n\n\ndef test_logical_binary_ele():\n def test_apply(\n func,\n name,\n f_numpy,\n lhs,\n rhs,\n dtype=\"bool\",\n ):\n # Build the logic and compile the function\n A = (tvm.var(\"A\", dtype=dtype))\n B = (tvm.var(\"B\", dtype=dtype))\n C = func(A, B)\n if isinstance(A, tvm.expr.Expr) and isinstance(B, tvm.expr.Expr):\n assert (isinstance(C, tvm.expr.Expr))\n return\n\n def check_device(device):\n ctx = tvm.context(device, 0)\n if not ctx.exist:\n print(\"Skip because %s is not enabled\" % device)\n return\n print(\"Running on target: %s\" % device)\n with tvm.target.create(device):\n s = topi.generic.schedule_broadcast(C)\n foo = tvm.build(s, [A, B, C], device, name=name)\n\n lhs_nd = tvm.nd.array(lhs, ctx)\n rhs_nd = tvm.nd.array(rhs, ctx)\n\n out_npy = f_numpy(lhs, rhs)\n out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(C.dtype), ctx)\n foo(lhs_nd, rhs_nd, out_nd)\n tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy, rtol=1E-4, atol=1E-4)\n\n for device in get_all_backend():\n check_device(device)\n\n test_apply(topi.logical_and, \"logical_and\", np.logical_and, True, False)\n test_apply(topi.logical_and, \"logical_and\", np.logical_and, [True, False], [False, False])\n test_apply(topi.logical_or, \"logical_or\", np.logical_or, True, False)\n test_apply(topi.logical_or, \"logical_or\", np.logical_or, [True, False], [False, False])\n\n\nif __name__ == \"__main__\":\n test_add()\n test_shift()\n test_cmp()\n test_mod()\n test_floor_mod()\n test_subtract()\n test_multiply()\n test_divide()\n test_floor_divide()\n test_maximum_minmum()\n test_power()\n test_broadcast_to()\n test_logical_single_ele()\n test_logical_binary_ele()\n" ]
[ [ "numpy.array", "numpy.empty", "numpy.random.uniform", "numpy.arange", "numpy.broadcast_to" ] ]
BunsenFeng/BotRGCN
[ "c48cf903fe3f875b11fa1557e5a66a947a360832" ]
[ "Dataset.py" ]
[ "import torch\nimport numpy as np\nimport pandas as pd\nimport json\nimport os\nfrom transformers import pipeline\nfrom datetime import datetime as dt\nfrom torch.utils.data import Dataset\nfrom tqdm import tqdm\n\n\nclass Twibot20(Dataset):\n def __init__(self,root='./Data/',device='cpu',process=True,save=True):\n self.root = root\n self.device = device\n if process:\n print('Loading train.json')\n df_train=pd.read_json('./Twibot-20/train.json')\n print('Loading test.json')\n df_test=pd.read_json('./Twibot-20/test.json')\n print('Loading support.json')\n df_support=pd.read_json('./Twibot-20/support.json')\n print('Loading dev.json')\n df_dev=pd.read_json('./Twibot-20/dev.json')\n print('Finished')\n df_train=df_train.iloc[:,[0,1,2,3,5]]\n df_test=df_test.iloc[:,[0,1,2,3,5]]\n df_support=df_support.iloc[:,[0,1,2,3]]\n df_dev=df_dev.iloc[:,[0,1,2,3,5]]\n df_support['label']='None'\n self.df_data_labeled=pd.concat([df_train,df_dev,df_test],ignore_index=True)\n self.df_data=pd.concat([df_train,df_dev,df_test,df_support],ignore_index=True)\n self.df_data=self.df_data\n self.df_data_labeled=self.df_data_labeled\n self.save=save\n \n def load_labels(self):\n print('Loading labels...',end=' ')\n path=self.root+'label.pt'\n if not os.path.exists(path):\n labels=torch.LongTensor(self.df_data_labeled['label']).to(self.device)\n if self.save:\n torch.save(labels,'./Data/label.pt')\n else:\n labels=torch.load(self.root+\"label.pt\").to(self.device)\n print('Finished')\n \n return labels\n \n def Des_Preprocess(self):\n print('Loading raw feature1...',end=' ')\n path=self.root+'description.npy'\n if not os.path.exists(path):\n description=[]\n for i in range (self.df_data.shape[0]):\n if self.df_data['profile'][i] is None or self.df_data['profile'][i]['description'] is None:\n description.append('None')\n else:\n description.append(self.df_data['profile'][i]['description'])\n description=np.array(description)\n if self.save:\n np.save(path,description)\n else:\n description=np.load(path,allow_pickle=True)\n print('Finished')\n return description\n\n def Des_embbeding(self):\n print('Running feature1 embedding')\n path=self.root+\"des_tensor.pt\"\n if not os.path.exists(path):\n description=np.load(self.root+'description.npy',allow_pickle=True)\n print('Loading RoBerta')\n feature_extraction = pipeline('feature-extraction', model=\"distilroberta-base\", tokenizer=\"distilroberta-base\",device=0)\n des_vec=[]\n #for (j,each) in tqdm(enumerate(description)):\n for each in tqdm(description):\n feature=torch.Tensor(feature_extraction(each))\n for (i,tensor) in enumerate(feature[0]):\n if i==0:\n feature_tensor=tensor\n else:\n feature_tensor+=tensor\n feature_tensor/=feature.shape[1]\n des_vec.append(feature_tensor)\n #if (j%1000==0):\n #print('[{:>6d}/229580]'.format(j+1))\n des_tensor=torch.stack(des_vec,0).to(self.device)\n if self.save:\n torch.save(des_tensor,'./Data/des_tensor.pt')\n else:\n des_tensor=torch.load(self.root+\"des_tensor.pt\").to(self.device)\n print('Finished')\n return des_tensor\n \n def tweets_preprocess(self):\n print('Loading raw feature2...',end=' ')\n path=self.root+'tweets.npy'\n if not os.path.exists(path):\n tweets=[]\n for i in range (self.df_data.shape[0]):\n one_usr_tweets=[]\n if self.df_data['tweet'][i] is None:\n one_usr_tweets.append('')\n else:\n for each in self.df_data['tweet'][i]:\n one_usr_tweets.append(each)\n tweets.append(one_usr_tweets)\n tweets=np.array(tweets)\n if self.save:\n np.save(path,tweets)\n else:\n tweets=np.load(path,allow_pickle=True)\n 
print('Finished')\n return tweets\n \n def tweets_embedding(self):\n print('Running feature2 embedding')\n path=self.root+\"tweets_tensor.pt\"\n if not os.path.exists(path):\n tweets=np.load(\"./Data/tweets.npy\",allow_pickle=True)\n print('Loading RoBerta')\n feature_extract=pipeline('feature-extraction',model='roberta-base',tokenizer='roberta-base',device=0,padding=True, truncation=True,max_length=500, add_special_tokens = True)\n tweets_list=[]\n for each_person_tweets in tqdm(tweets):\n for j,each_tweet in enumerate(each_person_tweets):\n each_tweet_tensor=torch.tensor(feature_extract(each_tweet))\n for k,each_word_tensor in enumerate(each_tweet_tensor[0]):\n if k==0:\n total_word_tensor=each_word_tensor\n else:\n total_word_tensor+=each_word_tensor\n total_word_tensor/=each_tweet_tensor.shape[1]\n if j==0:\n total_each_person_tweets=total_word_tensor\n else:\n total_each_person_tweets+=total_word_tensor\n total_each_person_tweets/=len(each_person_tweets)\n tweets_list.append(total_each_person_tweets)\n #if (i%500==0):\n #print('[{:>6d}/229580]'.format(i+1))\n tweet_tensor=torch.stack(tweets_list).to(self.device)\n if self.save:\n torch.save(tweet_tensor,path)\n else:\n tweets_tensor=torch.load(self.root+\"tweets_tensor.pt\").to(self.device)\n print('Finished')\n return tweets_tensor\n \n def num_prop_preprocess(self):\n print('Processing feature3...',end=' ')\n path0=self.root+'num_prop.pt'\n if not os.path.exists(path0):\n path=self.root\n if not os.path.exists(path+\"followers_count.pt\"):\n followers_count=[]\n for i in range (self.df_data.shape[0]):\n if self.df_data['profile'][i] is None or self.df_data['profile'][i]['followers_count'] is None:\n followers_count.append(0)\n else:\n followers_count.append(self.df_data['profile'][i]['followers_count'])\n followers_count=torch.tensor(np.array(followers_count,dtype=np.float32)).to(self.device)\n if self.save:\n torch.save(followers_count,path+\"followers_count.pt\")\n \n friends_count=[]\n for i in range (self.df_data.shape[0]):\n if self.df_data['profile'][i] is None or self.df_data['profile'][i]['friends_count'] is None:\n friends_count.append(0)\n else:\n friends_count.append(self.df_data['profile'][i]['friends_count'])\n friends_count=torch.tensor(np.array(friends_count,dtype=np.float32)).to(self.device)\n if self.save:\n torch.save(friends_count,path+'friends_count.pt')\n \n screen_name_length=[]\n for i in range (self.df_data.shape[0]):\n if self.df_data['profile'][i] is None or self.df_data['profile'][i]['screen_name'] is None:\n screen_name_length.append(0)\n else:\n screen_name_length.append(len(self.df_data['profile'][i]['screen_name']))\n screen_name_length=torch.tensor(np.array(screen_name_length,dtype=np.float32)).to(self.device)\n if self.save:\n torch.save(screen_name_length,path+'screen_name_length.pt')\n \n favourites_count=[]\n for i in range (self.df_data.shape[0]):\n if self.df_data['profile'][i] is None or self.df_data['profile'][i]['favourites_count'] is None:\n favourites_count.append(0)\n else:\n favourites_count.append(self.df_data['profile'][i]['favourites_count'])\n favourites_count=torch.tensor(np.array(favourites_count,dtype=np.float32)).to(self.device)\n if self.save:\n torch.save(favourites_count,path+'favourites_count.pt')\n \n active_days=[]\n date0=dt.strptime('Tue Sep 1 00:00:00 +0000 2020 ','%a %b %d %X %z %Y ')\n for i in range (self.df_data.shape[0]):\n if self.df_data['profile'][i] is None or self.df_data['profile'][i]['created_at'] is None:\n active_days.append(0)\n else:\n 
date=dt.strptime(self.df_data['profile'][i]['created_at'],'%a %b %d %X %z %Y ')\n active_days.append((date0-date).days)\n active_days=torch.tensor(np.array(active_days,dtype=np.float32)).to(self.device)\n if self.save:\n torch.save(active_days,path+'active_days.pt')\n \n statuses_count=[]\n for i in range (self.df_data.shape[0]):\n if self.df_data['profile'][i] is None or self.df_data['profile'][i]['statuses_count'] is None:\n statuses_count.append(0)\n else:\n statuses_count.append(int(self.df_data['profile'][i]['statuses_count']))\n statuses_count=torch.tensor(np.array(statuses_count,dtype=np.float32)).to(self.device)\n if self.save:\n torch.save(statuses_count,path+'statuses_count.pt')\n \n else:\n active_days=torch.load(path+\"active_days.pt\")\n screen_name_length=torch.load(path+\"screen_name_length.pt\")\n favourites_count=torch.load(path+\"favourites_count.pt\")\n followers_count=torch.load(path+\"followers_count.pt\")\n friends_count=torch.load(path+\"friends_count.pt\")\n statuses_count=torch.load(path+\"statuses_count.pt\")\n \n active_days=pd.Series(active_days.to('cpu').detach().numpy())\n active_days=(active_days-active_days.mean())/active_days.std()\n active_days=torch.tensor(np.array(active_days))\n\n screen_name_length=pd.Series(screen_name_length.to('cpu').detach().numpy())\n screen_name_length_days=(screen_name_length-screen_name_length.mean())/screen_name_length.std()\n screen_name_length_days=torch.tensor(np.array(screen_name_length_days))\n\n favourites_count=pd.Series(favourites_count.to('cpu').detach().numpy())\n favourites_count=(favourites_count-favourites_count.mean())/favourites_count.std()\n favourites_count=torch.tensor(np.array(favourites_count))\n\n followers_count=pd.Series(followers_count.to('cpu').detach().numpy())\n followers_count=(followers_count-followers_count.mean())/followers_count.std()\n followers_count=torch.tensor(np.array(followers_count))\n\n friends_count=pd.Series(friends_count.to('cpu').detach().numpy())\n friends_count=(friends_count-friends_count.mean())/friends_count.std()\n friends_count=torch.tensor(np.array(friends_count))\n\n statuses_count=pd.Series(statuses_count.to('cpu').detach().numpy())\n statuses_count=(statuses_count-statuses_count.mean())/statuses_count.std()\n statuses_count=torch.tensor(np.array(statuses_count))\n\n num_prop=torch.cat((followers_count.reshape([229580,1]),friends_count.reshape([229580,1]),favourites_count.reshape([229580,1]),statuses_count.reshape([229580,1]),screen_name_length_days.reshape([229580,1]),active_days.reshape([229580,1])),1).to(self.device)\n\n if self.save:\n torch.save(num_prop,\"./Data/num_prop.pt\")\n \n else:\n num_prop=torch.load(self.root+\"num_prop.pt\").to(self.device)\n print('Finished')\n return num_prop\n \n def cat_prop_preprocess(self):\n print('Processing feature4...',end=' ')\n path=self.root+'category_properties.pt'\n if not os.path.exists(path):\n category_properties=[]\n properties=['protected','geo_enabled','verified','contributors_enabled','is_translator','is_translation_enabled','profile_background_tile','profile_use_background_image','has_extended_profile','default_profile','default_profile_image']\n for i in range (self.df_data.shape[0]):\n prop=[]\n if self.df_data['profile'][i] is None:\n for i in range(11):\n prop.append(0)\n else:\n for each in properties:\n if self.df_data['profile'][i][each] is None:\n prop.append(0)\n else:\n if self.df_data['profile'][i][each] == \"True \":\n prop.append(1)\n else:\n prop.append(0)\n prop=np.array(prop)\n 
category_properties.append(prop)\n category_properties=torch.tensor(np.array(category_properties,dtype=np.float32)).to(self.device)\n if self.save:\n torch.save(category_properties,self.root+'category_properties.pt')\n else:\n category_properties=torch.load(self.root+\"category_properties.pt\").to(self.device)\n print('Finished')\n return category_properties\n \n def Build_Graph(self):\n print('Building graph',end=' ')\n path=self.root+'edge_index.pt'\n if not os.path.exists(path):\n id2index_dict={id:index for index,id in enumerate(self.df_data['ID'])}\n edge_index=[]\n edge_type=[]\n for i,relation in enumerate(self.df_data['neighbor']):\n if relation is not None:\n for each_id in relation['following']:\n try:\n target_id=id2index_dict[int(each_id)]\n except KeyError:\n continue\n else:\n edge_index.append([i,target_id])\n edge_type.append(0)\n for each_id in relation['follower']:\n try:\n target_id=id2index_dict[int(each_id)]\n except KeyError:\n continue\n else:\n edge_index.append([i,target_id])\n edge_type.append(1)\n else:\n continue\n edge_index=torch.tensor(edge_index,dtype=torch.long).t().contiguous().to(self.device)\n edge_type=torch.tensor(edge_type,dtype=torch.long).to(self.device)\n if self.save:\n torch.save(edge_index,self.root+\"edge_index.pt\")\n torch.save(edge_type,self.root+\"edge_type.pt\")\n else:\n edge_index=torch.load(self.root+\"edge_index.pt\").to(self.device)\n edge_type=torch.load(self.root+\"edge_type.pt\").to(self.device)\n print('Finished')\n return edge_index,edge_type\n \n def train_val_test_mask(self):\n train_idx=range(8278)\n val_idx=range(8278,8278+2365)\n test_idx=range(8278+2365,8278+2365+1183)\n return train_idx,val_idx,test_idx\n \n def dataloader(self):\n labels=self.load_labels()\n self.Des_Preprocess()\n des_tensor=self.Des_embbeding()\n self.tweets_preprocess()\n tweets_tensor=self.tweets_embedding()\n num_prop=self.num_prop_preprocess()\n category_prop=self.cat_prop_preprocess()\n edge_index,edge_type=self.Build_Graph()\n train_idx,val_idx,test_idx=self.train_val_test_mask()\n return des_tensor,tweets_tensor,num_prop,category_prop,edge_index,edge_type,labels,train_idx,val_idx,test_idx\n " ]
[ [ "numpy.array", "torch.stack", "torch.save", "numpy.load", "numpy.save", "pandas.read_json", "torch.LongTensor", "torch.tensor", "torch.load", "pandas.concat" ] ]
yhzhang1/deep_rl_trader_modified
[ "5ef1b776bb54048d42deb59396b3112113ca4d60" ]
[ "util.py" ]
[ "import numpy as np\nfrom rl.core import Processor\nfrom rl.util import WhiteningNormalizer\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\n\nfrom time import sleep\n\nADDITIONAL_STATE = 2\nclass NormalizerProcessor(Processor):\n def __init__(self):\n self.scaler = StandardScaler()\n self.normalizer = None\n\n def process_state_batch(self, batch):\n batch_len = batch.shape[0]\n k = []\n for i in range(batch_len):\n observe = batch[i][..., :-ADDITIONAL_STATE]\n #print('observe.shape: ', observe.shape)\n #print('observe: ', observe)\n #observe = self.scaler.fit_transform(observe)\n #print('observe: ', observe)\n agent_state = batch[i][..., -ADDITIONAL_STATE:]\n #print('agent_state: ', agent_state)\n temp = np.concatenate((observe, agent_state),axis=1)\n #print('temp: ', temp)\n temp = temp.reshape((1,) + temp.shape)\n #print('temp: ', temp)\n #sleep(10)\n k.append(temp)\n batch = np.concatenate(tuple(k))\n return batch\n \nclass DDPGProcessor(Processor):\n def process_action(self, action):\n action = np.clip(action[0], -1, 1)\n \n return action\n \n" ]
[ [ "numpy.concatenate", "numpy.clip", "sklearn.preprocessing.StandardScaler" ] ]
FilippoAleotti/unsupervised_detection
[ "aeb673ac34eb80b1fb22bc28af8148bd7fc8ee77" ]
[ "models/nets.py" ]
[ "import tensorflow as tf\nfrom .utils.convolution_utils import gen_conv, gen_deconv, conv, deconv\n\ndef generator_net(images, flows, scope, reuse=None, training=True):\n \"\"\"Mask network.\n Args:\n image: input rgb image [-0.5, +0.5]\n flows: rgb flow image masked [-0.5, +0.5]\n Returns:\n mask: mask region [0, 1], 1 is fully masked, 0 is not.\n \"\"\"\n\n mask_channels = 2 # probability of 1 and zero\n x = tf.concat((images, flows), 3) #[B, H, W, 5]\n\n cnum = 32\n with tf.variable_scope(scope, reuse=reuse):\n # stage1\n x_0 = gen_conv(x, cnum, 5, 1, name='conv1', training=training) # ---------------------------\n x = gen_conv(x_0, 2*cnum, 3, 2, name='conv2_downsample', training=training) # Skip connection\n x_1 = gen_conv(x, 2*cnum, 3, 1, name='conv3', training=training) # -------------------\n x = gen_conv(x_1, 4*cnum, 3, 2, name='conv4_downsample', training=training)\n x = gen_conv(x, 4*cnum, 3, 1, name='conv5', training=training)\n x_2 = gen_conv(x, 4*cnum, 3, 1, name='conv6', training=training) # -----------------\n x = gen_conv(x_2, 4*cnum, 3, rate=2, name='conv7_atrous', training=training)\n x = gen_conv(x, 4*cnum, 3, rate=4, name='conv8_atrous', training=training)\n x = gen_conv(x, 4*cnum, 3, rate=8, name='conv9_atrous', training=training)\n x = gen_conv(x, 4*cnum, 3, rate=16, name='conv10_atrous', training=training)\n x = gen_conv(x, 4*cnum, 3, 1, name='conv11', training=training) + x_2 #-------------\n x = gen_conv(x, 4*cnum, 3, 1, name='conv12', training=training)\n x = gen_deconv(x, 2*cnum, name='conv13_upsample', training=training)\n x = gen_conv(x, 2*cnum, 3, 1, name='conv14', training=training) + x_1 # --------------------\n x = gen_deconv(x, cnum, name='conv15_upsample', training=training) + x_0 #-------------------\n x = gen_conv(x, cnum//2, 3, 1, name='conv16', training=training)\n x = gen_conv(x, mask_channels, 3, 1, activation=tf.identity,\n name='conv17', training=training)\n # Division by constant experimentally improved training\n x = tf.divide(x, tf.constant(10.0))\n generated_mask = tf.nn.softmax(x, axis=-1)\n # get logits for probability 1\n generated_mask = tf.expand_dims(generated_mask[:,:,:,0], axis=-1)\n return generated_mask\n\n\ndef recover_net( img1, flow_masked, mask, scope, reuse=None, f=0.25, training=True ):\n batch_size = tf.shape(img1)[0]\n C = flow_masked.get_shape().as_list()[-1]\n orisize = img1.get_shape().as_list()[1:-1]\n\n ones_x = tf.ones_like(flow_masked)[:, :, :, 0:1]\n # Augmentation of the flow\n flow_masked = tf.concat([flow_masked, ones_x, 1.0-mask], axis=3)\n flow_in_channels = flow_masked.get_shape().as_list()[-1]\n\n with tf.variable_scope(scope, reuse=reuse):\n\n aconv1 = conv( img1, 'aconv1', shape=[7,7, 3, int(64*f)], stride=2, reuse=reuse, training=training ) # h/2(192), 64\n aconv2 = conv( aconv1, 'aconv2', shape=[5,5,int(64*f), int(128*f)], stride=2, reuse=reuse, training=training ) # h/4(96), 128\n aconv3 = conv( aconv2, 'aconv3', shape=[5,5,int(128*f),int(256*f)], stride=2, reuse=reuse, training=training ) # h/8(48), 256\n aconv31= conv( aconv3, 'aconv31', shape=[3,3,int(256*f),int(256*f)], stride=1, reuse=reuse, training=training )\n aconv4 = conv( aconv31, 'aconv4', shape=[3,3,int(256*f),int(512*f)], stride=2, reuse=reuse, training=training ) # h/16(24), 512\n aconv41= conv( aconv4, 'aconv41', shape=[3,3,int(512*f),int(512*f)], stride=1, reuse=reuse, training=training )\n aconv5 = conv( aconv41, 'aconv5', shape=[3,3,int(512*f),int(512*f)], stride=2, reuse=reuse, training=training ) # h/32(12), 512\n aconv51= conv( 
aconv5, 'aconv51', shape=[3,3,int(512*f),int(512*f)], stride=1, reuse=reuse, training=training )\n aconv6 = conv( aconv51, 'aconv6', shape=[3,3,int(512*f),int(512*f)], stride=2, reuse=reuse, training=training ) # h/64(6), 512\n\n bconv1 = conv( flow_masked, 'bconv1', shape=[7,7, flow_in_channels, int(64*f)], stride=2, reuse=reuse, training=training ) # h/2(192), 64\n bconv2 = conv( bconv1, 'bconv2', shape=[5,5,int(64*f), int(128*f)], stride=2, reuse=reuse, training=training ) # h/4(96), 128\n bconv3 = conv( bconv2, 'bconv3', shape=[5,5,int(128*f),int(256*f)], stride=2, reuse=reuse, training=training ) # h/8(48), 256\n bconv31= conv( bconv3, 'bconv31', shape=[3,3,int(256*f),int(256*f)], stride=1, reuse=reuse, training=training )\n bconv4 = conv( bconv31, 'bconv4', shape=[3,3,int(256*f),int(512*f)], stride=2, reuse=reuse, training=training ) # h/16(24), 512\n bconv41= conv( bconv4, 'bconv41', shape=[3,3,int(512*f),int(512*f)], stride=1, reuse=reuse, training=training )\n bconv5 = conv( bconv41, 'bconv5', shape=[3,3,int(512*f),int(512*f)], stride=2, reuse=reuse, training=training ) # h/32(12), 512\n bconv51= conv( bconv5, 'bconv51', shape=[3,3,int(512*f),int(512*f)], stride=1, reuse=reuse, training=training )\n bconv6 = conv( bconv51, 'bconv6', shape=[3,3,int(512*f),int(512*f)], stride=2, reuse=reuse, training=training ) # h/64(6), 512\n\n #conv6 = tf.add( aconv6, bconv6 )\n conv6 = tf.concat( (aconv6, bconv6), 3 ) #h/64(6) 512*2*f\n outsz = bconv51.get_shape() # h/32(12), 512*f\n deconv5 = deconv( conv6, size=[outsz[1],outsz[2]], name='deconv5', shape=[4,4,int(512*2*f),int(512*f)], reuse=reuse, training=training )\n concat5 = tf.concat( (deconv5,bconv51,aconv51), 3 ) # h/32(12), 512*3*f\n\n flow5 = conv( concat5, 'flow5', shape=[3,3,int(512*3*f),C], stride=1, reuse=reuse, training=training, activation=tf.identity ) # h/32(12), C\n outsz = bconv41.get_shape() # h/16(24), 512*f\n deconv4 = deconv( concat5, size=[outsz[1],outsz[2]], name='deconv4', shape=[4,4,int(512*3*f),int(512*f)], reuse=reuse, training=training )\n upflow4 = deconv( flow5, size=[outsz[1],outsz[2]], name='upflow4', shape=[4,4,C,C], reuse=reuse, training=training, activation=tf.identity )\n concat4 = tf.concat( (deconv4,bconv41,aconv41,upflow4), 3 ) # h/16(24), 512*3*f+C\n\n flow4 = conv( concat4, 'flow4', shape=[3,3,int(512*3*f+C),C], stride=1, reuse=reuse, training=training, activation=tf.identity ) # h/16(24), C\n outsz = bconv31.get_shape() # h/8(48), 256*f\n deconv3 = deconv( concat4, size=[outsz[1],outsz[2]], name='deconv3', shape=[4,4,int(512*3*f+C),int(256*f)], reuse=reuse, training=training )\n upflow3 = deconv( flow4, size=[outsz[1],outsz[2]], name='upflow3', shape=[4,4,C,C], reuse=reuse, training=training, activation=tf.identity )\n concat3 = tf.concat( (deconv3,bconv31,aconv31,upflow3), 3 ) # h/8(48), 256*3*f+C\n\n flow3 = conv( concat3, 'flow3', shape=[3,3,int(256*3*f+C),C], stride=1, reuse=reuse, training=training, activation=tf.identity ) # h/8(48), C\n outsz = bconv2.get_shape() # h/4(96), 128*f\n deconv2 = deconv( concat3, size=[outsz[1],outsz[2]], name='deconv2', shape=[4,4,int(256*3*f+C),int(128*f)], reuse=reuse, training=training )\n upflow2 = deconv( flow3, size=[outsz[1],outsz[2]], name='upflow2', shape=[4,4,C,C], reuse=reuse, training=training, activation=tf.identity )\n concat2 = tf.concat( (deconv2,bconv2,aconv2,upflow2), 3 ) # h/4(96), 128*3*f+C\n\n flow2 = conv( concat2, 'flow2', shape=[3,3,int(128*3*f+C),C], stride=1, reuse=reuse, training=training, activation=tf.identity ) # h/4(96), C\n outsz = 
bconv1.get_shape() # h/2(192), 64*f\n deconv1 = deconv( concat2, size=[outsz[1],outsz[2]], name='deconv1', shape=[4,4,int(128*3*f+C),int(64*f)], reuse=reuse, training=training )\n upflow1 = deconv( flow2, size=[outsz[1],outsz[2]], name='upflow1', shape=[4,4,C,C], reuse=reuse, training=training, activation=tf.identity )\n concat1 = tf.concat( (deconv1,bconv1,aconv1,upflow1), 3 ) # h/2(192), 64*3*f+C\n\n flow1 = conv( concat1, 'flow1', shape=[5,5,int(64*3*f+C),C], stride=1, reuse=reuse, training=training, activation=tf.identity ) # h/2(192), C\n pred_flow = tf.image.resize_images( flow1, size=orisize )\n\n return pred_flow\n" ]
[ [ "tensorflow.shape", "tensorflow.concat", "tensorflow.expand_dims", "tensorflow.ones_like", "tensorflow.constant", "tensorflow.variable_scope", "tensorflow.nn.softmax", "tensorflow.image.resize_images" ] ]
goruck/nilm
[ "6c1a18b9a3fda1f204c92ae1958e99cf07091585" ]
[ "ml/predict.py" ]
[ "\"\"\"\nPredict appliance type and power using novel data from my home.\n\nCopyright (c) 2022 Lindo St. Angel\n\"\"\"\n\nimport os\nimport argparse\nimport socket\n\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom logger import log\nfrom common import WindowGenerator, params_appliance\nfrom nilm_metric import get_Epd\n\nWINDOW_LENGTH = 599\nAGGREGATE_MEAN = 522\nAGGREGATE_STD = 814\nSAMPLE_PERIOD = 8 # Mains sample period in seconds.\n\nif __name__ == '__main__':\n default_appliances = [\n 'kettle', 'fridge', 'microwave', 'washingmachine', 'dishwasher'\n ]\n default_dataset_dir = '/home/lindo/Develop/nilm/ml/dataset_management/my-house'\n default_panel_location = 'garage'\n default_model_dir = '/home/lindo/Develop/nilm/ml/models'\n default_ckpt_dir = 'checkpoints'\n default_results_dir = '/home/lindo/Develop/nilm/ml/results'\n default_rt_preds_dataset_dir = '/home/lindo/Develop/nilm-datasets/my-house/garage/samples_4_26_22.csv'\n\n parser = argparse.ArgumentParser(description='Predict appliance\\\n given a trained neural network\\\n for energy disaggregation -\\\n network input = mains window.')\n parser.add_argument('--appliances',\n type=str,\n nargs='+',\n default=default_appliances,\n help='name(s) of target appliance')\n parser.add_argument('--datadir',\n type=str,\n default=default_dataset_dir,\n help='directory location of test data')\n parser.add_argument('--rt_preds_datadir',\n type=str,\n default=default_rt_preds_dataset_dir,\n help='directory location of real-time prediction dataset')\n parser.add_argument('--panel',\n type=str,\n default=default_panel_location,\n help='sub-panel location')\n parser.add_argument('--trained_model_dir',\n type=str,\n default=default_model_dir,\n help='directory to the trained models')\n parser.add_argument('--ckpt_dir',\n type=str,\n default=default_ckpt_dir,\n help='directory name of model checkpoint')\n parser.add_argument('--save_results_dir',\n type=str,\n default=default_results_dir,\n help='directory to save the predictions')\n parser.add_argument('--plot', action='store_true',\n help='show predicted appliance and mains power plots')\n parser.add_argument('--show_rt_preds', action='store_true',\n help='show real-time predictions on plot')\n parser.add_argument('--crop',\n type=int,\n default=None,\n help='use part of the dataset for predictions')\n parser.add_argument('--batch_size',\n type=int,\n default=1000,\n help='mini-batch size')\n parser.set_defaults(plot=False)\n parser.set_defaults(show_rt_preds=False)\n\n log(f'Machine name: {socket.gethostname()}')\n args = parser.parse_args()\n log('Arguments: ')\n log(args)\n\n log(f'Target appliance(s): {args.appliances}')\n\n # offset parameter from window length\n offset = int(0.5 * (WINDOW_LENGTH - 1.0))\n\n def load_dataset(file_name, crop=None) -> np.array:\n \"\"\"Load input dataset file and return as np array..\"\"\"\n df = pd.read_csv(file_name, header=None, nrows=crop)\n\n return np.array(df, dtype=np.float32)\n\n test_set_x = load_dataset(os.path.join(\n args.datadir, f'{args.panel}.csv'), args.crop)\n ts_size = test_set_x.size\n log(f'There are {ts_size/10**6:.3f}M test samples.')\n\n test_provider = WindowGenerator(\n dataset=(test_set_x.flatten(), None),\n offset=offset,\n train=False,\n shuffle=False,\n batch_size=args.batch_size)\n \n def prediction(appliance) -> np.array:\n \"\"\"Make appliance prediction and return post-processed result.\"\"\"\n log(f'Making prediction for {appliance}.')\n model_file_path = 
os.path.join(\n args.trained_model_dir, appliance, args.ckpt_dir)\n log(f'Loading saved model from {model_file_path}.')\n model = tf.keras.models.load_model(model_file_path)\n model.summary()\n prediction = model.predict(\n x=test_provider,\n verbose=1,\n workers=24,\n use_multiprocessing=True)\n # De-normalize.\n mean = params_appliance[appliance]['mean']\n std = params_appliance[appliance]['std']\n log(f'appliance_mean: {str(mean)}')\n log(f'appliance_std: {str(std)}')\n prediction = prediction * std + mean\n # Apply on-power thresholds.\n threshold = params_appliance[appliance]['on_power_threshold']\n prediction[prediction <= threshold] = 0.0\n return prediction\n predictions = {\n appliance : prediction(\n appliance\n ) for appliance in args.appliances\n }\n\n log('aggregate_mean: ' + str(AGGREGATE_MEAN))\n log('aggregate_std: ' + str(AGGREGATE_STD))\n aggregate = test_set_x.flatten() * AGGREGATE_STD + AGGREGATE_MEAN\n\n # Calculate metrics. \n # get_Epd returns a relative metric between two powers, so zero out one.\n target = np.zeros_like(aggregate)\n aggregate_epd = get_Epd(target, aggregate, SAMPLE_PERIOD)\n log(f'Aggregate energy: {aggregate_epd/1000:.3f} kWh per day')\n for appliance in args.appliances:\n epd = get_Epd(target, predictions[appliance], SAMPLE_PERIOD)\n log(f'{appliance} energy: {epd/1000:.3f} kWh per day')\n\n save_path = os.path.join(args.save_results_dir, args.panel)\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n # Find max value in predictions for setting plot limits.\n max_pred = np.ceil(np.max(list(predictions.values())))\n\n if args.show_rt_preds:\n # Load real-time prediction dataset.\n df = pd.read_csv(args.rt_preds_datadir, usecols=default_appliances)\n df = df.fillna(0) # convert NaN's into zero's\n # Define real-time predictions columns to appliance names.\n # Select appliance prediction column then adjust for output timing.\n # Adjustment is simply moving samples earlier in time by\n # a WINDOW_LENGTH since the real-time code places the prediction\n # at the end of a window of samples.\n rt_preds_to_appliances = {\n appliance: np.array(\n df[[appliance]][WINDOW_LENGTH:], dtype=np.float32\n ) for appliance in args.appliances\n }\n\n # Save and perhaps show powers in a single row of subplots.\n nrows = len(args.appliances) + 1\n fig, ax = plt.subplots(nrows=nrows, ncols=1, constrained_layout=True)\n ax[0].set_ylabel('Watts')\n ax[0].set_title('aggregate')\n ax[0].plot(aggregate[offset:-offset], color='tab:orange', linewidth=1.8)\n row = 1\n for appliance in args.appliances:\n ax[row].set_ylabel('Watts')\n ax[row].set_title(appliance)\n ax[row].set_ylim(0, max_pred)\n ax[row].plot(\n predictions[appliance], color='tab:red', \n linewidth=1.5, label='prediction'\n )\n if args.show_rt_preds:\n ax[row].plot(\n rt_preds_to_appliances[appliance], color='tab:green',\n linewidth=1.5, label='real-time prediction'\n )\n ax[row].legend(loc='upper right')\n row+=1\n fig.suptitle(f'Prediction results for {args.panel}',\n fontsize=16, fontweight='bold')\n plot_savepath = os.path.join(save_path, f'{args.panel}.png')\n plt.savefig(fname=plot_savepath)\n if args.plot:\n plt.show()\n plt.close()" ]
[ [ "numpy.zeros_like", "numpy.array", "matplotlib.pyplot.savefig", "matplotlib.pyplot.close", "matplotlib.pyplot.subplots", "tensorflow.keras.models.load_model", "matplotlib.pyplot.show", "pandas.read_csv" ] ]
BigDataHa/Nonlinear_Face_3DMM
[ "1c0c7d388231e26460303134ad9b0f87357d010c" ]
[ "model_non_linear_3DMM.py" ]
[ "'''\r\nOutline of the main training script\r\nPart of data/input pipeline is ommitted\r\n\r\n'''\r\n\r\n\r\nfrom __future__ import division\r\nimport os\r\nimport time\r\nimport csv\r\nimport random\r\nfrom random import randint\r\nfrom math import floor\r\nfrom glob import glob\r\nimport tensorflow as tf\r\nimport tensorflow.contrib.slim as slim\r\nimport numpy as np\r\nfrom six.moves import xrange\r\n#from progress.bar import Bar\r\nfrom rendering_ops import *\r\nfrom ops import *\r\nfrom utils import *\r\n\r\n\r\nTRI_NUM = 105840\r\nVERTEX_NUM = 53215\r\nCONST_PIXELS_NUM = 20\r\n\r\n\r\n\r\nclass DCGAN(object):\r\n def __init__(self, sess, config, devices=None):\r\n \"\"\"\r\n Args:\r\n sess: TensorFlow session\r\n batch_size: The size of batch. Should be specified before training.\r\n \"\"\"\r\n self.sess = sess\r\n self.c_dim = config.c_dim\r\n self.gpu_num = len(config.gpu.split())\r\n\r\n \r\n \r\n self.batch_size = config.batch_size\r\n self.image_size = config.image_size\r\n self.sample_size = config.sample_size\r\n self.image_size = 224 #config.image_size\r\n self.texture_size = [192, 224]\r\n self.z_dim = config.z_dim\r\n self.gf_dim = config.gf_dim\r\n self.df_dim = config.df_dim\r\n self.gfc_dim = config.gfc_dim\r\n self.dfc_dim = config.dfc_dim\r\n\r\n self.shape_loss = config.shape_loss if hasattr(config, 'shape_loss') else \"l2\"\r\n self.tex_loss = config.tex_loss if hasattr(config, 'tex_loss') else \"l1\"\r\n\r\n self.is_using_landmark = config.is_using_landmark\r\n self.is_using_symetry = config.is_using_symetry\r\n self.is_using_recon = config.is_using_recon\r\n self.is_using_frecon = config.is_using_frecon\r\n self.is_batchwise_white_shading = config.is_batchwise_white_shading\r\n self.is_const_albedo = config.is_const_albedo\r\n self.is_const_local_albedo = config.is_const_local_albedo\r\n self.is_smoothness = config.is_smoothness\r\n \r\n self.mDim = 8\r\n self.ilDim = 27\r\n \r\n self.vertexNum = VERTEX_NUM\r\n self.landmark_num = 68\r\n\r\n \r\n self.checkpoint_dir = config.checkpoint_dir\r\n self.samples_dir = config.samples_dir\r\n\r\n if not os.path.exists(self.samples_dir+\"/\"+self.model_dir):\r\n os.makedirs(self.samples_dir+\"/\"+self.model_dir)\r\n if not os.path.exists(self.checkpoint_dir+\"/\"+self.model_dir):\r\n os.makedirs(self.checkpoint_dir+\"/\"+self.model_dir)\r\n\r\n self.setupParaStat()\r\n #self.setupValData()\r\n self.build_model()\r\n\r\n def build_model(self):\r\n def filename2image(input_filenames, offset_height = None, offset_width = None, target_height=None, target_width=None):\r\n batch_size = len(input_filenames)\r\n if offset_height != None:\r\n offset_height = tf.split(offset_height, batch_size)\r\n offset_width = tf.split(offset_width, batch_size)\r\n\r\n images = [] \r\n for i in range(batch_size):\r\n file_contents = tf.read_file(input_filenames[i])\r\n image = tf.image.decode_png(file_contents, channels=3)\r\n if offset_height != None:\r\n image = tf.image.crop_to_bounding_box(image, tf.reshape(offset_height[i], []), tf.reshape(offset_width[i], []), target_height, target_width)\r\n\r\n images.append(image)\r\n return tf.cast(tf.stack(images), tf.float32)\r\n\r\n \r\n self.m_300W_labels = tf.placeholder(tf.float32, [self.batch_size, self.mDim], name='m_300W_labels')\r\n self.shape_300W_labels = tf.placeholder(tf.float32, [self.batch_size, self.vertexNum * 3], name='shape_300W_labels')\r\n self.texture_300W_labels = tf.placeholder(tf.float32, [self.batch_size, self.texture_size[0], self.texture_size[1], self.c_dim], 
name='tex_300W_labels')\r\n #self.exp_300W_labels = tf.placeholder(tf.float32, [self.batch_size, self.expDim], name='exp_300W_labels')\r\n #self.il_300W_labels = tf.placeholder(tf.float32, [self.batch_size, self.ilDim], name='lighting_300W_labels')\r\n\r\n self.input_offset_height = tf.placeholder(tf.int32, [self.batch_size], name='input_offset_height')\r\n self.input_offset_width = tf.placeholder(tf.int32, [self.batch_size], name='input_offset_width')\r\n\r\n self.input_images_fn_300W = [tf.placeholder(dtype=tf.string) for _ in range(self.batch_size)]\r\n self.input_masks_fn_300W = [tf.placeholder(dtype=tf.string) for _ in range(self.batch_size)]\r\n self.texture_labels_fn_300W = [tf.placeholder(dtype=tf.string) for _ in range(self.batch_size)]\r\n self.texture_masks_fn_300W = [tf.placeholder(dtype=tf.string) for _ in range(self.batch_size)]\r\n\r\n\r\n # For const alb loss\r\n self.albedo_indexes_x1 = tf.placeholder(tf.int32, [self.batch_size, CONST_PIXELS_NUM, 1], name='idexes_x1')\r\n self.albedo_indexes_y1 = tf.placeholder(tf.int32, [self.batch_size, CONST_PIXELS_NUM, 1], name='idexes_y1')\r\n\r\n self.albedo_indexes_x2 = tf.placeholder(tf.int32, [self.batch_size, CONST_PIXELS_NUM, 1], name='idexes_x2')\r\n self.albedo_indexes_y2 = tf.placeholder(tf.int32, [self.batch_size, CONST_PIXELS_NUM, 1], name='idexes_y2')\r\n\r\n self.const_alb_mask = load_const_alb_mask()\r\n\r\n def model_and_loss(input_images_fn_300W, input_masks_fn_300W, texture_labels_fn_300W, texture_masks_fn_300W, input_offset_height, input_offset_width, m_300W_labels, shape_300W_labels, albedo_indexes_x1, albedo_indexes_y1, albedo_indexes_x2, albedo_indexes_y2):\r\n batch_size = self.batch_size / self.gpu_num\r\n input_images_300W_ = filename2image(input_images_fn_300W, offset_height = input_offset_height, offset_width = input_offset_width, target_height=self.image_size, target_width=self.image_size)\r\n input_images_300W = input_images_300W_ /127.5 - 1\r\n\r\n input_masks_300W = filename2image(input_masks_fn_300W, offset_height = input_offset_height, offset_width = input_offset_width, target_height=self.image_size, target_width=self.image_size)\r\n input_masks_300W = input_masks_300W / 255.0\r\n\r\n texture_300W_labels = filename2image(texture_labels_fn_300W)\r\n texture_300W_labels = texture_300W_labels / 127.5 - 1\r\n\r\n texture_mask_300W_labels = filename2image(texture_masks_fn_300W)\r\n texture_mask_300W_labels = texture_mask_300W_labels / 255.0\r\n\r\n\r\n ## ------------------------- Network ---------------------------\r\n shape_fx_300W, tex_fx_300W, m_300W, il_300W = self.generator_encoder( input_images_300W, is_reuse=False)\r\n shape_300W, shape_2d_300W = self.generator_decoder_shape(shape_fx_300W, is_reuse=False, is_training=True)\r\n albedo_300W = self.generator_decoder_albedo(tex_fx_300W, is_reuse=False, is_training=True)\r\n\r\n m_300W_full = m_300W * self.std_m_tf + self.mean_m_tf\r\n shape_300W_full = shape_300W * self.std_shape_tf + self.mean_shape_tf\r\n shape_300W_labels_full = shape_300W_labels * self.std_shape_tf + self.mean_shape_tf\r\n m_300W_labels_full = m_300W_labels * self.std_m_tf + self.mean_m_tf\r\n\r\n shape_for_synthesize = shape_300W_full\r\n m_for_synthesize = m_300W_full\r\n\r\n # Rendering\r\n shade_300W = generate_shade(il_300W, m_for_synthesize, shape_for_synthesize, self.texture_size)\r\n texture_300W = 2.0*tf.multiply( (albedo_300W + 1.0)/2.0, shade_300W) - 1\r\n\r\n\r\n G_images_300W, G_images_300W_mask = warp_texture(texture_300W, m_for_synthesize, shape_for_synthesize, 
output_size=self.image_size)\r\n\r\n G_images_300W_mask = tf.multiply(input_masks_300W, tf.expand_dims(G_images_300W_mask, -1))\r\n G_images_300W = tf.multiply(G_images_300W, G_images_300W_mask) + tf.multiply(input_images_300W, 1 - G_images_300W_mask)\r\n\r\n landmark_u_300W, landmark_v_300W = compute_landmarks(m_300W_full, shape_300W_full, output_size=self.image_size)\r\n landmark_u_300W_labels, landmark_v_300W_labels = compute_landmarks(m_300W_labels_full, shape_300W_labels_full, output_size=self.image_size)\r\n\r\n\r\n \r\n\r\n ##---------------- Losses -------------------------\r\n g_loss = tf.zeros(1)\r\n\r\n G_loss_shape = 10*norm_loss(shape_300W, shape_300W_labels, loss_type = self.shape_loss) #tf.zeros(1) \r\n G_loss_m = 5*norm_loss(m_300W, m_300W_labels, loss_type = 'l2')\r\n\r\n\r\n texture_vis_mask = tf.cast(tf.not_equal(texture_300W_labels, tf.ones_like(texture_300W_labels)*(-1)), tf.float32)\r\n texture_vis_mask = tf.multiply(texture_vis_mask, texture_mask_300W_labels)\r\n texture_ratio = tf.reduce_sum(texture_vis_mask) / (batch_size* self.texture_size[0] * self.texture_size[1] * self.c_dim)\r\n\r\n \r\n\r\n if self.is_batchwise_white_shading:\r\n uv_mask_tf = tf.expand_dims(tf.expand_dims(tf.constant( self.uv_mask, dtype = tf.float32 ), 0), -1)\r\n\r\n mean_shade = tf.reduce_mean( tf.multiply(shade_300W, uv_mask_tf) , axis=[0,1,2]) * 16384 / 10379\r\n G_loss_white_shading = 10*norm_loss(mean_shade, 0.99*tf.ones([1, 3], dtype=tf.float32), loss_type = \"l2\")\r\n else:\r\n G_loss_white_shading = tf.zeros(1)\r\n\r\n \r\n\r\n G_loss_texture = norm_loss(texture_300W, texture_300W_labels, mask = texture_vis_mask, loss_type = self.tex_loss) / texture_ratio\r\n\r\n G_loss_recon = 10*norm_loss(G_images_300W, input_images_300W, loss_type = self.tex_loss ) / (tf.reduce_sum(G_images_300W_mask)/ (batch_size* self.image_size * self.image_size))\r\n\r\n g_loss += G_loss_m + G_loss_shape + G_loss_white_shading\r\n\r\n if self.is_smoothness:\r\n G_loss_smoothness = 1000*norm_loss( (shape_2d_300W[:, :-2, 1:-1, :] + shape_2d_300W[:, 2:, 1:-1, :] + shape_2d_300W[:, 1:-1, :-2, :] + shape_2d_300W[:, 1:-1, 2:, :])/4.0,\r\n shape_2d_300W[:, 1:-1, 1:-1, :], loss_type = self.shape_loss)\r\n else:\r\n G_loss_smoothness = tf.zeros(1)\r\n g_loss = g_loss + G_loss_smoothness\r\n\r\n G_landmark_loss = (tf.reduce_mean(tf.nn.l2_loss(landmark_u_300W - landmark_u_300W_labels )) + tf.reduce_mean(tf.nn.l2_loss(landmark_v_300W - landmark_v_300W_labels ))) / self.landmark_num / batch_size / 50\r\n\r\n if self.is_using_symetry:\r\n albedo_300W_flip = tf.map_fn(lambda img: tf.image.flip_left_right(img), albedo_300W)\r\n G_loss_symetry = norm_loss(tf.maximum(tf.abs(albedo_300W-albedo_300W_flip), 0.05), 0, loss_type = self.tex_loss)\r\n else:\r\n G_loss_symetry = tf.zeros(1)\r\n g_loss += G_loss_symetry\r\n\r\n if self.is_const_albedo:\r\n\r\n albedo_1 = get_pixel_value(albedo_300W, albedo_indexes_x1, albedo_indexes_y1)\r\n albedo_2 = get_pixel_value(albedo_300W, albedo_indexes_x2, albedo_indexes_y2)\r\n\r\n G_loss_albedo_const = 5*norm_loss( tf.maximum(tf.abs(albedo_1- albedo_2), 0.05), 0, loss_type = self.tex_loss)\r\n else:\r\n G_loss_albedo_const = tf.zeros(1)\r\n g_loss += G_loss_albedo_const\r\n\r\n if self.is_const_local_albedo:\r\n local_albedo_alpha = 0.9\r\n texture_300W_labels_chromaticity = (texture_300W_labels + 1.0)/2.0\r\n texture_300W_labels_chromaticity = tf.divide(texture_300W_labels_chromaticity, tf.reduce_sum(texture_300W_labels_chromaticity, axis=[-1], keep_dims=True) + 1e-6)\r\n\r\n \r\n w_u = 
tf.stop_gradient(tf.exp(-15*tf.norm( texture_300W_labels_chromaticity[:, :-1, :, :] - texture_300W_labels_chromaticity[:, 1:, :, :], ord='euclidean', axis=-1, keep_dims=True)) * texture_vis_mask[:, :-1, :, :] )\r\n G_loss_local_albedo_const_u = tf.reduce_mean(norm_loss( albedo_300W[:, :-1, :, :], albedo_300W[:, 1:, :, :], loss_type = 'l2,1', reduce_mean=False, p=0.8) * w_u) / tf.reduce_sum(w_u+1e-6)\r\n\r\n \r\n w_v = tf.stop_gradient(tf.exp(-15*tf.norm( texture_300W_labels_chromaticity[:, :, :-1, :] - texture_300W_labels_chromaticity[:, :, 1:, :], ord='euclidean', axis=-1, keep_dims=True)) * texture_vis_mask[:, :, :-1, :] )\r\n G_loss_local_albedo_const_v = tf.reduce_mean(norm_loss( albedo_300W[:, :, :-1, :], albedo_300W[:, :, 1:, :], loss_type = 'l2,1', reduce_mean=False, p=0.8) * w_v) / tf.reduce_sum(w_v+1e-6)\r\n\r\n G_loss_local_albedo_const = (G_loss_local_albedo_const_u + G_loss_local_albedo_const_v)*10\r\n else:\r\n G_loss_local_albedo_const = tf.zeros(1)\r\n g_loss += G_loss_local_albedo_const\r\n\r\n if self.is_using_recon:\r\n g_loss += G_loss_recon\r\n else:\r\n g_loss += G_loss_texture\r\n\r\n G_loss_frecon = tf.zeros(1)\r\n \r\n\r\n if self.is_using_landmark:\r\n g_loss_wlandmark = g_loss + G_landmark_loss\r\n else:\r\n g_loss_wlandmark = g_loss\r\n\r\n\r\n return g_loss, g_loss_wlandmark, G_loss_m, G_loss_shape, G_loss_texture, G_loss_recon, G_loss_frecon, G_landmark_loss, G_loss_symetry, G_loss_white_shading, G_loss_albedo_const, G_loss_smoothness, G_loss_local_albedo_const, \\\r\n G_images_300W, texture_300W, albedo_300W, shade_300W, texture_300W_labels, input_images_300W\r\n\r\n g_loss, g_loss_wlandmark, G_loss_m, G_loss_shape, G_loss_texture, G_loss_recon, G_loss_frecon, G_landmark_loss, G_loss_symetry, G_loss_white_shading, G_loss_albedo_const, G_loss_smoothness, G_loss_local_albedo_const, \\\r\n G_images_300W, texture_300W, albedo_300W, shade_300W, texture_300W_labels, input_images_300W \\\r\n = make_parallel(model_and_loss, self.gpu_num, \r\n input_images_fn_300W= self.input_images_fn_300W, input_masks_fn_300W=self.input_masks_fn_300W,\r\n texture_labels_fn_300W=self.texture_labels_fn_300W, texture_masks_fn_300W=self.texture_masks_fn_300W,\r\n input_offset_height=self.input_offset_height, input_offset_width=self.input_offset_width,\r\n m_300W_labels = self.m_300W_labels, shape_300W_labels=self.shape_300W_labels, \r\n albedo_indexes_x1= self.albedo_indexes_x1, albedo_indexes_y1 = self.albedo_indexes_y1,\r\n albedo_indexes_x2=self.albedo_indexes_x2, albedo_indexes_y2 = self.albedo_indexes_y2)\r\n\r\n self.G_loss = tf.reduce_mean(g_loss)\r\n self.G_loss_wlandmark = tf.reduce_mean(g_loss_wlandmark)\r\n self.G_loss_m = tf.reduce_mean(G_loss_m)\r\n self.G_loss_shape = tf.reduce_mean(G_loss_shape)\r\n self.G_loss_texture = tf.reduce_mean(G_loss_texture)\r\n self.G_loss_recon = tf.reduce_mean(G_loss_recon)\r\n self.G_loss_frecon = tf.reduce_mean(G_loss_frecon)\r\n self.G_landmark_loss = tf.reduce_mean(G_landmark_loss)\r\n self.G_loss_symetry = tf.reduce_mean(G_loss_symetry)\r\n self.G_loss_white_shading = tf.reduce_mean(G_loss_white_shading)\r\n self.G_loss_albedo_const = tf.reduce_mean(G_loss_albedo_const)\r\n self.G_loss_local_albedo_const = tf.reduce_mean(G_loss_local_albedo_const)\r\n self.G_loss_smoothness = tf.reduce_mean(G_loss_smoothness)\r\n\r\n self.G_images_300W = tf.clip_by_value(tf.concat(G_images_300W, axis=0), -1, 1)\r\n self.texture_300W = tf.clip_by_value(tf.concat(texture_300W, axis=0), -1, 1)\r\n self.albedo_300W = tf.concat(albedo_300W, axis=0)\r\n 
self.shade_300W = tf.concat(shade_300W, axis=0)\r\n self.texture_300W_labels = tf.concat(texture_300W_labels, axis=0)\r\n self.input_images_300W = tf.concat(input_images_300W, axis=0)\r\n\r\n\r\n \r\n t_vars = tf.trainable_variables()\r\n self.d_vars = [var for var in t_vars if 'd_' in var.name]\r\n self.g_vars = [var for var in t_vars if 'g_' in var.name]\r\n\r\n self.g_en_vars = [var for var in t_vars if 'g_k' in var.name]\r\n self.g_tex_de_vars = [var for var in t_vars if 'g_h' in var.name]\r\n self.g_shape_de_vars = [var for var in t_vars if 'g_s' in var.name]\r\n\r\n self.saver = tf.train.Saver(keep_checkpoint_every_n_hours=1, max_to_keep = 10)\r\n \r\n\r\n def setupParaStat(self):\r\n self.tri = load_3DMM_tri()\r\n self.vertex_tri = load_3DMM_vertex_tri()\r\n self.vt2pixel_u, self.vt2pixel_v = load_3DMM_vt2pixel()\r\n self.uv_tri, self.uv_mask = load_3DMM_tri_2d(with_mask = True)\r\n\r\n \r\n \r\n\r\n\r\n # Basis\r\n mu_shape, w_shape = load_Basel_basic('shape')\r\n mu_exp, w_exp = load_Basel_basic('exp')\r\n\r\n self.mean_shape = mu_shape + mu_exp\r\n self.std_shape = np.tile(np.array([1e4, 1e4, 1e4]), self.vertexNum)\r\n #self.std_shape = np.load('std_shape.npy')\r\n\r\n self.mean_shape_tf = tf.constant(self.mean_shape, tf.float32)\r\n self.std_shape_tf = tf.constant(self.std_shape, tf.float32)\r\n\r\n self.mean_m = np.load('mean_m.npy')\r\n self.std_m = np.load('std_m.npy')\r\n\r\n self.mean_m_tf = tf.constant(self.mean_m, tf.float32)\r\n self.std_m_tf = tf.constant(self.std_m, tf.float32)\r\n \r\n self.w_shape = w_shape\r\n self.w_exp = w_exp\r\n\r\n \r\n\r\n def m2full(self, m):\r\n return m * self.std_m_tf + self.mean_m_tf\r\n\r\n def shape2full(self, shape):\r\n return shape * self.std_shape_tf + self.mean_shape_tf\r\n \r\n\r\n\r\n def setupTrainingData(self):\r\n # Training data - 300W\r\n\r\n dataset = ['AFW', 'AFW_Flip', 'HELEN', 'HELEN_Flip', 'IBUG', 'IBUG_Flip', 'LFPW', 'LFPW_Flip']\r\n dataset_num = len(dataset)\r\n\r\n\r\n images = [0] * dataset_num\r\n pid = [0] * dataset_num\r\n m = [0] * dataset_num\r\n pose = [0] * dataset_num\r\n shape = [0] * dataset_num\r\n exp = [0] * dataset_num\r\n tex_para = [0] * dataset_num\r\n tex = [0] * dataset_num\r\n il = [0] * dataset_num\r\n alb = [0] * dataset_num\r\n mask = [0] * dataset_num\r\n\r\n for i in range(dataset_num):\r\n images[i], pid[i], m[i], pose[i], shape[i], exp[i], tex_para[i], _ = load_300W_LP_dataset(dataset[i])\r\n\r\n\r\n self.image_filenames = np.concatenate(images, axis=0)\r\n images = None\r\n\r\n all_m = np.concatenate(m, axis=0)\r\n\r\n all_shape_para = np.concatenate(shape, axis=0)\r\n all_exp_para = np.concatenate(exp, axis=0)\r\n self.all_tex_para = np.concatenate(tex_para, axis=0)\r\n self.pids_300W = np.concatenate(pid, axis=0)\r\n #self.all_il = np.concatenate(il, axis=0)\r\n\r\n\r\n self.all_m = np.divide(np.subtract(all_m, self.mean_m), self.std_m)\r\n\r\n self.mean_shape_para = np.mean(all_shape_para, axis=0)\r\n self.std_shape_para = np.std(all_shape_para, axis=0)\r\n self.all_shape_para = all_shape_para #np.divide(np.subtract(all_shape_para, self.mean_shape_para), self.std_shape_para)\r\n\r\n\r\n self.mean_exp_para = np.mean(all_exp_para, axis=0)\r\n self.std_exp_para = np.std(all_exp_para, axis=0)\r\n self.all_exp_para = all_exp_para #np.divide(np.subtract(all_exp_para, self.mean_exp_para), self.std_exp_para)\r\n\r\n return\r\n\r\n \r\n \r\n\r\n\r\n def train(self, config):\r\n\r\n # Training data\r\n self.setupTrainingData()\r\n\r\n valid_idx = range(self.image_filenames.shape[0])\r\n 
print(\"Valid images %d/%d\" % ( len(valid_idx), self.image_filenames.shape[0] ))\r\n\r\n\r\n\r\n np.random.shuffle(valid_idx)\r\n\r\n\r\n # Using 2 separated optim for with and withou landmark losses\r\n g_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1).minimize(self.G_loss, var_list=self.g_vars, colocate_gradients_with_ops=True)\r\n g_en_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1).minimize(self.G_loss_wlandmark, var_list=self.g_en_vars, colocate_gradients_with_ops=True)\r\n tf.global_variables_initializer().run()\r\n \r\n\r\n \r\n \"\"\"Train DCGAN\"\"\"\r\n could_load, checkpoint_counter = self.load(self.checkpoint_dir)\r\n if could_load:\r\n epoch0 = checkpoint_counter + 1\r\n print(\" [*] Load SUCCESS\")\r\n else:\r\n epoch0 = 1\r\n print(\" [!] Load failed...\")\r\n\r\n\r\n start_time = time.time()\r\n \r\n for epoch in xrange(epoch0, config.epoch):\r\n \r\n batch_idxs = min(len(valid_idx), config.train_size) // config.batch_size\r\n\r\n for idx in xrange(0, batch_idxs):\r\n '''\r\n Data processing. Create feed_dict\r\n '''\r\n\r\n # 300W\r\n batch_idx = valid_idx[idx*config.batch_size:(idx+1)*config.batch_size]\r\n \r\n \r\n tx = np.random.random_integers(0, 32, size=config.batch_size)\r\n ty = np.random.random_integers(0, 32, size=config.batch_size)\r\n\r\n batch_300W_images_fn = [self.image_filenames[batch_idx[i]] for i in range(config.batch_size)] \r\n\r\n\r\n\r\n delta_m = np.zeros([config.batch_size, 8])\r\n delta_m[:,6] = np.divide(ty, self.std_m[6])\r\n delta_m[:,7] = np.divide(32 - tx, self.std_m[7])\r\n\r\n \r\n batch_m = self.all_m[batch_idx,:] - delta_m\r\n\r\n batch_shape_para = self.all_shape_para[batch_idx,:]\r\n batch_exp_para = self.all_exp_para[batch_idx,:]\r\n\r\n batch_shape = np.divide( np.matmul(batch_shape_para, np.transpose(self.w_shape)) + np.matmul(batch_exp_para, np.transpose(self.w_exp)), self.std_shape)\r\n\r\n ffeed_dict={ self.m_300W_labels: batch_m, self.shape_300W_labels: batch_shape, self.input_offset_height: tx, self.input_offset_width: ty}\r\n for i in range(self.batch_size):\r\n ffeed_dict[self.input_images_fn_300W[i]] = _300W_LP_DIR + 'image/'+ batch_300W_images_fn[i]\r\n ffeed_dict[self.input_masks_fn_300W[i]] = _300W_LP_DIR + 'mask_img/'+ batch_300W_images_fn[i]\r\n ffeed_dict[self.texture_labels_fn_300W[i]] = _300W_LP_DIR + 'texture/'+ image2texture_fn(batch_300W_images_fn[i])\r\n ffeed_dict[self.texture_masks_fn_300W[i]] = _300W_LP_DIR + 'mask/'+ image2texture_fn(batch_300W_images_fn[i])\r\n\r\n if self.is_const_albedo:\r\n indexes1 = np.random.randint(low=0, high=self.const_alb_mask.shape[0], size=[self.batch_size* CONST_PIXELS_NUM])\r\n indexes2 = np.random.randint(low=0, high=self.const_alb_mask.shape[0], size=[self.batch_size* CONST_PIXELS_NUM])\r\n\r\n\r\n ffeed_dict[self.albedo_indexes_x1] = np.reshape(self.const_alb_mask[indexes1, 1], [self.batch_size, CONST_PIXELS_NUM, 1])\r\n ffeed_dict[self.albedo_indexes_y1] = np.reshape(self.const_alb_mask[indexes1, 0], [self.batch_size, CONST_PIXELS_NUM, 1])\r\n ffeed_dict[self.albedo_indexes_x2] = np.reshape(self.const_alb_mask[indexes2, 1], [self.batch_size, CONST_PIXELS_NUM, 1])\r\n ffeed_dict[self.albedo_indexes_y2] = np.reshape(self.const_alb_mask[indexes2, 0], [self.batch_size, CONST_PIXELS_NUM, 1])\r\n \r\n\r\n if np.mod(idx, 2) == 0:\r\n # Update G\r\n self.sess.run([g_optim], feed_dict=ffeed_dict)\r\n else:\r\n # Update G encoder only\r\n self.sess.run([g_en_optim], feed_dict=ffeed_dict)\r\n \r\n\r\n\r\n 
self.save(config.checkpoint_dir, epoch)\r\n\r\n \r\n \r\n\r\n def generator_encoder(self, image, is_reuse=False, is_training = True): \r\n\r\n ''' \r\n Creating a encoder network\r\n\r\n Output: shape_fx, tex_fc, m, il\r\n\r\n '''\r\n\r\n \r\n if not is_reuse:\r\n self.g_bn0_0 = batch_norm(name='g_k_bn0_0')\r\n self.g_bn0_1 = batch_norm(name='g_k_bn0_1')\r\n self.g_bn0_2 = batch_norm(name='g_k_bn0_2')\r\n self.g_bn0_3 = batch_norm(name='g_k_bn0_3')\r\n self.g_bn1_0 = batch_norm(name='g_k_bn1_0')\r\n self.g_bn1_1 = batch_norm(name='g_k_bn1_1')\r\n self.g_bn1_2 = batch_norm(name='g_k_bn1_2')\r\n self.g_bn1_3 = batch_norm(name='g_k_bn1_3')\r\n self.g_bn2_0 = batch_norm(name='g_k_bn2_0')\r\n self.g_bn2_1 = batch_norm(name='g_k_bn2_1')\r\n self.g_bn2_2 = batch_norm(name='g_k_bn2_2')\r\n self.g_bn2_3 = batch_norm(name='g_k_bn2_3')\r\n self.g_bn3_0 = batch_norm(name='g_k_bn3_0')\r\n self.g_bn3_1 = batch_norm(name='g_k_bn3_1')\r\n self.g_bn3_2 = batch_norm(name='g_k_bn3_2')\r\n self.g_bn3_3 = batch_norm(name='g_k_bn3_3')\r\n self.g_bn4_0 = batch_norm(name='g_k_bn4_0')\r\n self.g_bn4_1 = batch_norm(name='g_k_bn4_1')\r\n self.g_bn4_2 = batch_norm(name='g_k_bn4_2')\r\n self.g_bn4_c = batch_norm(name='g_h_bn4_c')\r\n self.g_bn5 = batch_norm(name='g_k_bn5')\r\n self.g_bn5_m = batch_norm(name='g_k_bn5_m')\r\n self.g_bn5_il = batch_norm(name='g_k_bn5_il')\r\n self.g_bn5_shape = batch_norm(name='g_k_bn5_shape')\r\n self.g_bn5_shape_linear = batch_norm(name='g_k_bn5_shape_linear')\r\n self.g_bn5_tex = batch_norm(name='g_k_bn5_tex')\r\n\r\n \r\n\r\n k0_1 = elu(self.g_bn0_1(conv2d(image, self.gf_dim*1, k_h=7, k_w=7, d_h=2, d_w =2, use_bias = False, name='g_k01_conv', reuse = is_reuse), train=is_training, reuse = is_reuse))\r\n k0_2 = elu(self.g_bn0_2(conv2d(k0_1, self.gf_dim*2, d_h=1, d_w =1, use_bias = False, name='g_k02_conv', reuse = is_reuse), train=is_training, reuse = is_reuse))\r\n\r\n k1_0 = elu(self.g_bn1_0(conv2d(k0_2, self.gf_dim*2, d_h=2, d_w =2, use_bias = False, name='g_k10_conv', reuse = is_reuse), train=is_training, reuse = is_reuse))\r\n k1_1 = elu(self.g_bn1_1(conv2d(k1_0, self.gf_dim*2, d_h=1, d_w =1, use_bias = False, name='g_k11_conv', reuse = is_reuse), train=is_training, reuse = is_reuse))\r\n k1_2 = elu(self.g_bn1_2(conv2d(k1_1, self.gf_dim*4, d_h=1, d_w =1, use_bias = False, name='g_k12_conv', reuse = is_reuse), train=is_training, reuse = is_reuse))\r\n #k1_3 = maxpool2d(k1_2, k=2, padding='VALID')\r\n k2_0 = elu(self.g_bn2_0(conv2d(k1_2, self.gf_dim*4, d_h=2, d_w =2, use_bias = False, name='g_k20_conv', reuse = is_reuse), train=is_training, reuse = is_reuse))\r\n k2_1 = elu(self.g_bn2_1(conv2d(k2_0, self.gf_dim*3, d_h=1, d_w =1, use_bias = False, name='g_k21_conv', reuse = is_reuse), train=is_training, reuse = is_reuse))\r\n k2_2 = elu(self.g_bn2_2(conv2d(k2_1, self.gf_dim*6, d_h=1, d_w =1, use_bias = False, name='g_k22_conv', reuse = is_reuse), train=is_training, reuse = is_reuse))\r\n #k2_3 = maxpool2d(k2_2, k=2, padding='VALID')\r\n k3_0 = elu(self.g_bn3_0(conv2d(k2_2, self.gf_dim*6, d_h=2, d_w =2, use_bias = False, name='g_k30_conv', reuse = is_reuse), train=is_training, reuse = is_reuse))\r\n k3_1 = elu(self.g_bn3_1(conv2d(k3_0, self.gf_dim*4, d_h=1, d_w =1, use_bias = False, name='g_k31_conv', reuse = is_reuse), train=is_training, reuse = is_reuse))\r\n k3_2 = elu(self.g_bn3_2(conv2d(k3_1, self.gf_dim*8, d_h=1, d_w =1, use_bias = False, name='g_k32_conv', reuse = is_reuse), train=is_training, reuse = is_reuse))\r\n #k3_3 = maxpool2d(k3_2, k=2, padding='VALID')\r\n k4_0 = 
elu(self.g_bn4_0(conv2d(k3_2, self.gf_dim*8, d_h=2, d_w =2, use_bias = False, name='g_k40_conv', reuse = is_reuse), train=is_training, reuse = is_reuse))\r\n k4_1 = elu(self.g_bn4_1(conv2d(k4_0, self.gf_dim*5, d_h=1, d_w =1, use_bias = False, name='g_k41_conv', reuse = is_reuse), train=is_training, reuse = is_reuse))\r\n \r\n\r\n # M\r\n k51_m = self.g_bn5_m( conv2d(k4_1, int(self.gfc_dim/5), d_h=1, d_w =1, name='g_k5_m_conv', reuse = is_reuse), train=is_training, reuse = is_reuse)\r\n k51_shape_ = get_shape(k51_m)\r\n k52_m = tf.nn.avg_pool(k51_m, ksize = [1, k51_shape_[1], k51_shape_[2], 1], strides = [1,1,1,1],padding = 'VALID')\r\n k52_m = tf.reshape(k52_m, [-1, int(self.gfc_dim/5)])\r\n k6_m = linear(k52_m, self.mDim, 'g_k6_m_lin', reuse = is_reuse)\r\n \r\n # Il\r\n k51_il = self.g_bn5_il( conv2d(k4_1, int(self.gfc_dim/5), d_h=1, d_w =1, name='g_k5_il_conv', reuse = is_reuse), train=is_training, reuse = is_reuse)\r\n k52_il = tf.nn.avg_pool(k51_il, ksize = [1, k51_shape_[1], k51_shape_[2], 1], strides = [1,1,1,1],padding = 'VALID')\r\n k52_il = tf.reshape(k52_il, [-1, int(self.gfc_dim/5)])\r\n k6_il = linear(k52_il, self.ilDim, 'g_k6_il_lin', reuse = is_reuse)\r\n\r\n # Shape\r\n k51_shape = self.g_bn5_shape(conv2d(k4_1, self.gfc_dim/2, d_h=1, d_w =1, name='g_k5_shape_conv', reuse = is_reuse), train=is_training, reuse = is_reuse)\r\n k52_shape = tf.nn.avg_pool(k51_shape, ksize = [1, k51_shape_[1], k51_shape_[2], 1], strides = [1,1,1,1],padding = 'VALID')\r\n k52_shape = tf.reshape(k52_shape, [-1, int(self.gfc_dim/2)])\r\n\r\n # Albedo\r\n k51_tex = self.g_bn5_tex( conv2d(k4_1, self.gfc_dim/2, d_h=1, d_w =1, name='g_k5_tex_conv', reuse = is_reuse), train=is_training, reuse = is_reuse)\r\n k52_tex = tf.nn.avg_pool(k51_tex, ksize = [1, k51_shape_[1], k51_shape_[2], 1], strides = [1,1,1,1],padding = 'VALID')\r\n k52_tex = tf.reshape(k52_tex, [-1, int(self.gfc_dim/2)])\r\n \r\n return k52_shape, k52_tex, k6_m, k6_il\r\n\r\n def generator_decoder_shape(self, k52_shape, is_reuse=False, is_training=True):\r\n if False: ## This is for shape decoder as fully connected network (NOT FULLY COMPATIBLE WITH THE REST OF THE CODE)\r\n return self.generator_decoder_shape_1d(k52_shape, is_reuse, is_training)\r\n else: \r\n\r\n n_size = get_shape(k52_shape)\r\n n_size = n_size[0]\r\n\r\n vt2pixel_u, vt2pixel_v = load_3DMM_vt2pixel()\r\n\r\n\r\n #Vt2pix\r\n vt2pixel_u_const = tf.constant(vt2pixel_u[:-1], tf.float32)\r\n vt2pixel_v_const = tf.constant(vt2pixel_v[:-1], tf.float32)\r\n\r\n #if self.is_partbase_albedo:\r\n # shape_2d = self.generator_decoder_shape_2d_partbase(k52_shape, is_reuse, is_training)\r\n #else:\r\n # shape_2d = self.generator_decoder_shape_2d_v1(k52_shape, is_reuse, is_training) \r\n shape_2d = self.generator_decoder_shape_2d(k52_shape, is_reuse, is_training) \r\n\r\n vt2pixel_v_const_ = tf.tile(tf.reshape(vt2pixel_v_const, shape =[1,1,-1]), [n_size, 1,1])\r\n vt2pixel_u_const_ = tf.tile(tf.reshape(vt2pixel_u_const, shape =[1,1,-1]), [n_size, 1,1])\r\n\r\n shape_1d = tf.reshape(bilinear_sampler( shape_2d, vt2pixel_v_const_, vt2pixel_u_const_), shape=[n_size, -1])\r\n\r\n return shape_1d, shape_2d\r\n\r\n\r\n def generator_decoder_shape_1d(self, shape_fx, is_reuse=False, is_training=True):\r\n s6 = elu(self.g1_bn6(linear(k52_shape, 1000, scope= 'g_s6_lin', reuse = is_reuse), train=is_training, reuse = is_reuse), name=\"g_s6_prelu\")\r\n s7 = linear(s6, self.vertexNum*3, scope= 'g_s7_lin', reuse = is_reuse)\r\n\r\n return s7\r\n\r\n\r\n def generator_decoder_shape_2d(self, shape_fx, 
is_reuse=False, is_training=True):\r\n '''\r\n Create shape decoder network\r\n Output: 3d_shape [N, (self.vertexNum*3)]\r\n '''\r\n\r\n if not is_reuse:\r\n self.g2_bn0_0 = batch_norm(name='g_s_bn0_0')\r\n self.g2_bn0_1 = batch_norm(name='g_s_bn0_1')\r\n self.g2_bn0_2 = batch_norm(name='g_s_bn0_2') \r\n self.g2_bn1_0 = batch_norm(name='g_s_bn1_0')\r\n self.g2_bn1_1 = batch_norm(name='g_s_bn1_1')\r\n self.g2_bn1_2 = batch_norm(name='g_s_bn1_2')\r\n self.g2_bn2_0 = batch_norm(name='g_s_bn2_0')\r\n self.g2_bn2_1 = batch_norm(name='g_s_bn2_1')\r\n self.g2_bn2_2 = batch_norm(name='g_s_bn2_2')\r\n self.g2_bn3_0 = batch_norm(name='g_s_bn3_0')\r\n self.g2_bn3_1 = batch_norm(name='g_s_bn3_1')\r\n self.g2_bn3_2 = batch_norm(name='g_s_bn3_2')\r\n self.g2_bn4_0 = batch_norm(name='g_s_bn4_0')\r\n self.g2_bn4 = batch_norm(name='g_s_bn4')\r\n self.g2_bn5 = batch_norm(name='g_s_bn5')\r\n \r\n s_h = int(self.texture_size[0])\r\n s_w = int(self.texture_size[1])\r\n s32_h= int(s_h/32)\r\n s32_w= int(s_w/32)\r\n \r\n # project `z` and reshape\r\n h5 = linear(shape_fx, self.gfc_dim*s32_h*s32_w, scope= 'g_s5_lin', reuse = is_reuse)\r\n h5 = tf.reshape(h5, [-1, s32_h, s32_w, self.gfc_dim])\r\n h5 = elu(self.g2_bn5(h5, train=is_training, reuse = is_reuse))\r\n \r\n h4_1 = deconv2d(h5, self.gf_dim*5, name='g_s4', reuse = is_reuse)\r\n h4_1 = elu(self.g2_bn4(h4_1, train=is_training, reuse = is_reuse))\r\n h4_0 = deconv2d(h4_1, self.gf_dim*8, strides=[1,1], name='g_s40', reuse = is_reuse)\r\n h4_0 = elu(self.g2_bn4_0(h4_0, train=is_training, reuse = is_reuse))\r\n\r\n h3_2 = deconv2d(h4_0, self.gf_dim*8, strides=[2,2], name='g_s32', reuse = is_reuse)\r\n h3_2 = elu(self.g2_bn3_2(h3_2, train=is_training, reuse = is_reuse))\r\n h3_1 = deconv2d(h3_2, self.gf_dim*4, strides=[1,1], name='g_s31', reuse = is_reuse)\r\n h3_1 = elu(self.g2_bn3_1(h3_1, train=is_training, reuse = is_reuse))\r\n h3_0 = deconv2d(h3_1, self.gf_dim*6, strides=[1,1], name='g_s30', reuse = is_reuse)\r\n h3_0 = elu(self.g2_bn3_0(h3_0, train=is_training, reuse = is_reuse))\r\n\r\n h2_2 = deconv2d(h3_0, self.gf_dim*6, strides=[2,2], name='g_s22', reuse = is_reuse)\r\n h2_2 = elu(self.g2_bn2_2(h2_2, train=is_training, reuse = is_reuse))\r\n h2_1 = deconv2d(h2_2, self.gf_dim*3, strides=[1,1], name='g_s21', reuse = is_reuse)\r\n h2_1 = elu(self.g2_bn2_1(h2_1, train=is_training, reuse = is_reuse))\r\n h2_0 = deconv2d(h2_1, self.gf_dim*4, strides=[1,1], name='g_s20', reuse = is_reuse)\r\n h2_0 = elu(self.g2_bn2_0(h2_0, train=is_training, reuse = is_reuse))\r\n\r\n h1_2 = deconv2d(h2_0, self.gf_dim*4, strides=[2,2], name='g_s12', reuse = is_reuse)\r\n h1_2 = elu(self.g2_bn1_2(h1_2, train=is_training, reuse = is_reuse))\r\n h1_1 = deconv2d(h1_2, self.gf_dim*2, strides=[1,1], name='g_s11', reuse = is_reuse)\r\n h1_1 = elu(self.g2_bn1_1(h1_1, train=is_training, reuse = is_reuse))\r\n h1_0 = deconv2d(h1_1,self.gf_dim*2, strides=[1,1], name='g_s10', reuse = is_reuse)\r\n h1_0 = elu(self.g2_bn1_0(h1_0, train=is_training, reuse = is_reuse))\r\n\r\n h0_2 = deconv2d(h1_0, self.gf_dim*2, strides=[2,2], name='g_s02', reuse = is_reuse)\r\n h0_2 = elu(self.g2_bn0_2(h0_2, train=is_training, reuse = is_reuse))\r\n h0_1 = deconv2d(h0_2, self.gf_dim, strides=[1,1], name='g_s01', reuse = is_reuse)\r\n h0_1 = elu(self.g2_bn0_1(h0_1, train=is_training, reuse = is_reuse))\r\n \r\n h0 = 2*tf.nn.tanh(deconv2d(h0_1, self.c_dim, strides=[1,1], name='g_s0', reuse = is_reuse))\r\n \r\n return h0\r\n\r\n\r\n\r\n def generator_decoder_albedo(self, tex_fx, is_reuse=False, 
is_training=True):\r\n '''\r\n Create texture decoder network\r\n Output: uv_texture [N, self.texture_sz[0], self.texture_sz[1], self.c_dim]\r\n '''\r\n\r\n if not is_reuse:\r\n self.g1_bn0_0 = batch_norm(name='g_h_bn0_0')\r\n self.g1_bn0_1 = batch_norm(name='g_h_bn0_1')\r\n self.g1_bn0_2 = batch_norm(name='g_h_bn0_2') \r\n self.g1_bn1_0 = batch_norm(name='g_h_bn1_0')\r\n self.g1_bn1_1 = batch_norm(name='g_h_bn1_1')\r\n self.g1_bn1_2 = batch_norm(name='g_h_bn1_2')\r\n self.g1_bn2_0 = batch_norm(name='g_h_bn2_0')\r\n self.g1_bn2_1 = batch_norm(name='g_h_bn2_1')\r\n self.g1_bn2_2 = batch_norm(name='g_h_bn2_2')\r\n self.g1_bn3_0 = batch_norm(name='g_h_bn3_0')\r\n self.g1_bn3_1 = batch_norm(name='g_h_bn3_1')\r\n self.g1_bn3_2 = batch_norm(name='g_h_bn3_2')\r\n self.g1_bn4_0 = batch_norm(name='g_h_bn4_0')\r\n self.g1_bn4 = batch_norm(name='g_h_bn4')\r\n self.g1_bn5 = batch_norm(name='g_h_bn5')\r\n #self.g1_bn6 = batch_norm(name='g_s_bn6')\r\n \r\n s_h = int(self.texture_size[0])\r\n s_w = int(self.texture_size[1])\r\n s32_h= int(s_h/32)\r\n s32_w= int(s_w/32)\r\n\r\n df = int(self.gf_dim)\r\n \r\n # project `z` and reshape\r\n h5 = linear(tex_fx, df*10*s32_h*s32_w, scope= 'g_h5_lin', reuse = is_reuse)\r\n h5 = tf.reshape(h5, [-1, s32_h, s32_w, df*10])\r\n h5 = elu(self.g1_bn5(h5, train=is_training, reuse = is_reuse))\r\n \r\n h4_1 = deconv2d(h5, df*5, name='g_h4', reuse = is_reuse)\r\n h4_1 = elu(self.g1_bn4(h4_1, train=is_training, reuse = is_reuse))\r\n h4_0 = deconv2d(h4_1, df*8, strides=[1,1], name='g_h40', reuse = is_reuse)\r\n h4_0 = elu(self.g1_bn4_0(h4_0, train=is_training, reuse = is_reuse))\r\n\r\n h3_2 = deconv2d(h4_0, df*8, strides=[2,2], name='g_h32', reuse = is_reuse)\r\n h3_2 = elu(self.g1_bn3_2(h3_2, train=is_training, reuse = is_reuse))\r\n h3_1 = deconv2d(h3_2, df*4, strides=[1,1], name='g_h31', reuse = is_reuse)\r\n h3_1 = elu(self.g1_bn3_1(h3_1, train=is_training, reuse = is_reuse))\r\n h3_0 = deconv2d(h3_1, df*6, strides=[1,1], name='g_h30', reuse = is_reuse)\r\n h3_0 = elu(self.g1_bn3_0(h3_0, train=is_training, reuse = is_reuse))\r\n\r\n h2_2 = deconv2d(h3_0, df*6, strides=[2,2], name='g_h22', reuse = is_reuse)\r\n h2_2 = elu(self.g1_bn2_2(h2_2, train=is_training, reuse = is_reuse))\r\n h2_1 = deconv2d(h2_2, df*3, strides=[1,1], name='g_h21', reuse = is_reuse)\r\n h2_1 = elu(self.g1_bn2_1(h2_1, train=is_training, reuse = is_reuse))\r\n h2_0 = deconv2d(h2_1, df*4, strides=[1,1], name='g_h20', reuse = is_reuse)\r\n h2_0 = elu(self.g1_bn2_0(h2_0, train=is_training, reuse = is_reuse))\r\n\r\n h1_2 = deconv2d(h2_0, df*4, strides=[2,2], name='g_h12', reuse = is_reuse)\r\n h1_2 = elu(self.g1_bn1_2(h1_2, train=is_training, reuse = is_reuse))\r\n h1_1 = deconv2d(h1_2, df*2, strides=[1,1], name='g_h11', reuse = is_reuse)\r\n h1_1 = elu(self.g1_bn1_1(h1_1, train=is_training, reuse = is_reuse))\r\n h1_0 = deconv2d(h1_1,df*2, strides=[1,1], name='g_h10', reuse = is_reuse)\r\n h1_0 = elu(self.g1_bn1_0(h1_0, train=is_training, reuse = is_reuse))\r\n\r\n h0_2 = deconv2d(h1_0, df*2, strides=[2,2], name='g_h02', reuse = is_reuse)\r\n h0_2 = elu(self.g1_bn0_2(h0_2, train=is_training, reuse = is_reuse))\r\n h0_1 = deconv2d(h0_2, df, strides=[1,1], name='g_h01', reuse = is_reuse)\r\n h0_1 = elu(self.g1_bn0_1(h0_1, train=is_training, reuse = is_reuse))\r\n \r\n h0 = tf.nn.tanh(deconv2d(h0_1, self.c_dim, strides=[1,1], name='g_h0', reuse = is_reuse))\r\n \r\n return h0\r\n\r\n \r\n @property\r\n def model_dir(self):\r\n return \"\" # \"%s_%s_%s_%s_%s_%s_%s\" % (self.dataset_name, 
self.batch_size, self.output_size, self.gf_dim, self.gfc_dim, self.df_dim, self.dfc_dim)\r\n \r\n def save(self, checkpoint_dir, step):\r\n model_name = \"Nonlinear3DMM.model\"\r\n checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)\r\n\r\n if not os.path.exists(checkpoint_dir):\r\n os.makedirs(checkpoint_dir)\r\n\r\n self.saver.save(self.sess, os.path.join(checkpoint_dir, model_name), global_step=step)\r\n print(\" Saved checkpoint %s-%d\" % (os.path.join(checkpoint_dir, model_name), step))\r\n\r\n def load(self, checkpoint_dir):\r\n import re\r\n print(\" [*] Reading checkpoints...\")\r\n checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)\r\n\r\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\r\n if ckpt and ckpt.model_checkpoint_path:\r\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\r\n\r\n self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))\r\n counter = int(next(re.finditer(\"(\\d+)(?!.*\\d)\",ckpt_name)).group(0))\r\n print(\" [*] Success to read {}\".format(ckpt_name))\r\n\r\n\r\n return True, counter\r\n else:\r\n print(\" [*] Failed to find a checkpoint\")\r\n\r\n return False, 0\r\n\r\n\r\n\r\n\r\n" ]
[ [ "tensorflow.ones_like", "numpy.load", "tensorflow.train.get_checkpoint_state", "numpy.mean", "tensorflow.reshape", "tensorflow.ones", "tensorflow.stack", "tensorflow.global_variables_initializer", "tensorflow.nn.avg_pool", "numpy.concatenate", "tensorflow.trainable_variables", "numpy.divide", "numpy.random.random_integers", "tensorflow.concat", "tensorflow.read_file", "tensorflow.train.Saver", "tensorflow.norm", "tensorflow.constant", "numpy.random.randint", "numpy.transpose", "tensorflow.split", "numpy.mod", "tensorflow.abs", "numpy.array", "tensorflow.zeros", "tensorflow.train.AdamOptimizer", "numpy.reshape", "numpy.zeros", "tensorflow.expand_dims", "numpy.random.shuffle", "tensorflow.nn.l2_loss", "numpy.std", "numpy.subtract", "tensorflow.placeholder", "tensorflow.reduce_sum", "tensorflow.image.flip_left_right", "tensorflow.multiply", "tensorflow.image.decode_png", "tensorflow.reduce_mean" ] ]
JayantBenjamin/wordle-solver
[ "3c83dfb9c0300ed402601cfb12b704fec66e7478" ]
[ "wordle-app.py" ]
[ "import pandas as pd \nfrom wordfreq import word_frequency\n\nwords = pd.read_csv(\"subdhkosh.csv\") \n# Preview the first 5 lines of the loaded data \nrows, columns = words.shape\n#print(columns)\n#print(rows)\n#print(type(words.iat[1,0]))\n\n\n#print(l_inc_pos)\n#print(l_inc_pos[0])\n#print(words.iat[12477,0])\n\n############################\n############################\ndef search():\n\tstring =\"\"\n\tguess = []\n\tcorrect = []\n\ti =0\n\tj=0\n\tl_inc_pos = input(\"Enter positionally incorrect letters: \") #letter incorrect position\n\tl_inc_pos = l_inc_pos.upper() \n\tl_inc = input(\"Enter incorrect letters: \")#letter incorrect\n\tl_inc = l_inc.upper()\n\tl_pos = input(\"Do you know the green letters? y/n\")\n\tif(l_pos.upper()==\"Y\"):\n\t\tstring1 = input(\"First letter?\")\n\t\tcorrect.append(string1.upper())\n\t\tstring1 = input(\"Second letter?\")\n\t\tcorrect.append(string1.upper())\n\t\tstring1 = input(\"Third letter?\")\n\t\tcorrect.append(string1.upper())\n\t\tstring1 = input(\"Fourth letter?\")\n\t\tcorrect.append(string1.upper())\n\t\tstring1 = input(\"Fifth letter?\")\n\t\tcorrect.append(string1.upper())\n\n\t\n\twhile i < 12478:\n\t\tstring=words.iat[i,0]\n\t\tif(string.find(l_inc_pos[0])!=-1):\n\t\t\tguess.append(words.iat[i,0])\n\t\ti+=1\n\t# print(\"********\")\n # print(guess)\n\t# print(len(guess))\n\t# print(len(l_inc_pos))\n\t# print(\" \")\n\t# l_inc_pos=l_inc_pos[1:]\n\t# print(l_inc_pos)\n\t#print(\"********\")\n\t#############################\n\tj=0\n\ti=0\n\twhile j<len(l_inc_pos):\n\t#print(j)\n\t\ti=0\n\t\twhile i<len(guess):\n\t\t\tstring=guess[i]\n\t\t\t#l_inc_pos=l_inc_pos.capitalize()\n\t\t\t#print(string)\n\t\t\t#print(i)\n\t\t\t#print(l_inc_pos[j])\n\t\t\t#result=string.find(l_inc_pos[j])\n\t\t\t#print(string.find(l_inc_pos[j]))\n\t\t\tif(string.find(l_inc_pos[j])==-1): #if I can't find the letter I delete it from guess array \n\t\t\t\tguess.remove(guess[i])\n\t\t\t\ti-=1\n\t\t\ti+=1\n\t\tj+=1\n\t#print(guess)\n\t#print(\" \")\n\t#############################\n\t#############################\n\t#######parsing incorrect#####\n\t#############################\n\tj=0\n\ti=0\n\tstring=\"\"\n\t#print(l_inc)\n\twhile j<len(l_inc):\n\t#print(j)\n\t\ti=0\n\t\twhile i<len(guess):\n\t\t\tstring=guess[i]\n\t\t\t#print(\"*****\")\n\t\t\t#l_inc_pos=l_inc_pos.capitalize()\n\t\t\t#print(string)\n\t\t\t#print(i)\n\t\t\t#print(l_inc_pos[j])\n\t\t\t#print(string.find(l_inc_pos[j]))\n\t\t\t#print(\"*****\")\n\t\t\tif(string.find(l_inc[j])!=-1): #if I CAN find the letter I delete it from guess array \n\t\t\t\tguess.remove(guess[i])\n\t\t\t\ti-=1 #since the element if removed we need to decreament the index\n\t\t\ti+=1\n\t\tj+=1\n\tj=0\n\ti=0\n\tstring1=\"\"\n\twhile j<5:\n\t\t# print(str(j))\n\t\ti=0\n\t\twhile i<len(guess):\n\t\t\t#print(correct)\n\t\t\tstring1=guess[i]\n\t\t\tif(correct[j]==\"\"):\n\t\t\t\tpass\n\t\t\telse :\n\t\t\t\t# print(string1[j])\n\t\t\t\t# print(correct[j])\n\t\t\t\tif(string1[j]!=correct[j]):\n\t\t\t\t\tguess.remove(guess[i])\n\t\t\t\t\ti-=1\n\t\t\ti+=1\n\t\tj+=1\n\t#################vowel %##############\n\tvowel = [0]*5 \n\ti=0\n\tj=0\n\tstring2=\"\"\n\twhile i<len(guess):\n\t\tj=0\n\t\twhile j<5:\n\t\t\tstring2=guess[i]\n\t\t\tif(string2[j]==\"A\"):\n\t\t\t\tvowel[0]+=1\n\t\t\telif (string2[j]=='E'):\n\t\t\t\tvowel[1]+=1\n\t\t\telif (string2[j]=='I'):\n\t\t\t\tvowel[2]+=1\n\t\t\telif (string2[j]=='O'):\n\t\t\t\tvowel[3]+=1\n\t\t\telif (string2[j]==\"U\"):\n\t\t\t\tvowel[4]+=1\n\t\t\t# string2=guess[i]\n\t\t\t# 
print(string2[j])\n\t\t\t\n\t\t\tj+=1\n\n\t\ti+=1\n\tj=0\n\tsum=0\n\tsum=vowel[0]+vowel[1]+vowel[2]+vowel[3]+vowel[4]\n\tprint(\"Chances of A \"+str(vowel[0]/sum*100))\n\tprint(\"Chances of E \"+str(vowel[1]/sum*100))\n\tprint(\"Chances of I \"+str(vowel[2]/sum*100))\n\tprint(\"Chances of O \"+str(vowel[3]/sum*100))\n\tprint(\"Chances of U \"+str(vowel[4]/sum*100))\n\t######################################\n\tdictionary ={\"word\":[],\"freq\":[]}\n\ti=0\n\tfrequency=0\n\tif(len(guess)<11):\n\t\tprint(\"enough words to calculate frequency\")\n\t\twhile(i<len(guess)):\n\t\t\tfrequency=word_frequency(guess[i], 'en')\n\t\t\tdictionary[\"word\"].append(guess[i])\n\t\t\tdictionary[\"freq\"].append(frequency)\n\t\t\ti+=1\n\n\telse:\n\t\tprint(\"Too many guesses for word frequency\")\n\ti=0\n\twhile(i<len(guess)):\n\t\tprint(guess[i])\n\t\ti+=1\n\t\n\tfor word,freq in dictionary.items():\n\t\tprint(word+\":\"+str(freq))\n\n############################\n############################\n\n#############################\n#######run it again?#####\n#############################\nk=0\nrun=\"\"\n\nwhile(run.upper()==\"\"):\n\tif(k==0):\n\t\trun = input(\"Search? y/n: \") \n\t\tif(run.upper()==\"Y\"):\n\t\t\tsearch()\n\t\t\trun=\"\"\n\telse:\n\t\trun = input(\"Search again? y/n: \")\n\t\tif(run.upper()==\"Y\"):\n\t\t\tsearch()\n\t\t\trun=\"\"\n\tk+=1\n\nif(run.upper()!=\"Y\"):\n\tprint(\"program terminated\")\n\n\n\n\n\t\n\n\n\n" ]
[ [ "pandas.read_csv" ] ]
sk-aravind/3D-Bounding-Boxes-From-Monocular-Images
[ "98e9e7caf98edc6a6841d3eac7bd6f62b6866e10" ]
[ "Run_with_2D.py" ]
[ "\"\"\"\n\nThis script utilises the a yolo network to detect pedestrians and cars \nfrom and images. The 2D detections are crop out and fed it into the model so that \nit can predict a 3D bounding box for each of the 2D detections\n\nThe script will plot the results of the 3D bounding box onto the image and display it\nusing cv2.show, press the space bar in order to move on to the next image\n\nImages to be evaluated should be placed in Kitti/validation/image_2 \n\nFLAGS:\n--val-img-path\nPlease specify the path to the images you wish to evaluate. \nPath default is Kitti/validation/image_2/\n\n--calb-path\nPlease specify the path containing camera calibration obtained from KITTI. \nPath default is Kitti/camera_cal/\n\n--show-2D\nShows yolonet's 2D BoundingBox detections of in a seperate image alongside the 3D regressed boxes\n\n\"\"\"\n\n\nimport os\nimport time\nimport cv2\nimport glob\nimport argparse\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torchvision.models as models\nfrom lib.DataUtils import *\nfrom lib.Utils import *\nfrom lib import Model, ClassAverages\nfrom yolo.yolo import cv_Yolo\n\n\ndef main():\n\n bins_no = 2\n\n use_cuda = torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n weights_path = os.path.abspath(os.path.dirname(__file__)) + '/weights'\n weight_list = [x for x in sorted(os.listdir(weights_path)) if x.endswith('.pkl')]\n if len(weight_list) == 0:\n print('We could not find any model weight to load, please train the model first!')\n exit()\n else:\n print('Using model weights : %s'%weight_list[-1])\n my_vgg = models.vgg19_bn(pretrained=True)\n model = Model.Model(features=my_vgg.features, bins=bins_no).to(device)\n if use_cuda: \n checkpoint = torch.load(weights_path + '/%s'%weight_list[-1])\n else: \n checkpoint = torch.load(weights_path + '/%s'%weight_list[-1],map_location='cpu')\n model.load_state_dict(checkpoint['model_state_dict'])\n model.eval()\n\n # Load Yolo\n yolo_path = os.path.abspath(os.path.dirname(__file__)) + '/weights'\n yolo = cv_Yolo(yolo_path)\n\n averages = ClassAverages.ClassAverages()\n angle_bins = generate_bins(bins_no)\n\n image_dir = FLAGS.val_img_path\n cal_dir = FLAGS.calb_path\n\n img_path = os.path.abspath(os.path.dirname(__file__)) + \"/\" + image_dir\n # using P_rect from global calibration file instead of per image calibration\n calib_path = os.path.abspath(os.path.dirname(__file__)) + \"/\" + cal_dir\n calib_file = calib_path + \"calib_cam_to_cam.txt\"\n # using P from each frame\n # calib_path = os.path.abspath(os.path.dirname(__file__)) + '/Kitti/testing/calib/'\n \n try:\n ids = [x.split('.')[0][-6:] for x in sorted(glob.glob(img_path+'/*.png'))]\n except:\n print(\"\\nError: There are no images in %s\"%img_path)\n exit()\n\n for id in ids:\n start_time = time.time()\n img_file = img_path + id + \".png\"\n\n # Read in image and make copy\n truth_img = cv2.imread(img_file)\n img = np.copy(truth_img)\n yolo_img = np.copy(truth_img)\n \n # Run Detection on yolo\n detections = yolo.detect(yolo_img)\n\n # For each 2D Detection\n for detection in detections:\n\n if not averages.recognized_class(detection.detected_class):\n continue\n # To catch errors should there be an invalid 2D detection\n try:\n object = DetectedObject(img, detection.detected_class, detection.box_2d, calib_file)\n except:\n continue\n\n theta_ray = object.theta_ray\n input_img = object.img\n proj_matrix = object.proj_matrix\n box_2d = detection.box_2d\n 
detected_class = detection.detected_class\n\n input_tensor = torch.zeros([1,3,224,224]).to(device)\n input_tensor[0,:,:,:] = input_img\n\n [orient, conf, dim] = model(input_tensor)\n orient = orient.cpu().data.numpy()[0, :, :]\n conf = conf.cpu().data.numpy()[0, :]\n dim = dim.cpu().data.numpy()[0, :]\n dim += averages.get_item(detected_class)\n\n argmax = np.argmax(conf)\n orient = orient[argmax, :]\n cos = orient[0]\n sin = orient[1]\n alpha = np.arctan2(sin, cos)\n alpha += angle_bins[argmax]\n alpha -= np.pi\n\n if FLAGS.show_2D:\n location = plot_regressed_3d_bbox(img, proj_matrix, box_2d, dim, alpha, theta_ray, truth_img)\n else:\n location = plot_regressed_3d_bbox(img, proj_matrix, box_2d, dim, alpha, theta_ray)\n \n print('Estimated pose: %s'%location)\n\n if FLAGS.show_2D:\n numpy_vertical = np.concatenate((truth_img, img), axis=0)\n cv2.imshow('SPACE for next image, any other key to exit', numpy_vertical)\n else:\n cv2.imshow('3D detections', img)\n\n print(\"\\n\")\n print('Got %s poses in %.3f seconds'%(len(detections), time.time() - start_time))\n print('-------------')\n\n \n if cv2.waitKey(0) != 32: # space bar\n exit()\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--val-img-path\", default=\"Kitti/validation/image_2/\",\n help=\"Please specify the path to the images you wish to evaluate on.\")\n\n parser.add_argument(\"--calb-path\", default=\"Kitti/camera_cal/\",\n help=\"Please specify the path containing camera calibration obtained from KITTI\")\n\n parser.add_argument(\"--show-2D\", action=\"store_true\",\n help=\"Shows the 2D BoundingBox detections of the object detection model on a separate image\")\n\n FLAGS = parser.parse_args()\n\n main()\n" ]
[ [ "numpy.concatenate", "torch.device", "torch.zeros", "numpy.copy", "torch.cuda.is_available", "numpy.argmax", "numpy.arctan2", "torch.load" ] ]
rzhangpku/EMAV
[ "91c364a359f698528f35966c89d47b1ccc2cfb64", "91c364a359f698528f35966c89d47b1ccc2cfb64" ]
[ "top_esim_quora.py", "top_bert_quora.py" ]
[ "\"\"\"\nTrain the ESIM model on the preprocessed SNLI dataset.\n\"\"\"\n# Aurelien Coet, 2018.\n\nfrom utils.utils_top_esim import train, validate\nfrom vaa.model import ESIM\nfrom vaa.model_top import TOP\nfrom vaa.data import NLIDataset\nfrom torch.utils.data import DataLoader\nimport torch.nn as nn\nimport matplotlib.pyplot as plt\nimport os\nimport argparse\nimport pickle\nimport sys\nimport json\nimport torch\nimport itertools\nimport matplotlib\nmatplotlib.use('Agg')\n\n\ndef main(train_file,\n valid_file,\n test_file,\n embeddings_file,\n target_dir,\n hidden_size=300,\n dropout=0.5,\n num_classes=3,\n epochs=64,\n batch_size=32,\n lr=0.0004,\n patience=5,\n max_grad_norm=10.0,\n checkpoint_model0=None,\n checkpoint_model1=None,\n finetuning=False):\n \"\"\"\n Train the ESIM model on the Quora dataset.\n\n Args:\n train_file: A path to some preprocessed data that must be used\n to train the model.\n valid_file: A path to some preprocessed data that must be used\n to validate the model.\n embeddings_file: A path to some preprocessed word embeddings that\n must be used to initialise the model.\n target_dir: The path to a directory where the trained model must\n be saved.\n hidden_size: The size of the hidden layers in the model. Defaults\n to 300.\n dropout: The dropout rate to use in the model. Defaults to 0.5.\n num_classes: The number of classes in the output of the model.\n Defaults to 3.\n epochs: The maximum number of epochs for training. Defaults to 64.\n batch_size: The size of the batches for training. Defaults to 32.\n lr: The learning rate for the optimizer. Defaults to 0.0004.\n patience: The patience to use for early stopping. Defaults to 5.\n checkpoint: A checkpoint from which to continue training. If None,\n training starts from scratch. 
Defaults to None.\n \"\"\"\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n print(20 * \"=\", \" Preparing for training \", 20 * \"=\")\n\n if not os.path.exists(target_dir):\n os.makedirs(target_dir)\n\n # -------------------- Data loading ------------------- #\n print(\"\\t* Loading training data...\")\n with open(train_file, \"rb\") as pkl:\n train_data = NLIDataset(pickle.load(pkl))\n\n train_loader = DataLoader(train_data, shuffle=True, batch_size=batch_size)\n\n print(\"\\t* Loading validation data...\")\n with open(valid_file, \"rb\") as pkl:\n valid_data = NLIDataset(pickle.load(pkl))\n\n valid_loader = DataLoader(valid_data, shuffle=False, batch_size=batch_size)\n\n print(\"\\t* Loading test data...\")\n with open(test_file, \"rb\") as pkl:\n test_data = NLIDataset(pickle.load(pkl))\n\n test_loader = DataLoader(test_data, shuffle=False, batch_size=batch_size)\n\n # -------------------- Model definition ------------------- #\n print(\"\\t* Building model...\")\n with open(embeddings_file, \"rb\") as pkl:\n embeddings = torch.tensor(pickle.load(pkl), dtype=torch.float)\\\n .to(device)\n\n model = []\n model0 = ESIM(embeddings.shape[0],\n embeddings.shape[1],\n hidden_size,\n embeddings=embeddings,\n dropout=0,\n num_classes=num_classes,\n device=device).to(device)\n model1 = TOP(embeddings.shape[0],\n embeddings.shape[1],\n hidden_size,\n embeddings=embeddings,\n dropout=dropout,\n num_classes=num_classes,\n device=device).to(device)\n model.append(model0)\n model.append(model1)\n\n # -------------------- Preparation for training ------------------- #\n criterion = nn.CrossEntropyLoss()\n if finetuning:\n optimizer = torch.optim.Adam(itertools.chain(model[0].parameters(), model[1].parameters()), lr=lr)\n else:\n optimizer = torch.optim.Adam(model[1].parameters(), lr=lr)\n\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,\n mode=\"max\",\n factor=0.5,\n patience=0)\n\n best_score = 0.0\n start_epoch = 1\n\n # Data for loss curves plot.\n epochs_count = []\n train_losses = []\n valid_losses = []\n\n # Continuing training from a checkpoint if one was given as argument.\n if checkpoint_model0:\n checkpoint = torch.load(checkpoint_model0)\n # start_epoch = checkpoint[\"epoch\"] + 1\n best_score = checkpoint[\"best_score\"]\n\n print(\"\\t* Training will continue on existing model from epoch {}...\"\n .format(start_epoch))\n\n model[0].load_state_dict(checkpoint[\"model\"])\n # optimizer.load_state_dict(checkpoint[\"optimizer\"])\n # epochs_count = checkpoint[\"epochs_count\"]\n # train_losses = checkpoint[\"train_losses\"]\n # valid_losses = checkpoint[\"valid_losses\"]\n if checkpoint_model1:\n checkpoint = torch.load(checkpoint_model1)\n start_epoch = checkpoint[\"epoch\"] + 1\n best_score = checkpoint[\"best_score\"]\n\n print(\"\\t* Training will continue on existing model from epoch {}...\"\n .format(start_epoch))\n\n model[1].load_state_dict(checkpoint[\"model\"])\n optimizer.load_state_dict(checkpoint[\"optimizer\"])\n epochs_count = checkpoint[\"epochs_count\"]\n train_losses = checkpoint[\"train_losses\"]\n valid_losses = checkpoint[\"valid_losses\"]\n else:\n model_dict = model1.state_dict()\n pretrained_dict = checkpoint[\"model\"]\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n model1.load_state_dict(model_dict)\n\n # Compute loss and accuracy before starting (or resuming) training.\n # _, valid_loss, valid_accuracy = validate(model,\n # valid_loader,\n 
# criterion)\n # print(\"\\t* Validation loss before training: {:.4f}, accuracy: {:.4f}%\"\n # .format(valid_loss, (valid_accuracy*100)))\n #\n # _, valid_loss, valid_accuracy = validate(model,\n # test_loader,\n # criterion)\n # print(\"\\t* test loss before training: {:.4f}, accuracy: {:.4f}%\"\n # .format(valid_loss, (valid_accuracy*100)))\n\n # -------------------- Training epochs ------------------- #\n print(\"\\n\",\n 20 * \"=\",\n \"Training ESIM model on device: {}\".format(device),\n 20 * \"=\")\n\n patience_counter = 0\n for epoch in range(start_epoch, epochs+1):\n epochs_count.append(epoch)\n\n print(\"* Training epoch {}:\".format(epoch))\n epoch_time, epoch_loss, epoch_accuracy = train(model,\n train_loader,\n optimizer,\n criterion,\n epoch,\n max_grad_norm)\n\n train_losses.append(epoch_loss)\n print(\"-> Training time: {:.4f}s, loss = {:.4f}, accuracy: {:.4f}%\"\n .format(epoch_time, epoch_loss, (epoch_accuracy*100)))\n\n print(\"* Validation for epoch {}:\".format(epoch))\n epoch_time, epoch_loss, epoch_accuracy = validate(model,\n valid_loader,\n criterion)\n\n valid_losses.append(epoch_loss)\n print(\"-> Valid. time: {:.4f}s, loss: {:.4f}, accuracy: {:.4f}%\\n\"\n .format(epoch_time, epoch_loss, (epoch_accuracy*100)))\n\n print(\"* test for epoch {}:\".format(epoch))\n epoch_time, epoch_loss, test_accuracy = validate(model,\n test_loader,\n criterion)\n\n print(\"-> test. time: {:.4f}s, loss: {:.4f}, accuracy: {:.4f}%\\n\"\n .format(epoch_time, epoch_loss, (test_accuracy*100)))\n\n sys.stdout.flush() # 刷新输出\n # Update the optimizer's learning rate with the scheduler.\n scheduler.step(epoch_accuracy)\n\n # Early stopping on validation accuracy.\n if epoch_accuracy < best_score:\n patience_counter += 1\n else:\n best_score = epoch_accuracy\n patience_counter = 0\n # Save the best model. The optimizer is not saved to avoid having\n # a checkpoint file that is too heavy to be shared. 
To resume\n # training from the best model, use the 'esim_*.pth.tar'\n # checkpoints instead.\n\n # torch.save({\"epoch\": epoch,\n # \"model\": model[0].state_dict(),\n # \"best_score\": best_score,\n # \"epochs_count\": epochs_count,\n # \"train_losses\": train_losses,\n # \"valid_losses\": valid_losses},\n # os.path.join(target_dir, \"best_model0.pth.tar\"))\n\n torch.save({\"epoch\": epoch,\n \"model\": model[1].state_dict(),\n \"best_score\": best_score,\n \"optimizer\": optimizer.state_dict(),\n \"epochs_count\": epochs_count,\n \"train_losses\": train_losses,\n \"valid_losses\": valid_losses},\n os.path.join(target_dir, \"best_model1.pth.tar\"))\n\n # Save the model at each epoch.\n # torch.save({\"epoch\": epoch,\n # \"model\": model[0].state_dict(),\n # \"best_score\": best_score,\n # \"optimizer\": optimizer.state_dict(),\n # \"epochs_count\": epochs_count,\n # \"train_losses\": train_losses,\n # \"valid_losses\": valid_losses},\n # os.path.join(target_dir, \"esim_model0{}.pth.tar\".format(epoch)))\n\n torch.save({\"epoch\": epoch,\n \"model\": model[1].state_dict(),\n \"best_score\": best_score,\n \"optimizer\": optimizer.state_dict(),\n \"epochs_count\": epochs_count,\n \"train_losses\": train_losses,\n \"valid_losses\": valid_losses},\n os.path.join(target_dir, \"esim_model1{}.pth.tar\".format(epoch)))\n\n if patience_counter >= patience:\n print(\"-> Early stopping: patience limit reached, stopping...\")\n break\n\n # Plotting of the loss curves for the train and validation sets.\n fig = plt.figure()\n plt.plot(epochs_count, train_losses, \"-r\")\n plt.plot(epochs_count, valid_losses, \"-b\")\n plt.xlabel(\"epoch\")\n plt.ylabel(\"loss\")\n plt.legend([\"Training loss\", \"Validation loss\"])\n plt.title(\"Cross entropy loss\")\n fig.savefig('quora_loss.png')\n # plt.show()\n\n\nif __name__ == \"__main__\":\n default_config = \"../../config/training/quora_training.json\"\n\n parser = argparse.ArgumentParser(\n description=\"Train the ESIM model on quora\")\n parser.add_argument(\"--config\",\n default=default_config,\n help=\"Path to a json configuration file\")\n\n script_dir = os.path.dirname(os.path.realpath(__file__))\n script_dir = script_dir + '/scripts/training'\n\n parser.add_argument(\"--checkpoint_model0\",\n default=os.path.dirname(os.path.realpath(__file__)) + '/data/checkpoints/quora/' +\"best.pth.tar\",\n help=\"Path to a checkpoint file to resume training\")\n\n parser.add_argument(\"--checkpoint_model1\",\n default=None,#os.path.dirname(os.path.realpath(__file__)) + '/data/checkpoints/quora/' +\"esim_model1{}.pth.tar\".format(9),\n help=\"Path to a checkpoint file to resume training\")\n args = parser.parse_args()\n\n if args.config == default_config:\n config_path = os.path.join(script_dir, args.config)\n else:\n config_path = args.config\n\n with open(os.path.normpath(config_path), 'r') as config_file:\n config = json.load(config_file)\n\n main(os.path.normpath(os.path.join(script_dir, config[\"train_data\"])),\n os.path.normpath(os.path.join(script_dir, config[\"valid_data\"])),\n os.path.normpath(os.path.join(script_dir, config[\"test_data\"])),\n os.path.normpath(os.path.join(script_dir, config[\"embeddings\"])),\n os.path.normpath(os.path.join(script_dir, config[\"target_dir\"])),\n config[\"hidden_size\"],\n config[\"dropout\"],\n config[\"num_classes\"],\n config[\"epochs\"],\n config[\"batch_size\"]//2,\n config[\"lr\"],\n config[\"patience\"],\n config[\"max_gradient_norm\"],\n args.checkpoint_model0,\n args.checkpoint_model1,\n finetuning=False)\n", 
"\"\"\"\nTrain the ESIM model on the preprocessed SNLI dataset.\n\"\"\"\n# Aurelien Coet, 2018.\n\nfrom utils.utils_top_transformer import train, validate\nfrom vaa.model_transformer import ESIM\nfrom vaa.model_transformer_top import TOP\nimport torch.nn as nn\nimport matplotlib.pyplot as plt\nimport os\nimport sys\nimport argparse\nimport json\nimport numpy as np\nimport pickle\nimport torch\nimport matplotlib\nimport itertools\nmatplotlib.use('Agg')\n\ndef transform_batch_data(data, batch_size=64, shuffle=True):\n data_batch = dict()\n data_batch['premises'] = dict()\n data_batch['hypotheses'] = dict()\n data_batch['labels'] = dict()\n index = np.arange(len(data['labels']))\n if shuffle:\n np.random.shuffle(index)\n\n idx = -1\n for i in range(len(index)):\n if i % batch_size == 0:\n idx += 1\n data_batch['premises'][idx] = []\n data_batch['hypotheses'][idx] = []\n data_batch['labels'][idx] = []\n data_batch['premises'][idx].append(data['premises'][index[i]])\n data_batch['hypotheses'][idx].append(data['hypotheses'][index[i]])\n data_batch['labels'][idx].append(int(data['labels'][index[i]]))\n return data_batch\n\n\ndef main(train_file,\n valid_file,\n test_file,\n target_dir,\n embedding_size=512,\n hidden_size=512,\n dropout=0.5,\n num_classes=3,\n epochs=64,\n batch_size=32,\n lr=0.0004,\n patience=5,\n max_grad_norm=10.0,\n checkpoint_model0=None,\n checkpoint_model1=None,\n finetuning=False):\n \"\"\"\n Train the ESIM model on the Quora dataset.\n\n Args:\n train_file: A path to some preprocessed data that must be used\n to train the model.\n valid_file: A path to some preprocessed data that must be used\n to validate the model.\n embeddings_file: A path to some preprocessed word embeddings that\n must be used to initialise the model.\n target_dir: The path to a directory where the trained model must\n be saved.\n hidden_size: The size of the hidden layers in the model. Defaults\n to 300.\n dropout: The dropout rate to use in the model. Defaults to 0.5.\n num_classes: The number of classes in the output of the model.\n Defaults to 3.\n epochs: The maximum number of epochs for training. Defaults to 64.\n batch_size: The size of the batches for training. Defaults to 32.\n lr: The learning rate for the optimizer. Defaults to 0.0004.\n patience: The patience to use for early stopping. Defaults to 5.\n checkpoint: A checkpoint from which to continue training. If None,\n training starts from scratch. 
Defaults to None.\n \"\"\"\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n print(20 * \"=\", \" Preparing for training \", 20 * \"=\")\n\n if not os.path.exists(target_dir):\n os.makedirs(target_dir)\n\n # -------------------- Data loading ------------------- #\n print(\"\\t* Loading training data...\")\n with open(train_file, \"rb\") as pkl:\n train_data = pickle.load(pkl)\n\n print(\"\\t* Loading validation data...\")\n with open(valid_file, \"rb\") as pkl:\n valid_data = pickle.load(pkl)\n valid_dataloader = transform_batch_data(valid_data, batch_size=batch_size, shuffle=False)\n\n print(\"\\t* Loading test data...\")\n with open(test_file, \"rb\") as pkl:\n test_data = pickle.load(pkl)\n test_dataloader = transform_batch_data(test_data, batch_size=batch_size, shuffle=False)\n\n # -------------------- Model definition ------------------- #\n print(\"\\t* Building model...\")\n model = []\n model0 = ESIM(embedding_size,\n hidden_size,\n dropout=0,\n num_classes=num_classes,\n device=device).to(device)\n\n model1 = TOP(embedding_size,\n hidden_size,\n dropout=dropout,\n num_classes=num_classes,\n device=device).to(device)\n model.append(model0)\n model.append(model1)\n\n\n # -------------------- Preparation for training ------------------- #\n criterion = nn.CrossEntropyLoss()\n if finetuning:\n optimizer = torch.optim.Adam(itertools.chain(model[0].parameters(), model[1].parameters()), lr=lr)\n else:\n optimizer = torch.optim.Adam(model[1].parameters(), lr=lr)\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,\n mode=\"max\",\n factor=0.5,\n patience=0)\n\n best_score = 0.0\n start_epoch = 1\n\n # Data for loss curves plot.\n epochs_count = []\n train_losses = []\n valid_losses = []\n\n # Continuing training from a checkpoint if one was given as argument.\n if checkpoint_model0:\n checkpoint = torch.load(checkpoint_model0)\n # start_epoch = checkpoint[\"epoch\"] + 1\n best_score = checkpoint[\"best_score\"]\n\n print(\"\\t* Training will continue on existing model from epoch {}...\"\n .format(start_epoch))\n\n model[0].load_state_dict(checkpoint[\"model\"])\n # optimizer.load_state_dict(checkpoint[\"optimizer\"])\n # epochs_count = checkpoint[\"epochs_count\"]\n # train_losses = checkpoint[\"train_losses\"]\n # valid_losses = checkpoint[\"valid_losses\"]\n if checkpoint_model1:\n checkpoint = torch.load(checkpoint_model1)\n start_epoch = checkpoint[\"epoch\"] + 1\n best_score = checkpoint[\"best_score\"]\n\n print(\"\\t* Training will continue on existing model from epoch {}...\"\n .format(start_epoch))\n\n model[1].load_state_dict(checkpoint[\"model\"])\n optimizer.load_state_dict(checkpoint[\"optimizer\"])\n epochs_count = checkpoint[\"epochs_count\"]\n train_losses = checkpoint[\"train_losses\"]\n valid_losses = checkpoint[\"valid_losses\"]\n else:\n print('load pretrained net1 to net2')\n model_dict = model1.state_dict()\n pretrained_dict = checkpoint[\"model\"]\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n model1.load_state_dict(model_dict)\n\n # Compute loss and accuracy before starting (or resuming) training.\n # _, valid_loss, valid_accuracy = validate(model,\n # valid_dataloader,\n # criterion)\n # print(\"\\t* Validation loss before training: {:.4f}, accuracy: {:.4f}%\"\n # .format(valid_loss, (valid_accuracy*100)))\n #\n # _, test_loss, test_accuracy = validate(model,\n # test_dataloader,\n # criterion)\n # print(\"\\t* test loss before training: {:.4f}, 
accuracy: {:.4f}%\"\n # .format(test_loss, (test_accuracy*100)))\n\n # -------------------- Training epochs ------------------- #\n print(\"\\n\",\n 20 * \"=\",\n \"Training ESIM model on device: {}\".format(device),\n 20 * \"=\")\n\n patience_counter = 0\n for epoch in range(start_epoch, epochs+1):\n train_dataloader = transform_batch_data(train_data, batch_size=batch_size, shuffle=True)\n\n epochs_count.append(epoch)\n print(\"* Training epoch {}:\".format(epoch))\n epoch_time, epoch_loss, epoch_accuracy = train(model,\n train_dataloader,\n optimizer,\n criterion,\n epoch,\n max_grad_norm)\n\n train_losses.append(epoch_loss)\n print(\"-> Training time: {:.4f}s, loss = {:.4f}, accuracy: {:.4f}%\"\n .format(epoch_time, epoch_loss, (epoch_accuracy*100)))\n\n print(\"* Validation for epoch {}:\".format(epoch))\n epoch_time, epoch_loss, epoch_accuracy = validate(model,\n valid_dataloader,\n criterion)\n\n valid_losses.append(epoch_loss)\n print(\"-> Valid. time: {:.4f}s, loss: {:.4f}, accuracy: {:.4f}%\\n\"\n .format(epoch_time, epoch_loss, (epoch_accuracy*100)))\n\n print(\"* Test for epoch {}:\".format(epoch))\n epoch_time, epoch_loss, test_accuracy = validate(model,\n test_dataloader,\n criterion)\n print(\"-> Test. time: {:.4f}s, loss: {:.4f}, accuracy: {:.4f}%\\n\"\n .format(epoch_time, epoch_loss, (test_accuracy*100)))\n\n sys.stdout.flush() #刷新输出\n # Update the optimizer's learning rate with the scheduler.\n scheduler.step(epoch_accuracy)\n\n # Early stopping on validation accuracy.\n if epoch_accuracy < best_score:\n patience_counter += 1\n else:\n best_score = epoch_accuracy\n patience_counter = 0\n # Save the best model. The optimizer is not saved to avoid having\n # a checkpoint file that is too heavy to be shared. To resume\n # training from the best model, use the 'esim_*.pth.tar'\n # checkpoints instead.\n torch.save({\"epoch\": epoch,\n \"model\": model[0].state_dict(),\n \"best_score\": best_score,\n \"optimizer\": optimizer.state_dict(),\n \"epochs_count\": epochs_count,\n \"train_losses\": train_losses,\n \"valid_losses\": valid_losses},\n os.path.join(target_dir, \"best_model0.pth.tar\"))\n\n torch.save({\"epoch\": epoch,\n \"model\": model[1].state_dict(),\n \"best_score\": best_score,\n \"optimizer\": optimizer.state_dict(),\n \"epochs_count\": epochs_count,\n \"train_losses\": train_losses,\n \"valid_losses\": valid_losses},\n os.path.join(target_dir, \"best_model1.pth.tar\"))\n\n # Save the model at each epoch.\n torch.save({\"epoch\": epoch,\n \"model\": model[0].state_dict(),\n \"best_score\": best_score,\n \"optimizer\": optimizer.state_dict(),\n \"epochs_count\": epochs_count,\n \"train_losses\": train_losses,\n \"valid_losses\": valid_losses},\n os.path.join(target_dir, \"esim_model0{}.pth.tar\".format(epoch)))\n torch.save({\"epoch\": epoch,\n \"model\": model[1].state_dict(),\n \"best_score\": best_score,\n \"optimizer\": optimizer.state_dict(),\n \"epochs_count\": epochs_count,\n \"train_losses\": train_losses,\n \"valid_losses\": valid_losses},\n os.path.join(target_dir, \"esim_model1{}.pth.tar\".format(epoch)))\n\n if patience_counter >= patience:\n print(\"-> Early stopping: patience limit reached, stopping...\")\n break\n\n # Plotting of the loss curves for the train and validation sets.\n fig = plt.figure()\n plt.plot(epochs_count, train_losses, \"-r\")\n plt.plot(epochs_count, valid_losses, \"-b\")\n plt.xlabel(\"epoch\")\n plt.ylabel(\"loss\")\n plt.legend([\"Training loss\", \"Validation loss\"])\n plt.title(\"Cross entropy loss\")\n 
fig.savefig('quora_loss.png')\n\n\nif __name__ == \"__main__\":\n default_config = \"../../config/training/quora_training_bert.json\"\n\n parser = argparse.ArgumentParser(\n description=\"Train the ESIM model on quora\")\n parser.add_argument(\"--config\",\n default=default_config,\n help=\"Path to a json configuration file\")\n\n script_dir = os.path.dirname(os.path.realpath(__file__))\n script_dir = script_dir + '/scripts/training'\n\n parser.add_argument(\"--checkpoint_model0\",\n default=os.path.dirname(os.path.realpath(__file__)) + '/data/checkpoints/quora/bert/' +\"loss_20.pth.tar\",\n help=\"Path to a checkpoint file to resume training\")\n\n parser.add_argument(\"--checkpoint_model1\",\n default=None,#os.path.dirname(os.path.realpath(__file__)) + '/data/checkpoints/quora/bert/' +\"esim_model1{}.pth.tar\".format(1),\n help=\"Path to a checkpoint file to resume training\")\n\n args = parser.parse_args()\n\n if args.config == default_config:\n config_path = os.path.join(script_dir, args.config)\n else:\n config_path = args.config\n\n with open(os.path.normpath(config_path), 'r') as config_file:\n config = json.load(config_file)\n\n main(os.path.normpath(os.path.join(script_dir, config[\"train_data\"])),\n os.path.normpath(os.path.join(script_dir, config[\"valid_data\"])),\n os.path.normpath(os.path.join(script_dir, config[\"test_data\"])),\n os.path.normpath(os.path.join(script_dir, config[\"target_dir\"])),\n config[\"embedding_size\"],\n config[\"hidden_size\"],\n config[\"dropout\"],\n config[\"num_classes\"],\n config[\"epochs\"],\n config[\"batch_size\"]//2,\n config[\"lr\"],\n config[\"patience\"],\n config[\"max_gradient_norm\"],\n args.checkpoint_model0,\n args.checkpoint_model1,\n finetuning=False)\n" ]
[ [ "matplotlib.use", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "torch.cuda.is_available", "torch.optim.lr_scheduler.ReduceLROnPlateau", "torch.utils.data.DataLoader", "matplotlib.pyplot.ylabel", "torch.load", "torch.nn.CrossEntropyLoss" ], [ "matplotlib.use", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "numpy.random.shuffle", "matplotlib.pyplot.figure", "torch.cuda.is_available", "torch.optim.lr_scheduler.ReduceLROnPlateau", "matplotlib.pyplot.ylabel", "torch.load", "torch.nn.CrossEntropyLoss" ] ]
D-own-T-o-P-rogramme/PK_Model
[ "a413a4c8edc2dc76513afb07b312acb34f3f6729" ]
[ "interactive_pkmodel.py" ]
[ "import matplotlib.pylab as plt\nimport pathlib\n\n\ndef make_model():\n from pkmodel import model\n from pkmodel import protocol\n from pkmodel import solution\n Vc, Vps, Qps, CL = input_model()\n model = model.Model(Vc=Vc, Vps=Vps, Qps=Qps, CL=CL)\n dose, sub, k_a, cont, cont_period, inst, dose_times = input_protocol()\n protocol = protocol.Protocol(dose, sub, k_a, cont, cont_period,\n inst, dose_times)\n sol = solution.Solution(model, protocol)\n return sol\n\n\ndef input_model():\n print('\\nEnter volume of central compartment:')\n Vc = float(input(\"Vc (mL) = \"))\n print('\\nEnter number of peripheral compartments:')\n n = int(input(\"n = \"))\n Vps = []\n Qps = []\n for i in range(n):\n print('\\nEnter volume of peripheral compartment %d:' % (i + 1))\n Vp = input(\"Vp (mL) = \")\n Vps.append(float(Vp))\n print('\\nEnter transition rate between central and '\n 'peripheral compartment %d:' % (i + 1))\n Qp = input(\"Qp (mL/h) = \")\n Qps.append(float(Qp))\n print('\\nEnter clearance rate from central compartment:')\n CL = float(input(\"CL (mL/h) = \"))\n return Vc, Vps, Qps, CL\n\n\ndef input_k_a():\n print('\\nDosing protocol can be either subcutaneous (s) '\n 'OR intravenous bolus (i) ')\n protocol = str(input('Enter dosing protocol: (s/i) [i] ') or 'i')\n if (protocol != 's' and protocol != 'i'):\n print('Could not interpret protocol. '\n 'Running with default (intravenous)')\n sub = False\n k_a = 1\n if protocol == 's':\n sub = True\n print('\\nEnter absorption rate of drug '\n 'administration for subcutaneous dosing:')\n k_a = input('k_a (/h): [1] ') or 1\n try:\n k_a = float(k_a)\n except ValueError:\n print('Could not interpret k_a. Running with default (1 /h)')\n k_a = 1\n return sub, k_a\n\n\ndef input_inst():\n print('\\nEnter the number of instantaneous doses '\n 'of X ng (default=1): ')\n n_inst = input('n = ') or 1\n dose_times = []\n inst = True\n try:\n n_inst = int(n_inst)\n except ValueError:\n print('Could not interpret n. Running with default (1)')\n n_inst = 1\n if n_inst < 1:\n inst = False\n n_inst = 0\n else:\n d0 = input('Time (in hours) of first dose: [0] ') or 0\n try:\n d0 = float(d0)\n except ValueError:\n print('Could not interpret time. Running with default (0)')\n d0 = 0\n dose_times.append(d0)\n for i in range(n_inst - 1):\n d = input('Time (in hours) of dose %d: ' % (i + 2))\n try:\n d = float(d)\n dose_times.append(d)\n except ValueError:\n print('Could not interpret time. Running with default (None)')\n return inst, dose_times\n\n\ndef input_cont():\n print('\\nEnter whether the dose is applied '\n 'at a continuous rate of X ng per hour: ')\n cont = str(input('Continuous?: (y/n) [n] ') or 'n')\n if (cont != 'y' and cont != 'n'):\n print('Could not interpret protocol. '\n 'Running with default (not continuous)')\n continuous = False\n cont_period = [0, 0]\n if cont == 'y':\n continuous = True\n print('Enter time in hours at which continuous '\n 'dosing begins (default=0):')\n t0 = float(input('t0 = ') or 0)\n print('Enter time in hours at which continuous '\n 'dosing ends (default=0):')\n tf = float(input('tf = ') or 0)\n cont_period = [t0, tf]\n return continuous, cont_period\n\n\ndef input_protocol():\n print('\\nEnter protocol of drug administration:')\n dose = float(input('Enter the dose amount in ng: [1] ') or 1.)\n try:\n dose = float(dose)\n except ValueError:\n print('Could not dose amount. 
Running with default (1 ng)')\n dose = 1\n\n sub, k_a = input_k_a()\n continuous, cont_period = input_cont()\n inst, dose_times = input_inst()\n\n return dose, sub, k_a, continuous, cont_period, inst, dose_times\n\n\ndef ask_show():\n show = str(input('\\nShow the plot in pop-up window? (y/n) [y] \\n') or 'y')\n if show != 'n' and show != 'y':\n print('Could not interpret input. Running with default (y)')\n return True\n if show == 'y':\n return True\n elif show == 'n':\n return False\n\n\ndef ask_save():\n save = str(input('\\nSave the figure? (y/n) [n] \\n') or 'n')\n if save != 'n' and save != 'y':\n print('Could not interpret input. Running with default (n)')\n return False\n if save == 'n':\n return False\n elif save == 'y':\n default_path = str(pathlib.Path(__file__).parent.absolute()) + '/'\n filename = input('Enter filename for figure [\\'pkplot.pdf\\']: ')\n path = input('Enter path for figure [%s]: ' % default_path)\n if path:\n filepath = '' + path + filename\n else:\n filepath = '' + default_path + filename\n print('Saving image at %s.pdf' % filepath)\n return filepath\n\n\ndef print_intro():\n print('\\n=====================================')\n print('PK MODEL: SIMULATING PHARMACOKINETICS')\n print('=====================================\\n \\n')\n print('This is a package to run a user-specifed pharmacokinetic '\n 'model (PK model).')\n print('The user can specify the number of peripheral compartments '\n 'around a central compartment,')\n print('a dosing type (I.V. or S.C.), and a dosing protocol. '\n 'A solver will solve the differential')\n print('equations that model the pharmacokinetics of the compartments, '\n 'and graphs comparing the')\n print('solutions of different model parameters will be outputted.')\n print('Default values are within brackets (e.g. [0]).\\n \\n')\n\n print('Enter the parameters of the main model and protocol:')\n print('___________________________________________________')\n\n\nif __name__ == \"__main__\":\n print_intro()\n solution1 = make_model()\n\n print(' \\n================ \\nPreparing plots. \\n================')\n separate = input('\\nSeparate the plots by compartment? (y/n) [n] ') or 'n'\n compare = input('\\nCompare the plot with another model? (y/n) [n] ') or 'n'\n if compare == 'y':\n print('\\nEnter the parameters of the second model and protocol.')\n print('_____________________________________________________')\n solution2 = make_model()\n if separate == 'y':\n fig = solution1.generate_plot(solution2, True, False, ask_save())\n else:\n fig = solution1.generate_plot(solution2, False, False, ask_save())\n elif separate == 'y':\n fig = solution1.generate_plot(None, True, False, ask_save())\n else:\n fig = solution1.generate_plot(None, False, False, ask_save())\n if ask_show():\n plt.show()\n" ]
[ [ "matplotlib.pylab.show" ] ]
thw17/fibermorph
[ "19db9d7e04c98648ab3e8f5999e47cf14249e4c7" ]
[ "fibermorph/demo.py" ]
[ "# %% import\n\nimport os\nimport pathlib\nimport requests\nimport shutil\nimport sys\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\n\n# sys.path.append(os.path.join(os.path.dirname(__file__), '../'))\nimport dummy_data\nimport fibermorph\n\n\n\n\n\n# %% functions\n\ndef create_results_cache():\n relpath = \"fibermorph/demo\"\n datadir = pathlib.Path.cwd().joinpath(relpath)\n cache = fibermorph.make_subdirectory(datadir, \"results_cache\")\n\n # Designate where fibermorph should make the directory with all your results - this location must exist!\n os.makedirs(cache, exist_ok=True)\n output_directory = os.path.abspath(cache)\n\n return output_directory\n\n\ndef delete_results_cache():\n cache = pathlib.Path.cwd().joinpath(\"fibermorph/demo/results_cache\")\n\n print(\"Deleting {}\".format(os.path.abspath(cache)))\n shutil.rmtree(cache)\n\n return True\n\n\ndef url_files(im_type):\n\n if im_type == \"curv\":\n\n demo_url = [\n \"https://github.com/tinalasisi/fibermorph_DemoData/raw/master/test_input/curv/004_demo_curv.tiff\",\n \"https://github.com/tinalasisi/fibermorph_DemoData/raw/master/test_input/curv/027_demo_nocurv.tiff\"]\n\n return demo_url\n\n elif im_type == \"section\":\n\n demo_url = [\n \"https://github.com/tinalasisi/fibermorph_DemoData/raw/master/test_input/section/140918_demo_section.tiff\",\n \"https://github.com/tinalasisi/fibermorph_DemoData/raw/master/test_input/section/140918_demo_section2.tiff\"]\n\n return demo_url\n\n\ndef download_im(tmpdir, demo_url):\n\n for u in demo_url:\n r = requests.get(u, allow_redirects=True)\n open(str(tmpdir.joinpath(pathlib.Path(u).name)), \"wb\").write(r.content)\n\n return True\n\n\ndef get_data(im_type=\"all\"):\n relpath = \"fibermorph/demo/data\"\n datadir = pathlib.Path.cwd().joinpath(relpath)\n datadir = fibermorph.make_subdirectory(datadir, \"tmpdata\")\n\n if im_type == \"curv\" or im_type == \"section\":\n tmpdir = fibermorph.make_subdirectory(datadir, im_type)\n urllist = url_files(im_type)\n\n download_im(tmpdir, urllist)\n return tmpdir\n\n else:\n typelist = [\"curv\", \"section\"]\n for im_type in typelist:\n tmpdir = fibermorph.make_subdirectory(datadir, im_type)\n urllist = url_files(im_type)\n\n download_im(tmpdir, urllist)\n\n return True\n\n\ndef teardown_data(append=\"\"):\n datadir = pathlib.Path.cwd().joinpath(\"fibermorph/demo/data/tmpdata/\"+append)\n\n print(\"Deleting {}\".format(str(datadir.resolve())))\n\n try:\n shutil.rmtree(datadir)\n except FileNotFoundError:\n print(\"The file doesn't exist. 
Nothing has been deleted\")\n\n return True\n\n\ndef validation_curv(output_location, repeats=3):\n jetzt = datetime.now()\n timestamp = jetzt.strftime(\"%b%d_%H%M_\")\n testname = str(timestamp + \"ValidationTest_Curv\")\n\n main_output_path = fibermorph.make_subdirectory(output_location, append_name=testname)\n\n dummy_dir = fibermorph.make_subdirectory(main_output_path, append_name=\"ValidationData\")\n shape_list = [\"arc\", \"line\"]\n\n replist = [el for el in shape_list for i in range(repeats)]\n\n output_path = fibermorph.make_subdirectory(main_output_path, append_name=\"ValidationAnalysis\")\n\n for shape in replist:\n print(shape)\n df, img, im_path, df_path = dummy_data.dummy_data_gen(\n output_directory=dummy_dir,\n shape=shape,\n min_elem=10,\n max_elem=20,\n im_width=5200,\n im_height=3900,\n width=10)\n\n valid_df = pd.DataFrame(df).sort_values(by=['ref_length'], ignore_index=True).reset_index(drop=True)\n\n test_df = fibermorph.curvature_seq(im_path, output_path, resolution=1, window_size_mm=10, save_img=False, test=True)\n\n test_df2 = pd.DataFrame(test_df).sort_values(by=['length'], ignore_index=True).reset_index(drop=True)\n\n col_list = ['error_length']\n\n if shape == \"arc\":\n valid_df['index1'] = valid_df['ref_length'] * valid_df['ref_radius']\n valid_df = pd.DataFrame(valid_df).sort_values(by=['index1'], ignore_index=True).reset_index(drop=True)\n test_df2['radius'] = 1 / test_df2['curv_median']\n test_df2['index2'] = test_df2['length'] * test_df2['radius']\n test_df2 = pd.DataFrame(test_df2).sort_values(by=['index2'], ignore_index=True).reset_index(drop=True)\n test_df2['error_radius'] = abs(valid_df['ref_radius'] - test_df2['radius']) / valid_df['ref_radius']\n test_df2['error_curvature'] = abs(valid_df['ref_curvature'] - test_df2['curv_median']) / valid_df[\n 'ref_curvature']\n\n col_list = ['error_radius', 'error_curvature', 'error_length']\n\n test_df2['error_length'] = abs(valid_df['ref_length'] - test_df2['length']) / valid_df['ref_length']\n\n valid_df2 = valid_df.join(test_df2)\n\n error_df = valid_df2\n # error_df = valid_df2[col_list]\n\n im_name = im_path.stem\n df_path = pathlib.Path(output_path).joinpath(str(im_name) + \"_errordata.csv\")\n error_df.to_csv(df_path)\n\n print(\"Results saved as:\\n\")\n print(df_path)\n\n shutil.rmtree(pathlib.Path(output_path).joinpath(\"analysis\"))\n\n return main_output_path\n\n\ndef validation_section(output_location, repeats=12):\n jetzt = datetime.now()\n timestamp = jetzt.strftime(\"%b%d_%H%M_\")\n testname = str(timestamp + \"ValidationTest_Section\")\n\n main_output_path = fibermorph.make_subdirectory(output_location, append_name=testname)\n\n dummy_dir = fibermorph.make_subdirectory(main_output_path, append_name=\"ValidationData\")\n shape_list = [\"circle\", \"ellipse\"]\n\n replist = [el for el in shape_list for i in range(repeats)]\n\n output_path = fibermorph.make_subdirectory(main_output_path, append_name=\"ValidationAnalysis\")\n\n for shape in replist:\n print(shape)\n df, img, im_path, df_path = dummy_data.dummy_data_gen(\n output_directory=dummy_dir,\n shape=shape,\n min_elem=1,\n max_elem=1,\n im_width=5200,\n im_height=3900,\n width=1)\n\n valid_df = pd.DataFrame(df).sort_values(by=[0], axis=1)\n min_ax = np.asarray(valid_df)[0][0]\n max_ax = np.asarray(valid_df)[0][1]\n valid_df['ref_min'] = min_ax\n valid_df['ref_max'] = max_ax\n valid_df['ref_eccentricity'] = np.sqrt(1 - (min_ax ** 2) / (max_ax ** 2))\n valid_df.drop(columns=['ref_height', 'ref_width'])\n\n test_df = 
fibermorph.analyze_section(im_path, output_path, minsize=0, maxsize=3900, resolution=1.0)\n\n test_df['error_min'] = abs(valid_df['ref_min'] - test_df['min']) / valid_df['ref_min']\n test_df['error_max'] = abs(valid_df['ref_max'] - test_df['max']) / valid_df['ref_max']\n\n test_df['error_area'] = abs(valid_df['ref_area'] - test_df['area']) / valid_df['ref_area']\n test_df['error_eccentricity'] = np.nan_to_num(\n abs(valid_df['ref_eccentricity'] - test_df['eccentricity']) / valid_df['ref_eccentricity'], posinf=0)\n\n valid_df2 = valid_df.join(test_df)\n\n col_list = ['error_min', 'error_max', 'error_area', 'error_eccentricity']\n\n error_df = valid_df2\n # error_df = valid_df2[col_list]\n\n im_name = im_path.stem\n df_path = pathlib.Path(output_path).joinpath(str(im_name) + \"_errordata.csv\")\n error_df.to_csv(df_path)\n\n print(\"Results saved as:\\n\")\n print(df_path)\n\n return main_output_path\n\n\n# %% Main modules\n\n\ndef real_curv():\n \"\"\"Downloads curvature data and runs fibermorph_curv analysis.\n\n Returns\n -------\n bool\n True.\n\n \"\"\"\n input_directory = get_data(\"curv\")\n jetzt = datetime.now()\n timestamp = jetzt.strftime(\"%b%d_%H%M_\")\n testname = str(timestamp + \"DemoTest_Curv\")\n\n output_location = fibermorph.make_subdirectory(create_results_cache(), append_name=testname)\n\n fibermorph.curvature(input_directory, output_location, jobs=1, resolution=132, window_size_mm=0.5, save_img=True)\n\n return True\n\n\ndef real_section():\n \"\"\"Downloads section data and runs fibermorph_section analysis.\n\n Returns\n -------\n bool\n True.\n\n \"\"\"\n input_directory = get_data(\"section\")\n\n jetzt = datetime.now()\n timestamp = jetzt.strftime(\"%b%d_%H%M_\")\n testname = str(timestamp + \"DemoTest_Section\")\n\n output_dir = fibermorph.make_subdirectory(create_results_cache(), append_name=testname)\n\n fibermorph.section(input_directory, output_dir, jobs=4, resolution=1.06)\n\n return True\n\n\ndef dummy_curv():\n \"\"\"Creates dummy data, runs curvature analysis and provides error data for this analysis compared to known values from the dummy data.\n\n Returns\n -------\n bool\n True.\n\n \"\"\"\n output_dir = validation_curv(create_results_cache(), repeats=1)\n print(\"Validation data and error analyses are saved in:\\n\")\n print(output_dir)\n\n return True\n\n\ndef dummy_section():\n \"\"\"Creates dummy data, runs section analysis and provides error data for this analysis compared to known values from the dummy data.\n\n Returns\n -------\n bool\n True.\n\n \"\"\"\n output_dir = validation_section(create_results_cache(), repeats=2)\n print(\"Validation data and error analyses are saved in:\\n\")\n print(output_dir)\n\n return True\n" ]
[ [ "pandas.DataFrame", "numpy.asarray", "numpy.sqrt" ] ]
xqdan/akg
[ "e28501611d73d3957a1f3c58eeb6b028f2f2765d" ]
[ "tests/operators/ci_gpu/test_fused_bn_reduce_grad.py" ]
[ "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\n\nfrom __future__ import absolute_import\nimport numpy as np\nfrom gen_random import random_gaussian\nfrom akg.utils import kernel_exec as utils\nfrom akg.utils.result_analysis import gpu_profiling\nfrom akg.utils.format_transform import to_tvm_nd_array\nfrom akg.ops.poly_gpu import fused_bn_reduce_grad_manual, fused_bn_reduce_grad_auto\n\ndef compute_fused_bn_reduce_grad(data, inter_dtype, layout, out_dtype):\n\n data0 = data[0]\n data1 = data[1]\n data2 = data[2]\n data3 = data[3]\n data4 = data[4]\n data5 = data[5]\n data6 = data[6]\n data7 = data[7]\n\n if layout == \"NCHW\":\n data3 = np.transpose(data3, axes=(0, 2, 3, 1))\n data7 = np.transpose(data7, axes=(0, 2, 3, 1))\n\n n, h, w, c = data3.shape\n\n data3 = data3.astype(inter_dtype)\n data7 = data7.astype(inter_dtype)\n\n out1 = data4 * data5 / (n * h * w)\n out2 = data3 * (n * h * w) - data2\n out3 = (data7 - data6 / (n * h * w)) * data1 / data0\n output = out1 * (out2 - out3)\n output = output.astype(out_dtype)\n\n if layout == \"NCHW\":\n output = np.transpose(output, axes=(0, 3, 1, 2))\n\n return output\n\ndef gen_data(in_shape, in_dtype, inter_dtype, layout, out_dtype):\n\n if layout == \"NHWC\":\n num_channel = in_shape[3]\n else:\n num_channel = in_shape[1]\n\n data = [np.nan] * 8\n data[0] = random_gaussian([num_channel], miu=1, sigma=0.1).astype(inter_dtype)\n data[1] = random_gaussian([num_channel], miu=1, sigma=0.1).astype(inter_dtype)\n data[2] = random_gaussian([num_channel], miu=1, sigma=0.1).astype(inter_dtype)\n data[3] = random_gaussian(in_shape, miu=1, sigma=0.1).astype(in_dtype)\n data[4] = random_gaussian([num_channel], miu=1, sigma=0.1).astype(inter_dtype)\n data[5] = random_gaussian([num_channel], miu=1, sigma=0.1).astype(inter_dtype)\n data[6] = random_gaussian([num_channel], miu=1, sigma=0.1).astype(inter_dtype)\n data[7] = random_gaussian(in_shape, miu=1, sigma=0.1).astype(in_dtype)\n\n expect = compute_fused_bn_reduce_grad(data, inter_dtype, layout, out_dtype)\n output = np.full(expect.shape, np.nan, out_dtype)\n\n return data, output, expect\n\ndef test_fused_bn_reduce_grad(in_shape, in_dtype=\"float16\", layout='NHWC', out_dtype='float16', poly_sch=False):\n\n if layout != \"NHWC\" and layout != \"NCHW\":\n raise NotImplementedError(\n 'Layout not supported {} '.format(layout))\n\n inter_dtype = 'float32'\n inputs, output, expect = gen_data(in_shape, in_dtype, inter_dtype, layout, out_dtype)\n input_shape_list = [i.shape for i in inputs]\n input_dtype_list = [inter_dtype] * 3 + [in_dtype] + [inter_dtype] * 3 + [in_dtype]\n op_attrs = [layout, out_dtype]\n if poly_sch:\n mod = utils.op_build_test(\n fused_bn_reduce_grad_auto, input_shape_list, input_dtype_list,\n kernel_name=\"fused_bn_reduce_grad_auto\", op_attrs=op_attrs, attrs={\"target\": \"cuda\"}\n )\n else:\n mod = utils.op_build_test(\n fused_bn_reduce_grad_manual, input_shape_list, input_dtype_list,\n kernel_name=\"fused_bn_reduce_grad_manual\", 
op_attrs=op_attrs\n )\n\n outputs = [output]\n arglist = inputs + outputs\n output = utils.mod_launch(mod, arglist, expect=expect)\n\n res = np.allclose(output, expect, rtol=5e-03, atol=1.e-8)\n print(\"Test {}\".format(\"Pass\" if res else \"Fail\"))\n if not res:\n print(\"Error cuda:========================\")\n print(mod.imported_modules[0].get_source())\n raise AssertionError(\"Test fail\")\n\n inputs = to_tvm_nd_array(inputs)\n expect = to_tvm_nd_array(expect)\n return True\n" ]
[ [ "numpy.allclose", "numpy.full", "numpy.transpose" ] ]
ikibalin/rhochi
[ "1ca03f18dc72006322a101ed877cdbba33ed61e7" ]
[ "cryspy/A_functions_base/function_3_den_file.py" ]
[ "\"\"\"Module define operations with density file: file.den\n\nFunctions\n---------\n - read_den_file\n - save_to_den_file\n - recalc_den_file_to_p1\n\n\"\"\"\nimport numpy\n\nfrom cryspy.A_functions_base.function_2_sym_elems import \\\n form_symm_elems_by_b_i_r_ij, calc_numerators_denominator_for_b_i, \\\n transform_to_p1\n\n\ndef read_den_file(file_name: str):\n \"\"\"Read density file.\n \n Arguments\n ---------\n - file_name is a file name str\n\n points_abc: [3] int\n cell_parameters: [6] float\n sym_elems: [13, n_symm_elems]: int\n indexes_xyz: [3, n_points] int\n densities: [3, n_points] float\n centrosymmetry: bool\n np_shift: [4, n_symmelems] int\n \"\"\"\n with open(file_name, \"r\") as fid:\n l_content = fid.readlines()\n\n number_lines = int(l_content[1])\n\n hh = l_content[number_lines+2].strip().split()\n rad = float(numpy.pi/180.)\n cell_parameters = numpy.array([float(hh[0]), float(hh[1]), float(hh[2]), \n float(hh[3])*rad, float(hh[4])*rad, float(hh[5])*rad],\n dtype=float)\n\n [points_a, points_b, points_c, n_el_symm, centr, n_shift] = [\n int(hh) for hh in l_content[number_lines+3][:-1].split()]\n points_abc = numpy.array((points_a, points_b, points_c), dtype=int)\n centrosymmetry = bool(centr)\n\n l_ind_xyz = []\n l_dens = []\n for line in l_content[2:number_lines+2]:\n hh = line.strip().split()\n ind_x, ind_y, ind_z = int(hh[0]), int(hh[1]), int(hh[2])\n den_f, den_a = 0., 0.\n den = float(hh[3])\n if len(hh) == 6:\n den_f = float(hh[4])\n den_a = float(hh[5])\n elif den >= 0.:\n den_f = den\n else: # den < 0 \n den_a = den\n l_ind_xyz.append((ind_x, ind_y, ind_z))\n l_dens.append((den, den_f, den_a))\n indexes_xyz = numpy.array(l_ind_xyz, dtype=int).transpose()\n densities = numpy.array(l_dens, dtype=float).transpose()\n\n r_11, r_12, r_13, r_21, r_22, r_23, r_31, r_32, r_33 = [], [], [], [], \\\n [], [], [], [], []\n b_1, b_2, b_3 = [], [], []\n for line in l_content[number_lines+4:number_lines+4+n_el_symm]:\n hh = line.replace(\"-\", \" -\").strip().split()\n r_11.append(int(hh[0]))\n r_12.append(int(hh[3]))\n r_13.append(int(hh[6]))\n \n r_21.append(int(hh[1]))\n r_22.append(int(hh[4]))\n r_23.append(int(hh[7]))\n\n r_31.append(int(hh[2]))\n r_32.append(int(hh[5]))\n r_33.append(int(hh[8]))\n\n b_1.append(float(hh[9]))\n b_2.append(float(hh[10]))\n b_3.append(float(hh[11]))\n\n b_i = (b_1, b_2, b_3)\n r_ij = (r_11, r_12, r_13, r_21, r_22, r_23, r_31, r_32, r_33)\n\n sym_elems = form_symm_elems_by_b_i_r_ij(b_i, r_ij)\n\n shift_1, shift_2, shift_3 = [], [], []\n for line in l_content[number_lines+4+n_el_symm:\n number_lines+4+n_el_symm+n_shift]:\n hh = line.strip().split()\n shift_1.append(float(hh[0]))\n shift_2.append(float(hh[1]))\n shift_3.append(float(hh[2]))\n\n sh_num_x, sh_num_y, sh_num_z, sh_den = calc_numerators_denominator_for_b_i(\n shift_1, shift_2, shift_3)\n\n np_shift = numpy.stack((sh_num_x, sh_num_y, sh_num_z, sh_den), axis=0)\n\n return points_abc, cell_parameters, sym_elems, indexes_xyz, densities, \\\n centrosymmetry, np_shift\n\n\ndef save_to_den_file(\n file_name: str, points_abc, cell_parameters, sym_elems, indexes_xyz,\n densities, centrosymmetry, shift):\n \"\"\"Save to file.\n file_name is str\n points_abc: [3] int\n cell_parameters: [6] float\n sym_elems: [13, n_symm_elems]: int\n indexes_xyz: [3, n_points] int\n densities: [3, n_points] float\n centrosymmetry: bool\n np_shift: [4, n_symmelems] int\n \"\"\"\n\n index_x = indexes_xyz[0, :]\n index_y = indexes_xyz[1, :]\n index_z = indexes_xyz[2, :]\n\n n_symmetry = sym_elems.shape[1]\n\n 
n_shape = densities.shape\n if len(n_shape) == 1:\n n_points = n_shape[0]\n n_size = 1 \n else:\n n_size, n_points = n_shape\n if n_size == 3:\n density = densities[0, :]\n density_f = densities[1, :]\n density_a = densities[2, :]\n elif n_size == 2:\n density_f = densities[0, :]\n density_a = densities[1, :]\n density = density_f-density_a\n else:\n density = densities\n density_f = numpy.zeros(density.shape, dtype=float)\n density_a = numpy.zeros(density.shape, dtype=float)\n cond = density >= 0.\n density_f[cond] = density[cond]\n cond_not = numpy.logical_not(cond)\n density_a[cond_not] = density[cond_not]\n\n ls_out = []\n ls_out.append(\"Created by CrysPy\")\n ls_out.append(\"{:}\".format(n_points))\n\n for i_x, i_y, i_z, den, den_f, den_a in zip(\n index_x, index_y, index_z, density, density_f, density_a):\n\n ls_out.append(\n f\"{i_x:4} {i_y:4} {i_z:4} {den:15.7f} {den_f:15.7f} {den_a:15.7f}\")\n\n (a, b, c, al, be, ga) = cell_parameters\n\n ls_out.append(\n f\"{a:10.5f}{b:10.5f}{c:10.5f}{al:10.5f}{be:10.5f}{ga:10.5f}\")\n\n n_shift = shift.shape[1]\n\n ls_out.append(f\"{points_abc[0]:5}{points_abc[1]:5}{points_abc[2]:5}\\\n{n_symmetry:5}{centrosymmetry:5}{n_shift:5}\")\n\n for r_11, r_12, r_13, r_21, r_22, r_23, r_31, r_32, r_33, b_num_1, \\\n b_num_2, b_num_3, b_den in zip(\n sym_elems[4, :], sym_elems[5, :], sym_elems[6, :], \n sym_elems[7, :], sym_elems[8, :], sym_elems[9, :], \n sym_elems[10, :], sym_elems[11, :], sym_elems[12, :], \n sym_elems[0, :], sym_elems[1, :], sym_elems[2, :],\n sym_elems[3, :]):\n b_1 = float(b_num_1)/float(b_den)\n b_2 = float(b_num_2)/float(b_den)\n b_3 = float(b_num_3)/float(b_den)\n ls_out.append(\n f\"{r_11:4}{r_21:4}{r_31:4} {r_12:4}{r_22:4}{r_32:4} \\\n{r_13:4}{r_23:4}{r_33:4} {b_1:8.5f}{b_2:8.5f}{b_3:8.5f}\")\n for orig_1, orig_2, orig_3 in zip(shift[0, :], shift[1, :], shift[2, :]):\n ls_out.append(f\"{orig_1:8.4f}{orig_2:8.4f}{orig_3:8.4f}\")\n\n with open(file_name, \"w\") as fid:\n fid.write(\"\\n\".join(ls_out))\n\ndef recalc_den_file_to_p1(den_file_input: str, den_file_output: str):\n points_abc, cell_parameters, sym_elems, indexes_xyz, densities, \\\n centrosymmetry, np_shift = read_den_file(den_file_input)\n\n sym_elems_p1 = numpy.array([[0], [0], [0], [1], [1], [0], [0], [0], [1],\n [0], [0], [0], [1]], dtype=int)\n\n np_shift_p1 = numpy.array([[0], [0], [0], [1]], dtype=int)\n indexes_xyz_p1, densities_p1 = transform_to_p1(\n points_abc, sym_elems, np_shift, centrosymmetry, indexes_xyz, densities)\n\n save_to_den_file(\n den_file_output, points_abc, cell_parameters, sym_elems_p1,\n indexes_xyz_p1, densities_p1, False, np_shift_p1)\n" ]
[ [ "numpy.logical_not", "numpy.array", "numpy.stack", "numpy.zeros" ] ]
Bellomia/pyDrude
[ "cd2d6980008ddbe247f1aa50dc238e0d7cf0904f" ]
[ "HeVanderbiltModel/OBC/OBCsData.py" ]
[ "import numpy\nimport pylab\n\nL = numpy.array([10,14,20,24,26,30,32,34,40,42,46,50])\nD = numpy.array([0.7495521937409436,0.748133095630968,0.7551880187600327,0.753688557716125,0.690564577980913,0.728280401470924,0.6950535214361949,0.6326486809343935,0.7794025403496738,0.6853505319229404,0.6899417389825953,0.7033161262753732])\nF = numpy.array([0.9618361716626087,0.9428258007428983,0.9552063081901584,0.9446269600248529,0.9170289208571015,0.9526852269574598,0.9526852269574598,0.8214302457216206,0.9913968616328988,0.8768807465924278,0.9083384797698022,0.9106027728730962])\n\nL = 1/L[range(6)]\nD = D[range(6)]\nF = F[range(6)]\n\n#D = D[::-1]\n#F = F[::-1]\n\npylab.figure('OBCs Residue Sums')\npylab.plot(L,F, marker='s', c='green', markeredgecolor='black')\npylab.plot(L,D, marker='o', c='red', markeredgecolor='black')\npylab.plot(L,F-D, marker='o', c='orange', markeredgecolor='black')\n\npylab.show()\n\n# PBCS\n\nL = [ 6., 10., 14., 18., 22., 26., 30.]\nD = [0.60795342, 0.7953829 , 0.7923511 , 0.77841188, 0.76635186, 0.75685108, 0.74937526]\nF = [0.62280857, 0.79948367, 0.80042398, 0.88114045, 0.89914115, 0.91187523, 0.96045241]\nR = [0.01485515, 0.00410077, 0.00807289, 0.10272856, 0.13278929, 0.15502415, 0.21107715]\n\npylab.figure('PBCs Drude Weight and Fsum')\npylab.plot(L,F, marker='s', c='green', markeredgecolor='black')\npylab.plot(L,D, marker='o', c='red', markeredgecolor='black')\npylab.plot(L,R, marker='o', c='orange', markeredgecolor='black')\n\n \npylab.show()" ]
[ [ "numpy.array" ] ]
iQua/fl-lottery
[ "360d9c2d54c12e2631ac123a4dd5ac9184d913f0" ]
[ "rl/agent.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.distributions as distributions\nimport numpy as np\n\n\nclass MLP(nn.Module):\n def __init__(self, input_dim, hidden_dim, output_dim, dropout = 0.5):\n super().__init__()\n\n self.fc_1 = nn.Linear(input_dim, hidden_dim)\n self.fc_2 = nn.Linear(hidden_dim, output_dim)\n self.dropout = nn.Dropout(dropout)\n \n def forward(self, x):\n x = self.fc_1(x)\n x = self.dropout(x)\n x = F.relu(x)\n x = self.fc_2(x)\n return x\n\n\nclass ActorCritic(nn.Module):\n def __init__(self, actor, critic):\n super().__init__()\n \n self.actor = actor\n self.critic = critic\n \n def forward(self, state):\n \n action_pred = self.actor(state)\n value_pred = self.critic(state)\n \n return action_pred, value_pred\n\n\nclass Agent(object):\n \n def __init__(self):\n pass\n\n\n" ]
[ [ "torch.nn.Linear", "torch.nn.functional.relu", "torch.nn.Dropout" ] ]
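The rl/agent.py file above defines MLP and ActorCritic modules but leaves the Agent class as an empty stub, so it never shows how the two heads are meant to be used together. The sketch below is my own illustration of a typical actor-critic sampling step, reusing the MLP and ActorCritic classes defined in that file; the 8-dimensional state and 4 discrete actions are hypothetical choices, not values taken from the repository.

    import torch
    import torch.distributions as distributions

    # Hypothetical sizes: 8-dim observation, 128 hidden units, 4 discrete actions.
    actor = MLP(input_dim=8, hidden_dim=128, output_dim=4)
    critic = MLP(input_dim=8, hidden_dim=128, output_dim=1)
    policy = ActorCritic(actor, critic)

    state = torch.randn(1, 8)                    # dummy observation batch
    action_logits, value = policy(state)         # forward pass of both heads
    dist = distributions.Categorical(logits=action_logits)
    action = dist.sample()                       # sampled discrete action
    log_prob = dist.log_prob(action)             # needed for policy-gradient losses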
luyuliu/unsupervised_llamas
[ "9b99f464e1983195b922e2df8bb57760182206e7" ]
[ "evaluation/segmentation_metrics.py" ]
[ "#!/usr/bin/env python3\n\"\"\"\nCalculates\n true positives (tp)\n false positives (fp)\n true negatives (tn)\n false negatives (fn)\n precision\n recall\n average precision / AUC / PR curves\n\nAdditional metrics are welcome\nOne problem with lane marker segmentation is that the absolute number of correctly\nclassified pixels often is not helpful because background pixels far outweigh\nthe lane marker pixels. In absolute terms, marking all pixels as background likely\nis the best solution but not helpful for the problem at hand.\n\nNotes\n-----\nDon't use Python2. There may be integer divisions that I missed.\n\nOptions for calculating AUC / Precision Recall curve\n1)\nIt may be faster to sort (prediction, label) pixels by probability and\ngo through those. O(n log n) in the amount of pixels per image.\nSorting takes about .36 seconds on my current system.\nExpected speedup should be about 50%\n\n2)\nBucket sort is possible as well. O(n) to put probabilities into k buckets.\no(n) to calculate the poc / auc. May be faster than using sort().\nSort however may be implemented in C. Still an approximation, as 3).\n\n3) * current implementation. It was easy and can be replaced any time.\nO(k * n), k being the amount of threshold steps,\nwhich is not as accurate but may leverage the c/c++ numpy backend.\ntp/tn/fp/fn take about one second to calculate\n\"\"\"\n# NOTE There should be tests\n\nimport numpy\n\n\ndef _debug_view(prediction, label):\n \"\"\" Shows prediction and label for visual debugging \"\"\"\n prediction = (prediction * 255).astype(numpy.uint8)\n label = (label * 255).astype(numpy.uint8)\n c = numpy.zeros((717, 1276), dtype=numpy.uint8)\n\n debug_image = numpy.stack((prediction, label, c), axis=-1)\n import cv2 # Not forcing cv2 dependency for metrics\n cv2.imshow('debug_image', debug_image)\n cv2.waitKey(1000)\n\n\ndef thresholded_binary(prediction, threshold):\n \"\"\" Thresholds prediction to 0 and 1 according to threshold \"\"\"\n return (prediction >= threshold).astype(int)\n\n\ndef true_positive(prediction, label):\n \"\"\" Calculates number of correctly classified foreground pixels \"\"\"\n num_tp = numpy.sum(numpy.logical_and(label != 0, prediction == label))\n return num_tp\n\n\ndef false_positive(prediction, label):\n \"\"\" Calculates number of incorrectly predicted foreground pixels \"\"\"\n num_fp = numpy.sum(numpy.logical_and(label == 0, prediction != 0))\n return num_fp\n\n\ndef true_negative(prediction, label):\n \"\"\" Calculates number of correctly identified background pixels \"\"\"\n num_tn = numpy.sum(numpy.logical_and(label == 0, prediction == label))\n return num_tn\n\n\ndef false_negative(prediction, label):\n \"\"\" Calculates number of missed foreground pixels \"\"\"\n num_fn = numpy.sum(numpy.logical_and(label != 0, prediction == 0))\n return num_fn\n\n\ndef binary_approx_auc(prediction, label):\n \"\"\" Calculates approximated auc and best precision-recall combination\n\n Parameters\n ----------\n prediction : numpy.ndarray\n raw prediction output in [0, 1]\n label : numpy.ndarray\n target / label, values are either 0 or 1\n\n Returns\n -------\n Dict of approximate AUC, \"corner\" precision, \"corner\" recall\n {'precision', 'recall', 'auc'}\n\n Notes\n -----\n See docstring for alternative implementation options\n Approximated by 100 uniform thresholds between 0 and 1\n \"\"\"\n # NOTE May achieve speedup by checking if label is all zeros\n num_steps = 100\n auc_value = 0\n\n # Most upper right precision, recall point\n corner_precision = 0\n 
corner_recall = 0\n corner_auc = 0\n corner_threshold = 0\n\n precisions = [1]\n recalls = [0]\n\n # Individual precision recall evaluation for those steps\n for i in range(num_steps + 1):\n threshold = (num_steps - i) / num_steps\n thresholded_prediction = thresholded_binary(prediction, threshold)\n\n # tn = true_negative(thresholded_prediction, label)\n tp = true_positive(thresholded_prediction, label)\n fn = false_negative(thresholded_prediction, label)\n fp = false_positive(thresholded_prediction, label)\n\n precision = 0 if (tp + fp) == 0 else tp / (tp + fp)\n recall = 0 if (tp + fn) == 0 else tp / (tp + fn)\n\n if (precision * recall) > corner_auc:\n corner_auc = precision * recall\n corner_precision = precision\n corner_recall = recall\n corner_threshold = threshold\n\n precisions.append(precision)\n recalls.append(recall)\n\n auc_value += (recalls[-1] - recalls[-2]) * precisions[-2]\n\n return {'recall': corner_recall, 'precision': corner_precision,\n 'threshold': corner_threshold, 'auc': auc_value}\n" ]
[ [ "numpy.stack", "numpy.logical_and", "numpy.zeros" ] ]
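The docstring in segmentation_metrics.py above describes a bucket-based variant (option 2) as a faster O(n) alternative to the current 100-threshold loop but does not implement it. A minimal sketch of that idea, written by me and not part of the repository (the function and variable names are my own), could look like this:

    import numpy

    def bucketed_precision_recall(prediction, label, num_buckets=100):
        """Approximate precision/recall pairs by bucketing probabilities once (O(n + k))."""
        pred = prediction.ravel()
        positives = label.ravel() != 0
        bins = numpy.clip((pred * num_buckets).astype(int), 0, num_buckets - 1)
        pos_per_bin = numpy.bincount(bins[positives], minlength=num_buckets)
        neg_per_bin = numpy.bincount(bins[~positives], minlength=num_buckets)
        # Walking thresholds from high to low, cumulative counts give tp and fp.
        tp = numpy.cumsum(pos_per_bin[::-1])
        fp = numpy.cumsum(neg_per_bin[::-1])
        fn = positives.sum() - tp
        precision = numpy.where((tp + fp) == 0, 1.0, tp / numpy.maximum(tp + fp, 1))
        recall = numpy.where((tp + fn) == 0, 0.0, tp / numpy.maximum(tp + fn, 1))
        return precision, recall

The area under the resulting curve can then be accumulated exactly as in binary_approx_auc, but with a single pass over the pixels instead of one thresholding pass per step.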
DevisPatel/Python-Programs
[ "9c975e35b4d2b89bc9e9d206830e7ad60e38c34a" ]
[ "PCA Algorithm (Dimensionality Reduction).py" ]
[ "import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.decomposition import PCA\r\n\r\n\r\ndata = pd.read_csv('C:/Users/Devis Patel/AppData/Local/Programs/Python/Python37/Programs/Assignment Programs/Day 8/Data Set/trans_us.csv', index_col = 0, thousands = ',')\r\n\r\ndata.index.names = ['stations']\r\ndata.columns.names = ['months']\r\ndata = data.fillna(15)\r\ndata.head()\r\n\r\n\r\n\r\npca = PCA(n_components=2)\r\npca.fit(data)\r\n\r\n\r\nexisting_2d = pca.transform(data)\r\ndata_2d = pd.DataFrame(existing_2d)\r\ndata_2d.index = data.index\r\ndata_2d.columns = ['PC1','PC2']\r\ndata_2d.head()\r\n\r\n\r\nprint (pca.explained_variance_ratio_)\r\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv", "sklearn.decomposition.PCA" ] ]
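The PCA script above imports matplotlib.pyplot but never draws anything, stopping at printing explained_variance_ratio_. If a visual check of the two components were wanted, a purely illustrative continuation (my own, reusing the data_2d frame built in that script) might be:

    # Illustrative continuation only; assumes the script above has already run.
    ax = data_2d.plot(kind='scatter', x='PC1', y='PC2', figsize=(10, 6))
    for station, row in data_2d.iterrows():
        ax.annotate(str(station), (row['PC1'], row['PC2']), fontsize=8)
    plt.show()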
yeonju7kim/Industrial-AI
[ "66073c136e7bb6964dd80e2e63970b86eed4b669" ]
[ "main.py" ]
[ "import torch\nfrom torch.utils.data import DataLoader, Dataset\n\nclass CategoryDataset(Dataset):\n def __init__(self, id_list, digit1_list, digit2_list, digit3_list, text_obj_list, text_mthd_list, text_deal_list):\n self.id_list = id_list\n self.digit1_list = digit1_list\n self.digit2_list = digit2_list\n self.digit3_list = digit3_list\n self.text_obj_list = text_obj_list\n self.text_mthd_list = text_mthd_list\n self.text_deal_list = text_deal_list\n\n def __len__(self):\n return len(self.id_list)\n\n def __getitem__(self, idx):\n id = self.id_list[idx]\n digit1 = self.digit1_list[idx]\n digit2 = self.digit2_list[idx]\n digit3 = self.digit3_list[idx]\n text_obj = self.text_obj_list[idx]\n text_mthd = self.text_mthd_list[idx]\n text_deal = self.text_deal_list[idx]\n\n return id, digit1, digit2, digit3, text_obj, text_mthd, text_deal\n\nclass Index:\n id_idx = 0\n digit1_idx = 1\n digit2_idx = 2\n digit3_idx = 3\n text_obj_idx = 4\n text_mthd_idx = 5\n text_deal_idx = 6\n\ndef _read_txt_file(filename):\n id_list = []\n digit1_list = []\n digit2_list = []\n digit3_list = []\n text_obj_list = []\n text_mthd_list = []\n text_deal_list = []\n f = open(filename)\n print(f.readline())\n lines = f.readlines()\n for line in lines:\n words = line.split('|')\n id_list.append(words[Index.id_idx])\n digit1_list.append(words[Index.digit1_idx])\n digit2_list.append(words[Index.digit2_idx])\n digit3_list.append(words[Index.digit3_idx])\n text_obj_list.append(words[Index.text_obj_idx])\n text_mthd_list.append(words[Index.text_mthd_idx])\n text_deal_list.append(words[Index.text_deal_idx])\n return id_list, digit1_list, digit2_list, digit3_list, text_obj_list, text_mthd_list, text_deal_list\n\n\ndef get_category_dataloader(batch_size, train_portion=0.7, shuffle=True, transform=None, filename='../data/1. 실습용자료.txt'):\n id_list, digit1_list, digit2_list, digit3_list, text_obj_list, text_mthd_list, text_deal_list = _read_txt_file(filename)\n category_dataset = CategoryDataset(id_list, digit1_list, digit2_list, digit3_list, text_obj_list, text_mthd_list, text_deal_list)\n dataset_size = len(category_dataset)\n train_size = (int)(train_portion * dataset_size)\n train_set, val_set = torch.utils.data.random_split(category_dataset, [train_size, dataset_size - train_size])\n trainDataLoader = DataLoader(train_set, batch_size=batch_size, shuffle=shuffle)\n validDataLoader = DataLoader(val_set, batch_size=batch_size, shuffle=shuffle)\n return trainDataLoader, validDataLoader\n\n\nif __name__ == '__main__':\n tr_dataloader, val_dataloader = get_category_dataloader(10)\n print('hello')\n" ]
[ [ "torch.utils.data.random_split", "torch.utils.data.DataLoader" ] ]
yahiasaqer/Association_Rules
[ "e75735e85750179fc537445afa926e14daa1e5b7" ]
[ "AssociationRules1.py" ]
[ "import pandas as pd\r\nfrom mlxtend.preprocessing import TransactionEncoder\r\nfrom mlxtend.frequent_patterns import apriori\r\n\r\ndataset = [['Milk', 'Onion', 'Nutmeg', 'Kidney Beans', 'Eggs', 'Yogurt'],\r\n ['Dill', 'Onion', 'Nutmeg', 'Kidney Beans', 'Eggs', 'Yogurt'],\r\n ['Milk', 'Apple', 'Kidney Beans', 'Eggs'],\r\n ['Milk', 'Unicorn', 'Corn', 'Kidney Beans', 'Yogurt'],\r\n ['Milk', 'Unicorn', 'Kidney Beans', 'Yogurt'],\r\n ['Corn', 'Onion', 'Onion', 'Kidney Beans', 'Ice cream', 'Eggs']]\r\n\r\n# TransactionEncoder is a model to deal with transactions\r\nte = TransactionEncoder()\r\n\r\n# Transform the transaction dataset to binary 2D array\r\nte_ary = te.fit(dataset).transform(dataset)\r\nprint(te_ary)\r\n\r\n# convert the array of transaction data array into pandas DataFrame\r\ndf = pd.DataFrame(te_ary, columns=te.columns_)\r\n\r\n# get the frequent itemsets by using apriori algorithm\r\nfrequentItemsets = apriori(df, min_support=0.6, use_colnames=True)\r\nprint('Itemsets\\n', frequentItemsets)\r\n\r\n#get the association rules\r\nfrom mlxtend.frequent_patterns import association_rules\r\nrules = association_rules(frequentItemsets, min_threshold=0.7) #min_threshold is the minimum confidence\r\nprint('Rules\\n', rules)\r\n" ]
[ [ "pandas.DataFrame" ] ]
skatsuta/pandas-gbq
[ "81ab6b8f01c5a2cd1033e687a3d8bb7fe3f43c98" ]
[ "tests/system/test_gbq.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport datetime\nimport sys\n\nimport numpy as np\nimport pandas\nimport pandas.api.types\nimport pandas.util.testing as tm\nfrom pandas import DataFrame, NaT\n\ntry:\n import pkg_resources # noqa\nexcept ImportError:\n raise ImportError(\"Could not import pkg_resources (setuptools).\")\nimport pytest\nimport pytz\n\nfrom pandas_gbq import gbq\nimport pandas_gbq.schema\n\n\nTABLE_ID = \"new_test\"\nPANDAS_VERSION = pkg_resources.parse_version(pandas.__version__)\nNULLABLE_INT_PANDAS_VERSION = pkg_resources.parse_version(\"0.24.0\")\nNULLABLE_INT_MESSAGE = (\n \"Require pandas 0.24+ in order to use nullable integer type.\"\n)\n\n\ndef test_imports():\n gbq._test_google_api_imports()\n\n\ndef make_mixed_dataframe_v2(test_size):\n # create df to test for all BQ datatypes except RECORD\n bools = np.random.randint(2, size=(1, test_size)).astype(bool)\n flts = np.random.randn(1, test_size)\n ints = np.random.randint(1, 10, size=(1, test_size))\n strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)\n times = [\n datetime.datetime.now(pytz.timezone(\"US/Arizona\"))\n for t in range(test_size)\n ]\n return DataFrame(\n {\n \"bools\": bools[0],\n \"flts\": flts[0],\n \"ints\": ints[0],\n \"strs\": strs[0],\n \"times\": times[0],\n },\n index=range(test_size),\n )\n\n\ndef get_schema(\n gbq_connector: gbq.GbqConnector, dataset_id: str, table_id: str\n):\n \"\"\"Retrieve the schema of the table\n\n Obtain from BigQuery the field names and field types\n for the table defined by the parameters\n\n Parameters\n ----------\n dataset_id : str\n Name of the BigQuery dataset for the table\n table_id : str\n Name of the BigQuery table\n\n Returns\n -------\n list of dicts\n Fields representing the schema\n \"\"\"\n from google.cloud import bigquery\n\n bqclient = gbq_connector.client\n table_ref = bigquery.TableReference(\n bigquery.DatasetReference(bqclient.project, dataset_id),\n table_id,\n )\n\n try:\n table = bqclient.get_table(table_ref)\n remote_schema = table.schema\n\n remote_fields = [\n field_remote.to_api_repr() for field_remote in remote_schema\n ]\n for field in remote_fields:\n field[\"type\"] = field[\"type\"].upper()\n field[\"mode\"] = field[\"mode\"].upper()\n\n return remote_fields\n except gbq_connector.http_error as ex:\n gbq_connector.process_http_error(ex)\n\n\ndef verify_schema(gbq_connector, dataset_id, table_id, schema):\n \"\"\"Indicate whether schemas match exactly\n\n Compare the BigQuery table identified in the parameters with\n the schema passed in and indicate whether all fields in the former\n are present in the latter. Order is not considered.\n\n Parameters\n ----------\n dataset_id :str\n Name of the BigQuery dataset for the table\n table_id : str\n Name of the BigQuery table\n schema : list(dict)\n Schema for comparison. 
Each item should have\n a 'name' and a 'type'\n\n Returns\n -------\n bool\n Whether the schemas match\n \"\"\"\n\n fields_remote = pandas_gbq.schema._clean_schema_fields(\n get_schema(gbq_connector, dataset_id, table_id)\n )\n fields_local = pandas_gbq.schema._clean_schema_fields(schema[\"fields\"])\n return fields_remote == fields_local\n\n\nclass TestGBQConnectorIntegration(object):\n def test_should_be_able_to_make_a_connector(self, gbq_connector):\n assert gbq_connector is not None, \"Could not create a GbqConnector\"\n\n def test_should_be_able_to_get_a_bigquery_client(self, gbq_connector):\n bigquery_client = gbq_connector.get_client()\n assert bigquery_client is not None\n\n\nclass TestReadGBQIntegration(object):\n @pytest.fixture(autouse=True)\n def setup(self, project, credentials):\n # - PER-TEST FIXTURES -\n # put here any instruction you want to be run *BEFORE* *EVERY* test is\n # executed.\n self.gbq_connector = gbq.GbqConnector(project, credentials=credentials)\n self.credentials = credentials\n\n def test_should_properly_handle_empty_strings(self, project_id):\n query = 'SELECT \"\" AS empty_string'\n df = gbq.read_gbq(\n query,\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n tm.assert_frame_equal(df, DataFrame({\"empty_string\": [\"\"]}))\n\n def test_should_properly_handle_null_strings(self, project_id):\n query = \"SELECT STRING(NULL) AS null_string\"\n df = gbq.read_gbq(\n query,\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n tm.assert_frame_equal(df, DataFrame({\"null_string\": [None]}))\n\n def test_should_properly_handle_valid_integers(self, project_id):\n query = \"SELECT CAST(3 AS INT64) AS valid_integer\"\n df = gbq.read_gbq(\n query,\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"standard\",\n )\n tm.assert_frame_equal(df, DataFrame({\"valid_integer\": [3]}))\n\n def test_should_properly_handle_nullable_integers(self, project_id):\n if PANDAS_VERSION < NULLABLE_INT_PANDAS_VERSION:\n pytest.skip(msg=NULLABLE_INT_MESSAGE)\n\n query = \"\"\"SELECT * FROM\n UNNEST([1, NULL]) AS nullable_integer\n \"\"\"\n df = gbq.read_gbq(\n query,\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"standard\",\n dtypes={\"nullable_integer\": \"Int64\"},\n )\n tm.assert_frame_equal(\n df,\n DataFrame(\n {\n \"nullable_integer\": pandas.Series(\n [1, pandas.NA], dtype=\"Int64\"\n )\n }\n ),\n )\n\n def test_should_properly_handle_valid_longs(self, project_id):\n query = \"SELECT 1 << 62 AS valid_long\"\n df = gbq.read_gbq(\n query,\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"standard\",\n )\n tm.assert_frame_equal(df, DataFrame({\"valid_long\": [1 << 62]}))\n\n def test_should_properly_handle_nullable_longs(self, project_id):\n if PANDAS_VERSION < NULLABLE_INT_PANDAS_VERSION:\n pytest.skip(msg=NULLABLE_INT_MESSAGE)\n\n query = \"\"\"SELECT * FROM\n UNNEST([1 << 62, NULL]) AS nullable_long\n \"\"\"\n df = gbq.read_gbq(\n query,\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"standard\",\n dtypes={\"nullable_long\": \"Int64\"},\n )\n tm.assert_frame_equal(\n df,\n DataFrame(\n {\n \"nullable_long\": pandas.Series(\n [1 << 62, pandas.NA], dtype=\"Int64\"\n )\n }\n ),\n )\n\n def test_should_properly_handle_null_integers(self, project_id):\n if PANDAS_VERSION < NULLABLE_INT_PANDAS_VERSION:\n pytest.skip(msg=NULLABLE_INT_MESSAGE)\n\n query = \"SELECT CAST(NULL AS INT64) AS null_integer\"\n df = gbq.read_gbq(\n query,\n 
project_id=project_id,\n credentials=self.credentials,\n dialect=\"standard\",\n dtypes={\"null_integer\": \"Int64\"},\n )\n tm.assert_frame_equal(\n df,\n DataFrame(\n {\"null_integer\": pandas.Series([pandas.NA], dtype=\"Int64\")}\n ),\n )\n\n def test_should_properly_handle_valid_floats(self, project_id):\n from math import pi\n\n query = \"SELECT PI() AS valid_float\"\n df = gbq.read_gbq(\n query,\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n tm.assert_frame_equal(df, DataFrame({\"valid_float\": [pi]}))\n\n def test_should_properly_handle_nullable_floats(self, project_id):\n from math import pi\n\n query = \"\"\"SELECT * FROM\n (SELECT PI() AS nullable_float),\n (SELECT NULL AS nullable_float)\"\"\"\n df = gbq.read_gbq(\n query,\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n tm.assert_frame_equal(df, DataFrame({\"nullable_float\": [pi, None]}))\n\n def test_should_properly_handle_valid_doubles(self, project_id):\n from math import pi\n\n query = \"SELECT PI() * POW(10, 307) AS valid_double\"\n df = gbq.read_gbq(\n query,\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n tm.assert_frame_equal(\n df, DataFrame({\"valid_double\": [pi * 10 ** 307]})\n )\n\n def test_should_properly_handle_nullable_doubles(self, project_id):\n from math import pi\n\n query = \"\"\"SELECT * FROM\n (SELECT PI() * POW(10, 307) AS nullable_double),\n (SELECT NULL AS nullable_double)\"\"\"\n df = gbq.read_gbq(\n query,\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n tm.assert_frame_equal(\n df, DataFrame({\"nullable_double\": [pi * 10 ** 307, None]})\n )\n\n def test_should_properly_handle_null_floats(self, project_id):\n query = \"\"\"SELECT null_float\n FROM UNNEST(ARRAY<FLOAT64>[NULL, 1.0]) AS null_float\n \"\"\"\n df = gbq.read_gbq(\n query,\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"standard\",\n )\n tm.assert_frame_equal(df, DataFrame({\"null_float\": [np.nan, 1.0]}))\n\n def test_should_properly_handle_date(self, project_id):\n query = \"SELECT DATE(2003, 1, 4) AS date_col\"\n df = gbq.read_gbq(\n query,\n project_id=project_id,\n credentials=self.credentials,\n )\n expected = DataFrame(\n {\n \"date_col\": pandas.Series(\n [datetime.date(2003, 1, 4)], dtype=\"datetime64[ns]\"\n )\n },\n )\n tm.assert_frame_equal(df, expected)\n\n def test_should_properly_handle_time(self, project_id):\n query = \"SELECT TIME_ADD(TIME(3, 14, 15), INTERVAL 926589 MICROSECOND) AS time_col\"\n df = gbq.read_gbq(\n query,\n project_id=project_id,\n credentials=self.credentials,\n )\n expected = DataFrame(\n {\n \"time_col\": pandas.Series(\n [datetime.time(3, 14, 15, 926589)], dtype=\"object\"\n )\n },\n )\n tm.assert_frame_equal(df, expected)\n\n def test_should_properly_handle_timestamp_unix_epoch(self, project_id):\n query = 'SELECT TIMESTAMP(\"1970-01-01 00:00:00\") AS unix_epoch'\n df = gbq.read_gbq(\n query,\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n expected = DataFrame(\n {\"unix_epoch\": [\"1970-01-01T00:00:00.000000Z\"]},\n dtype=\"datetime64[ns]\",\n )\n if expected[\"unix_epoch\"].dt.tz is None:\n expected[\"unix_epoch\"] = expected[\"unix_epoch\"].dt.tz_localize(\n \"UTC\"\n )\n tm.assert_frame_equal(df, expected)\n\n def test_should_properly_handle_arbitrary_timestamp(self, project_id):\n query = 'SELECT TIMESTAMP(\"2004-09-15 05:00:00\") AS valid_timestamp'\n df = gbq.read_gbq(\n query,\n 
project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n expected = DataFrame(\n {\"valid_timestamp\": [\"2004-09-15T05:00:00.000000Z\"]},\n dtype=\"datetime64[ns]\",\n )\n if expected[\"valid_timestamp\"].dt.tz is None:\n expected[\"valid_timestamp\"] = expected[\n \"valid_timestamp\"\n ].dt.tz_localize(\"UTC\")\n tm.assert_frame_equal(df, expected)\n\n def test_should_properly_handle_datetime_unix_epoch(self, project_id):\n query = 'SELECT DATETIME(\"1970-01-01 00:00:00\") AS unix_epoch'\n df = gbq.read_gbq(\n query,\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n tm.assert_frame_equal(\n df,\n DataFrame(\n {\"unix_epoch\": [\"1970-01-01T00:00:00\"]}, dtype=\"datetime64[ns]\"\n ),\n )\n\n def test_should_properly_handle_arbitrary_datetime(self, project_id):\n query = 'SELECT DATETIME(\"2004-09-15 05:00:00\") AS valid_timestamp'\n df = gbq.read_gbq(\n query,\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n tm.assert_frame_equal(\n df,\n DataFrame(\n {\"valid_timestamp\": [np.datetime64(\"2004-09-15T05:00:00\")]}\n ),\n )\n\n @pytest.mark.parametrize(\n \"expression, is_expected_dtype\",\n [\n (\"current_date()\", pandas.api.types.is_datetime64_ns_dtype),\n (\"current_timestamp()\", pandas.api.types.is_datetime64tz_dtype),\n (\"current_datetime()\", pandas.api.types.is_datetime64_ns_dtype),\n (\"TRUE\", pandas.api.types.is_bool_dtype),\n (\"FALSE\", pandas.api.types.is_bool_dtype),\n ],\n )\n def test_return_correct_types(\n self, project_id, expression, is_expected_dtype\n ):\n \"\"\"\n All type checks can be added to this function using additional\n parameters, rather than creating additional functions.\n We can consolidate the existing functions here in time\n\n TODO: time doesn't currently parse\n (\"time(12,30,00)\", \"<M8[ns]\"),\n \"\"\"\n query = \"SELECT {} AS _\".format(expression)\n df = gbq.read_gbq(\n query,\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"standard\",\n )\n assert is_expected_dtype(df[\"_\"].dtype)\n\n def test_should_properly_handle_null_timestamp(self, project_id):\n query = \"SELECT TIMESTAMP(NULL) AS null_timestamp\"\n df = gbq.read_gbq(\n query,\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n expected = DataFrame({\"null_timestamp\": [NaT]}, dtype=\"datetime64[ns]\")\n expected[\"null_timestamp\"] = expected[\"null_timestamp\"].dt.tz_localize(\n \"UTC\"\n )\n tm.assert_frame_equal(df, expected)\n\n def test_should_properly_handle_null_datetime(self, project_id):\n query = \"SELECT CAST(NULL AS DATETIME) AS null_datetime\"\n df = gbq.read_gbq(\n query,\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"standard\",\n )\n tm.assert_frame_equal(df, DataFrame({\"null_datetime\": [NaT]}))\n\n def test_should_properly_handle_null_boolean(self, project_id):\n query = \"SELECT BOOLEAN(NULL) AS null_boolean\"\n df = gbq.read_gbq(\n query,\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n tm.assert_frame_equal(df, DataFrame({\"null_boolean\": [None]}))\n\n def test_should_properly_handle_nullable_booleans(self, project_id):\n query = \"\"\"SELECT * FROM\n (SELECT BOOLEAN(TRUE) AS nullable_boolean),\n (SELECT NULL AS nullable_boolean)\"\"\"\n df = gbq.read_gbq(\n query,\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n tm.assert_frame_equal(\n df, DataFrame({\"nullable_boolean\": [True, None]}).astype(object)\n )\n\n def 
test_unicode_string_conversion_and_normalization(self, project_id):\n correct_test_datatype = DataFrame({\"unicode_string\": [\"éü\"]})\n unicode_string = \"éü\"\n query = 'SELECT \"{0}\" AS unicode_string'.format(unicode_string)\n\n df = gbq.read_gbq(\n query,\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n tm.assert_frame_equal(df, correct_test_datatype)\n\n def test_index_column(self, project_id):\n query = \"SELECT 'a' AS string_1, 'b' AS string_2\"\n result_frame = gbq.read_gbq(\n query,\n project_id=project_id,\n index_col=\"string_1\",\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n correct_frame = DataFrame(\n {\"string_1\": [\"a\"], \"string_2\": [\"b\"]}\n ).set_index(\"string_1\")\n assert result_frame.index.name == correct_frame.index.name\n\n def test_column_order(self, project_id):\n query = \"SELECT 'a' AS string_1, 'b' AS string_2, 'c' AS string_3\"\n col_order = [\"string_3\", \"string_1\", \"string_2\"]\n result_frame = gbq.read_gbq(\n query,\n project_id=project_id,\n col_order=col_order,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n correct_frame = DataFrame(\n {\"string_1\": [\"a\"], \"string_2\": [\"b\"], \"string_3\": [\"c\"]}\n )[col_order]\n tm.assert_frame_equal(result_frame, correct_frame)\n\n def test_read_gbq_raises_invalid_column_order(self, project_id):\n query = \"SELECT 'a' AS string_1, 'b' AS string_2, 'c' AS string_3\"\n col_order = [\"string_aaa\", \"string_1\", \"string_2\"]\n\n # Column string_aaa does not exist. Should raise InvalidColumnOrder\n with pytest.raises(gbq.InvalidColumnOrder):\n gbq.read_gbq(\n query,\n project_id=project_id,\n col_order=col_order,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n\n def test_column_order_plus_index(self, project_id):\n query = \"SELECT 'a' AS string_1, 'b' AS string_2, 'c' AS string_3\"\n col_order = [\"string_3\", \"string_2\"]\n result_frame = gbq.read_gbq(\n query,\n project_id=project_id,\n index_col=\"string_1\",\n col_order=col_order,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n correct_frame = DataFrame(\n {\"string_1\": [\"a\"], \"string_2\": [\"b\"], \"string_3\": [\"c\"]}\n )\n correct_frame.set_index(\"string_1\", inplace=True)\n correct_frame = correct_frame[col_order]\n tm.assert_frame_equal(result_frame, correct_frame)\n\n def test_read_gbq_raises_invalid_index_column(self, project_id):\n query = \"SELECT 'a' AS string_1, 'b' AS string_2, 'c' AS string_3\"\n col_order = [\"string_3\", \"string_2\"]\n\n # Column string_bbb does not exist. 
Should raise InvalidIndexColumn\n with pytest.raises(gbq.InvalidIndexColumn):\n gbq.read_gbq(\n query,\n project_id=project_id,\n index_col=\"string_bbb\",\n col_order=col_order,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n\n def test_malformed_query(self, project_id):\n with pytest.raises(gbq.GenericGBQException):\n gbq.read_gbq(\n \"SELCET * FORM [publicdata:samples.shakespeare]\",\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n\n def test_bad_project_id(self):\n with pytest.raises(gbq.GenericGBQException):\n gbq.read_gbq(\n \"SELCET * FROM [publicdata:samples.shakespeare]\",\n project_id=\"not-my-project\",\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n\n def test_bad_table_name(self, project_id):\n with pytest.raises(gbq.GenericGBQException):\n gbq.read_gbq(\n \"SELECT * FROM [publicdata:samples.nope]\",\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n\n def test_download_dataset_larger_than_200k_rows(self, project_id):\n test_size = 200005\n # Test for known BigQuery bug in datasets larger than 100k rows\n # http://stackoverflow.com/questions/19145587/bq-py-not-paging-results\n df = gbq.read_gbq(\n \"SELECT id FROM [publicdata:samples.wikipedia] \"\n \"GROUP EACH BY id ORDER BY id ASC LIMIT {0}\".format(test_size),\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n assert len(df.drop_duplicates()) == test_size\n\n def test_ddl(self, random_dataset, project_id):\n # Bug fix for https://github.com/pydata/pandas-gbq/issues/45\n df = gbq.read_gbq(\n \"CREATE OR REPLACE TABLE {}.test_ddl (x INT64)\".format(\n random_dataset.dataset_id\n )\n )\n assert len(df) == 0\n\n def test_ddl_w_max_results(self, random_dataset, project_id):\n df = gbq.read_gbq(\n \"CREATE OR REPLACE TABLE {}.test_ddl (x INT64)\".format(\n random_dataset.dataset_id\n ),\n max_results=0,\n )\n assert df is None\n\n def test_max_results(self, random_dataset, project_id):\n df = gbq.read_gbq(\n \"SELECT * FROM UNNEST(GENERATE_ARRAY(1, 100))\", max_results=10\n )\n assert len(df) == 10\n\n def test_zero_rows(self, project_id):\n # Bug fix for https://github.com/pandas-dev/pandas/issues/10273\n df = gbq.read_gbq(\n 'SELECT name, number, (mlc_class = \"HU\") is_hurricane, iso_time '\n \"FROM `bigquery-public-data.noaa_hurricanes.hurricanes` \"\n 'WHERE iso_time = TIMESTAMP(\"1900-01-01 00:00:00\") ',\n project_id=project_id,\n credentials=self.credentials,\n )\n empty_columns = {\n \"name\": pandas.Series([], dtype=object),\n \"number\": pandas.Series([], dtype=np.dtype(int)),\n \"is_hurricane\": pandas.Series([], dtype=np.dtype(bool)),\n \"iso_time\": pandas.Series([], dtype=\"datetime64[ns]\"),\n }\n expected_result = DataFrame(\n empty_columns,\n columns=[\"name\", \"number\", \"is_hurricane\", \"iso_time\"],\n )\n tm.assert_frame_equal(df, expected_result, check_index_type=False)\n\n def test_one_row_one_column(self, project_id):\n df = gbq.read_gbq(\n \"SELECT 3 as v\",\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"standard\",\n )\n expected_result = DataFrame(dict(v=[3]))\n tm.assert_frame_equal(df, expected_result)\n\n def test_legacy_sql(self, project_id):\n legacy_sql = \"SELECT id FROM [publicdata.samples.wikipedia] LIMIT 10\"\n\n # Test that a legacy sql statement fails when\n # setting dialect='standard'\n with pytest.raises(gbq.GenericGBQException):\n gbq.read_gbq(\n legacy_sql,\n project_id=project_id,\n dialect=\"standard\",\n 
credentials=self.credentials,\n )\n\n # Test that a legacy sql statement succeeds when\n # setting dialect='legacy'\n df = gbq.read_gbq(\n legacy_sql,\n project_id=project_id,\n dialect=\"legacy\",\n credentials=self.credentials,\n )\n assert len(df.drop_duplicates()) == 10\n\n def test_standard_sql(self, project_id):\n standard_sql = (\n \"SELECT DISTINCT id FROM \"\n \"`publicdata.samples.wikipedia` LIMIT 10\"\n )\n\n # Test that a standard sql statement fails when using\n # the legacy SQL dialect.\n with pytest.raises(gbq.GenericGBQException):\n gbq.read_gbq(\n standard_sql,\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n\n # Test that a standard sql statement succeeds when\n # setting dialect='standard'\n df = gbq.read_gbq(\n standard_sql,\n project_id=project_id,\n dialect=\"standard\",\n credentials=self.credentials,\n )\n assert len(df.drop_duplicates()) == 10\n\n def test_query_with_parameters(self, project_id):\n sql_statement = \"SELECT @param1 + @param2 AS valid_result\"\n config = {\n \"query\": {\n \"useLegacySql\": False,\n \"parameterMode\": \"named\",\n \"queryParameters\": [\n {\n \"name\": \"param1\",\n \"parameterType\": {\"type\": \"INTEGER\"},\n \"parameterValue\": {\"value\": 1},\n },\n {\n \"name\": \"param2\",\n \"parameterType\": {\"type\": \"INTEGER\"},\n \"parameterValue\": {\"value\": 2},\n },\n ],\n }\n }\n # Test that a query that relies on parameters fails\n # when parameters are not supplied via configuration\n with pytest.raises(ValueError):\n gbq.read_gbq(\n sql_statement,\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n\n # Test that the query is successful because we have supplied\n # the correct query parameters via the 'config' option\n df = gbq.read_gbq(\n sql_statement,\n project_id=project_id,\n credentials=self.credentials,\n configuration=config,\n dialect=\"legacy\",\n )\n tm.assert_frame_equal(df, DataFrame({\"valid_result\": [3]}))\n\n def test_query_inside_configuration(self, project_id):\n query_no_use = 'SELECT \"PI_WRONG\" AS valid_string'\n query = 'SELECT \"PI\" AS valid_string'\n config = {\"query\": {\"query\": query, \"useQueryCache\": False}}\n # Test that it can't pass query both\n # inside config and as parameter\n with pytest.raises(ValueError):\n gbq.read_gbq(\n query_no_use,\n project_id=project_id,\n credentials=self.credentials,\n configuration=config,\n dialect=\"legacy\",\n )\n\n df = gbq.read_gbq(\n None,\n project_id=project_id,\n credentials=self.credentials,\n configuration=config,\n dialect=\"legacy\",\n )\n tm.assert_frame_equal(df, DataFrame({\"valid_string\": [\"PI\"]}))\n\n def test_configuration_without_query(self, project_id):\n sql_statement = \"SELECT 1\"\n config = {\n \"copy\": {\n \"sourceTable\": {\n \"projectId\": project_id,\n \"datasetId\": \"publicdata:samples\",\n \"tableId\": \"wikipedia\",\n },\n \"destinationTable\": {\n \"projectId\": project_id,\n \"datasetId\": \"publicdata:samples\",\n \"tableId\": \"wikipedia_copied\",\n },\n }\n }\n # Test that only 'query' configurations are supported\n # nor 'copy','load','extract'\n with pytest.raises(ValueError):\n gbq.read_gbq(\n sql_statement,\n project_id=project_id,\n credentials=self.credentials,\n configuration=config,\n dialect=\"legacy\",\n )\n\n def test_configuration_raises_value_error_with_multiple_config(\n self, project_id\n ):\n sql_statement = \"SELECT 1\"\n config = {\n \"query\": {\"query\": sql_statement, \"useQueryCache\": False},\n \"load\": {\"query\": 
sql_statement, \"useQueryCache\": False},\n }\n # Test that only ValueError is raised with multiple configurations\n with pytest.raises(ValueError):\n gbq.read_gbq(\n sql_statement,\n project_id=project_id,\n credentials=self.credentials,\n configuration=config,\n dialect=\"legacy\",\n )\n\n def test_timeout_configuration(self, project_id):\n sql_statement = \"\"\"\n SELECT\n SUM(bottles_sold) total_bottles,\n UPPER(category_name) category_name,\n magnitude,\n liquor.zip_code zip_code\n FROM `bigquery-public-data.iowa_liquor_sales.sales` liquor\n JOIN `bigquery-public-data.geo_us_boundaries.zip_codes` zip_codes\n ON liquor.zip_code = zip_codes.zip_code\n JOIN `bigquery-public-data.noaa_historic_severe_storms.tornado_paths` tornados\n ON liquor.date = tornados.storm_date\n WHERE ST_INTERSECTS(tornado_path_geom, zip_code_geom)\n GROUP BY category_name, magnitude, zip_code\n ORDER BY magnitude ASC, total_bottles DESC\n \"\"\"\n configs = [\n {\"query\": {\"useQueryCache\": False, \"timeoutMs\": 1}},\n {\"query\": {\"useQueryCache\": False}, \"jobTimeoutMs\": 1},\n ]\n for config in configs:\n with pytest.raises(gbq.QueryTimeout):\n gbq.read_gbq(\n sql_statement,\n project_id=project_id,\n credentials=self.credentials,\n configuration=config,\n )\n\n def test_query_response_bytes(self):\n assert self.gbq_connector.sizeof_fmt(999) == \"999.0 B\"\n assert self.gbq_connector.sizeof_fmt(1024) == \"1.0 KB\"\n assert self.gbq_connector.sizeof_fmt(1099) == \"1.1 KB\"\n assert self.gbq_connector.sizeof_fmt(1044480) == \"1020.0 KB\"\n assert self.gbq_connector.sizeof_fmt(1048576) == \"1.0 MB\"\n assert self.gbq_connector.sizeof_fmt(1048576000) == \"1000.0 MB\"\n assert self.gbq_connector.sizeof_fmt(1073741824) == \"1.0 GB\"\n assert self.gbq_connector.sizeof_fmt(1.099512e12) == \"1.0 TB\"\n assert self.gbq_connector.sizeof_fmt(1.125900e15) == \"1.0 PB\"\n assert self.gbq_connector.sizeof_fmt(1.152922e18) == \"1.0 EB\"\n assert self.gbq_connector.sizeof_fmt(1.180592e21) == \"1.0 ZB\"\n assert self.gbq_connector.sizeof_fmt(1.208926e24) == \"1.0 YB\"\n assert self.gbq_connector.sizeof_fmt(1.208926e28) == \"10000.0 YB\"\n\n def test_struct(self, project_id):\n query = \"\"\"SELECT 1 int_field,\n STRUCT(\"a\" as letter, 1 as num) struct_field\"\"\"\n df = gbq.read_gbq(\n query,\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"standard\",\n )\n expected = DataFrame(\n [[1, {\"letter\": \"a\", \"num\": 1}]],\n columns=[\"int_field\", \"struct_field\"],\n )\n tm.assert_frame_equal(df, expected)\n\n def test_array(self, project_id):\n query = \"\"\"select [\"a\",\"x\",\"b\",\"y\",\"c\",\"z\"] as letters\"\"\"\n df = gbq.read_gbq(\n query,\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"standard\",\n )\n tm.assert_frame_equal(\n df,\n DataFrame([[[\"a\", \"x\", \"b\", \"y\", \"c\", \"z\"]]], columns=[\"letters\"]),\n )\n\n def test_array_length_zero(self, project_id):\n query = \"\"\"WITH t as (\n SELECT \"a\" letter, [\"\"] as array_field\n UNION ALL\n SELECT \"b\" letter, [] as array_field)\n\n select letter, array_field, array_length(array_field) len\n from t\n order by letter ASC\"\"\"\n df = gbq.read_gbq(\n query,\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"standard\",\n )\n expected = DataFrame(\n [[\"a\", [\"\"], 1], [\"b\", [], 0]],\n columns=[\"letter\", \"array_field\", \"len\"],\n )\n tm.assert_frame_equal(df, expected)\n\n def test_array_agg(self, project_id):\n query = \"\"\"WITH t as (\n SELECT \"a\" letter, 1 num\n UNION ALL\n 
SELECT \"b\" letter, 2 num\n UNION ALL\n SELECT \"a\" letter, 3 num)\n\n select letter, array_agg(num order by num ASC) numbers\n from t\n group by letter\n order by letter ASC\"\"\"\n df = gbq.read_gbq(\n query,\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"standard\",\n )\n tm.assert_frame_equal(\n df,\n DataFrame(\n [[\"a\", [1, 3]], [\"b\", [2]]], columns=[\"letter\", \"numbers\"]\n ),\n )\n\n def test_array_of_floats(self, project_id):\n query = \"\"\"select [1.1, 2.2, 3.3] as a, 4 as b\"\"\"\n df = gbq.read_gbq(\n query,\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"standard\",\n )\n tm.assert_frame_equal(\n df, DataFrame([[[1.1, 2.2, 3.3], 4]], columns=[\"a\", \"b\"])\n )\n\n def test_tokyo(self, tokyo_dataset, tokyo_table, project_id):\n df = gbq.read_gbq(\n \"SELECT MAX(year) AS max_year FROM {}.{}\".format(\n tokyo_dataset, tokyo_table\n ),\n dialect=\"standard\",\n location=\"asia-northeast1\",\n project_id=project_id,\n credentials=self.credentials,\n )\n assert df[\"max_year\"][0] >= 2000\n\n\nclass TestToGBQIntegration(object):\n @pytest.fixture(autouse=True, scope=\"function\")\n def setup(self, project, credentials, random_dataset_id):\n # - PER-TEST FIXTURES -\n # put here any instruction you want to be run *BEFORE* *EVERY* test is\n # executed.\n self.credentials = credentials\n self.gbq_connector = gbq.GbqConnector(project, credentials=credentials)\n self.bqclient = self.gbq_connector.client\n self.table = gbq._Table(\n project, random_dataset_id, credentials=credentials\n )\n self.destination_table = \"{}.{}\".format(random_dataset_id, TABLE_ID)\n\n def test_upload_data(self, project_id):\n test_id = \"1\"\n test_size = 20001\n df = make_mixed_dataframe_v2(test_size)\n\n gbq.to_gbq(\n df,\n self.destination_table + test_id,\n project_id,\n chunksize=10000,\n credentials=self.credentials,\n )\n\n result = gbq.read_gbq(\n \"SELECT COUNT(*) AS num_rows FROM {0}\".format(\n self.destination_table + test_id\n ),\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n assert result[\"num_rows\"][0] == test_size\n\n def test_upload_empty_data(self, project_id):\n test_id = \"data_with_0_rows\"\n df = DataFrame()\n\n gbq.to_gbq(\n df,\n self.destination_table + test_id,\n project_id,\n credentials=self.credentials,\n )\n\n table = self.bqclient.get_table(self.destination_table + test_id)\n assert table.num_rows == 0\n assert len(table.schema) == 0\n\n def test_upload_empty_data_with_schema(self, project_id):\n test_id = \"data_with_0_rows\"\n df = DataFrame(\n {\n \"a\": pandas.Series(dtype=\"int64\"),\n \"b\": pandas.Series(dtype=\"object\"),\n }\n )\n\n gbq.to_gbq(\n df,\n self.destination_table + test_id,\n project_id,\n credentials=self.credentials,\n )\n\n table = self.bqclient.get_table(self.destination_table + test_id)\n assert table.num_rows == 0\n schema = table.schema\n assert schema[0].field_type == \"INTEGER\"\n assert schema[1].field_type == \"STRING\"\n\n def test_upload_data_if_table_exists_fail(self, project_id):\n test_id = \"2\"\n test_size = 10\n df = make_mixed_dataframe_v2(test_size)\n self.table.create(TABLE_ID + test_id, gbq._generate_bq_schema(df))\n\n # Test the default value of if_exists is 'fail'\n with pytest.raises(gbq.TableCreationError):\n gbq.to_gbq(\n df,\n self.destination_table + test_id,\n project_id,\n credentials=self.credentials,\n )\n\n # Test the if_exists parameter with value 'fail'\n with pytest.raises(gbq.TableCreationError):\n gbq.to_gbq(\n df,\n 
self.destination_table + test_id,\n project_id,\n if_exists=\"fail\",\n credentials=self.credentials,\n )\n\n def test_upload_data_if_table_exists_append(self, project_id):\n test_id = \"3\"\n test_size = 10\n df = make_mixed_dataframe_v2(test_size)\n df_different_schema = tm.makeMixedDataFrame()\n\n # Initialize table with sample data\n gbq.to_gbq(\n df,\n self.destination_table + test_id,\n project_id,\n chunksize=10000,\n credentials=self.credentials,\n )\n\n # Test the if_exists parameter with value 'append'\n gbq.to_gbq(\n df,\n self.destination_table + test_id,\n project_id,\n if_exists=\"append\",\n credentials=self.credentials,\n )\n\n result = gbq.read_gbq(\n \"SELECT COUNT(*) AS num_rows FROM {0}\".format(\n self.destination_table + test_id\n ),\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n assert result[\"num_rows\"][0] == test_size * 2\n\n # Try inserting with a different schema, confirm failure\n with pytest.raises(gbq.InvalidSchema):\n gbq.to_gbq(\n df_different_schema,\n self.destination_table + test_id,\n project_id,\n if_exists=\"append\",\n credentials=self.credentials,\n )\n\n def test_upload_subset_columns_if_table_exists_append(self, project_id):\n # Issue 24: Upload is succesful if dataframe has columns\n # which are a subset of the current schema\n test_id = \"16\"\n test_size = 10\n df = make_mixed_dataframe_v2(test_size)\n df_subset_cols = df.iloc[:, :2]\n\n # Initialize table with sample data\n gbq.to_gbq(\n df,\n self.destination_table + test_id,\n project_id,\n chunksize=10000,\n credentials=self.credentials,\n )\n\n # Test the if_exists parameter with value 'append'\n gbq.to_gbq(\n df_subset_cols,\n self.destination_table + test_id,\n project_id,\n if_exists=\"append\",\n credentials=self.credentials,\n )\n\n result = gbq.read_gbq(\n \"SELECT COUNT(*) AS num_rows FROM {0}\".format(\n self.destination_table + test_id\n ),\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n assert result[\"num_rows\"][0] == test_size * 2\n\n def test_upload_data_if_table_exists_replace(self, project_id):\n test_id = \"4\"\n test_size = 10\n df = make_mixed_dataframe_v2(test_size)\n df_different_schema = tm.makeMixedDataFrame()\n\n # Initialize table with sample data\n gbq.to_gbq(\n df,\n self.destination_table + test_id,\n project_id,\n chunksize=10000,\n credentials=self.credentials,\n )\n\n # Test the if_exists parameter with the value 'replace'.\n gbq.to_gbq(\n df_different_schema,\n self.destination_table + test_id,\n project_id,\n if_exists=\"replace\",\n credentials=self.credentials,\n )\n\n result = gbq.read_gbq(\n \"SELECT COUNT(*) AS num_rows FROM {0}\".format(\n self.destination_table + test_id\n ),\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n assert result[\"num_rows\"][0] == 5\n\n def test_upload_data_if_table_exists_raises_value_error(self, project_id):\n test_id = \"4\"\n test_size = 10\n df = make_mixed_dataframe_v2(test_size)\n\n # Test invalid value for if_exists parameter raises value error\n with pytest.raises(ValueError):\n gbq.to_gbq(\n df,\n self.destination_table + test_id,\n project_id,\n if_exists=\"xxxxx\",\n credentials=self.credentials,\n )\n\n def test_google_upload_errors_should_raise_exception(self, project_id):\n raise pytest.skip(\"buggy test\")\n\n test_id = \"5\"\n test_timestamp = datetime.datetime.now(pytz.timezone(\"US/Arizona\"))\n bad_df = DataFrame(\n {\n \"bools\": [False, False],\n \"flts\": [0.0, 1.0],\n \"ints\": [0, 
\"1\"],\n \"strs\": [\"a\", 1],\n \"times\": [test_timestamp, test_timestamp],\n },\n index=range(2),\n )\n\n with pytest.raises(gbq.StreamingInsertError):\n gbq.to_gbq(\n bad_df,\n self.destination_table + test_id,\n project_id,\n credentials=self.credentials,\n )\n\n def test_upload_chinese_unicode_data(self, project_id):\n test_id = \"2\"\n test_size = 6\n df = DataFrame(\n np.random.randn(6, 4), index=range(6), columns=list(\"ABCD\")\n )\n df[\"s\"] = u\"信用卡\"\n\n gbq.to_gbq(\n df,\n self.destination_table + test_id,\n project_id,\n credentials=self.credentials,\n chunksize=10000,\n )\n\n result_df = gbq.read_gbq(\n \"SELECT * FROM {0}\".format(self.destination_table + test_id),\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n\n assert len(result_df) == test_size\n\n if sys.version_info.major < 3:\n pytest.skip(msg=\"Unicode comparison in Py2 not working\")\n\n result = result_df[\"s\"].sort_values()\n expected = df[\"s\"].sort_values()\n\n tm.assert_numpy_array_equal(expected.values, result.values)\n\n def test_upload_other_unicode_data(self, project_id):\n test_id = \"3\"\n test_size = 3\n df = DataFrame(\n {\n \"s\": [\"Skywalker™\", \"lego\", \"hülle\"],\n \"i\": [200, 300, 400],\n \"d\": [\n \"2017-12-13 17:40:39\",\n \"2017-12-13 17:40:39\",\n \"2017-12-13 17:40:39\",\n ],\n }\n )\n\n gbq.to_gbq(\n df,\n self.destination_table + test_id,\n project_id=project_id,\n credentials=self.credentials,\n chunksize=10000,\n )\n\n result_df = gbq.read_gbq(\n \"SELECT * FROM {0}\".format(self.destination_table + test_id),\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n\n assert len(result_df) == test_size\n\n if sys.version_info.major < 3:\n pytest.skip(msg=\"Unicode comparison in Py2 not working\")\n\n result = result_df[\"s\"].sort_values()\n expected = df[\"s\"].sort_values()\n\n tm.assert_numpy_array_equal(expected.values, result.values)\n\n def test_upload_mixed_float_and_int(self, project_id):\n \"\"\"Test that we can upload a dataframe containing an int64 and float64 column.\n See: https://github.com/pydata/pandas-gbq/issues/116\n \"\"\"\n test_id = \"mixed_float_and_int\"\n test_size = 2\n df = DataFrame(\n [[1, 1.1], [2, 2.2]],\n index=[\"row 1\", \"row 2\"],\n columns=[\"intColumn\", \"floatColumn\"],\n )\n\n gbq.to_gbq(\n df,\n self.destination_table + test_id,\n project_id=project_id,\n credentials=self.credentials,\n )\n\n result_df = gbq.read_gbq(\n \"SELECT * FROM {0}\".format(self.destination_table + test_id),\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n\n assert len(result_df) == test_size\n\n def test_upload_data_with_newlines(self, project_id):\n test_id = \"data_with_newlines\"\n test_size = 2\n df = DataFrame({\"s\": [\"abcd\", \"ef\\ngh\"]})\n\n gbq.to_gbq(\n df,\n self.destination_table + test_id,\n project_id=project_id,\n credentials=self.credentials,\n )\n\n result_df = gbq.read_gbq(\n \"SELECT * FROM {0}\".format(self.destination_table + test_id),\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n\n assert len(result_df) == test_size\n\n if sys.version_info.major < 3:\n pytest.skip(msg=\"Unicode comparison in Py2 not working\")\n\n result = result_df[\"s\"].sort_values()\n expected = df[\"s\"].sort_values()\n\n tm.assert_numpy_array_equal(expected.values, result.values)\n\n def test_upload_data_flexible_column_order(self, project_id):\n test_id = \"13\"\n test_size = 10\n df = make_mixed_dataframe_v2(test_size)\n\n # 
Initialize table with sample data\n gbq.to_gbq(\n df,\n self.destination_table + test_id,\n project_id,\n chunksize=10000,\n credentials=self.credentials,\n )\n\n df_columns_reversed = df[df.columns[::-1]]\n\n gbq.to_gbq(\n df_columns_reversed,\n self.destination_table + test_id,\n project_id,\n if_exists=\"append\",\n credentials=self.credentials,\n )\n\n def test_upload_data_with_valid_user_schema(self, project_id):\n # Issue #46; tests test scenarios with user-provided\n # schemas\n df = tm.makeMixedDataFrame()\n test_id = \"18\"\n test_schema = [\n {\"name\": \"A\", \"type\": \"FLOAT\"},\n {\"name\": \"B\", \"type\": \"FLOAT\"},\n {\"name\": \"C\", \"type\": \"STRING\"},\n {\"name\": \"D\", \"type\": \"TIMESTAMP\"},\n ]\n destination_table = self.destination_table + test_id\n gbq.to_gbq(\n df,\n destination_table,\n project_id,\n credentials=self.credentials,\n table_schema=test_schema,\n )\n dataset, table = destination_table.split(\".\")\n assert verify_schema(\n self.gbq_connector, dataset, table, dict(fields=test_schema)\n )\n\n def test_upload_data_with_invalid_user_schema_raises_error(\n self, project_id\n ):\n df = tm.makeMixedDataFrame()\n test_id = \"19\"\n test_schema = [\n {\"name\": \"A\", \"type\": \"FLOAT\"},\n {\"name\": \"B\", \"type\": \"FLOAT\"},\n {\"name\": \"C\", \"type\": \"FLOAT\"},\n {\"name\": \"D\", \"type\": \"FLOAT\"},\n ]\n destination_table = self.destination_table + test_id\n with pytest.raises(gbq.GenericGBQException):\n gbq.to_gbq(\n df,\n destination_table,\n project_id,\n credentials=self.credentials,\n table_schema=test_schema,\n )\n\n def test_upload_data_with_missing_schema_fields_raises_error(\n self, project_id\n ):\n df = tm.makeMixedDataFrame()\n test_id = \"20\"\n test_schema = [\n {\"name\": \"A\", \"type\": \"FLOAT\"},\n {\"name\": \"B\", \"type\": \"FLOAT\"},\n {\"name\": \"C\", \"type\": \"FLOAT\"},\n ]\n destination_table = self.destination_table + test_id\n with pytest.raises(gbq.GenericGBQException):\n gbq.to_gbq(\n df,\n destination_table,\n project_id,\n credentials=self.credentials,\n table_schema=test_schema,\n )\n\n def test_upload_data_with_timestamp(self, project_id):\n test_id = \"21\"\n test_size = 6\n df = DataFrame(\n np.random.randn(test_size, 4),\n index=range(test_size),\n columns=list(\"ABCD\"),\n )\n df[\"times\"] = pandas.Series(\n [\n \"2018-03-13T05:40:45.348318\",\n \"2018-04-13T05:40:45.348318\",\n \"2018-05-13T05:40:45.348318\",\n \"2018-06-13T05:40:45.348318\",\n \"2018-07-13T05:40:45.348318\",\n \"2018-08-13T05:40:45.348318\",\n ],\n dtype=\"datetime64[ns]\",\n ).dt.tz_localize(\"UTC\")\n\n gbq.to_gbq(\n df,\n self.destination_table + test_id,\n project_id=project_id,\n credentials=self.credentials,\n )\n\n result_df = gbq.read_gbq(\n \"SELECT * FROM {0}\".format(self.destination_table + test_id),\n project_id=project_id,\n credentials=self.credentials,\n dialect=\"legacy\",\n )\n\n assert len(result_df) == test_size\n\n expected = df[\"times\"].sort_values()\n result = result_df[\"times\"].sort_values()\n tm.assert_series_equal(expected, result)\n\n def test_upload_data_with_different_df_and_user_schema(self, project_id):\n df = tm.makeMixedDataFrame()\n df[\"A\"] = df[\"A\"].astype(str)\n df[\"B\"] = df[\"B\"].astype(str)\n test_id = \"22\"\n test_schema = [\n {\"name\": \"A\", \"type\": \"FLOAT\"},\n {\"name\": \"B\", \"type\": \"FLOAT\"},\n {\"name\": \"C\", \"type\": \"STRING\"},\n {\"name\": \"D\", \"type\": \"TIMESTAMP\"},\n ]\n destination_table = self.destination_table + test_id\n gbq.to_gbq(\n df,\n 
destination_table,\n project_id,\n credentials=self.credentials,\n table_schema=test_schema,\n )\n dataset, table = destination_table.split(\".\")\n assert verify_schema(\n self.gbq_connector, dataset, table, dict(fields=test_schema)\n )\n\n def test_upload_data_tokyo(\n self, project_id, tokyo_dataset, bigquery_client\n ):\n from google.cloud import bigquery\n\n test_size = 10\n df = make_mixed_dataframe_v2(test_size)\n tokyo_destination = \"{}.to_gbq_test\".format(tokyo_dataset)\n\n # Initialize table with sample data\n gbq.to_gbq(\n df,\n tokyo_destination,\n project_id,\n credentials=self.credentials,\n location=\"asia-northeast1\",\n )\n\n table = bigquery_client.get_table(\n bigquery.TableReference(\n bigquery.DatasetReference(project_id, tokyo_dataset),\n \"to_gbq_test\",\n )\n )\n assert table.num_rows > 0\n\n def test_upload_data_tokyo_non_existing_dataset(\n self, project_id, random_dataset_id, bigquery_client\n ):\n from google.cloud import bigquery\n\n test_size = 10\n df = make_mixed_dataframe_v2(test_size)\n non_existing_tokyo_dataset = random_dataset_id\n non_existing_tokyo_destination = \"{}.to_gbq_test\".format(\n non_existing_tokyo_dataset\n )\n\n # Initialize table with sample data\n gbq.to_gbq(\n df,\n non_existing_tokyo_destination,\n project_id,\n credentials=self.credentials,\n location=\"asia-northeast1\",\n )\n\n table = bigquery_client.get_table(\n bigquery.TableReference(\n bigquery.DatasetReference(\n project_id, non_existing_tokyo_dataset\n ),\n \"to_gbq_test\",\n )\n )\n assert table.num_rows > 0\n\n\n# _Dataset tests\n\n\ndef test_create_dataset(\n bigquery_client, gbq_dataset, random_dataset_id, project_id\n):\n from google.cloud import bigquery\n\n gbq_dataset.create(random_dataset_id)\n dataset_reference = bigquery.DatasetReference(\n project_id, random_dataset_id\n )\n assert bigquery_client.get_dataset(dataset_reference) is not None\n\n\ndef test_create_dataset_already_exists(gbq_dataset, random_dataset_id):\n gbq_dataset.create(random_dataset_id)\n with pytest.raises(gbq.DatasetCreationError):\n gbq_dataset.create(random_dataset_id)\n\n\ndef test_dataset_exists(gbq_dataset, random_dataset_id):\n gbq_dataset.create(random_dataset_id)\n assert gbq_dataset.exists(random_dataset_id)\n\n\ndef test_dataset_does_not_exist(gbq_dataset, random_dataset_id):\n assert not gbq_dataset.exists(random_dataset_id)\n\n\n# _Table tests\n\n\ndef test_create_table(gbq_table):\n schema = gbq._generate_bq_schema(tm.makeMixedDataFrame())\n gbq_table.create(\"test_create_table\", schema)\n assert gbq_table.exists(\"test_create_table\")\n\n\ndef test_create_table_already_exists(gbq_table):\n schema = gbq._generate_bq_schema(tm.makeMixedDataFrame())\n gbq_table.create(\"test_create_table_exists\", schema)\n with pytest.raises(gbq.TableCreationError):\n gbq_table.create(\"test_create_table_exists\", schema)\n\n\ndef test_table_does_not_exist(gbq_table):\n assert not gbq_table.exists(\"test_table_does_not_exist\")\n\n\ndef test_delete_table(gbq_table):\n test_schema = {\n \"fields\": [\n {\"name\": \"A\", \"type\": \"FLOAT\"},\n {\"name\": \"B\", \"type\": \"FLOAT\"},\n {\"name\": \"C\", \"type\": \"STRING\"},\n {\"name\": \"D\", \"type\": \"TIMESTAMP\"},\n ]\n }\n gbq_table.create(\"test_delete_table\", test_schema)\n gbq_table.delete(\"test_delete_table\")\n assert not gbq_table.exists(\"test_delete_table\")\n\n\ndef test_delete_table_not_found(gbq_table):\n with pytest.raises(gbq.NotFoundException):\n gbq_table.delete(\"test_delete_table_not_found\")\n\n\ndef 
test_create_table_data_dataset_does_not_exist(\n project, credentials, gbq_dataset, random_dataset_id\n):\n table_id = \"test_create_table_data_dataset_does_not_exist\"\n table_with_new_dataset = gbq._Table(\n project, random_dataset_id, credentials=credentials\n )\n df = make_mixed_dataframe_v2(10)\n table_with_new_dataset.create(table_id, gbq._generate_bq_schema(df))\n assert gbq_dataset.exists(random_dataset_id)\n assert table_with_new_dataset.exists(table_id)\n\n\ndef test_verify_schema_allows_flexible_column_order(gbq_table, gbq_connector):\n table_id = \"test_verify_schema_allows_flexible_column_order\"\n test_schema_1 = {\n \"fields\": [\n {\"name\": \"A\", \"type\": \"FLOAT\"},\n {\"name\": \"B\", \"type\": \"FLOAT\"},\n {\"name\": \"C\", \"type\": \"STRING\"},\n {\"name\": \"D\", \"type\": \"TIMESTAMP\"},\n ]\n }\n test_schema_2 = {\n \"fields\": [\n {\"name\": \"A\", \"type\": \"FLOAT\"},\n {\"name\": \"C\", \"type\": \"STRING\"},\n {\"name\": \"B\", \"type\": \"FLOAT\"},\n {\"name\": \"D\", \"type\": \"TIMESTAMP\"},\n ]\n }\n\n gbq_table.create(table_id, test_schema_1)\n assert verify_schema(\n gbq_connector, gbq_table.dataset_id, table_id, test_schema_2\n )\n\n\ndef test_verify_schema_fails_different_data_type(gbq_table, gbq_connector):\n table_id = \"test_verify_schema_fails_different_data_type\"\n test_schema_1 = {\n \"fields\": [\n {\"name\": \"A\", \"type\": \"FLOAT\"},\n {\"name\": \"B\", \"type\": \"FLOAT\"},\n {\"name\": \"C\", \"type\": \"STRING\"},\n {\"name\": \"D\", \"type\": \"TIMESTAMP\"},\n ]\n }\n test_schema_2 = {\n \"fields\": [\n {\"name\": \"A\", \"type\": \"FLOAT\"},\n {\"name\": \"B\", \"type\": \"STRING\"},\n {\"name\": \"C\", \"type\": \"STRING\"},\n {\"name\": \"D\", \"type\": \"TIMESTAMP\"},\n ]\n }\n\n gbq_table.create(table_id, test_schema_1)\n assert not verify_schema(\n gbq_connector, gbq_table.dataset_id, table_id, test_schema_2\n )\n\n\ndef test_verify_schema_fails_different_structure(gbq_table, gbq_connector):\n table_id = \"test_verify_schema_fails_different_structure\"\n test_schema_1 = {\n \"fields\": [\n {\"name\": \"A\", \"type\": \"FLOAT\"},\n {\"name\": \"B\", \"type\": \"FLOAT\"},\n {\"name\": \"C\", \"type\": \"STRING\"},\n {\"name\": \"D\", \"type\": \"TIMESTAMP\"},\n ]\n }\n test_schema_2 = {\n \"fields\": [\n {\"name\": \"A\", \"type\": \"FLOAT\"},\n {\"name\": \"B2\", \"type\": \"FLOAT\"},\n {\"name\": \"C\", \"type\": \"STRING\"},\n {\"name\": \"D\", \"type\": \"TIMESTAMP\"},\n ]\n }\n\n gbq_table.create(table_id, test_schema_1)\n assert not verify_schema(\n gbq_connector, gbq_table.dataset_id, table_id, test_schema_2\n )\n\n\ndef test_verify_schema_ignores_field_mode(gbq_table, gbq_connector):\n table_id = \"test_verify_schema_ignores_field_mode\"\n test_schema_1 = {\n \"fields\": [\n {\"name\": \"A\", \"type\": \"FLOAT\", \"mode\": \"NULLABLE\"},\n {\"name\": \"B\", \"type\": \"FLOAT\", \"mode\": \"NULLABLE\"},\n {\"name\": \"C\", \"type\": \"STRING\", \"mode\": \"NULLABLE\"},\n {\"name\": \"D\", \"type\": \"TIMESTAMP\", \"mode\": \"REQUIRED\"},\n ]\n }\n test_schema_2 = {\n \"fields\": [\n {\"name\": \"A\", \"type\": \"FLOAT\"},\n {\"name\": \"B\", \"type\": \"FLOAT\"},\n {\"name\": \"C\", \"type\": \"STRING\"},\n {\"name\": \"D\", \"type\": \"TIMESTAMP\"},\n ]\n }\n\n gbq_table.create(table_id, test_schema_1)\n assert verify_schema(\n gbq_connector, gbq_table.dataset_id, table_id, test_schema_2\n )\n\n\ndef test_retrieve_schema(gbq_table, gbq_connector):\n # Issue #24 schema function returns the schema in biquery\n table_id = 
\"test_retrieve_schema\"\n test_schema = {\n \"fields\": [\n {\n \"name\": \"A\",\n \"type\": \"FLOAT\",\n \"mode\": \"NULLABLE\",\n \"description\": None,\n },\n {\n \"name\": \"B\",\n \"type\": \"FLOAT\",\n \"mode\": \"NULLABLE\",\n \"description\": None,\n },\n {\n \"name\": \"C\",\n \"type\": \"STRING\",\n \"mode\": \"NULLABLE\",\n \"description\": None,\n },\n {\n \"name\": \"D\",\n \"type\": \"TIMESTAMP\",\n \"mode\": \"NULLABLE\",\n \"description\": None,\n },\n ]\n }\n\n gbq_table.create(table_id, test_schema)\n expected = [\n {\"name\": \"A\", \"type\": \"FLOAT\"},\n {\"name\": \"B\", \"type\": \"FLOAT\"},\n {\"name\": \"C\", \"type\": \"STRING\"},\n {\"name\": \"D\", \"type\": \"TIMESTAMP\"},\n ]\n assert verify_schema(\n gbq_connector, gbq_table.dataset_id, table_id, {\"fields\": expected}\n )\n\n\ndef test_to_gbq_does_not_override_mode(gbq_table, gbq_connector):\n # See: https://github.com/pydata/pandas-gbq/issues/315\n table_id = \"test_to_gbq_does_not_override_mode\"\n table_schema = {\n \"fields\": [\n {\n \"mode\": \"REQUIRED\",\n \"name\": \"A\",\n \"type\": \"FLOAT\",\n \"description\": \"A\",\n },\n {\n \"mode\": \"NULLABLE\",\n \"name\": \"B\",\n \"type\": \"FLOAT\",\n \"description\": \"B\",\n },\n {\n \"mode\": \"NULLABLE\",\n \"name\": \"C\",\n \"type\": \"STRING\",\n \"description\": \"C\",\n },\n ]\n }\n\n gbq_table.create(table_id, table_schema)\n gbq.to_gbq(\n pandas.DataFrame({\"A\": [1.0], \"B\": [2.0], \"C\": [\"a\"]}),\n \"{0}.{1}\".format(gbq_table.dataset_id, table_id),\n project_id=gbq_connector.project_id,\n if_exists=\"append\",\n )\n\n assert verify_schema(\n gbq_connector, gbq_table.dataset_id, table_id, table_schema\n )\n" ]
[ [ "pandas.util.testing.assert_numpy_array_equal", "pandas.util.testing.assert_frame_equal", "pandas.DataFrame", "numpy.random.randn", "pandas.util.testing.makeMixedDataFrame", "pandas.util.testing.assert_series_equal", "numpy.random.randint", "pandas.Series", "numpy.dtype", "numpy.datetime64" ] ]
ariashahverdi/DC2019
[ "6078c2a337b5c30d67cf63cf16a5434b51ee8d37" ]
[ "Aria/main.py" ]
[ "import numpy as np\nimport pandas as pd\n\ndf = pd.read_csv('../sbdc_data.csv')\n\n\nprint(df[\"Total Counseling Time, hrs\"].mean())\n\n\n# Counting Number of NonZero in each column\nprint(df.astype(bool).sum(axis=0))\n\n# Counting Whether They started business or Not\ndf_start_bs = df[df[\"Impact: Started Business\"]=='Yes'] #1667\n#print(df_start_bs)\ndf_start_bs_rev_inc = df_start_bs[df_start_bs[\"Impact: Revenue Increase\"]>0]\n#print(df_start_bs_rev_inc) #446\n\n\n\n\n" ]
[ [ "pandas.read_csv" ] ]
surf-sci-bc/uspy
[ "76af9bee19b3fdf3af431e77756e196284cdd7a1" ]
[ "uspy/xps/models.py" ]
[ "\"\"\"Models for the peaks.\"\"\"\n# pylint: disable=invalid-name\n# pylint: disable=abstract-method\n# pylint: disable=too-many-arguments\n\nimport numpy as np\nimport scipy.special as ss\nfrom lmfit.model import Model\nfrom lmfit.models import guess_from_peak, update_param_vals\n\n\ns2 = np.sqrt(2)\ns2pi = np.sqrt(2 * np.pi)\nln2 = 1 * np.log(2)\nsln2 = np.sqrt(ln2)\ns2ln2 = np.sqrt(2 * ln2)\nsqrtln2 = np.sqrt(ln2)\ntiny = 1e-5\n\n# ideas for further shapes: sech2-function\n\n\ndef pure_ds(x, amplitude=1.0, center=0.0, fwhm=1.0, asym=0.5):\n \"\"\"Roughly taken from\n https://rdrr.io/github/GSperanza/RxpsG/src/R/XPSFitAlgorithms.r\n \"\"\"\n sigma = max(fwhm / 2, tiny)\n arg = center - x\n am1 = 1 - asym\n y = (\n amplitude / np.pi * ss.gamma(am1)\n / (arg**2 + sigma**2) ** (am1 / 2)\n * np.cos(np.pi * asym / 2 + am1 * np.arctan(arg / sigma))\n )\n return y\n\ndef gaussian(x, amplitude=1.0, center=0.0, fwhm=1.0):\n \"\"\"Standard gaussian with amplitude = area.\"\"\"\n sigma = max(tiny, fwhm / (2 * s2ln2))\n arg = center - x\n y = amplitude / (s2pi * sigma) * np.exp(-arg**2 / (2 * sigma**2))\n return y\n\ndef lorentzian(x, amplitude=1.0, center=0.0, fwhm=1.0):\n \"\"\"Standard lorentzian with amplitude = area.\"\"\"\n gamma = max(tiny, fwhm / 2)\n arg = center - x\n y = amplitude / (gamma * np.pi) * gamma**2 / (arg**2 + gamma**2)\n return y\n\ndef voigt(x, amplitude=1.0, center=0.0, fwhm=1.0, fwhm_l=None):\n \"\"\"Voigt function using Faddeeva function wofz.\n https://en.wikipedia.org/wiki/Voigt_profile\n Taken from lmfit module, modified to take fwhms:\n Gaussian fwhm and Lorentzian fwhm_l\n \"\"\"\n if fwhm_l is None:\n fwhm_l = fwhm\n sigma = max(tiny, fwhm / (2 * s2ln2))\n gamma = max(tiny, fwhm_l / 2)\n arg = center - x\n z = (arg + 1j * gamma) / (sigma * s2)\n # pylint: disable=no-member\n y = amplitude * ss.wofz(z).real / (sigma * s2pi)\n return y\n\ndef voigt_defined_fwhm(x, amplitude=1.0, center=0.0, fwhm=1.0, fwhm_g=None):\n \"\"\"Voigt function using Faddeeva function wofz.\n https://en.wikipedia.org/wiki/Voigt_profile\n Taken from lmfit module, modified to take fwhms:\n Full fwhm and Gaussian fwhm (Lorentzian fwhm is inferred, see bottom\n of wikipedia link)\n \"\"\"\n if fwhm_g is None:\n fwhm_g = fwhm / 1.6376\n sigma = max(tiny, fwhm_g / (2 * s2ln2))\n fwhm_l = 7.72575 * fwhm - np.sqrt(45.23566 * fwhm**2 + 14.4514 * fwhm_g**2)\n gamma = max(tiny, fwhm_l / 2)\n arg = center - x\n z = (arg + 1j * gamma) / (sigma * s2)\n # pylint: disable=no-member\n y = amplitude * ss.wofz(z).real / (sigma * s2pi)\n return y\n\ndef gl_sum(x, amplitude=1.0, center=0.0, fwhm=1.0, fraction=0.5):\n \"\"\"Sum of a gaussian and a lorentzian component.\"\"\"\n G = gaussian(x, amplitude=amplitude, center=center, fwhm=fwhm)\n L = lorentzian(x, amplitude=amplitude, center=center, fwhm=fwhm)\n return (1 - fraction) * G + fraction * L\n\ndef gl_prod(x, amplitude=1.0, center=0.0, fwhm=1.0, fraction=0.5):\n \"\"\"Product form of a gaussian and a lorentzian component.\"\"\"\n # area and fwhm are not determined - don't use!\n fwhm_g = fwhm / (1 - fraction)\n fwhm_l = fwhm / fraction\n sigma = max(tiny, fwhm_g / (2 * s2ln2))\n gamma = max(tiny, fwhm_l / 2)\n arg = center - x\n # pylint: disable=no-member\n norm_area = (\n gamma * np.exp(gamma**2 / (4 * sigma**2))\n * ss.erfc(gamma / (2 * sigma))\n / (s2pi * sigma * gamma)\n )\n amplitude /= norm_area\n GL = (\n amplitude #/ (s2pi * sigma * gamma * np.pi)\n * np.exp(-arg**2 / (2 * sigma**2) * 4 * ln2 * (1 - fraction))\n * gamma**2 / (4 * fraction) / 
(arg**2 + gamma**2 / (4 * fraction))\n )\n # G = gaussian(x, amplitude=amplitude, center=center, fwhm=fwhm_g)\n # L = lorentzian(x, amplitude=amplitude, center=center, fwhm=fwhm_l)\n return GL\n\ndef gelius(x, amplitude=1.0, center=0.0, fwhm=1.0, a=0.5, b=0.5, fwhm_l=0.5):\n \"\"\"See http://www.casaxps.com/help_manual/line_shapes.htm\n Modified to use Voigt profile instead of GL product\"\"\"\n if fwhm_l is None:\n fwhm_l = fwhm\n arg = center - x\n below_c = (x <= center).astype(int)\n AW = np.exp(-(2 * sln2 * arg / (fwhm - a * 2 * sln2 * arg))**2)\n w = b * (0.7 + 0.3 / (a + 0.01))\n V = voigt(x, amplitude=amplitude, center=center, fwhm=fwhm, fwhm_l=fwhm_l)\n G = gaussian(x, amplitude=amplitude, center=center, fwhm=fwhm)\n y = V + below_c * (w * (AW - G))\n return y\n\ndef asymm_tail(x, center=0, fwhm=1.0, tail=1.0):\n \"\"\"Tail for dampening asymmetric lines below x = center.\"\"\"\n arg = (center - x) / fwhm\n try:\n zeros = np.zeros(len(x))\n except TypeError:\n zeros = 0\n y = np.exp(-np.maximum(arg, zeros) * tail)\n return y\n\ndef centered_ds(x, amplitude=1.0, center=0.0, fwhm=1.0, asym=0.5):\n \"\"\"DS lineshape with maximum at center.\"\"\"\n emax = fwhm / (2 * np.tan(np.pi / (2 - asym)))\n center += emax\n y = pure_ds(x, amplitude=amplitude, center=center, fwhm=fwhm, asym=asym)\n return y\n\ndef tailed_ds(x, amplitude=1.0, center=0.0, fwhm=1.0, asym=0.5, tail=1.0):\n \"\"\"Centered DS with exponential tail at lower x.\"\"\"\n emax = fwhm / (2 * np.tan(np.pi / (2 - asym)))\n center += emax\n ds = pure_ds(x, amplitude=amplitude, center=center, fwhm=fwhm, asym=asym)\n as_tail = asymm_tail(x, center=center, fwhm=fwhm, tail=tail)\n return ds * as_tail\n\n\nclass PeakModel(Model):\n \"\"\"Generic model for peaks.\"\"\"\n def __init__(self, func, **kwargs):\n kwargs[\"independent_vars\"] = kwargs.get(\"independent_vars\", [\"x\"])\n kwargs[\"prefix\"] = kwargs.get(\"prefix\", \"\")\n kwargs[\"nan_policy\"] = kwargs.get(\"nan_policy\", \"raise\")\n self.fwhm_res = kwargs.get(\"fwhm_res\", 0.01)\n self.area_res = kwargs.get(\"area_res\", 0.1)\n self.area_range = kwargs.get(\"area_range\", 20)\n for arg in (\"amplitude\", \"fwhm\", \"center\"):\n if arg not in func.__code__.co_varnames:\n raise ValueError(\"Function has wrong parameters for PeakModel\")\n super().__init__(func, **kwargs)\n\n def guess(self, data, **kwargs):\n \"\"\"Guess the pars.\"\"\"\n x = kwargs.get(\"x\", None)\n negative = kwargs.get(\"negative\", False)\n pars = guess_from_peak(self, data, x, negative, ampscale=0.5)\n return update_param_vals(pars, self.prefix, **kwargs)\n\n def get_fwhm(self, params, x=None):\n \"\"\"Generic FWHM calculator:\n Searches from center in both directions for values below maximum / 2\n \"\"\"\n if x:\n pass\n funcargs = self.make_funcargs(params)\n center = funcargs[\"center\"]\n fwhm = funcargs[\"fwhm\"]\n hm = self.func(x=center, **funcargs) / 2\n x_min, x_max = center, center\n while self.func(x=x_max, **funcargs) >= hm:\n x_max += self.fwhm_res\n if x_max > center + 5 * fwhm:\n print(\"Could not calculate correct FWHM\")\n break\n while self.func(x=x_min, **funcargs) >= hm:\n x_min -= self.fwhm_res\n if x_min < center - 5 * fwhm:\n print(\"Could not calculate correct FWHM\")\n break\n return x_max - x_min\n\n def get_area(self, params, x=None):\n \"\"\"Generic area calculator: Integrates interval\n (center - self.area_range/2, center + self.area_range/2)\n with resolution self.area_res.\n \"\"\"\n funcargs = self.make_funcargs(params)\n if x:\n start = x.min()\n end = x.max()\n N 
= len(x)\n res = (end - start) / N\n else:\n center = funcargs[\"center\"]\n start = center - self.area_range / 2\n end = center + self.area_range / 2\n N = self.area_range / self.area_res\n res = self.area_res\n x = np.linspace(start, end, int(N))\n y = self.func(x=x, **funcargs)\n return sum(y) * res\n\n\nclass VoigtModel(PeakModel):\n \"\"\"Voigt model with a defined fwhm.\"\"\"\n def __init__(self, **kwargs):\n super().__init__(voigt_defined_fwhm, **kwargs)\n\nclass PseudoVoigtModel(PeakModel):\n \"\"\"Standard Gaussian-Lorentzian product.\"\"\"\n def __init__(self, **kwargs):\n super().__init__(gl_sum, **kwargs)\n\nclass DoniachSunjicModel(PeakModel):\n \"\"\"x-axis reversed Doniach model (general formula taken from lmfit).\"\"\"\n def __init__(self, **kwargs):\n super().__init__(centered_ds, **kwargs)\n\nclass TailedDoniachSunjicModel(PeakModel):\n \"\"\"DS line shape with an exponentially decaying tail on the asymmetric\n side.\"\"\"\n def __init__(self, **kwargs):\n super().__init__(tailed_ds, **kwargs)\n\n\n\ndef pah2fwhm(_position, angle, height, shape):\n \"\"\"Calculates fwhm from position, angle, height depending on shape.\"\"\"\n if shape == \"PseudoVoigt\":\n return np.tan(angle) * height\n elif shape == \"DoniachSunjic\":\n return np.tan(angle) * height\n elif shape == \"Voigt\":\n return np.tan(angle) * height\n raise NotImplementedError\n\ndef pah2area(_position, angle, height, shape):\n \"\"\"Calculates area from position, angle, height depending on shape.\"\"\"\n if shape == \"PseudoVoigt\":\n fwhm = np.tan(angle) * height\n area = (height * (fwhm * np.sqrt(np.pi / ln2))\n / (1 + np.sqrt(1 / (np.pi * ln2))))\n return area\n elif shape == \"DoniachSunjic\":\n fwhm = np.tan(angle) * height\n area = height / pure_ds(0, amplitude=1, center=0, fwhm=fwhm, asym=0.5)\n return area\n elif shape == \"Voigt\":\n fwhm = np.tan(angle) * height\n area = height / voigt(0, amplitude=1, center=0, fwhm=fwhm, fwhm_l=0.5)\n return area\n raise NotImplementedError\n" ]
[ [ "scipy.special.gamma", "numpy.log", "scipy.special.wofz", "numpy.tan", "numpy.exp", "scipy.special.erfc", "numpy.arctan", "numpy.sqrt", "numpy.maximum" ] ]
SanzharMrz/language-models-are-knowledge-graphs-pytorch
[ "375f20ed17df40af9486faddbf153919f503f33d" ]
[ "utils.py" ]
[ "from collections import OrderedDict\nimport numpy as np\nimport torch\nimport re\n\nalphabet = re.compile(r'^[a-zA-Z]+$')\n\nfrom copy import copy\nfrom collections import defaultdict\n\ndef build_graph(matrix):\n graph = defaultdict(list) \n\n for idx in range(0, len(matrix)):\n for col in range(idx+1, len(matrix)):\n graph[idx].append((col, matrix[idx][col] ))\n return graph\n\ndef BFS(s, end, graph, max_size=-1, black_list_relation=[]):\n visited = [False] * (max(graph.keys())+100) \n # Create a queue for BFS \n queue = [] \n\n # Mark the source node as \n # visited and enqueue it \n queue.append((s, [(s, 0)]))\n \n found_paths = []\n\n visited[s] = True\n \n while queue: \n\n s, path = queue.pop(0)\n\n # Get all adjacent vertices of the \n # dequeued vertex s. If a adjacent \n # has not been visited, then mark it \n # visited and enqueue it \n for i, conf in graph[s]:\n if i == end:\n found_paths.append(path+[(i, conf)])\n break\n if visited[i] == False:\n queue.append((i, copy(path)+[(i, conf)]))\n visited[i] = True\n \n candidate_facts = []\n for path_pairs in found_paths:\n if len(path_pairs) < 3:\n continue\n path = []\n cum_conf = 0\n for (node, conf) in path_pairs:\n path.append(node)\n cum_conf += conf\n\n if path[1] in black_list_relation:\n continue\n\n candidate_facts.append((path, cum_conf))\n\n candidate_facts = sorted(candidate_facts, key=lambda x: x[1], reverse=True)\n return candidate_facts\n\ndef is_word(token):\n if len(token) == 1 and alphabet.match(token) == None:\n return False\n return True\n\n\ndef parse_ner_results(ner_results):\n candidates = []\n sub_fold = []\n for idx, curr in enumerate(ner_results):\n if idx == 0:\n sub_fold.append(curr['word'])\n prev_flag = curr['entity'].split('-')[0]\n prev = curr\n continue\n curr_flag = curr['entity'].split('-')[0]\n if prev_flag == 'B' and curr_flag == 'B' and not idx:\n candidates.append(sub_fold[0])\n sub_fold = []\n\n elif prev_flag == 'B' and curr_flag == 'B' and idx:\n sub_fold.append(prev['word'])\n candidates.append(sub_fold[0])\n sub_fold = []\n sub_fold.append(curr['word'])\n\n elif prev_flag == 'B' and curr_flag == 'I':\n sub_fold.append(prev['word'])\n sub_fold.append(curr['word'])\n\n elif (prev_flag == 'I') and (curr_flag == 'I' ) and (idx + 1 < len(ner_results)):\n sub_fold.append(curr['word'])\n\n elif (prev_flag == 'I') and (curr_flag == 'B' ):\n ordered = OrderedDict(dict(zip(sub_fold, range(len(sub_fold)))))\n candidates.append(' '.join(list(ordered.keys())).replace(' #', '').replace('#', ''))\n sub_fold = []\n sub_fold.append(curr['word'])\n\n elif (prev_flag == 'I') and (curr_flag == 'I' ) and (idx + 1) == len(ner_results):\n sub_fold.append(curr['word'])\n ordered = OrderedDict(dict(zip(sub_fold, range(len(sub_fold)))))\n candidates.append(' '.join(list(ordered.keys())))\n sub_fold = []\n\n prev = curr\n prev_flag = prev['entity'].split('-')[0]\n return candidates\n\ndef create_mapping(sentence, return_pt=False, nlp = None, tokenizer=None, pipeline_ner=None):\n '''Create a mapping\n nlp: spacy model\n tokenizer: huggingface tokenizer\n '''\n doc = nlp(sentence)\n ner_results = pipeline_ner(sentence)\n parsed_candidates = parse_ner_results(ner_results)\n\n tokens = list(doc)\n\n chunk2id = {}\n\n start_chunk = []\n end_chunk = []\n noun_chunks = []\n for chunk in doc.noun_chunks:\n if chunk.text in parsed_candidates:\n noun_chunks.append(chunk.text)\n start_chunk.append(chunk.start)\n end_chunk.append(chunk.end)\n\n sentence_mapping = []\n token2id = {}\n mode = 0 # 1 in chunk, 0 not in chunk\n 
chunk_id = 0\n for idx, token in enumerate(doc):\n if idx in start_chunk:\n mode = 1\n sentence_mapping.append(noun_chunks[chunk_id])\n if sentence_mapping[-1] not in token2id:\n token2id[sentence_mapping[-1]] = len(token2id)\n chunk_id += 1\n elif idx in end_chunk:\n mode = 0\n\n if mode == 0:\n sentence_mapping.append(token.text)\n if sentence_mapping[-1] not in token2id:\n token2id[sentence_mapping[-1]] = len(token2id)\n\n\n token_ids = []\n tokenid2word_mapping = []\n\n for token in sentence_mapping:\n subtoken_ids = tokenizer(str(token), add_special_tokens=False)['input_ids']\n tokenid2word_mapping += [ token2id[token] ]*len(subtoken_ids)\n token_ids += subtoken_ids\n\n tokenizer_name = str(tokenizer.__str__)\n if 'GPT2' in tokenizer_name:\n outputs = {\n 'input_ids': token_ids,\n 'attention_mask': [1]*(len(token_ids)),\n }\n\n else:\n outputs = {\n 'input_ids': [tokenizer.cls_token_id] + token_ids + [tokenizer.sep_token_id],\n 'attention_mask': [1]*(len(token_ids)+2),\n 'token_type_ids': [0]*(len(token_ids)+2)\n }\n\n if return_pt:\n for key, value in outputs.items():\n outputs[key] = torch.from_numpy(np.array(value)).long().unsqueeze(0)\n \n return outputs, tokenid2word_mapping, token2id, noun_chunks\n\ndef compress_attention(attention, tokenid2word_mapping, operator=np.mean):\n\n new_index = []\n \n prev = -1\n for idx, row in enumerate(attention):\n token_id = tokenid2word_mapping[idx]\n if token_id != prev:\n new_index.append( [row])\n prev = token_id\n else:\n new_index[-1].append(row)\n\n new_matrix = []\n for row in new_index:\n new_matrix.append(operator(np.array(row), 0))\n\n new_matrix = np.array(new_matrix)\n\n attention = np.array(new_matrix).T\n\n prev = -1\n new_index= []\n for idx, row in enumerate(attention):\n token_id = tokenid2word_mapping[idx]\n if token_id != prev:\n new_index.append( [row])\n prev = token_id\n else:\n new_index[-1].append(row)\n\n \n new_matrix = []\n for row in new_index:\n new_matrix.append(operator(np.array(row), 0))\n \n new_matrix = np.array(new_matrix)\n\n return new_matrix.T\n\ndef index2word(tokenid2word_mapping, token2id):\n tokens = []\n prev = -1\n for token_id in tokenid2word_mapping:\n if token_id == prev:\n continue\n\n tokens.append(token2id[token_id])\n prev = token_id\n\n return tokens\n\n\n\nif __name__ == '__main__':\n import en_core_web_sm\n from transformers import AutoTokenizer, BertModel\n tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')\n encoder = BertModel.from_pretrained('bert-base-cased')\n nlp = en_core_web_sm.load()\n\n sentence = 'Rolling Stone wrote: “No other pop song has so thoroughly challenged artistic conventions”'\n sentence = 'Dylan sing \"Time They Are Changing\"'\n inputs, tokenid2word_mapping, token2id, noun_chunks = create_mapping(sentence, return_pt=True, nlp=nlp, tokenizer=tokenizer)\n\n outputs = encoder(**inputs, output_attentions=True)\n print(noun_chunks, tokenid2word_mapping, token2id)\n" ]
[ [ "numpy.array" ] ]
stalbrec/coffea
[ "c298f24952d7e493f5a5921e39b64f44fd579a3c" ]
[ "tests/test_hist_tools.py" ]
[ "from __future__ import print_function, division\n\nfrom coffea import hist\nimport numpy as np\nimport awkward as ak\n\nfrom dummy_distributions import dummy_jagged_eta_pt\nimport pytest\nimport sys\n\n\ndef test_hist():\n counts, test_eta, test_pt = dummy_jagged_eta_pt()\n\n h_nothing = hist.Hist(\"empty inside\")\n assert h_nothing.sparse_dim() == h_nothing.dense_dim() == 0\n assert h_nothing.values() == {}\n\n h_regular_bins = hist.Hist(\n \"regular joe\", hist.Bin(\"x\", \"x\", 20, 0, 200), hist.Bin(\"y\", \"why\", 20, -3, 3)\n )\n h_regular_bins.fill(x=test_pt, y=test_eta)\n nentries = np.sum(counts)\n assert h_regular_bins.sum(\"x\", \"y\", overflow=\"all\").values(sumw2=True)[()] == (\n nentries,\n nentries,\n )\n # bin x=2, y=10 (when overflow removed)\n count_some_bin = np.sum(\n (test_pt >= 20.0) & (test_pt < 30.0) & (test_eta >= 0.0) & (test_eta < 0.3)\n )\n assert (\n h_regular_bins.integrate(\"x\", slice(20, 30)).values()[()][10] == count_some_bin\n )\n assert (\n h_regular_bins.integrate(\"y\", slice(0, 0.3)).values()[()][2] == count_some_bin\n )\n\n h_reduced = h_regular_bins[10:, -0.6:]\n # bin x=1, y=2\n assert h_reduced.integrate(\"x\", slice(20, 30)).values()[()][2] == count_some_bin\n assert h_reduced.integrate(\"y\", slice(0, 0.3)).values()[()][1] == count_some_bin\n h_reduced.fill(x=23, y=0.1)\n assert h_reduced.integrate(\"x\", slice(20, 30)).values()[()][2] == count_some_bin + 1\n assert h_reduced.integrate(\"y\", slice(0, 0.3)).values()[()][1] == count_some_bin + 1\n\n animal = hist.Cat(\"animal\", \"type of animal\")\n vocalization = hist.Cat(\"vocalization\", \"onomatopoiea is that how you spell it?\")\n h_cat_bins = hist.Hist(\"I like cats\", animal, vocalization)\n h_cat_bins.fill(animal=\"cat\", vocalization=\"meow\", weight=2.0)\n h_cat_bins.fill(\n animal=\"dog\", vocalization=\"meow\", weight=np.array([-1.0, -1.0, -5.0])\n )\n h_cat_bins.fill(animal=\"dog\", vocalization=\"woof\", weight=100.0)\n h_cat_bins.fill(animal=\"dog\", vocalization=\"ruff\")\n assert h_cat_bins.values()[(\"cat\", \"meow\")] == 2.0\n assert h_cat_bins.values(sumw2=True)[(\"dog\", \"meow\")] == (-7.0, 27.0)\n assert h_cat_bins.integrate(\"vocalization\", [\"woof\", \"ruff\"]).values(sumw2=True)[\n (\"dog\",)\n ] == (101.0, 10001.0)\n\n height = hist.Bin(\"height\", \"height [m]\", 10, 0, 5)\n h_mascots_1 = hist.Hist(\n \"fermi mascot showdown\",\n animal,\n vocalization,\n height,\n # weight is a reserved keyword\n hist.Bin(\n \"mass\", \"weight (g=9.81m/s**2) [kg]\", np.power(10.0, np.arange(5) - 1)\n ),\n )\n\n h_mascots_2 = hist.Hist(\n \"fermi mascot showdown\",\n axes=(\n animal,\n vocalization,\n height,\n # weight is a reserved keyword\n hist.Bin(\n \"mass\", \"weight (g=9.81m/s**2) [kg]\", np.power(10.0, np.arange(5) - 1)\n ),\n ),\n )\n\n h_mascots_3 = hist.Hist(\n axes=[\n animal,\n vocalization,\n height,\n # weight is a reserved keyword\n hist.Bin(\n \"mass\", \"weight (g=9.81m/s**2) [kg]\", np.power(10.0, np.arange(5) - 1)\n ),\n ],\n label=\"fermi mascot showdown\",\n )\n\n with pytest.warns(UserWarning):\n h_mascots_4 = hist.Hist(\n \"fermi mascot showdown\",\n animal,\n vocalization,\n height,\n # weight is a reserved keyword\n hist.Bin(\n \"mass\", \"weight (g=9.81m/s**2) [kg]\", np.power(10.0, np.arange(5) - 1)\n ),\n axes=[\n animal,\n vocalization,\n height,\n # weight is a reserved keyword\n hist.Bin(\n \"mass\",\n \"weight (g=9.81m/s**2) [kg]\",\n np.power(10.0, np.arange(5) - 1),\n ),\n ],\n )\n\n assert h_mascots_1._dense_shape == h_mascots_2._dense_shape\n 
assert h_mascots_2._dense_shape == h_mascots_3._dense_shape\n assert h_mascots_3._dense_shape == h_mascots_4._dense_shape\n\n assert h_mascots_1._axes == h_mascots_2._axes\n assert h_mascots_2._axes == h_mascots_3._axes\n assert h_mascots_3._axes == h_mascots_4._axes\n\n adult_bison_h = np.random.normal(loc=2.5, scale=0.2, size=40)\n adult_bison_w = np.random.normal(loc=700, scale=100, size=40)\n h_mascots_1.fill(\n animal=\"bison\", vocalization=\"huff\", height=adult_bison_h, mass=adult_bison_w\n )\n goose_h = np.random.normal(loc=0.4, scale=0.05, size=1000)\n goose_w = np.random.normal(loc=7, scale=1, size=1000)\n h_mascots_1.fill(animal=\"goose\", vocalization=\"honk\", height=goose_h, mass=goose_w)\n crane_h = np.random.normal(loc=1, scale=0.05, size=4)\n crane_w = np.random.normal(loc=10, scale=1, size=4)\n h_mascots_1.fill(animal=\"crane\", vocalization=\"none\", height=crane_h, mass=crane_w)\n\n with pytest.raises(ValueError):\n h_mascots_1.fill(\n beast=\"crane\", yelling=\"none\", tallness=crane_h, heavitivity=crane_w\n )\n\n h_mascots_2 = h_mascots_1.copy()\n h_mascots_2.clear()\n baby_bison_h = np.random.normal(loc=0.5, scale=0.1, size=20)\n baby_bison_w = np.random.normal(loc=200, scale=10, size=20)\n baby_bison_cutefactor = 2.5 * np.ones_like(baby_bison_w)\n h_mascots_2.fill(\n animal=\"bison\",\n vocalization=\"baa\",\n height=baby_bison_h,\n mass=baby_bison_w,\n weight=baby_bison_cutefactor,\n )\n h_mascots_2.fill(animal=\"fox\", vocalization=\"none\", height=1.0, mass=30.0)\n\n h_mascots = h_mascots_1 + h_mascots_2\n assert (\n h_mascots.integrate(\"vocalization\", \"h*\")\n .sum(\"height\", \"mass\", \"animal\")\n .values()[()]\n == 1040.0\n )\n\n species_class = hist.Cat(\"species_class\", \"where the subphylum is vertibrates\")\n classes = {\n \"birds\": [\"goose\", \"crane\"],\n \"mammals\": [\"bison\", \"fox\"],\n }\n h_mascots.scale({(\"goose\",): 0.5}, axis=(\"animal\",))\n h_mascots.scale({(\"goose\", \"honk\"): 2.0}, axis=(\"animal\", \"vocalization\"))\n h_species = h_mascots.group(\"animal\", species_class, classes)\n\n assert set(h_species.integrate(\"vocalization\").values().keys()) == set(\n [(\"birds\",), (\"mammals\",)]\n )\n nbirds_bin = np.sum(\n (goose_h >= 0.5) & (goose_h < 1) & (goose_w > 10) & (goose_w < 100)\n )\n nbirds_bin += np.sum(\n (crane_h >= 0.5) & (crane_h < 1) & (crane_w > 10) & (crane_w < 100)\n )\n assert h_species.integrate(\"vocalization\").values()[(\"birds\",)][1, 2] == nbirds_bin\n tally = h_species.sum(\"mass\", \"height\", \"vocalization\").values()\n assert tally[(\"birds\",)] == 1004.0\n assert tally[(\"mammals\",)] == 91.0\n\n h_species.scale({\"honk\": 0.1, \"huff\": 0.9}, axis=\"vocalization\")\n h_species.scale(5.0)\n tally = h_species.sum(\"mass\", height, vocalization).values(sumw2=True)\n assert tally[(\"birds\",)] == (520.0, 350.0)\n assert tally[(\"mammals\",)] == (435.0, 25 * (40 * (0.9**2) + 20 * (2.5**2) + 1))\n\n assert h_species.axis(\"vocalization\") is vocalization\n assert h_species.axis(\"height\") is height\n assert h_species.integrate(\"vocalization\", \"h*\").axis(\"height\") is height\n\n tall_class = hist.Cat(\"tall_class\", \"species class (species above 1m)\")\n mapping = {\n \"birds\": ([\"goose\", \"crane\"], slice(1.0, None)),\n \"mammals\": ([\"bison\", \"fox\"], slice(1.0, None)),\n }\n h_tall = h_mascots.group((animal, height), tall_class, mapping)\n tall_bird_count = np.sum(goose_h >= 1.0) + np.sum(crane_h >= 1)\n assert h_tall.sum(\"mass\", \"vocalization\").values()[(\"birds\",)] == 
tall_bird_count\n tall_mammal_count = np.sum(adult_bison_h >= 1.0) + np.sum(baby_bison_h >= 1) + 1\n assert (\n h_tall.sum(\"mass\", \"vocalization\").values()[(\"mammals\",)] == tall_mammal_count\n )\n\n h_less = h_mascots.remove([\"fox\", \"bison\"], axis=\"animal\")\n assert h_less.sum(\"vocalization\", \"height\", \"mass\", \"animal\").values()[()] == 1004.0\n\n\ndef test_export1d():\n import uproot3\n import os\n from coffea.hist.export import export1d\n\n counts, test_eta, test_pt = dummy_jagged_eta_pt()\n h_regular_bins = hist.Hist(\"regular_joe\", hist.Bin(\"x\", \"x\", 20, 0, 200))\n h_regular_bins.fill(x=test_pt)\n\n hout = export1d(h_regular_bins)\n\n filename = \"test_export1d.root\"\n\n with uproot3.create(filename) as fout:\n fout[\"regular_joe\"] = hout\n fout.close()\n\n with uproot3.open(filename) as fin:\n hin = fin[\"regular_joe\"]\n\n assert np.all(hin.edges == hout.edges)\n assert np.all(hin.values == hout.values)\n\n del hin\n del fin\n\n if os.path.exists(filename):\n os.remove(filename)\n\n\ndef test_hist_serdes():\n import pickle\n\n h_regular_bins = hist.Hist(\n \"regular joe\", hist.Bin(\"x\", \"x\", 20, 0, 200), hist.Bin(\"y\", \"why\", 20, -3, 3)\n )\n\n h_regular_bins.fill(\n x=np.array([1.0, 2.0, 3.0, 4.0, 5.0]), y=np.array([-2.0, 1.0, 0.0, 1.0, 2.0])\n )\n\n h_regular_bins.sum(\"x\").identifiers(\"y\")\n\n spkl = pickle.dumps(h_regular_bins)\n\n hnew = pickle.loads(spkl)\n\n hnew.sum(\"x\").identifiers(\"y\")\n\n assert h_regular_bins._dense_shape == hnew._dense_shape\n assert h_regular_bins._axes == hnew._axes\n\n\ndef test_hist_serdes_labels():\n import pickle\n\n ax = hist.Bin(\"asdf\", \"asdf\", 3, 0, 3)\n ax.identifiers()[0].label = \"type 1\"\n h = hist.Hist(\"a\", ax)\n h.identifiers(\"asdf\")\n\n spkl = pickle.dumps(h)\n\n hnew = pickle.loads(spkl)\n\n for old, new in zip(h.identifiers(\"asdf\"), hnew.identifiers(\"asdf\")):\n assert old.label == new.label\n\n assert h._dense_shape == hnew._dense_shape\n assert h._axes == hnew._axes\n\n\[email protected](\n sys.version_info < (3, 4),\n reason=\"requires python3.4 or higher, test file is pickle proto 4\",\n)\ndef test_hist_compat():\n from coffea.util import load\n\n test = load(\"tests/samples/old_hist_format.coffea\")\n\n expected_bins = np.array(\n [\n -np.inf,\n 0.0,\n 20.0,\n 40.0,\n 60.0,\n 80.0,\n 100.0,\n 120.0,\n 140.0,\n 160.0,\n 180.0,\n 200.0,\n 220.0,\n 240.0,\n 260.0,\n 280.0,\n 300.0,\n 320.0,\n 340.0,\n 360.0,\n 380.0,\n 400.0,\n 420.0,\n 440.0,\n 460.0,\n 480.0,\n 500.0,\n 520.0,\n 540.0,\n 560.0,\n 580.0,\n 600.0,\n 620.0,\n 640.0,\n 660.0,\n 680.0,\n 700.0,\n 720.0,\n 740.0,\n 760.0,\n 780.0,\n 800.0,\n 820.0,\n 840.0,\n 860.0,\n 880.0,\n 900.0,\n 920.0,\n 940.0,\n 960.0,\n 980.0,\n 1000.0,\n 1020.0,\n 1040.0,\n 1060.0,\n 1080.0,\n 1100.0,\n 1120.0,\n 1140.0,\n 1160.0,\n 1180.0,\n 1200.0,\n np.inf,\n np.nan,\n ]\n )\n assert np.all(test._axes[2]._interval_bins[:-1] == expected_bins[:-1])\n assert np.isnan(test._axes[2]._interval_bins[-1])\n\n\ndef test_issue_247():\n from coffea import hist\n\n h = hist.Hist(\"stuff\", hist.Bin(\"old\", \"old\", 20, -1, 1))\n h.fill(old=h.axis(\"old\").centers())\n h2 = h.rebin(h.axis(\"old\"), hist.Bin(\"new\", \"new\", 10, -1, 1))\n # check first if its even possible to have correct binning\n assert np.all(h2.axis(\"new\").edges() == h.axis(\"old\").edges()[::2])\n # make sure the lookup works properly\n assert np.all(h2.values()[()] == 2.0)\n h3 = h.rebin(h.axis(\"old\"), 2)\n assert np.all(h3.values()[()] == 2.0)\n\n with 
pytest.raises(ValueError):\n # invalid division\n _ = h.rebin(h.axis(\"old\"), hist.Bin(\"new\", \"new\", 8, -1, 1))\n\n newaxis = hist.Bin(\"new\", \"new\", h.axis(\"old\").edges()[np.cumsum([0, 2, 3, 5])])\n h.rebin(\"old\", newaxis)\n\n\ndef test_issue_333():\n axis = hist.Bin(\"channel\", \"Channel b1\", 50, 0, 2000)\n temp = np.arange(0, 2000, 40, dtype=np.int16)\n assert np.all(axis.index(temp) == np.arange(50) + 1)\n\n\ndef test_issue_394():\n dummy = hist.Hist(\n \"Dummy\",\n hist.Cat(\"sample\", \"sample\"),\n hist.Bin(\"dummy\", \"Number of events\", 1, 0, 1),\n )\n dummy.fill(sample=\"test\", dummy=1, weight=0.5)\n\n\ndef test_fill_none():\n dummy = hist.Hist(\"Dummy\", hist.Bin(\"x\", \"asdf\", 1, 0, 1))\n with pytest.raises(ValueError):\n # attempt to fill with none\n dummy.fill(x=ak.Array([0.1, None, 0.3]))\n\n # allow fill when masked type but no Nones remain\n dummy.fill(x=ak.Array([0.1, None, 0.3])[[True, False, True]])\n\n\ndef test_boost_conversion():\n import boost_histogram as bh\n\n dummy = hist.Hist(\n \"Dummy\",\n hist.Cat(\"sample\", \"sample\"),\n hist.Bin(\"dummy\", \"Number of events\", 1, 0, 1),\n )\n dummy.fill(sample=\"test\", dummy=1, weight=0.5)\n dummy.fill(sample=\"test\", dummy=0.1)\n dummy.fill(sample=\"test2\", dummy=-0.1)\n dummy.fill(sample=\"test3\", dummy=0.5, weight=0.1)\n dummy.fill(sample=\"test3\", dummy=0.5, weight=0.9)\n\n h = dummy.to_boost()\n assert len(h.axes) == 2\n assert h[bh.loc(\"test\"), bh.loc(1)].value == 0.5\n assert h[bh.loc(\"test\"), bh.loc(100)].value == 0.5\n assert h[bh.loc(\"test\"), bh.loc(1)].variance == 0.25\n assert h[0, 0].value == 1.0\n assert h[0, 0].variance == 1.0\n assert h[1, 0].value == 0.0\n assert h[bh.loc(\"test2\"), 0].value == 0.0\n assert h[1, bh.underflow].value == 1.0\n assert h[bh.loc(\"test3\"), bh.loc(0.5)].value == 1.0\n assert h[bh.loc(\"test3\"), bh.loc(0.5)].variance == 0.1 * 0.1 + 0.9 * 0.9\n\n dummy = hist.Hist(\n \"Dummy\",\n hist.Cat(\"sample\", \"sample\"),\n hist.Bin(\"dummy\", \"Number of events\", 1, 0, 1),\n )\n dummy.fill(sample=\"test\", dummy=0.1)\n dummy.fill(sample=\"test\", dummy=0.2)\n dummy.fill(sample=\"test2\", dummy=0.2)\n # No sumw2 -> simple bh storage\n h = dummy.to_boost()\n assert len(h.axes) == 2\n assert h[0, 0] == 2.0\n assert h[1, 0] == 1.0\n" ]
[ [ "numpy.random.normal", "numpy.array", "numpy.isnan", "numpy.ones_like", "numpy.sum", "numpy.arange", "numpy.cumsum", "numpy.all" ] ]
timoteogb/BNN-PYNQ-ZCU104
[ "ea5396bfa041623259bd59b7d622d82e81e19d20" ]
[ "bnn/bnn.py" ]
[ "# Copyright (c) 2016, Xilinx, Inc.\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without \n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, \n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright \n# notice, this list of conditions and the following disclaimer in the \n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its \n# contributors may be used to endorse or promote products derived from \n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, \n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR \n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR \n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, \n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, \n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, \n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR \n# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF \n# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom pynq import Overlay, PL\nfrom PIL import Image\nimport numpy as np\nimport cffi\nimport os\nimport tempfile\n\nRUNTIME_HW = \"python_hw\"\nRUNTIME_SW = \"python_sw\"\n\nNETWORK_CNVW1A1 = \"cnvW1A1\"\nNETWORK_CNVW1A2 = \"cnvW1A2\"\nNETWORK_CNVW2A2 = \"cnvW2A2\"\nNETWORK_LFCW1A1 = \"lfcW1A1\"\nNETWORK_LFCW1A2 = \"lfcW1A2\"\n\nif os.environ['BOARD'] == 'Ultra96':\n\tPLATFORM=\"ultra96\"\nelif os.environ['BOARD'] == 'Pynq-Z1' or os.environ['BOARD'] == 'Pynq-Z2':\n\tPLATFORM=\"pynqZ1-Z2\"\nelif os.environ['BOARD'] == 'ZCU104':\n\tPLATFORM=\"zcu104\"\nelse:\n\traise RuntimeError(\"Board not supported\")\n\nBNN_ROOT_DIR = os.path.dirname(os.path.realpath(__file__))\nBNN_LIB_DIR = os.path.join(BNN_ROOT_DIR, 'libraries', PLATFORM)\nBNN_BIT_DIR = os.path.join(BNN_ROOT_DIR, 'bitstreams', PLATFORM)\nBNN_PARAM_DIR = os.path.join(BNN_ROOT_DIR, 'params')\n\n_ffi = cffi.FFI()\n\n_ffi.cdef(\"\"\"\nvoid load_parameters(const char* path);\nint inference(const char* path, int results[64], int number_class, float *usecPerImage);\nint* inference_multiple(const char* path, int number_class, int *image_number, float *usecPerImage, int enable_detail);\nvoid free_results(int * result);\nvoid deinit();\n\"\"\"\n)\n\n_libraries = {}\n\n# function to check which datasets are available for a given network\ndef available_params(network):\n\tdatasets = os.listdir(BNN_PARAM_DIR)\n\tret = []\n\tfor d in datasets:\n\t\tif os.path.isdir(os.path.join(BNN_PARAM_DIR, d)):\n\t\t\tsupportedNets = os.listdir(os.path.join(BNN_PARAM_DIR, d))\n\t\t\tfor nets in supportedNets:\n\t\t\t\tif nets == network:\n\t\t\t\t\tret.append(d)\n\treturn ret\n\n# pyhton object as interface for communication with host library through C++ shared object library\nclass PynqBNN:\n\n\t# on creating PynqBNN the shared library for a given network is loaded and bitstream is downloaded to PL\n\t# when intending to use hardware accelerated runtime\n\tdef __init__(self, runtime, network, 
load_overlay=True):\n\t\tself.bitstream_name = None\n\t\tif runtime == RUNTIME_HW:\n\t\t\tself.bitstream_name=\"{0}-{1}.bit\".format(network,PLATFORM)\n\t\t\tself.bitstream_path=os.path.join(BNN_BIT_DIR, self.bitstream_name)\n\t\t\tif PL.bitfile_name != self.bitstream_path:\n\t\t\t\tif load_overlay:\n\t\t\t\t\tOverlay(self.bitstream_path).download()\n\t\t\t\telse:\n\t\t\t\t\traise RuntimeError(\"Incorrect Overlay loaded\")\n\t\tdllname = \"{0}-{1}-{2}.so\".format(runtime, network,PLATFORM)\n\t\tif dllname not in _libraries:\n\t\t\t_libraries[dllname] = _ffi.dlopen(os.path.join(BNN_LIB_DIR, dllname))\n\t\tself.interface = _libraries[dllname]\n\t\tself.num_classes = 0\n\n\tdef __del__(self):\n\t\tself.interface.deinit()\n\n\t# function to set weights and activation thresholds of specific network\n\tdef load_parameters(self, params):\n\t\tif not os.path.isabs(params):\n\t\t\tparams = os.path.join(BNN_PARAM_DIR, params)\n\t\tif os.path.isdir(params):\n\t\t\tself.interface.load_parameters(params.encode())\n\t\t\tself.classes = []\n\t\t\twith open (os.path.join(params, \"classes.txt\")) as f:\n\t\t\t\tself.classes = [c.strip() for c in f.readlines()]\n\t\t\tfilter(None, self.classes)\n\t\telse:\n\t\t\tprint(\"\\nERROR: No such parameter directory \\\"\" + params + \"\\\"\")\n\n\t# starts inference on single image with highest ranked class output\n\tdef inference(self, path):\n\t\tusecperimage = _ffi.new(\"float *\")\n\t\tresult_ptr = self.interface.inference(path.encode(), _ffi.NULL, len(self.classes), usecperimage)\n\t\tprint(\"Inference took %.2f microseconds\" % (usecperimage[0]))\n\t\tprint(\"Classification rate: %.2f images per second\" % (1000000.0/usecperimage[0]))\n\t\tself.usecPerImage = usecperimage[0]\n\t\treturn result_ptr\n\n\t# starts inference on single image, output is vector containing rankings of all available classes\n\t# not available for LFC\n\tdef detailed_inference(self, path):\n\t\tdetails_ptr = _ffi.new(\"int[]\", len(self.classes))\n\t\tusecperimage = _ffi.new(\"float *\") \n\t\tself.interface.inference(path.encode(), details_ptr, len(self.classes), usecperimage)\n\t\tdetails_buf = _ffi.buffer(details_ptr, len(self.classes) * 4)\n\t\tprint(\"Inference took %.2f microseconds\" % (usecperimage[0]))\n\t\tprint(\"Classification rate: %.2f images per second\" % (1000000.0/usecperimage[0]))\n\t\tdetails_array = np.copy(np.frombuffer(details_buf, dtype=np.int32))\n\t\tself.usecPerImage = usecperimage[0]\n\t\treturn details_array\n\n\t# starts inference on multiple images, output is vector containing inferred class of each image\n\tdef inference_multiple(self, path):\n\t\tsize_ptr = _ffi.new(\"int *\")\n\t\tusecperimage = _ffi.new(\"float *\")\n\t\tresult_ptr = self.interface.inference_multiple(path.encode(), len(self.classes), size_ptr, usecperimage,0)\n\t\tresult_buffer = _ffi.buffer(result_ptr, size_ptr[0] * 4)\n\t\tprint(\"Inference took %.2f microseconds, %.2f usec per image\" % (usecperimage[0]*size_ptr[0],usecperimage[0]))\n\t\tresult_array = np.copy(np.frombuffer(result_buffer, dtype=np.int32))\n\t\tprint(\"Classification rate: %.2f images per second\" % (1000000.0/usecperimage[0]))\n\t\tself.interface.free_results(result_ptr)\n\t\tself.usecPerImage = usecperimage[0]\n\t\treturn result_array\n\n\t# starts inference on multiple images, output contains rankings for each class for each image flatten to 1 dimensional vector\n\tdef inference_multiple_detail(self, path):\n\t\tsize_ptr = _ffi.new(\"int *\")\n\t\tusecperimage = _ffi.new(\"float *\")\n\t\tresult_ptr = 
self.interface.inference_multiple(path.encode(), len(self.classes), size_ptr, usecperimage,1)\n\t\tprint(\"Inference took %.2f microseconds, %.2f usec per image\" % (usecperimage[0]*size_ptr[0],usecperimage[0]))\n\t\tprint(\"Classification rate: %.2f images per second\" % (1000000.0/usecperimage[0]))\n\t\tresult_buffer = _ffi.buffer(result_ptr,len(self.classes)* size_ptr[0] * 4)\n\t\tresult_array = np.copy(np.frombuffer(result_buffer, dtype=np.int32))\n\t\tself.interface.free_results(result_ptr)\n\t\tself.usecPerImage = usecperimage[0]\n\t\treturn result_array\n\n\t# function to resolve the class index to a class name\n\tdef class_name(self, index):\n\t\treturn self.classes[index]\n\n# classifier class for CNV networks to perform inference on cifar10 formatted images or images that have to be preprocessed\nclass CnvClassifier:\n\n\t# constructor will load the shared library, download the bitstream to PL and load the parameter set into network\n\tdef __init__(self, network, params, runtime=RUNTIME_HW):\n\t\tif params in available_params(network):\n\t\t\tself.net = network\n\t\t\tself.params = params\n\t\t\tself.runtime = runtime\n\t\t\tself.usecPerImage = 0.0\n\t\t\tself.bnn = PynqBNN(runtime, network)\n\t\t\tself.bnn.load_parameters(os.path.join(params, network))\n\t\t\tself.classes = self.bnn.classes\n\t\telse:\n\t\t\tprint(\"ERROR: parameters are not availlable for {0}\".format(network))\n\n\t# converting image to cifar10 format\n\tdef image_to_cifar(self, img, fp):\n\t\t# We resize the downloaded image to be 32x32 pixels as expected from the BNN\n\t\timg.thumbnail((32, 32), Image.ANTIALIAS)\n\t\tbackground = Image.new('RGBA', (32, 32), (255, 255, 255, 0))\n\t\tbackground.paste(\n\t\t\timg, (int((32 - img.size[0]) / 2), int((32 - img.size[1]) / 2))\n\t\t)\n\t\t# We write the image into the format used in the Cifar-10 dataset for code compatibility \n\t\timg = (np.array(background))\n\t\tr = img[:,:,0].flatten()\n\t\tg = img[:,:,1].flatten()\n\t\tb = img[:,:,2].flatten()\n\t\tlabel = np.identity(1, dtype=np.uint8)\n\t\tfp.write(label.tobytes())\n\t\tfp.write(r.tobytes())\n\t\tfp.write(g.tobytes())\n\t\tfp.write(b.tobytes())\n\n\t# classify non cifar10 formatted image, result is highest ranked class\n\tdef classify_image(self, img):\n\t\twith tempfile.NamedTemporaryFile() as tmp:\n\t\t\tself.image_to_cifar(img, tmp)\n\t\t\ttmp.flush()\n\t\t\tresult = self.bnn.inference(tmp.name)\n\t\tself.usecPerImage = self.bnn.usecPerImage\n\t\treturn result\n\n\t# classify cifar10 formatted image, result is highest ranked class\n\tdef classify_cifar(self, path):\n\t\tresult = self.bnn.inference(path)\n\t\tself.usecPerImage = self.bnn.usecPerImage\n\t\treturn result\t\n\n\t# classify non cifar10 formatted image, result is vector with all rankings of each class\n\tdef classify_image_details(self, img):\n\t\twith tempfile.NamedTemporaryFile() as tmp:\n\t\t\tself.image_to_cifar(img, tmp)\n\t\t\ttmp.flush()\n\t\t\tresult = self.bnn.detailed_inference(tmp.name)\n\t\tself.usecPerImage = self.bnn.usecPerImage\n\t\treturn result\n\n\t# classify cifar10 formatted image, result is vector with all rankings of each class\n\tdef classify_cifar_details(self, path):\n\t\tresult = self.bnn.detailed_inference(path)\n\t\tself.usecPerImage = self.bnn.usecPerImage\n\t\treturn result\n\n\t# classify images within a path (only regular images)\n\tdef classify_path(self, path):\n\t\timg = Image.open(path)\n\t\treturn self.classify_image(img)\n\n\t# classify multiple regular images, result is highest ranked class\n\tdef 
classify_images(self, imgs):\n\t\twith tempfile.NamedTemporaryFile() as tmp:\n\t\t\tfor img in imgs:\n\t\t\t\tself.image_to_cifar(img, tmp)\n\t\t\ttmp.flush()\n\t\t\tresult = self.bnn.inference_multiple(tmp.name)\n\t\tself.usecPerImage = self.bnn.usecPerImage\n\t\treturn result\n\n\t# classify multiple cifar10 preformatted pictures, output is inferred class\n\tdef classify_cifars(self, path):\n\t\tresult = self.bnn.inference_multiple(path)\n\t\tself.usecPerImage = self.bnn.usecPerImage\n\t\treturn result\t\n\n\t# multiple detailed inference returns a flatten 1 dimensional vector with each ranking for each class, image by image\n\t# .. for regular images\n\tdef classify_images_details(self, imgs):\n\t\twith tempfile.NamedTemporaryFile() as tmp:\n\t\t\tfor img in imgs:\n\t\t\t\tself.image_to_cifar(img, tmp)\n\t\t\ttmp.flush()\n\t\t\tresult = self.bnn.inference_multiple_detail(tmp.name)\n\t\tself.usecPerImage = self.bnn.usecPerImage\n\t\treturn result\n\n\t#.. for cifar10 preformatted pictures\n\tdef classify_cifars_details(self, path):\n\t\tresult = self.bnn.inference_multiple_detail(path)\n\t\tself.usecPerImage = self.bnn.usecPerImage\n\t\treturn result\n\n\t# classify regular images within paths while now a array of paths can be passed\n\tdef classify_paths(self, paths):\n\t\treturn self.classify_images([Image.open(p) for p in paths])\n\n\tdef class_name(self, index):\n\t\treturn self.bnn.classes[index]\n\n# classifier class for LFC networks to perform inference on mnist formatted images\nclass LfcClassifier:\n\n\t# constructor will load the shared library, download the bitstream to PL and load the specific parameter set into network\n\tdef __init__(self, network, params, runtime=RUNTIME_HW):\n\t\tif params in available_params(network):\n\t\t\tself.net = network\n\t\t\tself.params = params\n\t\t\tself.runtime = runtime\n\t\t\tself.usecPerImage = 0.0\n\t\t\tself.bnn = PynqBNN(runtime, network)\n\t\t\tself.bnn.load_parameters(os.path.join(params, network))\n\t\t\tself.classes = self.bnn.classes\n\t\telse:\n\t\t\tprint(\"ERROR: parameters are not availlable for {0}\".format(network))\n\n\t# classify single mnist formatted image, output is highest ranked class\n\tdef classify_mnist(self, mnist_format_file):\n\t\tresult = self.bnn.inference(mnist_format_file)\n\t\tself.usecPerImage = self.bnn.usecPerImage\n\t\treturn result\n\n\t# classify multiple mnist formatted image, output is vector of inferred classes\n\tdef classify_mnists(self, mnist_format_file):\n\t\tresult = self.bnn.inference_multiple(mnist_format_file)\n\t\tself.usecPerImage = self.bnn.usecPerImage\n\t\treturn result\n\n\tdef class_name(self, index):\n\t\treturn self.bnn.classes[index]\n" ]
[ [ "numpy.identity", "numpy.array", "numpy.frombuffer" ] ]
siravan/fib_tf
[ "7505a494875d880a6d49d480cb82e1fa55677f2c" ]
[ "fenton.py" ]
[ "#!/usr/bin/env python\n\"\"\"\n A TensorFlow-based 2D Cardiac Electrophysiology Modeler\n\n Copyright 2017-2018 Shahriar Iravanian ([email protected])\n\n Permission is hereby granted, free of charge, to any person obtaining a copy\n of this software and associated documentation files (the \"Software\"), to\n deal in the Software without restriction, including without limitation the\n rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n sell copies of the Software, and to permit persons to whom the Software is\n furnished to do so, subject to the following conditions:\n\n The above copyright notice and this permission notice shall be included in\n all copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n IN THE SOFTWARE.\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\n# from screen import Screen\nfrom ionic import IonicModel\n\nclass Fenton4v(IonicModel):\n \"\"\"\n The Cherry-Ehrlich-Nattel-Fenton (4v) canine left-atrial model\n\n Cherry EM, Ehrlich JR, Nattel S, Fenton FH. Pulmonary vein reentry--\n properties and size matter: insights from a computational analysis.\n Heart Rhythm. 2007 Dec;4(12):1553-62.\n \"\"\"\n\n def __init__(self, props):\n IonicModel.__init__(self, props)\n self.min_v = 0.0\n self.max_v = 1.0\n self.depol = 0.0\n\n def differentiate(self, U, V, W, S):\n \"\"\" the state differentiation for the 4v model \"\"\"\n # constants for the Fenton 4v left atrial action potential model\n tau_vp = 3.33\n tau_vn1 = 19.2\n tau_vn = tau_vn1\n tau_wp = 160.0\n tau_wn1 = 75.0\n tau_wn2 = 75.0\n tau_d = 0.065\n tau_si = 31.8364\n tau_so = tau_si\n tau_0 = 39.0\n tau_a = 0.009\n u_c = 0.23\n u_w = 0.146\n u_0 = 0.0\n u_m = 1.0\n u_csi = 0.8\n u_so = 0.3\n r_sp = 0.02\n r_sn = 1.2\n k_ = 3.0\n a_so = 0.115\n b_so = 0.84\n c_so = 0.02\n\n def H(x):\n \"\"\" the step function \"\"\"\n return (1 + tf.sign(x)) * 0.5\n\n def G(x):\n \"\"\" the step function \"\"\"\n return (1 - tf.sign(x)) * 0.5\n\n I_fi = -V * H(U - u_c) * (U - u_c) * (u_m - U) / tau_d\n I_si = -W * S / tau_si\n I_so = (0.5 * (a_so - tau_a) * (1 + tf.tanh((U - b_so) / c_so)) +\n (U - u_0) * G(U - u_so) / tau_so + H(U - u_so) * tau_a)\n\n dU = -(I_fi + I_si + I_so)\n dV = tf.where(U > u_c, -V / tau_vp, (1 - V) / tau_vn)\n dW = tf.where(U > u_c, -W / tau_wp, tf.where(U > u_w, (1 - W) / tau_wn2, (1 - W) / tau_wn1))\n r_s = (r_sp - r_sn) * H(U - u_c) + r_sn\n dS = r_s * (0.5 * (1 + tf.tanh((U - u_csi) * k_)) - S)\n\n return dU, dV, dW, dS\n\n\n def solve(self, state):\n \"\"\" Explicit Euler ODE solver \"\"\"\n U, V, W, S = state\n U0 = self.enforce_boundary(U)\n\n with self.jit_scope():\n dU, dV, dW, dS = self.differentiate(U, V, W, S)\n\n U1 = U0 + self.dt * dU + self.diff * self.dt * self.laplace(U0)\n V1 = V + self.dt * dV\n W1 = W + self.dt * dW\n S1 = S + self.dt * dS\n\n return (U1, V1, W1, S1)\n\n def define(self, s1=True):\n \"\"\"\n Create a tensorflow graph to run the Fenton 4v model\n \"\"\"\n super().define()\n # the initial values of the state variables\n u_init = np.zeros([self.height, self.width], dtype=np.float32)\n v_init = 
np.ones([self.height, self.width], dtype=np.float32)\n w_init = np.ones([self.height, self.width], dtype=np.float32)\n s_init = np.zeros([self.height, self.width], dtype=np.float32)\n\n # S1 stimulation: vertical along the left side\n if s1:\n u_init[:,1] = 1.0\n\n # define the graph...\n with tf.device('/device:GPU:0'):\n # Create variables for simulation state\n U = tf.Variable(u_init, name='U')\n V = tf.Variable(v_init, name='V')\n W = tf.Variable(w_init, name='W')\n S = tf.Variable(s_init, name='S')\n\n # Graph Unrolling\n states = [(U, V, W, S)]\n for i in range(10):\n states.append(self.solve(states[-1]))\n U1, V1, W1, S1 = states[-1]\n self.dt_per_step = 10\n\n self._ode_op = tf.group(\n U.assign(U1),\n V.assign(V1),\n W.assign(W1),\n S.assign(S1)\n )\n\n self._U = U\n\n def pot(self):\n return self._U\n\n def image(self):\n return self._U.eval()\n\nif __name__ == '__main__':\n config = {\n 'width': 512, # screen width in pixels\n 'height': 512, # screen height in pixels\n 'dt': 0.1, # integration time step in ms\n 'dt_per_plot' : 10, # screen refresh interval in dt unit\n 'diff': 1.5, # diffusion coefficient\n 'duration': 1000, # simulation duration in ms\n 'timeline': False, # flag to save a timeline (profiler)\n 'timeline_name': 'timeline_4v.json',\n 'save_graph': True # flag to save the dataflow graph\n }\n model = Fenton4v(config)\n\n model.add_hole_to_phase_field(256, 256, 30)\n model.define()\n model.add_pace_op('s2', 'luq', 1.0)\n # note: change the following line to im = None to run without a screen\n #im = Screen(model.height, model.width, 'Fenton 4v Model')\n im = None\n\n s2 = model.millisecond_to_step(210) # 210 ms\n ds = model.millisecond_to_step(10)\n n = int(model.duration / 10.0)\n cube = np.zeros([n, model.height, model.width], dtype=np.float32)\n\n for i in model.run(im):\n if i == s2:\n model.fire_op('s2')\n if i % ds == 0:\n cube[i//ds,:,:] = model.image() * model.phase\n\n np.save('cube', cube)\n" ]
[ [ "numpy.zeros", "tensorflow.where", "numpy.ones", "tensorflow.Variable", "numpy.save", "tensorflow.sign", "tensorflow.device", "tensorflow.tanh" ] ]
ProfesseurIssou/Easy-QLearning
[ "0c67ab96230776988b4193314f5cb039e93afa07" ]
[ "Easy-QLearning 1.0.0/src/EQL.py" ]
[ "import random\nimport numpy as np\n\nclass QLearning:\n def __init__(self,nbAction:int,nbState:int,gamma:float=0.9,learningRate:float=0.1):\n \"\"\"\n nbParam : Number of action in state\n gamma : Reward power [0;1] (0.1 long path priority, 0.9 short path priority)\n learningRate : Learning power [0;1]\n nbState : The number of states\n \"\"\"\n #Number of action\n self.nbAction = nbAction\n #Qtable\n self.QTable = []\n for x in range(nbState):\n self.QTable.append([])\n for y in range(nbAction):\n self.QTable[x].append(0.0)\n #gamma\n self.gamma = gamma\n #Learning Rate\n self.learningRate = learningRate\n #Old action\n self.oldAction = -1\n #New action\n self.newAction = -1\n def takeAction(self,state:int,epsilon:int):\n \"\"\"\n state : Current State\n epsilone : exploration value [0;1]\n Return action\n \"\"\"\n #Epsilon greedy\n if random.uniform(0,1) < epsilon: #Exploration\n #Get random action\n action = random.randint(0,self.nbAction-1)\n else: #Greedy action\n #Get the action with the highest Value Function in our state\n action = np.argmax(self.QTable[state])\n #Change the actions order\n self.oldAction = self.newAction\n self.newAction = action\n return action\n def updateQFunction(self,currentState:str,oldState:str,reward:int):\n \"\"\"\n \"\"\"\n #On prend la meilleur option pour le prochain etat\n self.takeAction(currentState,0.0)\n #On prend la difference entre l'etat+1 et l'etat de base\n a = self.QTable[currentState][self.newAction] - self.QTable[oldState][self.oldAction]\n #on le multiplie au gamma\n a = self.gamma*a\n #on additionne le reward\n a = reward + a\n #on le multiplie au learning rate\n a = self.learningRate*a\n #on ajoute la difference\n self.QTable[oldState][self.oldAction] += a\n" ]
[ [ "numpy.argmax" ] ]
sarang-IITKgp/plot-shapes
[ "33aff54515eabd55afe42bf0091395dc3e6e6829" ]
[ "src/plotshapes/conic_sections.py" ]
[ "import numpy as np\nfrom . import transform\n\nclass circle:\n\t\"\"\"This class defines an object of type circle. \n\tAttributes: \n\t\tcenter: (0,0) by default.\n\t\tradius: 1 by default.\n\t\t\"\"\"\n\t\n\tdef __init__(self, radius=1, center=(0,0),pts=100,text_tag='Circle'):\n\t\tself.text_tag = text_tag\n\t\tself.r = radius\n\t\tself.xc , self.yc = center\n\t\tself.perimeter = 2*np.pi*self.r\n\t\tself.area = np.pi*self.r**2\n\t\tself.theta = np.linspace(0,2*np.pi,pts)\n\t\tself.x = self.r*np.cos(self.theta) + self.xc\n\t\tself.y = self.r*np.sin(self.theta) + self.yc\n\t\t\n\t\treturn\n\t\t\n\t\t\n\tdef plot(self,ax_f,linewidth=2,text_tag=None):\n\t\tif text_tag is None:\n\t\t\tax_f.plot(self.x,self.y,linewidth=2,label=self.text_tag)\n\t\telse:\n\t\t\tax_f.plot(self.x,self.y,linewidth=2,label=text_tag)\n\t\t\t\n\n\nclass ellipse:\n\t\"\"\"This class defines an object of type ellipse.\n\tAttributes: \n\t\tcenter: (0,0) by default.\n\t\t(major_axis,minor_axis): (1,2) by default. \n\t\t\"\"\"\n\t\n\tdef __init__(self, maj_mi_tuple = (1,2), center=(0,0),pts=100,text_tag='Ellipse'):\n\t\t\n\t\tself.text_tag = text_tag\n\t\tself.major_axis, self.minor_axis = maj_mi_tuple\n\t\tself.xc , self.yc = center\n\t\tself.perimeter = np.pi*(self.major_axis + self.minor_axis)\n\t\t\n\t\t#self.area = np.pi*self.major_axis*self.minor_axis\n\t\tself.theta = np.linspace(0,2*np.pi,pts)\n\t\tself.x = self.major_axis*np.cos(self.theta) + self.xc\n\t\tself.y = self.minor_axis*np.sin(self.theta) + self.yc\n\t\t\n\tdef rotate(self,theta):\n\t\t#print('rotate by theta', theta)\n\t\tx_rot, y_rot = transform.rotate(self.x-self.xc,self.y-self.yc,theta)\n\t\t\n\t\tself.x = x_rot + self.xc\n\t\tself.y = y_rot + self.yc\n\t\t\n\t\t\n\tdef rotate(self,theta):\n\t\tx_rot, y_rot = transform.rotate(self.x-self.xc,self.y-self.yc,theta)\n\t\tself.x = x_rot + self.xc\n\t\tself.y = y_rot + self.yc\n\t\t\n\tdef rotate_about_center(self,theta):\n\t\tx_rot, y_rot = transform.rotate(self.x,self.y,theta)\n\t\tself.xc, self.yc = transform.rotate(self.xc,self.yc,theta)\n\t\tself.x = x_rot\n\t\tself.y = y_rot\n\t\t\n\tdef plot(self,ax_f,linewidth=2,text_tag=None):\n\t\tif text_tag is None:\n\t\t\tax_f.plot(self.x,self.y,linewidth=2,label=self.text_tag)\n\t\telse:\n\t\t\tax_f.plot(self.x,self.y,linewidth=2,label=text_tag)\n\t\t\t\n\n\t\t\n\t\t\n\n\n\n" ]
[ [ "numpy.linspace", "numpy.sin", "numpy.cos" ] ]
AFansGH/pysteps
[ "ee5cd10ed9058808f934cb1992913055fbcbb3d2" ]
[ "pysteps/tests/test_plt_precipfields.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport pytest\n\nfrom pysteps.visualization import plot_precip_field\nfrom pysteps.utils import conversion\nfrom pysteps.postprocessing import ensemblestats\nfrom pysteps.tests.helpers import get_precipitation_fields\nimport matplotlib.pyplot as pl\n\nplt_arg_names = (\n \"source\",\n \"type\",\n \"bbox\",\n \"colorscale\",\n \"probthr\",\n \"title\",\n \"colorbar\",\n \"axis\",\n)\n\nplt_arg_values = [\n (\"mch\", \"intensity\", None, \"pysteps\", None, None, False, \"off\"),\n (\"mch\", \"depth\", None, \"pysteps\", None, \"Title\", True, \"on\"),\n (\"mch\", \"prob\", None, \"pysteps\", 0.1, None, True, \"on\"),\n (\"mch\", \"intensity\", None, \"STEPS-BE\", None, None, True, \"on\"),\n (\"mch\", \"intensity\", None, \"BOM-RF3\", None, None, True, \"on\"),\n (\"bom\", \"intensity\", None, \"pysteps\", None, None, True, \"on\"),\n (\"fmi\", \"intensity\", None, \"pysteps\", None, None, True, \"on\"),\n (\"knmi\", \"intensity\", None, \"pysteps\", None, None, True, \"on\"),\n (\n \"knmi\",\n \"intensity\",\n [2e2, -4.1e3, 5e2, -3.8e3],\n \"pysteps\",\n None,\n None,\n True,\n \"on\",\n ),\n (\"opera\", \"intensity\", None, \"pysteps\", None, None, True, \"on\"),\n (\"saf\", \"intensity\", None, \"pysteps\", None, None, True, \"on\"),\n]\n\n\[email protected](plt_arg_names, plt_arg_values)\ndef test_visualization_plot_precip_field(\n source, type, bbox, colorscale, probthr, title, colorbar, axis,\n):\n\n if type == \"intensity\":\n\n field, metadata = get_precipitation_fields(0, 0, True, True, None, source)\n field = field.squeeze()\n field, metadata = conversion.to_rainrate(field, metadata)\n\n elif type == \"depth\":\n\n field, metadata = get_precipitation_fields(0, 0, True, True, None, source)\n field = field.squeeze()\n field, metadata = conversion.to_raindepth(field, metadata)\n\n elif type == \"prob\":\n\n field, metadata = get_precipitation_fields(0, 10, True, True, None, source)\n field, metadata = conversion.to_rainrate(field, metadata)\n field = ensemblestats.excprob(field, probthr)\n\n ax = plot_precip_field(\n field,\n type=type,\n bbox=bbox,\n geodata=metadata,\n colorscale=colorscale,\n probthr=probthr,\n units=metadata[\"unit\"],\n title=title,\n colorbar=colorbar,\n axis=axis,\n )\n\n\nif __name__ == \"__main__\":\n\n for i, args in enumerate(plt_arg_values):\n test_visualization_plot_precip_field(*args)\n pl.show()\n" ]
[ [ "matplotlib.pyplot.show" ] ]
LJOVO/TranSalNet
[ "a2aba83e3b8f54c47b712511bf4f515f236326ed" ]
[ "TranSalNet_Res.py" ]
[ "import os\nimport torch\nimport numpy as np\nimport pandas as pd\nfrom torch.utils.data import Dataset, DataLoader\nfrom skimage import io, transform\nfrom PIL import Image\nimport torch.nn as nn\nfrom torchvision import transforms, utils, models\nimport torch.nn.functional as F\nimport utils.resnet as resnet\n\nfrom utils.TransformerEncoder import Encoder\n\n\n\ncfg1 = {\n\"hidden_size\" : 768,\n\"mlp_dim\" : 768*4,\n\"num_heads\" : 12,\n\"num_layers\" : 2,\n\"attention_dropout_rate\" : 0,\n\"dropout_rate\" : 0.0,\n}\n\ncfg2 = {\n\"hidden_size\" : 768,\n\"mlp_dim\" : 768*4,\n\"num_heads\" : 12,\n\"num_layers\" : 2,\n\"attention_dropout_rate\" : 0,\n\"dropout_rate\" : 0.0,\n}\n\ncfg3 = {\n\"hidden_size\" : 512,\n\"mlp_dim\" : 512*4,\n\"num_heads\" : 8,\n\"num_layers\" : 2,\n\"attention_dropout_rate\" : 0,\n\"dropout_rate\" : 0.0,\n}\n\n\nclass TranSalNet(nn.Module):\n\n def __init__(self):\n super(TranSalNet, self).__init__()\n self.encoder = _Encoder()\n self.decoder = _Decoder()\n\n def forward(self, x):\n x = self.encoder(x)\n x = self.decoder(x)\n return x\n\n\nclass _Encoder(nn.Module):\n def __init__(self):\n super(_Encoder, self).__init__()\n base_model = resnet.resnet50(pretrained=True)\n base_layers = list(base_model.children())[:8]\n self.encoder = nn.ModuleList(base_layers).eval()\n\n def forward(self, x):\n outputs = []\n for ii,layer in enumerate(self.encoder):\n x = layer(x)\n if ii in {5,6,7}:\n outputs.append(x)\n return outputs\n\n\nclass _Decoder(nn.Module):\n\n def __init__(self):\n super(_Decoder, self).__init__()\n self.conv1 = nn.Conv2d(768, 768, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n self.conv2 = nn.Conv2d(768, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n self.conv3 = nn.Conv2d(512, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n self.conv4 = nn.Conv2d(256, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n self.conv5 = nn.Conv2d(128, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n self.conv6 = nn.Conv2d(64, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n self.conv7 = nn.Conv2d(32, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n\n self.batchnorm1 = nn.BatchNorm2d(768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.batchnorm2 = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.batchnorm3 = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.batchnorm4 = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.batchnorm5 = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n self.batchnorm6 = nn.BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n\n self.TransEncoder1 = TransEncoder(in_channels=2048, spatial_size=9*12, cfg=cfg1)\n self.TransEncoder2 = TransEncoder(in_channels=1024, spatial_size=18*24, cfg=cfg2)\n self.TransEncoder3 = TransEncoder(in_channels=512, spatial_size=36*48, cfg=cfg3)\n\n self.add = torch.add\n self.relu = nn.ReLU(True)\n self.upsample = nn.Upsample(scale_factor=2, mode='nearest')\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n x3, x4, x5 = x\n\n x5 = self.TransEncoder1(x5)\n x5 = self.conv1(x5)\n x5 = self.batchnorm1(x5)\n x5 = self.relu(x5)\n x5 = self.upsample(x5)\n\n x4_a = self.TransEncoder2(x4)\n x4 = x5 * x4_a\n x4 = self.relu(x4)\n x4 = self.conv2(x4)\n x4 = self.batchnorm2(x4)\n x4 = self.relu(x4)\n x4 = self.upsample(x4)\n\n x3_a = self.TransEncoder3(x3)\n 
x3 = x4 * x3_a\n x3 = self.relu(x3)\n x3 = self.conv3(x3)\n x3 = self.batchnorm3(x3)\n x3 = self.relu(x3)\n x3 = self.upsample(x3)\n\n x2 = self.conv4(x3)\n x2 = self.batchnorm4(x2)\n x2 = self.relu(x2)\n x2 = self.upsample(x2)\n x2 = self.conv5(x2)\n x2 = self.batchnorm5(x2)\n x2 = self.relu(x2)\n\n x1 = self.upsample(x2)\n x1 = self.conv6(x1)\n x1 = self.batchnorm6(x1)\n x1 = self.relu(x1)\n x1 = self.conv7(x1)\n x = self.sigmoid(x1)\n\n return x\n\n\nclass TransEncoder(nn.Module):\n\n def __init__(self, in_channels, spatial_size, cfg):\n super(TransEncoder, self).__init__()\n\n self.patch_embeddings = nn.Conv2d(in_channels=in_channels,\n out_channels=cfg['hidden_size'],\n kernel_size=1,\n stride=1)\n self.position_embeddings = nn.Parameter(torch.zeros(1, spatial_size, cfg['hidden_size']))\n\n self.transformer_encoder = Encoder(cfg)\n\n def forward(self, x):\n a, b = x.shape[2], x.shape[3]\n x = self.patch_embeddings(x)\n x = x.flatten(2)\n x = x.transpose(-1, -2)\n\n embeddings = x + self.position_embeddings\n x = self.transformer_encoder(embeddings)\n B, n_patch, hidden = x.shape\n x = x.permute(0, 2, 1)\n x = x.contiguous().view(B, hidden, a, b)\n\n return x\n\n" ]
[ [ "torch.zeros", "torch.nn.ModuleList", "torch.nn.Sigmoid", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.Upsample", "torch.nn.Conv2d" ] ]
Animadversio/Visual_Neuro_InSilico_Exp
[ "39b1e65e5613b064361c09c7d3f88496f3a7efd2" ]
[ "Hessian/StyleGAN_hess_spectrum.py" ]
[ "#%%\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\nfrom tqdm import tqdm\nfrom time import time\nimport os\nfrom os.path import join\nimport sys\nimport lpips\nfrom Hessian.GAN_hessian_compute import hessian_compute\nfrom torchvision.transforms import ToPILImage\nfrom torchvision.utils import make_grid\nfrom Hessian.hessian_analysis_tools import average_H, scan_hess_npz\n\nuse_gpu = True if torch.cuda.is_available() else False\nImDist = lpips.LPIPS(net='squeeze').cuda()\n#%%\n\"\"\"Torch Hub version, really heavy and cumbersum\"\"\"\nmodel = torch.hub.load('ndahlquist/pytorch-hub-stylegan:0.0.1', 'style_gan', pretrained=True)\nclass StyleGAN_wrapper(): # nn.Module\n def __init__(self, StyleGAN, ):\n self.StyleGAN = StyleGAN\n\n def visualize(self, code, scale=1):\n imgs = self.StyleGAN.forward(code,) # Matlab version default to 0.7\n return torch.clamp((imgs + 1.0) / 2.0, 0, 1) * scale\nG = StyleGAN_wrapper(model.cuda())\n#%%\nsavedir = r\"E:\\OneDrive - Washington University in St. Louis\\Hessian_summary\\StyleGAN_hr\"\n#%%\ndata = np.load(join(savedir, \"Hessian_EPS_BP.npz\"))\nH_BP = data[\"H_BP\"]\nfeat = torch.tensor(data['feat']).detach().cuda()\n#%%\n# noise = torch.randn(1, 512)\n# feat = noise.detach().clone().cuda()\n# G.StyleGAN.cuda()\nH_col = []\nfor EPS in [1E-6, 1E-5, 1E-4, 3E-4, 1E-3, 3E-3, 1E-2, 3E-2, 1E-1, ]:\n T0 = time()\n eva_FI, evc_FI, H_FI = hessian_compute(G, feat, ImDist, hessian_method=\"ForwardIter\", EPS=EPS,\n preprocess=lambda img: F.interpolate(img, (256, 256), mode='bilinear', align_corners=True))\n print(\"%.2f sec\" % (time() - T0)) # 260.5 sec\n print(\"EPS %.1e Correlation of Flattened Hessian matrix BP vs ForwardIter %.3f\" % (\n EPS, np.corrcoef(H_BP.flatten(), H_FI.flatten())[0, 1]))\n H_col.append((eva_FI, evc_FI, H_FI))\n\nnp.savez(join(savedir, \"Hessian_EPS_accuracy.npz\"), H_col=H_col, feat=feat.detach().cpu().numpy())\nprint(\"Save Completed. 
\")\n#%%\nnp.savez(join(savedir, \"Hessian_EPS_BP.npz\"), eva_BP=eva_BP, evc_BP=evc_BP, H_BP=H_BP, feat=feat.detach().cpu().numpy())\n#%%\nG.StyleGAN.to(\"cpu\")\nfeat.cpu()\nT0 = time()\neva_BI, evc_BI, H_BI = hessian_compute(G, feat, ImDist, hessian_method=\"BackwardIter\", preprocess=lambda img: F.interpolate(img, (256, 256), mode='bilinear', align_corners=True), device=\"cpu\")\nprint(\"%.2f sec\" % (time() - T0)) # this will exceed gpu memory\nnp.savez(join(savedir, \"Hessian_EPS_BI.npz\"), eva_BI=eva_BI, evc_BI=evc_BI, H_BI=H_BI, feat=feat.detach().cpu().numpy())\n# print(\"Correlation of Flattened Hessian matrix BP vs BackwardIter %.3f\" % np.corrcoef(H_BP.flatten(), H_BI.flatten())[0, 1])\n# print(\"Correlation of Flattened Hessian matrix BP vs ForwardIter %.3f\" %\n# np.corrcoef(H_BP.flatten(), H_FI.flatten())[0, 1])\n# print(\"Correlation of Flattened Hessian matrix ForwardIter vs BackwardIter %.3f\"%\n# np.corrcoef(H_FI.flatten(), H_BI.flatten())[0, 1])\n#%% Load the Hessian data and compute the correlation value\ndata_BP = np.load(join(savedir, \"Hessian_EPS_BP.npz\"))\ndata_FI = np.load(join(savedir, \"Hessian_EPS_accuracy.npz\"), allow_pickle=True)\n\n# correlation with or without taking absolute of the eigenvalues\nH_BP, evc_BP, eva_BP = data_BP[\"H_BP\"], data_BP[\"evc_BP\"], data_BP[\"eva_BP\"]\nEPS_list = [1E-6, 1E-5, 1E-4, 3E-4, 1E-3, 3E-3, 1E-2, 3E-2, 1E-1, ]\nfor EPSi in range(data_FI['H_col'].shape[0]):\n EPS = EPS_list[EPSi]\n eva_FI, evc_FI, H_FI = data_FI['H_col'][EPSi, :]\n print(\"EPS %.1e Correlation of Flattened Hessian matrix BP vs ForwardIter %.3f\" % (\n EPS, np.corrcoef(H_BP.flatten(), H_FI.flatten())[0, 1]))\n H_PSD = [email protected](np.abs(eva_FI)) @evc_FI.T\n print(\"EPS %.1e Correlation of Flattened Hessian matrix BP vs ForwardIter (AbsHess) %.3f\" % (\n EPS, np.corrcoef(H_BP.flatten(), H_PSD.flatten())[0, 1]))\n # print(\"EPS %.1e Correlation of Flattened Hessian matrix BP vs ForwardIter %.3f\" % (\n # EPS, np.corrcoef(H_BP.flatten(), H_FI.flatten())[0, 1]))\n#%%%\nfrom Hessian.hessian_analysis_tools import compute_hess_corr, plot_consistentcy_mat, plot_consistency_example\nimport matplotlib.pylab as plt\nimport matplotlib\nmatplotlib.rcParams['pdf.fonttype'] = 42\n#%%\ndef plot_spectra(eigval_col, savename=\"spectrum_onetrial.jpg\", figdir=savedir, fig=None, label=\"BP\"):\n \"\"\"A local function to compute these figures for different subspaces. 
\"\"\"\n eigmean = eigval_col.mean(axis=0)\n eiglim = np.percentile(eigval_col, [5, 95], axis=0)\n sortidx = np.argsort(-np.abs(eigmean))\n eigmean = np.abs(eigmean[sortidx])\n eiglim = eiglim[:, sortidx]\n eigN = len(eigmean)\n if fig is None:\n fig, axs = plt.subplots(1, 2, figsize=[10, 5])\n else:\n # plt.figure(fig.number)\n plt.figure(num=fig.number)\n axs = fig.axes\n plt.sca(axs[0])\n plt.plot(range(eigN), eigmean, alpha=0.6)\n plt.fill_between(range(eigN), eiglim[0, :], eiglim[1, :], alpha=0.3, label=label)\n plt.ylabel(\"eigenvalue\")\n plt.xlabel(\"eig id\")\n plt.legend()\n plt.sca(axs[1])\n plt.plot(range(eigN), np.log10(eigmean), alpha=0.6)\n plt.fill_between(range(eigN), np.log10(eiglim[0, :]), np.log10(eiglim[1, :]), alpha=0.3, label=label)\n plt.ylabel(\"eigenvalue(log)\")\n plt.xlabel(\"eig id\")\n plt.legend()\n st = plt.suptitle(\"Hessian Spectrum of StyleGAN\\n (error bar for [5,95] percentile among all samples)\")\n plt.savefig(join(figdir, savename), bbox_extra_artists=[st]) # this is working.\n # fig.show()\n return fig\n\n\nfig = plot_spectra(data_BP[\"eva_BP\"][np.newaxis, :], label=\"BP\", savename=\"spectrum_onetrial.jpg\")\nfig = plot_spectra(data_FI[\"H_col\"][4, 0][np.newaxis, :], savename=\"spectrum_method_cmp.jpg\", label=\"ForwardIter 1E-3\", fig=fig)\nfig = plot_spectra(data_FI[\"H_col\"][5, 0][np.newaxis, :], savename=\"spectrum_method_cmp.jpg\", label=\"ForwardIter 3E-3\", fig=fig)\nfig = plot_spectra(data_FI[\"H_col\"][6, 0][np.newaxis, :], savename=\"spectrum_method_cmp.jpg\", label=\"ForwardIter 1E-2\", fig=fig)\nplt.show()\n#%%\n\"\"\"\nThis is the smaller explicit version of StyleGAN. Very easy to work with\n\"\"\"\n#%%\nsys.path.append(\"E:\\Github_Projects\\style-based-gan-pytorch\")\nsys.path.append(\"D:\\Github\\style-based-gan-pytorch\")\nfrom model import StyledGenerator\nfrom generate import get_mean_style\nimport math\n#%%\ngenerator = StyledGenerator(512).to(\"cuda\")\n# generator.load_state_dict(torch.load(r\"E:\\Github_Projects\\style-based-gan-pytorch\\checkpoint\\stylegan-256px-new.model\")['g_running'])\ngenerator.load_state_dict(torch.load(r\"D:\\Github\\style-based-gan-pytorch\\checkpoint\\stylegan-256px-new.model\")[\n 'g_running'])\ngenerator.eval()\nfor param in generator.parameters():\n param.requires_grad_(False)\nmean_style = get_mean_style(generator, \"cuda\")\nstep = int(math.log(256, 2)) - 2\n#%%\nfeat = torch.randn(1, 512, requires_grad=False).to(\"cuda\")\nimage = generator(\n feat,\n step=step,\n alpha=1,\n mean_style=mean_style,\n style_weight=0.7,\n )\n#%%\nclass StyleGAN_wrapper(): # nn.Module\n def __init__(self, StyleGAN, ):\n self.StyleGAN = StyleGAN\n\n def visualize(self, code, scale=1, step=step, mean_style=mean_style):\n imgs = self.StyleGAN(\n code,\n step=step,\n alpha=1,\n mean_style=mean_style,\n style_weight=0.7,\n ) # Matlab version default to 0.7\n return torch.clamp((imgs + 1.0) / 2.0, 0, 1) * scale\nG = StyleGAN_wrapper(generator)\n#%%\nfrom Hessian.GAN_hessian_compute import hessian_compute\n\n#%%\nfor triali in range(1, 15):\n feat = torch.randn(1, 512,).to(\"cuda\")\n T0 = time()\n eva_BP, evc_BP, H_BP = hessian_compute(G, feat, ImDist, hessian_method=\"BP\")\n print(\"%.2f sec\" % (time() - T0)) # 120 sec\n feat = feat.detach().clone()\n T0 = time()\n eva_BI, evc_BI, H_BI = hessian_compute(G, feat, ImDist, hessian_method=\"BackwardIter\")\n print(\"%.2f sec\" % (time() - T0)) # 120 sec\n T0 = time()\n eva_FI, evc_FI, H_FI = hessian_compute(G, feat, ImDist, hessian_method=\"ForwardIter\", EPS=1E-3)\n 
print(\"%.2f sec\" % (time() - T0)) # 64 sec\n print(\"Correlation of Flattened Hessian matrix BP vs BackwardIter %.3f\" % np.corrcoef(H_BP.flatten(), H_BI.flatten())[0, 1])\n print(\"Correlation of Flattened Hessian matrix BP vs ForwardIter %.3f\" %\n np.corrcoef(H_BP.flatten(), H_FI.flatten())[0, 1])\n print(\"Correlation of Flattened Hessian matrix ForwardIter vs BackwardIter %.3f\"%\n np.corrcoef(H_FI.flatten(), H_BI.flatten())[0, 1])\n H_col = []\n for EPS in [1E-6, 1E-5, 1E-4, 3E-4, 1E-3, 3E-3, 1E-2, 5E-2, 1E-1]:\n T0 = time()\n eva_FI, evc_FI, H_FI = hessian_compute(G, feat, ImDist, hessian_method=\"ForwardIter\", EPS=EPS)\n H_PSD = evc_FI @ np.diag(np.abs(eva_FI)) @ evc_FI.T\n print(\"%.2f sec\" % (time() - T0)) # 325.83 sec\n print(\"EPS %.1e Correlation of Flattened Hessian matrix BP vs ForwardIter %.3f\" % (EPS, np.corrcoef(H_BP.flatten(), H_FI.flatten())[0, 1]))\n print(\"EPS %.1e Correlation of Flattened Hessian matrix BP vs ForwardIter (AbsHess) %.3f\" % (\n EPS, np.corrcoef(H_BP.flatten(), H_PSD.flatten())[0, 1]))\n H_col.append((eva_FI, evc_FI, H_FI))\n\n np.savez(join(savedir, \"Hess_accuracy_cmp_%d.npz\" % triali), eva_BI=eva_BI, evc_BI=evc_BI, H_BI=H_BI,\n eva_FI=eva_FI, evc_FI=evc_FI, H_FI=H_FI, H_col=H_col,\n eva_BP=eva_BP, evc_BP=evc_BP, H_BP=H_BP, feat=feat.detach().cpu().numpy())\n print(\"Save finished\")\n#%%\ndatadir = r\"E:\\Cluster_Backup\\StyleGAN\"\nfor triali in tqdm(range(300)):\n feat = torch.randn(1, 512,).to(\"cuda\")\n T0 = time()\n eva_BP, evc_BP, H_BP = hessian_compute(G, feat, ImDist, hessian_method=\"BP\")\n print(\"%.2f sec\" % (time() - T0)) # 120 sec\n np.savez(join(datadir, \"Hessian_rand_%d.npz\" % triali), eva_BP=eva_BP, evc_BP=evc_BP, H_BP=H_BP,\n feat=feat.detach().cpu().numpy())\n#%%\n# eva_col = []\n# evc_col = []\n# for triali in tqdm(range(300)):\n# data = np.load(join(datadir, \"Hessian_rand_%d.npz\" % triali))\n# eva_col.append(data[\"eva_BP\"])\n# evc_col.append(data[\"evc_BP\"])\n# #%%\n# eva_col = np.array(eva_col)\ndatadir = r\"E:\\Cluster_Backup\\StyleGAN\"\nfigdir = r\"E:\\OneDrive - Washington University in St. Louis\\Hessian_summary\\StyleGAN\"\n\nos.makedirs(figdir, exist_ok=True)\neva_col, evc_col, feat_col, meta = scan_hess_npz(datadir, \"Hessian_rand_(\\d*).npz\", featkey=\"feat\")\nfeat_col = np.array(feat_col).squeeze()\nH_avg, eva_avg, evc_avg = average_H(eva_col, evc_col)\nnp.savez(join(figdir, \"H_avg_%s.npz\"%\"StyleGAN\"), H_avg=H_avg, eva_avg=eva_avg, evc_avg=evc_avg, feats=feat_col)\n#%%\nfig = plot_spectra(eva_col, figdir=figdir, titstr=\"StyleGAN\", )\n#%%\ncorr_mat_log, corr_mat_lin = compute_hess_corr(eva_col, evc_col, figdir=figdir, use_cuda=True)\n# without cuda 12:11 mins, with cuda 8:21\n# corr_mat_log, corr_mat_lin = compute_hess_corr(eva_col, evc_col, figdir=figdir, use_cuda=False)\n#%\nfig1, fig2 = plot_consistentcy_mat(corr_mat_log, corr_mat_lin, posN=300, figdir=figdir, titstr=\"StyleGAN\")\n#%\nfig3 = plot_consistency_example(eva_col, evc_col, figdir=figdir, nsamp=5, titstr=\"StyleGAN\",)\nfig3.show()\n#%%\n#%% Accuracy plot\nfigdir = r\"E:\\OneDrive - Washington University in St. 
Louis\\Hessian_summary\\StyleGAN\"\ndatadir = r\"E:\\Cluster_Data\\StyleGAN\"\nEPS_list = [1E-6, 1E-5, 1E-4, 3E-4, 1E-3, 3E-3, 1E-2, 5E-2, 1E-1]\nraw_corr_tab = []\nPSD_corr_tab = []\nfor triali in range(15):\n print(\"Computation trial %d\"%triali)\n data = np.load(join(savedir, \"Hess_accuracy_cmp_%d.npz\" % triali), allow_pickle=True)\n H_col = data[\"H_col\"]\n eva_BP, evc_BP, H_BP = data[\"eva_BP\"], data[\"evc_BP\"], data[\"H_BP\"]\n corr_vals = []\n PSD_corr_vals = []\n for EPSi, EPS in enumerate(EPS_list):\n eva_FI, evc_FI, H_FI = H_col[EPSi, :]\n H_PSD = evc_FI @ np.diag(np.abs(eva_FI)) @ evc_FI.T\n corr_vals.append(np.corrcoef(H_BP.flatten(), H_FI.flatten())[0, 1])\n PSD_corr_vals.append(np.corrcoef(H_BP.flatten(), H_PSD.flatten())[0, 1])\n print(\"EPS %.1e Correlation of Flattened Hessian matrix BP vs ForwardIter %.3f\" % (\n EPS, corr_vals[-1]))\n print(\"EPS %.1e Correlation of Flattened Hessian matrix BP vs ForwardIter (AbsHess) %.3f\" % (\n EPS, PSD_corr_vals[-1]))\n raw_corr_tab.append(corr_vals)\n PSD_corr_tab.append(PSD_corr_vals)\nraw_corr_tab = np.array(raw_corr_tab)\nPSD_corr_tab = np.array(PSD_corr_tab)\nnp.savez(join(figdir, \"accuracy_stats.npz\"), raw_corr_tab=raw_corr_tab, PSD_corr_tab=PSD_corr_tab,\n EPS_list=EPS_list)\n#%%\nplt.plot(PSD_corr_tab.T)\nplt.xticks(np.arange(len(EPS_list)), labels=EPS_list)\nplt.ylabel(\"Correlation for Vectorized Hessian\")\nplt.xlabel(\"EPS for Forward Diff\")\nplt.title(\"StyleGAN BP vs ForwardIter Pos-Semi-Definite Hessian Correlation\")\nplt.savefig(join(figdir, \"StyleGAN_BP-FI-PSD-HessCorr.png\"))\nplt.show()\n\n#%%\nplt.plot(raw_corr_tab.T)\nplt.xticks(np.arange(len(EPS_list)), labels=EPS_list)\nplt.ylabel(\"Correlation for Vectorized Hessian\")\nplt.xlabel(\"EPS for Forward Diff\")\nplt.title(\"StyleGAN BP vs ForwardIter Raw Hessian Correlation\")\nplt.savefig(join(figdir, \"StyleGAN_BP-FI-raw-HessCorr.png\"))\nplt.show()\n\nmen = raw_corr_tab.mean(axis=0)\nerr = raw_corr_tab.std(axis=0)/np.sqrt(raw_corr_tab.shape[0])\nplt.plot(men, )\nplt.fill_between(range(len(men)), men-err, men+err, alpha=0.3, label=\"raw\")\nmen = PSD_corr_tab.mean(axis=0)\nerr = PSD_corr_tab.std(axis=0)/np.sqrt(PSD_corr_tab.shape[0])\nplt.plot(men, )\nplt.fill_between(range(len(men)), men-err, men+err, alpha=0.3, label=\"PSD\")\nplt.xticks(np.arange(len(EPS_list)), labels=EPS_list)\nplt.legend()\nplt.ylabel(\"Correlation for Vectorized Hessian\")\nplt.xlabel(\"EPS for Forward Diff\")\nplt.title(\"StyleGAN BP vs ForwardIter Hessian Correlation\")\nplt.savefig(join(figdir, \"StyleGAN_BP-FI-HessCorr-cmp.png\"))\nplt.savefig(join(figdir, \"StyleGAN_BP-FI-HessCorr-cmp.pdf\"))\nplt.show()\n#%%\n\"\"\" modern API. 
Analyze the W space geometry \"\"\"\nfrom Hessian.hessian_analysis_tools import scan_hess_npz, compute_hess_corr, compute_vector_hess_corr, average_H, \\\n plot_consistentcy_mat, plot_consistency_hist, plot_spectra\nfrom GAN_utils import loadStyleGAN, StyleGAN_wrapper\nSGAN = loadStyleGAN()\nSG = StyleGAN_wrapper(SGAN)\nSG.wspace = True\n#%%\nimgs = SG.visualize(SG.mean_style + torch.randn(8, 512).cuda() * 0.15)\nToPILImage()(make_grid(imgs).cpu()).show()\n#%%\nimgs = SG.visualize(SG.mean_style + SG.StyleGAN.style(torch.randn(8, 512).cuda()) * 0.6)\nToPILImage()(make_grid(imgs).cpu()).show()\n\n#%%\nSG.wspace = True\nmean_style = SG.mean_style\ndatadir = r\"E:\\Cluster_Backup\\StyleGAN_wspace\"\nos.makedirs(datadir, exist_ok=True)\nfor triali in tqdm(range(80, 150)):\n feat_z = torch.randn(1, 512).cuda()\n feat = mean_style + 0.7 * SG.StyleGAN.style(feat_z) # torch.randn(1, 512,).to(\"cuda\")\n T0 = time()\n eva_BP, evc_BP, H_BP = hessian_compute(SG, feat, ImDist, hessian_method=\"BP\")\n print(\"%.2f sec\" % (time() - T0)) # 120 sec\n np.savez(join(datadir, \"Hessian_rand_0_7_%03d.npz\" % triali), eva_BP=eva_BP, evc_BP=evc_BP, H_BP=H_BP,\n feat=feat.detach().cpu().numpy(), feat_z=feat_z.detach().cpu().numpy())\n#%%\ndatadir = r\"E:\\Cluster_Backup\\StyleGAN_wspace\"\nfigdir = r\"E:\\OneDrive - Washington University in St. Louis\\Hessian_summary\\StyleGAN_wspace\"\nmodelnm = \"StyleGAN_Wspace\"\neva_col, evc_col, feat_col, meta = scan_hess_npz(datadir, \"Hessian_rand_0_7_(\\d*).npz\", featkey=\"feat\")\n# compute the Mean Hessian and save\nH_avg, eva_avg, evc_avg = average_H(eva_col, evc_col)\nnp.savez(join(figdir, \"H_avg_%s.npz\"%modelnm), H_avg=H_avg, eva_avg=eva_avg, evc_avg=evc_avg, feats=feat_col)\n# compute and plot spectra\nfig0 = plot_spectra(eigval_col=eva_col, savename=\"%s_spectrum\"%modelnm, figdir=figdir)\nfig0 = plot_spectra(eigval_col=eva_col, savename=\"%s_spectrum_med\"%modelnm, figdir=figdir, median=True)\nnp.savez(join(figdir, \"spectra_col_%s.npz\"%modelnm), eigval_col=eva_col, )\n# compute and plot the correlation between hessian at different points\ncorr_mat_log, corr_mat_lin = compute_hess_corr(eva_col, evc_col, figdir=figdir, use_cuda=True,\n savelabel=modelnm)\ncorr_mat_vec = compute_vector_hess_corr(eva_col, evc_col, figdir=figdir, use_cuda=True,\n savelabel=modelnm)\nfig1, fig2 = plot_consistentcy_mat(corr_mat_log, corr_mat_lin, figdir=figdir, titstr=\"%s\"%modelnm,\n savelabel=modelnm)\nfig11, fig22 = plot_consistency_hist(corr_mat_log, corr_mat_lin, figdir=figdir, titstr=\"%s\"%modelnm,\n savelabel=modelnm)\nfig3 = plot_consistency_example(eva_col, evc_col, figdir=figdir, nsamp=5, titstr=\"%s\"%modelnm, savelabel=modelnm)\nfig3 = plot_consistency_example(eva_col, evc_col, figdir=figdir, nsamp=3, titstr=\"%s\"%modelnm, savelabel=modelnm)\n" ]
[ [ "matplotlib.pylab.ylabel", "matplotlib.pylab.show", "torch.cuda.is_available", "torch.load", "torch.hub.load", "torch.tensor", "numpy.sqrt", "matplotlib.pylab.title", "numpy.log10", "matplotlib.pylab.plot", "matplotlib.pylab.suptitle", "numpy.array", "numpy.percentile", "torch.clamp", "torch.nn.functional.interpolate", "matplotlib.pylab.legend", "matplotlib.pylab.sca", "matplotlib.pylab.figure", "matplotlib.pylab.xlabel", "matplotlib.pylab.subplots", "numpy.abs", "torch.randn" ] ]
mtosity/CameraTrapHCMUS
[ "9b4215e505f97bb5240e1dec852595c9fdeac725" ]
[ "yolo/general_json2yolo.py" ]
[ "import json\n\nimport cv2\nimport pandas as pd\nfrom PIL import Image\n\nfrom utils import *\n\n\n# Convert INFOLKS JSON file into YOLO-format labels ----------------------------\ndef convert_infolks_json(name, files, img_path):\n # Create folders\n path = make_dirs()\n\n # Import json\n data = []\n for file in glob.glob(files):\n with open(file) as f:\n jdata = json.load(f)\n jdata['json_file'] = file\n data.append(jdata)\n\n # Write images and shapes\n name = path + os.sep + name\n file_id, file_name, wh, cat = [], [], [], []\n for x in tqdm(data, desc='Files and Shapes'):\n f = glob.glob(img_path + Path(x['json_file']).stem + '.*')[0]\n file_name.append(f)\n wh.append(exif_size(Image.open(f))) # (width, height)\n cat.extend(a['classTitle'].lower() for a in x['output']['objects']) # categories\n\n # filename\n with open(name + '.txt', 'a') as file:\n file.write('%s\\n' % f)\n\n # Write *.names file\n names = sorted(np.unique(cat))\n # names.pop(names.index('Missing product')) # remove\n with open(name + '.names', 'a') as file:\n [file.write('%s\\n' % a) for a in names]\n\n # Write labels file\n for i, x in enumerate(tqdm(data, desc='Annotations')):\n label_name = Path(file_name[i]).stem + '.txt'\n\n with open(path + '/labels/' + label_name, 'a') as file:\n for a in x['output']['objects']:\n # if a['classTitle'] == 'Missing product':\n # continue # skip\n\n category_id = names.index(a['classTitle'].lower())\n\n # The INFOLKS bounding box format is [x-min, y-min, x-max, y-max]\n box = np.array(a['points']['exterior'], dtype=np.float32).ravel()\n box[[0, 2]] /= wh[i][0] # normalize x by width\n box[[1, 3]] /= wh[i][1] # normalize y by height\n box = [box[[0, 2]].mean(), box[[1, 3]].mean(), box[2] - box[0], box[3] - box[1]] # xywh\n if (box[2] > 0.) and (box[3] > 0.): # if w > 0 and h > 0\n file.write('%g %.6f %.6f %.6f %.6f\\n' % (category_id, *box))\n\n # Split data into train, test, and validate files\n split_files(name, file_name)\n write_data_data(name + '.data', nc=len(names))\n print('Done. 
Output saved to %s' % (os.getcwd() + os.sep + path))\n\n\n# Convert vott JSON file into YOLO-format labels -------------------------------\ndef convert_vott_json(name, files, img_path):\n # Create folders\n path = make_dirs()\n name = path + os.sep + name\n\n # Import json\n data = []\n for file in glob.glob(files):\n with open(file) as f:\n jdata = json.load(f)\n jdata['json_file'] = file\n data.append(jdata)\n\n # Get all categories\n file_name, wh, cat = [], [], []\n for i, x in enumerate(tqdm(data, desc='Files and Shapes')):\n try:\n cat.extend(a['tags'][0] for a in x['regions']) # categories\n except:\n pass\n\n # Write *.names file\n names = sorted(pd.unique(cat))\n with open(name + '.names', 'a') as file:\n [file.write('%s\\n' % a) for a in names]\n\n # Write labels file\n n1, n2 = 0, 0\n missing_images = []\n for i, x in enumerate(tqdm(data, desc='Annotations')):\n\n f = glob.glob(img_path + x['asset']['name'] + '.jpg')\n if len(f):\n f = f[0]\n file_name.append(f)\n wh = exif_size(Image.open(f)) # (width, height)\n\n n1 += 1\n if (len(f) > 0) and (wh[0] > 0) and (wh[1] > 0):\n n2 += 1\n\n # append filename to list\n with open(name + '.txt', 'a') as file:\n file.write('%s\\n' % f)\n\n # write labelsfile\n label_name = Path(f).stem + '.txt'\n with open(path + '/labels/' + label_name, 'a') as file:\n for a in x['regions']:\n category_id = names.index(a['tags'][0])\n\n # The INFOLKS bounding box format is [x-min, y-min, x-max, y-max]\n box = a['boundingBox']\n box = np.array([box['left'], box['top'], box['width'], box['height']]).ravel()\n box[[0, 2]] /= wh[0] # normalize x by width\n box[[1, 3]] /= wh[1] # normalize y by height\n box = [box[0] + box[2] / 2, box[1] + box[3] / 2, box[2], box[3]] # xywh\n\n if (box[2] > 0.) and (box[3] > 0.): # if w > 0 and h > 0\n file.write('%g %.6f %.6f %.6f %.6f\\n' % (category_id, *box))\n else:\n missing_images.append(x['asset']['name'])\n\n print('Attempted %g json imports, found %g images, imported %g annotations successfully' % (i, n1, n2))\n if len(missing_images):\n print('WARNING, missing images:', missing_images)\n\n # Split data into train, test, and validate files\n split_files(name, file_name)\n print('Done. 
Output saved to %s' % (os.getcwd() + os.sep + path))\n\n\n# Convert ath JSON file into YOLO-format labels --------------------------------\ndef convert_ath_json(json_dir): # dir contains json annotations and images\n # Create folders\n dir = make_dirs() # output directory\n\n jsons = []\n for dirpath, dirnames, filenames in os.walk(json_dir):\n for filename in [f for f in filenames if f.lower().endswith('.json')]:\n jsons.append(os.path.join(dirpath, filename))\n\n # Import json\n n1, n2, n3 = 0, 0, 0\n missing_images, file_name = [], []\n for json_file in sorted(jsons):\n with open(json_file) as f:\n data = json.load(f)\n\n # # Get classes\n # try:\n # classes = list(data['_via_attributes']['region']['class']['options'].values()) # classes\n # except:\n # classes = list(data['_via_attributes']['region']['Class']['options'].values()) # classes\n\n # # Write *.names file\n # names = pd.unique(classes) # preserves sort order\n # with open(dir + 'data.names', 'w') as f:\n # [f.write('%s\\n' % a) for a in names]\n\n # Write labels file\n for i, x in enumerate(tqdm(data['_via_img_metadata'].values(), desc='Processing %s' % json_file)):\n\n image_file = str(Path(json_file).parent / x['filename'])\n f = glob.glob(image_file) # image file\n if len(f):\n f = f[0]\n file_name.append(f)\n wh = exif_size(Image.open(f)) # (width, height)\n\n n1 += 1 # all images\n if len(f) > 0 and wh[0] > 0 and wh[1] > 0:\n label_file = dir + 'labels/' + Path(f).stem + '.txt'\n\n nlabels = 0\n try:\n with open(label_file, 'a') as file: # write labelsfile\n for a in x['regions']:\n # try:\n # category_id = int(a['region_attributes']['class'])\n # except:\n # category_id = int(a['region_attributes']['Class'])\n category_id = 0 # single-class\n\n # bounding box format is [x-min, y-min, x-max, y-max]\n box = a['shape_attributes']\n box = np.array([box['x'], box['y'], box['width'], box['height']],\n dtype=np.float32).ravel()\n box[[0, 2]] /= wh[0] # normalize x by width\n box[[1, 3]] /= wh[1] # normalize y by height\n box = [box[0] + box[2] / 2, box[1] + box[3] / 2, box[2],\n box[3]] # xywh (left-top to center x-y)\n\n if box[2] > 0. and box[3] > 0.: # if w > 0 and h > 0\n file.write('%g %.6f %.6f %.6f %.6f\\n' % (category_id, *box))\n n3 += 1\n nlabels += 1\n\n if nlabels == 0: # remove non-labelled images from dataset\n os.system('rm %s' % label_file)\n # print('no labels for %s' % f)\n continue # next file\n\n # write image\n img_size = 4096 # resize to maximum\n img = cv2.imread(f) # BGR\n assert img is not None, 'Image Not Found ' + f\n r = img_size / max(img.shape) # size ratio\n if r < 1: # downsize if necessary\n h, w, _ = img.shape\n img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA)\n\n ifile = dir + 'images/' + Path(f).name\n if cv2.imwrite(ifile, img): # if success append image to list\n with open(dir + 'data.txt', 'a') as file:\n file.write('%s\\n' % ifile)\n n2 += 1 # correct images\n\n except:\n os.system('rm %s' % label_file)\n print('problem with %s' % f)\n\n else:\n missing_images.append(image_file)\n\n nm = len(missing_images) # number missing\n print('\\nFound %g JSONs with %g labels over %g images. 
Found %g images, labelled %g images successfully' %\n (len(jsons), n3, n1, n1 - nm, n2))\n if len(missing_images):\n print('WARNING, missing images:', missing_images)\n\n # Write *.names file\n names = ['knife'] # preserves sort order\n with open(dir + 'data.names', 'w') as f:\n [f.write('%s\\n' % a) for a in names]\n\n # Split data into train, test, and validate files\n split_rows_simple(dir + 'data.txt')\n write_data_data(dir + 'data.data', nc=1)\n print('Done. Output saved to %s' % Path(dir).absolute())\n\n\ndef convert_coco_json(json_dir='../coco/annotations/', use_segments=False, cls91to80=False):\n coco80 = coco91_to_coco80_class()\n save_dir = make_dirs() # output directory\n \n print(list(Path(json_dir).resolve().glob('*.json')))\n\n # Import json\n for json_file in sorted(Path(json_dir).resolve().glob('*.json')):\n \n fn = Path(save_dir) / 'labels' / json_file.stem.replace('instances_', '') # folder name\n fn.mkdir()\n with open(json_file) as f:\n data = json.load(f)\n \n print(type(data))\n\n \n if 'annotations' not in data or 'images' not in data:\n continue\n\n # Create image dict\n images = {x['id']: x for x in data['images']}\n\n save_image_dir = Path(save_dir) / 'images' / json_file.stem.replace('instances_', '')\n save_image_dir.mkdir()\n save_image_dir = str(save_image_dir)\n # Write labels file\n for x in tqdm(data['annotations'], desc=f'Annotations {json_file}'):\n if 'iscrowd' in x:\n continue\n \n if 'bbox' not in x:\n continue\n\n img = images[x['image_id']]\n h, w, f = img['height'], img['width'], img['file_name']\n \n if not os.path.isfile('datasets/ena24/images/' + f):\n continue\n\n # The COCO box format is [top left x, top left y, width, height]\n box = np.array(x['bbox'], dtype=np.float64)\n box[:2] += box[2:] / 2 # xy top-left corner to center\n box[[0, 2]] /= w # normalize x\n box[[1, 3]] /= h # normalize y\n\n # Segments\n if use_segments:\n segments = [j for i in x['segmentation'] for j in i] # all segments concatenated\n s = (np.array(segments).reshape(-1, 2) / np.array([w, h])).reshape(-1).tolist()\n\n # Write\n if box[2] > 0 and box[3] > 0: # if w > 0 and h > 0\n cls = coco80[x['category_id']] if cls91to80 else x['category_id'] # class (FIXED)\n line = cls, *(s if use_segments else box) # cls, box or segments\n with open((fn / f).with_suffix('.txt'), 'a') as file:\n file.write(('%g ' * len(line)).rstrip() % line + '\\n')\n \n # Copy image\n shutil.copy('datasets/ena24/images/' + f, save_image_dir + '/' + f)\n\n\nif __name__ == '__main__':\n source = 'COCO'\n\n if source == 'COCO':\n print(source)\n convert_coco_json('./') # directory with *.json\n\n # zip results\n # os.system('zip -r ../coco.zip ../coco')\n" ]
[ [ "pandas.unique" ] ]
jj-chung/caltech-ee148-spring2020-hw01
[ "c260232376a973d7346f0da4d06ae93cf7de1dcf" ]
[ "run_predictions.py" ]
[ "import os\nimport numpy as np\nimport json\nfrom PIL import Image, ImageDraw\n\nimport time\nimport sys\nnp.set_printoptions(threshold=sys.maxsize)\n\n\ndef rgb_to_hsv(r, g, b):\n r0 = r / 255.0\n g0 = g / 255.0\n b0 = b / 255.0\n\n c_max = max(r0, g0, b0)\n c_min = min(r0, g0, b0)\n delta = c_max - c_min\n \n if delta == 0:\n h = 0\n elif c_max == r0:\n h = (60 * ((g0 - b0) / delta + 6)) % 360\n elif c_max == g0:\n h = (60 * ((b0 - r0) / delta + 2)) % 360\n elif c_max == b0:\n h = (60 * ((r0 - g0) / delta + 4)) % 360\n\n if c_max == 0:\n s = 0\n else:\n s = (delta / c_max) * 100\n\n v = c_max * 100\n\n return h, s, v\n\ndef find_red_black(I):\n '''\n Takes a numpy array <I> and returns two lists <red_coords> and <black_coords>,\n and a 2D array of shape np.shape(I) with \n black coordinates having entry 0,\n red coordinates having entry 1, and \n all other colors -1. \n <red_coords> contains all coordinates in I which are approx. red, and \n <black_coords> contains all coordinates in I which are approx. black.\n '''\n # Find the dimensions of the image I and set threshold\n (n_rows, n_cols, n_channels) = np.shape(I)\n new_img = np.zeros((n_rows, n_cols)) \n red_coords = []\n black_coords = []\n\n for row in range(n_rows):\n for col in range(n_cols):\n if row > n_rows / 2:\n new_img[row, col] = -1\n else:\n r, g, b = I[row, col, :]\n h, s, v = rgb_to_hsv(r, g, b)\n\n '''\n # Check if this pixel is red or black\n if r > g * 2.5 and r > b * 2.5:\n red_coords.append([row, col])\n new_img[row, col] = 1\n elif r < 100 and g < 100 and b < 100:\n black_coords.append([row, col])\n new_img[row, col] = 0\n else:\n new_img[row, col] = -1\n '''\n # Check if this pixel is red or black\n if (h < 20 or h > 320) and v > 50 and s > 50:\n red_coords.append([row, col])\n new_img[row, col] = 1\n elif v < 35:\n black_coords.append([row, col])\n new_img[row, col] = 0\n else:\n new_img[row, col] = 0.3\n\n return red_coords, black_coords, new_img\n\ndef to_normalized_vec(matrix):\n vec = matrix.flatten()\n vec_norm = np.linalg.norm(vec)\n if vec_norm != 0:\n return vec/ vec_norm\n else:\n return vec\n\ndef delete_duplicates(bounding_boxes, l_rows, l_cols):\n # Clean up bounding boxes by removing duplicates\n visited = set()\n new_boxes = []\n\n for box in bounding_boxes:\n tl_row, tl_col, br_row, br_col = box\n\n if (tl_row, tl_col) not in visited:\n new_boxes.append(box)\n\n for i in range(tl_row - l_rows, br_row + l_rows):\n for j in range(tl_col - l_cols, br_col + l_cols):\n visited.add((i, j))\n\n return new_boxes\n\ndef detect_red_light_color(I, bounding_boxes):\n '''\n Called for functionality in detect_red_light, using a different algorithm (color-\n based) algorithm.\n '''\n\n r_coords, b_coords, img = find_red_black(I)\n n_rows, n_cols, n_channels = np.shape(I)\n \n '''\n # For visualization purposes only: draw where it sees red/black\n data_path = './data/RedLights2011_Small/'\n\n with Image.open(os.path.join(data_path, name)) as im:\n draw = ImageDraw.Draw(im)\n for r_coord in r_coords:\n draw.point([r_coord[1], r_coord[0]], fill='white')\n\n for b_coord in b_coords:\n draw.point([b_coord[1], b_coord[0]], fill='green')\n\n f_name = './data/boxed_images_color_example/' + name.split('.')[0] + '_red.jpg'\n im.save(f_name, 'JPEG')\n '''\n\n # For each red coordinate, we travel down until we reach black.\n # Taking this dist as the diameter, we draw a bounding box to check if it has \n # high inner product with a traffic light of that size.\n for r_coord in r_coords:\n curr_row, curr_col = r_coord \n\n 
curr_color = 1\n while curr_row - r_coord[0] < 80:\n # If we've reached black for the first time, change the current color.\n # Otherwise continue to make sure the area below the light is black.\n if img[curr_row][curr_col] == 0:\n # Save this row and diameter\n row = curr_row\n diam = row - r_coord[0]\n curr_color = 0\n break\n\n curr_row += 1\n\n if curr_color != 0:\n break\n\n # Find the radius of the circle, margin around the light, and center of\n # the light.\n radius = diam / 2.0\n margin = round(diam * 0.1)\n center_row = round(margin + radius)\n center_col = center_row\n\n # Determine the expected size of the light.\n l_cols = round(2 * margin + diam)\n l_rows = round(margin + diam + 1.5 * diam)\n light = np.zeros((l_rows, l_cols))\n\n # Determine points which we expect to be red (circular)\n for row in range(l_rows):\n for col in range(l_cols):\n dist = (row - center_row) ** 2 + (col - center_col) ** 2\n if dist < (radius) ** 2 and dist > (radius / 2.0) ** 2:\n light[row, col] = 1\n\n # Draw bounding box\n s_row = max(0, r_coord[0] - margin) \n e_row = min(n_rows, s_row + l_rows)\n s_col = max(0, r_coord[1] - round(radius) - margin) \n e_col = min(n_cols, s_col + l_cols)\n \n # Normalize things \n light_vec = to_normalized_vec(light)\n\n patch = img[s_row:e_row, s_col:e_col]\n patch_vec = to_normalized_vec(patch)\n\n # If the patch approximately matches what we expect for a traffic light,\n # then add the bounding box. \n try:\n prod = np.dot(patch_vec, light_vec)\n\n temp_img = img\n temp_img[r_coord[0], r_coord[1]] = 100\n temp_light = temp_img[s_row:e_row, s_col:e_col + 2]\n \n\n '''\n if 'RL-010' in name:\n print(light)\n print(r_coord)\n print(temp_light)\n print(prod)\n time.sleep(5)\n '''\n \n if prod > 0.5:\n bounding_boxes.append([s_row, s_col, e_row, e_col])\n\n except ValueError:\n pass\n\n # Remove duplicate bounding boxes \n if len(bounding_boxes) > 1:\n return delete_duplicates(bounding_boxes, l_rows, l_cols)\n else:\n return bounding_boxes\n\n\ndef detect_red_light_match(I, bounding_boxes):\n '''\n Called for functionality in detect_red_light, using match filtering algorithm. 
\n '''\n \n # Use an example traffic light from the first image\n im = Image.open('./data/RedLights2011_Medium/RL-001.jpg')\n ex_light = im.crop((316, 154, 323, 171))\n ex_light = np.asarray(ex_light) - 127.5\n\n # Find the dimensions of the traffic light\n (lt_rows, lt_cols, lt_channels) = np.shape(ex_light)\n box_height = lt_rows\n box_width = lt_cols\n\n # Find the dimensions of the image I and set threshold\n (n_rows, n_cols, n_channels) = np.shape(I)\n threshold = 0.9\n\n lt_vecs = []\n\n # For each channel, convert traffic light into normalized vector\n for i in range(3):\n lt_ch = ex_light[:, :, i]\n lt_vec = lt_ch.flatten()\n lt_norm = np.linalg.norm(lt_vec)\n if lt_norm != 0:\n lt_vec = lt_vec / lt_norm\n\n lt_vecs.append(lt_vec)\n\n\n # Go through all patches of this size \n for i in range(round(n_rows / 2)):\n # Only check the bottom half of the iamge\n for j in range(n_cols - box_width):\n tl_row = i\n tl_col = j\n br_row = tl_row + box_height\n br_col = tl_col + box_width\n ch_inner_prod = [] \n\n # Go through each channel\n for ch in range(3):\n # Get one channel of the image and the same channel of the light\n img_ch = I[:, :, ch]\n lt_vec = lt_vecs[ch]\n\n # Convert this patch to a normalized vector\n patch = img_ch[tl_row:br_row, tl_col:br_col]\n patch_vec = patch.flatten() - 127.5\n patch_norm = np.linalg.norm(patch_vec)\n if patch_norm != 0:\n patch_vec = patch_vec / patch_norm\n\n # Take the inner product of the traffic light with a patch.\n ch_inner_prod.append(np.dot(lt_vec, patch_vec))\n\n # If it's above the threshold add the box to the bounding boxes.\n for k in range(3):\n prod = ch_inner_prod[k]\n if prod < threshold:\n break\n elif k == 2:\n bounding_boxes.append([tl_row, tl_col, br_row, br_col]) \n\n # Remove duplicate bounding boxes \n if len(bounding_boxes) > 1:\n return delete_duplicates(bounding_boxes, lt_rows, lt_cols)\n else:\n return bounding_boxes\n\ndef detect_red_light(I):\n '''\n This function takes a numpy array <I> and returns a list <bounding_boxes>.\n The list <bounding_boxes> should have one element for each red light in the \n image. Each element of <bounding_boxes> should itself be a list, containing \n four integers that specify a bounding box: the row and column index of the \n top left corner and the row and column index of the bottom right corner (in\n that order). See the code below for an example.\n \n Note that PIL loads images in RGB order, so:\n I[:,:,0] is the red channel\n I[:,:,1] is the green channel\n I[:,:,2] is the blue channel\n '''\n \n \n bounding_boxes = [] # This should be a list of lists, each of length 4. See format example below. 
\n \n '''\n BEGIN YOUR CODE\n '''\n \n bounding_boxes = detect_red_light_match(I, bounding_boxes)\n \n '''\n END YOUR CODE\n '''\n\n for i in range(len(bounding_boxes)):\n assert len(bounding_boxes[i]) == 4\n \n return bounding_boxes\n\n# set the path to the downloaded data: \ndata_path = './data/RedLights2011_Medium'\n\n# set a path for saving predictions: \npreds_path = './data/hw01_preds_match' \nos.makedirs(preds_path,exist_ok=True) # create directory if needed \n\n# get sorted list of files: \nfile_names = sorted(os.listdir(data_path)) \n\n# remove any non-JPEG files: \nfile_names = [f for f in file_names if '.jpg' in f] \n\npreds = {}\nfor i in range(len(file_names)):\n \n # read image using PIL:\n I = Image.open(os.path.join(data_path,file_names[i]))\n \n # convert to numpy array:\n I = np.asarray(I)\n \n preds[file_names[i]] = detect_red_light(I)\n\n# save preds (overwrites any previous predictions!)\nwith open(os.path.join(preds_path,'preds.json'),'w') as f:\n json.dump(preds,f)\n" ]
[ [ "numpy.linalg.norm", "numpy.dot", "numpy.asarray", "numpy.zeros", "numpy.set_printoptions", "numpy.shape" ] ]
sisgandarli/text-similarity-checker-system
[ "2c7c5475642304c3fa842e36f0ef50887f0d5467" ]
[ "misc/ann_model_with_new_preprocessed_dataset.py" ]
[ "# -*- coding: utf-8 -*-\nimport pandas as pd\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.metrics import classification_report,confusion_matrix\nimport pickle\nfrom random import random\n\ndt = pd.read_csv(\"../files/train_finalized.csv\")\n\n\nfor i in range(len(dt[dt['y'] == 0]) - len(dt[dt['y'] == 1])):\n row_num = int(random() * len(dt))\n while (dt.iloc[row_num]['y'] == 1):\n row_num = int(random() * len(dt))\n dt = dt.drop(dt.index[[row_num]])\n\ndt.to_csv(\"../files/train_50x50.csv\", index=False)\n\n\ndt = pd.read_csv(\"../files/train_50x50.csv\") \n\nX = dt.drop('y',axis=1)\ny = dt['y']\n\nX_train, X_test, y_train, y_test = train_test_split(X, y)\n\n\nscaler = StandardScaler()\nscaler.fit(X_train)\nX_train = scaler.transform(X_train)\nX_test = scaler.transform(X_test)\n\n\nmlp = MLPClassifier(hidden_layer_sizes=(7, 7,), max_iter=1000)\nmlp.fit(X_train,y_train)\n\npredictions = mlp.predict(X_test)\n\nprint(confusion_matrix(y_test,predictions))\nprint(classification_report(y_test,predictions))\n\n\nfilename = '../files/ann_model_50x50.sav'\npickle.dump(mlp, open(filename, 'wb'))\n\n\"\"\"\nloaded_model = pickle.load(open(filename, 'rb'))\nresult = loaded_model.predict(X_test)\n\nprint(confusion_matrix(y_test,result))\nprint(classification_report(y_test,result))\n\"\"\"" ]
[ [ "sklearn.metrics.confusion_matrix", "sklearn.preprocessing.StandardScaler", "sklearn.neural_network.MLPClassifier", "sklearn.metrics.classification_report", "sklearn.model_selection.train_test_split", "pandas.read_csv" ] ]
nerdk312/AMDIM_Decoder
[ "7ca7eb869801d5fbe80b6bc3bb9ca4a2ba4b7238" ]
[ "stats.py" ]
[ "import torch\nfrom tensorboardX import SummaryWriter\n\n\nclass AverageMeterSet: # Nawid - Calculates averages\n def __init__(self):\n self.sums = {}\n self.counts = {}\n self.avgs = {}\n\n def _compute_avgs(self): # Nawid - Calculate average\n for name in self.sums:\n self.avgs[name] = float(self.sums[name]) / float(self.counts[name])\n\n def update_dict(self, name_val_dict, n=1): # Nawid - Updates the different entries in the dictionary\n for name, val in name_val_dict.items():\n self.update(name, val, n)\n\n def update(self, name, value, n=1):\n if name not in self.sums:\n self.sums[name] = value\n self.counts[name] = n\n else:\n self.sums[name] = self.sums[name] + value\n self.counts[name] = self.counts[name] + n\n\n def pretty_string(self, ignore=('zzz')):\n self._compute_avgs()\n s = []\n for name, avg in self.avgs.items():\n keep = True\n for ign in ignore:\n if ign in name:\n keep = False\n if keep:\n s.append('{0:s}: {1:.3f}'.format(name, avg))\n s = ', '.join(s)\n return s\n\n def averages(self, idx, prefix=''):\n self._compute_avgs()\n return {prefix + name: (avg, idx) for name, avg in self.avgs.items()}\n\n\nclass StatTracker:\n '''\n Helper class for collecting per-episode rewards and other stats during\n training.\n '''\n\n def __init__(self, log_name=None, log_dir=None):\n assert((log_name is None) or (log_dir is None))\n if log_dir is None:\n self.writer = SummaryWriter(comment=log_name)\n else:\n print('log_dir: {}'.format(str(log_dir)))\n try:\n self.writer = SummaryWriter(logdir=log_dir)\n except:\n self.writer = SummaryWriter(log_dir=log_dir)\n\n def close(self):\n self.writer.close()\n\n def record_stats(self, stat_dict): # Nawid- Upload stats in tensorboard\n '''\n Record some named stats in the underlying tensorboard log.\n '''\n for stat_name, stat_vals in stat_dict.items():\n self.writer.add_scalar(stat_name, stat_vals[0], stat_vals[1])\n\n def add_image(self, label, image, number):\n '''\n Add an image to the tensorboard log.\n '''\n self.writer.add_image(label, image, number)\n\n\ndef update_train_accuracies(epoch_stats, labels, lgt_glb_mlp, lgt_glb_lin):\n '''\n Helper function for tracking accuracy on training set\n '''\n labels_np = labels.cpu().numpy()\n max_lgt_glb_mlp = torch.max(lgt_glb_mlp.data, 1)[1].cpu().numpy() # Nawid - Max logit values from mlp\n max_lgt_glb_lin = torch.max(lgt_glb_lin.data, 1)[1].cpu().numpy() # Nawid - Max logit values from Linear\n for j in range(labels_np.shape[0]):\n if labels_np[j] > -0.1:\n hit_glb_mlp = 1 if (max_lgt_glb_mlp[j] == labels_np[j]) else 0\n hit_glb_lin = 1 if (max_lgt_glb_lin[j] == labels_np[j]) else 0\n epoch_stats.update('train_acc_glb_mlp', hit_glb_mlp, n=1) # Nawid - Updates the states with the current hit_glb_mlp score ( increases by 1 if it is scored correctly)\n epoch_stats.update('train_acc_glb_lin', hit_glb_lin, n=1)\n" ]
[ [ "torch.max" ] ]
abaisero/asym-porl
[ "8a76d920e51d783bbeeeea3cd2b02efffbb33c72" ]
[ "asym_rlpo/algorithms/a2c/base.py" ]
[ "import abc\nimport random\nfrom typing import Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom asym_rlpo.data import Episode\nfrom asym_rlpo.features import make_history_integrator\nfrom asym_rlpo.policies.base import PartiallyObservablePolicy\nfrom asym_rlpo.q_estimators import Q_Estimator, td0_q_estimator\n\nfrom ..base import PO_Algorithm_ABC\n\n\nclass PO_A2C_ABC(PO_Algorithm_ABC):\n def behavior_policy(self) -> PartiallyObservablePolicy:\n return BehaviorPolicy(\n self.models,\n truncated_histories=self.truncated_histories,\n truncated_histories_n=self.truncated_histories_n,\n )\n\n def evaluation_policy(self) -> PartiallyObservablePolicy:\n return EvaluationPolicy(\n self.models,\n truncated_histories=self.truncated_histories,\n truncated_histories_n=self.truncated_histories_n,\n )\n\n def compute_action_logits(\n self, models: nn.ModuleDict, episode: Episode\n ) -> torch.Tensor:\n\n history_features = self.compute_history_features(\n models.agent.action_model,\n models.agent.observation_model,\n models.agent.history_model,\n episode.actions,\n episode.observations,\n )\n action_logits = models.agent.policy_model(history_features)\n return action_logits\n\n @abc.abstractmethod\n def compute_v_values(\n self, models: nn.ModuleDict, episode: Episode\n ) -> torch.Tensor:\n assert False\n\n def actor_losses( # pylint: disable=too-many-locals\n self,\n episode: Episode,\n *,\n discount: float,\n q_estimator: Optional[Q_Estimator] = None,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n\n if q_estimator is None:\n q_estimator = td0_q_estimator\n\n action_logits = self.compute_action_logits(self.models, episode)\n device = action_logits.device\n\n with torch.no_grad():\n v_values = self.compute_v_values(self.models, episode)\n q_values = q_estimator(episode.rewards, v_values, discount=discount)\n\n discounts = discount ** torch.arange(len(episode), device=device)\n action_nlls = -action_logits.gather(\n 1, episode.actions.unsqueeze(-1)\n ).squeeze(-1)\n advantages = q_values.detach() - v_values.detach()\n actor_loss = (discounts * advantages * action_nlls).sum()\n\n action_dists = torch.distributions.Categorical(logits=action_logits)\n negentropy_loss = -action_dists.entropy().sum()\n\n return actor_loss, negentropy_loss\n\n def critic_loss( # pylint: disable=too-many-locals\n self,\n episode: Episode,\n *,\n discount: float,\n q_estimator: Optional[Q_Estimator] = None,\n ) -> torch.Tensor:\n\n if q_estimator is None:\n q_estimator = td0_q_estimator\n\n v_values = self.compute_v_values(self.models, episode)\n\n with torch.no_grad():\n target_v_values = self.compute_v_values(self.target_models, episode)\n target_q_values = q_estimator(\n episode.rewards, target_v_values, discount=discount\n )\n\n critic_loss = F.mse_loss(v_values, target_q_values, reduction='sum')\n\n return critic_loss\n\n\nclass BehaviorPolicy(PartiallyObservablePolicy):\n def __init__(\n self,\n models: nn.ModuleDict,\n *,\n truncated_histories: bool,\n truncated_histories_n: int,\n ):\n super().__init__()\n self.models = models\n self.history_integrator = make_history_integrator(\n models.agent.action_model,\n models.agent.observation_model,\n models.agent.history_model,\n truncated_histories=truncated_histories,\n truncated_histories_n=truncated_histories_n,\n )\n\n def reset(self, observation):\n self.history_integrator.reset(observation)\n\n def step(self, action, observation):\n self.history_integrator.step(action, observation)\n\n def action_logits(self):\n return 
self.models.agent.policy_model(self.history_integrator.features)\n\n def po_sample_action(self):\n action_dist = torch.distributions.Categorical(\n logits=self.action_logits()\n )\n return action_dist.sample().item()\n\n\nclass EvaluationPolicy(PartiallyObservablePolicy):\n def __init__(\n self,\n models: nn.ModuleDict,\n *,\n truncated_histories: bool,\n truncated_histories_n: int,\n ):\n super().__init__()\n self.models = models\n self.behavior_policy = BehaviorPolicy(\n models,\n truncated_histories=truncated_histories,\n truncated_histories_n=truncated_histories_n,\n )\n self.epsilon: float\n\n def reset(self, observation):\n self.behavior_policy.reset(observation)\n\n def step(self, action, observation):\n self.behavior_policy.step(action, observation)\n\n def po_sample_action(self):\n action_logits = self.behavior_policy.action_logits()\n return (\n torch.distributions.Categorical(logits=action_logits).sample()\n if random.random() < self.epsilon\n else action_logits.argmax()\n ).item()\n" ]
[ [ "torch.nn.functional.mse_loss", "torch.no_grad", "torch.distributions.Categorical" ] ]
Achazwl/BMTrain
[ "776c10b21886f12137641c56b12ebf8d601aa9e0" ]
[ "bmtrain/optim/adam_offload.py" ]
[ "import torch\nfrom ..global_var import config\nfrom . import _cpu as C\nfrom . import _cuda as G\nfrom .. import nccl\n\nclass AdamOffloadOptimizer(torch.optim.Optimizer):\n \"\"\"\n Adam optimizer\n \"\"\"\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, scale=65536, hold_steps=0):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n if not 0.0 <= weight_decay:\n raise ValueError(\"Invalid weight_decay value: {}\".format(weight_decay))\n\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)\n super().__init__(params, defaults)\n\n self._scale = scale\n self._steps_since_last_scale = 0\n self._hold_steps = hold_steps\n \n @property\n def scale(self):\n return self._scale\n \n @property\n def steps_since_last_scale(self):\n return self._steps_since_last_scale\n\n @torch.no_grad()\n def justify_scale(self, scale):\n self._scale = scale\n self._steps_since_last_scale = 0\n\n @torch.no_grad()\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n The remaining arguments are deprecated, and are only retained (for the moment) for error-checking purposes.\n \"\"\"\n \n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n # check overflow\n has_inf_or_nan = torch.zeros(1, dtype=torch.uint8, device=\"cuda\")[0]\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is not None:\n G.f_has_inf_nan(p.grad, has_inf_or_nan)\n \n if \"comm\" in config:\n nccl.allReduce(has_inf_or_nan.storage(), has_inf_or_nan.storage(), \"max\", config[\"comm\"])\n\n if has_inf_or_nan > 0:\n raise OverflowError(\"Gradient overflow\")\n\n # parameters to be updated\n update_params = []\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is not None and p.requires_grad:\n if p.grad.is_sparse:\n raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')\n\n state = self.state[p]\n # Lazy state initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros(p.size(), dtype=torch.float32, device=\"cpu\") # on host\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = torch.zeros(p.size(), dtype=torch.float32, device=\"cpu\") # on host\n\n state['_param_fp32'] = torch.empty(p.size(), dtype=torch.float32, device=\"cpu\") # on host\n state['_param_fp32'].copy_(p)\n\n # placeholder\n state[\"_param_fp16\"] = torch.empty(p.size(), dtype=torch.float16, pin_memory=True) # on host\n state[\"_grad_fp16\"] = torch.empty(p.size(), dtype=torch.float16, pin_memory=True) # on host\n state[\"_load_event\"] = torch.cuda.Event()\n update_params.append((p, state, group['betas'][0], group['betas'][1], group['eps'], group['lr'], group['weight_decay']))\n\n # transfer parameters to host asynchronously\n for param, state, _, _, _, _, _ in update_params:\n state[\"_grad_fp16\"].copy_(param.grad, non_blocking=True)\n torch.cuda.current_stream().record_event(state[\"_load_event\"])\n \n for param, state, beta1, beta2, 
eps, lr, weight_decay in update_params:\n # wait for transfer to host\n state[\"_load_event\"].synchronize()\n \n state[\"step\"] += 1\n \n # update parameters\n C.f_adam_cpu(\n state[\"_param_fp32\"].view(-1),\n state[\"_param_fp16\"].view(-1),\n state[\"_grad_fp16\"].view(-1),\n state[\"exp_avg\"].view(-1),\n state[\"exp_avg_sq\"].view(-1),\n beta1, beta2,\n eps, 0.0 if state[\"step\"] <= self._hold_steps else lr,\n self._scale,\n weight_decay,\n state[\"step\"]\n )\n \n\n # transfer parameters back to device asynchronously\n param.copy_(state[\"_param_fp16\"], non_blocking=True)\n \n self._steps_since_last_scale += 1\n\n return loss\n \n def loss_scale(self, loss : torch.Tensor) -> torch.Tensor:\n \"\"\"\n Backward with loss scale.\n \"\"\"\n return loss * (self.scale / config['world_size'])" ]
[ [ "torch.zeros", "torch.cuda.Event", "torch.cuda.current_stream", "torch.no_grad", "torch.enable_grad" ] ]
fourmi1995/IronExperiment-DCN
[ "5292539764588e0168016c7e7b4df038358e9f38" ]
[ "fpn/core/metric.py" ]
[ "# --------------------------------------------------------\r\n# Deformable Convolutional Networks\r\n# Copyright (c) 2017 Microsoft\r\n# Licensed under The MIT License [see LICENSE for details]\r\n# Modified by Haozhi Qi\r\n# --------------------------------------------------------\r\n# Based on:\r\n# MX-RCNN\r\n# Copyright (c) 2016 by Contributors\r\n# Licence under The Apache 2.0 License\r\n# https://github.com/ijkguo/mx-rcnn/\r\n# --------------------------------------------------------\r\n\r\nimport mxnet as mx\r\nimport numpy as np\r\n\r\n\r\ndef get_rpn_names():\r\n pred = ['rpn_cls_prob', 'rpn_bbox_loss']\r\n label = ['rpn_label', 'rpn_bbox_target', 'rpn_bbox_weight']\r\n return pred, label\r\n\r\n\r\ndef get_rcnn_names(cfg):\r\n pred = ['rcnn_cls_prob', 'rcnn_bbox_loss']\r\n label = ['rcnn_label', 'rcnn_bbox_target', 'rcnn_bbox_weight']\r\n if cfg.TRAIN.ENABLE_OHEM or cfg.TRAIN.END2END:\r\n pred.append('rcnn_label')\r\n if cfg.TRAIN.END2END:\r\n rpn_pred, rpn_label = get_rpn_names()\r\n pred = rpn_pred + pred\r\n label = rpn_label\r\n return pred, label\r\n\r\n\r\nclass RCNNFGAccuracy(mx.metric.EvalMetric):\r\n def __init__(self, cfg):\r\n super(RCNNFGAccuracy, self).__init__('R-CNN FG Accuracy')\r\n self.e2e = cfg.TRAIN.END2END\r\n self.ohem = cfg.TRAIN.ENABLE_OHEM\r\n self.pred, self.label = get_rcnn_names(cfg)\r\n\r\n def update(self, labels, preds):\r\n pred = preds[self.pred.index('rcnn_cls_prob')]\r\n if self.ohem or self.e2e:\r\n label = preds[self.pred.index('rcnn_label')]\r\n else:\r\n label = labels[self.label.index('rcnn_label')]\r\n num_classes = pred.shape[-1]\r\n pred_label = pred.asnumpy().reshape(-1, num_classes).argmax(axis=1).astype('int32')\r\n # selection of ground truth label is different from softmax or sigmoid classifier\r\n label = label.asnumpy().reshape(-1, ).astype('int32')\r\n keep_inds = np.where(label > 0)\r\n # filter out -1 label because of OHEM or invalid samples\r\n pred_label = pred_label[keep_inds]\r\n label = label[keep_inds]\r\n\r\n self.sum_metric += np.sum(np.equal(pred_label.flat, label.flat))\r\n self.num_inst += pred_label.shape[0]\r\n\r\n\r\nclass RPNFGFraction(mx.metric.EvalMetric):\r\n def __init__(self, cfg):\r\n super(RPNFGFraction, self).__init__('Proposal FG Fraction')\r\n self.e2e = cfg.TRAIN.END2END\r\n self.ohem = cfg.TRAIN.ENABLE_OHEM\r\n self.pred, self.label = get_rcnn_names(cfg)\r\n\r\n def update(self, labels, preds):\r\n pred = preds[self.pred.index('rcnn_cls_prob')]\r\n if self.ohem or self.e2e:\r\n label = preds[self.pred.index('rcnn_label')]\r\n else:\r\n label = labels[self.label.index('rcnn_label')]\r\n num_classes = pred.shape[-1]\r\n # selection of ground truth label is different from softmax or sigmoid classifier\r\n label = label.asnumpy().reshape(-1, ).astype('int32')\r\n fg_inds = np.where(label > 0)[0]\r\n bg_inds = np.where(label == 0)[0]\r\n self.sum_metric += fg_inds.shape[0]\r\n self.num_inst += (fg_inds.shape[0] + bg_inds.shape[0])\r\n\r\n\r\nclass RPNAccMetric(mx.metric.EvalMetric):\r\n def __init__(self):\r\n super(RPNAccMetric, self).__init__('RPNAcc')\r\n self.pred, self.label = get_rpn_names()\r\n\r\n def update(self, labels, preds):\r\n pred = preds[self.pred.index('rpn_cls_prob')]\r\n label = labels[self.label.index('rpn_label')]\r\n\r\n # pred (b, c, p) or (b, c, h, w)\r\n pred_label = mx.ndarray.argmax_channel(pred).asnumpy().astype('int32')\r\n pred_label = pred_label.reshape((pred_label.shape[0], -1))\r\n # label (b, p)\r\n label = label.asnumpy().astype('int32')\r\n\r\n # filter with 
keep_inds\r\n keep_inds = np.where(label != -1)\r\n pred_label = pred_label[keep_inds]\r\n label = label[keep_inds]\r\n\r\n self.sum_metric += np.sum(pred_label.flat == label.flat)\r\n self.num_inst += len(pred_label.flat)\r\n\r\n\r\nclass RCNNAccMetric(mx.metric.EvalMetric):\r\n def __init__(self, cfg):\r\n super(RCNNAccMetric, self).__init__('RCNNAcc')\r\n self.e2e = cfg.TRAIN.END2END\r\n self.ohem = cfg.TRAIN.ENABLE_OHEM\r\n self.pred, self.label = get_rcnn_names(cfg)\r\n\r\n def update(self, labels, preds):\r\n pred = preds[self.pred.index('rcnn_cls_prob')]\r\n if self.ohem or self.e2e:\r\n label = preds[self.pred.index('rcnn_label')]\r\n else:\r\n label = labels[self.label.index('rcnn_label')]\r\n\r\n last_dim = pred.shape[-1]\r\n pred_label = pred.asnumpy().reshape(-1, last_dim).argmax(axis=1).astype('int32')\r\n label = label.asnumpy().reshape(-1,).astype('int32')\r\n\r\n # filter with keep_inds\r\n keep_inds = np.where(label != -1)\r\n pred_label = pred_label[keep_inds]\r\n label = label[keep_inds]\r\n\r\n self.sum_metric += np.sum(pred_label.flat == label.flat)\r\n self.num_inst += len(pred_label.flat)\r\n\r\n\r\nclass RPNLogLossMetric(mx.metric.EvalMetric):\r\n def __init__(self):\r\n super(RPNLogLossMetric, self).__init__('RPNLogLoss')\r\n self.pred, self.label = get_rpn_names()\r\n\r\n def update(self, labels, preds):\r\n pred = preds[self.pred.index('rpn_cls_prob')]\r\n label = labels[self.label.index('rpn_label')]\r\n\r\n # label (b, p)\r\n label = label.asnumpy().astype('int32').reshape((-1))\r\n # pred (b, c, p) or (b, c, h, w) --> (b, p, c) --> (b*p, c)\r\n pred = pred.asnumpy().reshape((pred.shape[0], pred.shape[1], -1)).transpose((0, 2, 1))\r\n pred = pred.reshape((label.shape[0], -1))\r\n\r\n # filter with keep_inds\r\n keep_inds = np.where(label != -1)[0]\r\n label = label[keep_inds]\r\n cls = pred[keep_inds, label]\r\n\r\n cls += 1e-14\r\n cls_loss = -1 * np.log(cls)\r\n cls_loss = np.sum(cls_loss)\r\n self.sum_metric += cls_loss\r\n self.num_inst += label.shape[0]\r\n\r\n\r\nclass RCNNLogLossMetric(mx.metric.EvalMetric):\r\n def __init__(self, cfg):\r\n super(RCNNLogLossMetric, self).__init__('RCNNLogLoss')\r\n self.e2e = cfg.TRAIN.END2END\r\n self.ohem = cfg.TRAIN.ENABLE_OHEM\r\n self.pred, self.label = get_rcnn_names(cfg)\r\n\r\n def update(self, labels, preds):\r\n pred = preds[self.pred.index('rcnn_cls_prob')]\r\n if self.ohem or self.e2e:\r\n label = preds[self.pred.index('rcnn_label')]\r\n else:\r\n label = labels[self.label.index('rcnn_label')]\r\n\r\n last_dim = pred.shape[-1]\r\n pred = pred.asnumpy().reshape(-1, last_dim)\r\n label = label.asnumpy().reshape(-1,).astype('int32')\r\n\r\n # filter with keep_inds\r\n keep_inds = np.where(label != -1)[0]\r\n label = label[keep_inds]\r\n cls = pred[keep_inds, label]\r\n\r\n cls += 1e-14\r\n cls_loss = -1 * np.log(cls)\r\n cls_loss = np.sum(cls_loss)\r\n self.sum_metric += cls_loss\r\n self.num_inst += label.shape[0]\r\n\r\n\r\nclass RPNL1LossMetric(mx.metric.EvalMetric):\r\n def __init__(self):\r\n super(RPNL1LossMetric, self).__init__('RPNL1Loss')\r\n self.pred, self.label = get_rpn_names()\r\n\r\n def update(self, labels, preds):\r\n bbox_loss = preds[self.pred.index('rpn_bbox_loss')].asnumpy()\r\n\r\n # calculate num_inst (average on those kept anchors)\r\n label = labels[self.label.index('rpn_label')].asnumpy()\r\n num_inst = np.sum(label != -1)\r\n\r\n self.sum_metric += np.sum(bbox_loss)\r\n self.num_inst += num_inst\r\n\r\n\r\nclass RCNNL1LossMetric(mx.metric.EvalMetric):\r\n def __init__(self, cfg):\r\n 
super(RCNNL1LossMetric, self).__init__('RCNNL1Loss')\r\n self.e2e = cfg.TRAIN.END2END\r\n self.ohem = cfg.TRAIN.ENABLE_OHEM\r\n self.pred, self.label = get_rcnn_names(cfg)\r\n\r\n def update(self, labels, preds):\r\n bbox_loss = preds[self.pred.index('rcnn_bbox_loss')].asnumpy()\r\n if self.ohem:\r\n label = preds[self.pred.index('rcnn_label')].asnumpy()\r\n else:\r\n if self.e2e:\r\n label = preds[self.pred.index('rcnn_label')].asnumpy()\r\n else:\r\n label = labels[self.label.index('rcnn_label')].asnumpy()\r\n\r\n # calculate num_inst (average on those kept anchors)\r\n num_inst = np.sum(label != -1)\r\n\r\n self.sum_metric += np.sum(bbox_loss)\r\n self.num_inst += num_inst\r\n" ]
[ [ "numpy.equal", "numpy.where", "numpy.sum", "numpy.log" ] ]
pva701/text-classification-tf
[ "10b07c1a9c56b2662bf4539aab5fd3a75f1204d4" ]
[ "data_helpers.py" ]
[ "import re\n\nimport numpy as np\nfrom gensim.models import word2vec\nimport json\nimport pickle\n\n\ndef clean_str(string):\n \"\"\"\n Tokenization/string cleaning for all datasets except for SST.\n Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py\n \"\"\"\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()\n\n\ndef load_data_and_labels(positive_data_file, negative_data_file):\n \"\"\"\n Loads MR polarity data from files, splits the data into words and generates labels.\n Returns split sentences and labels.\n \"\"\"\n # Load data from files\n positive_examples = list(open(positive_data_file, \"r\").readlines())\n positive_examples = [s.strip() for s in positive_examples]\n negative_examples = list(open(negative_data_file, \"r\").readlines())\n negative_examples = [s.strip() for s in negative_examples]\n # Split by words\n x_text = positive_examples + negative_examples\n x_text = [clean_str(sent) for sent in x_text]\n # Generate labels\n positive_labels = [[0, 1] for _ in positive_examples]\n negative_labels = [[1, 0] for _ in negative_examples]\n y = np.concatenate([positive_labels, negative_labels], 0)\n return [x_text, y]\n\n\ndef batch_iter(data, batch_size, num_epochs, shuffle=True):\n \"\"\"\n Generates a batch iterator for a dataset.\n \"\"\"\n data = np.array(data)\n data_size = len(data)\n num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1\n for epoch in range(num_epochs):\n # Shuffle the data at each epoch\n if shuffle:\n shuffle_indices = np.random.permutation(np.arange(data_size))\n shuffled_data = data[shuffle_indices]\n else:\n shuffled_data = data\n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n yield shuffled_data[start_index:end_index]\n\n\ndef load_word2vec(fname, vocab_dict):\n \"\"\"\n Loads 300x1 word vecs from Google (Mikolov) word2vec\n \"\"\"\n model = word2vec.KeyedVectors.load_word2vec_format(fname, binary=True)\n word_vecs = {}\n for word in vocab_dict:\n if word in model:\n word_vecs[word] = model[word]\n return word_vecs\n\n\ndef dump_json_word_vecs_np(fname, word_vecs):\n word2vec_list = {}\n for k, v in word_vecs.items():\n word2vec_list[k] = v.tolist()\n\n with open(fname, 'w') as f:\n json.dump(word2vec_list, f)\n\n\ndef load_json_word_vecs_np(fname):\n with open(fname, 'r') as f:\n word2vec_list = json.load(f)\n word2vec_np = {}\n for k, v in word2vec_list.items():\n word2vec_np[k] = np.array(v, dtype=np.float32)\n return word2vec_np\n\n\ndef dump_pickle_word_vecs_np(fname, word_vecs):\n with open(fname, 'wb') as f:\n pickle.dump(word_vecs, f)\n\n\ndef load_pickle_word_vecs_np(fname):\n with open(fname, 'rb') as f:\n return pickle.load(f)\n # word_vecs64 = {}\n # for k, v in word_vecs.items():\n # word_vecs64[k] = v.astype(np.float64)\n # print(list(word_vecs64.items())[0][1].dtype)\n\ndef 
add_unknown_words(word_vecs, vocab_dict, bound=0.25, k=300):\n \"\"\"\n For words that occur in at least min_df documents, create a separate word vector.\n 0.25 is chosen so the unknown vectors have (approximately) same variance as pre-trained ones\n \"\"\"\n for word in vocab_dict:\n if word not in word_vecs:\n word_vecs[word] = np.random.uniform(-bound, bound, k)\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.random.uniform", "numpy.arange" ] ]
zjpoh/pyjanitor
[ "3768d8054a2e370262de06a9be617ff790b04ab0" ]
[ "tests/utils/test_skipna.py" ]
[ "import pandas as pd\nfrom janitor.utils import skipna\nimport numpy as np\n\nimport pytest\n\n\[email protected]\ndef test_skipna():\n df = pd.DataFrame({\"x\": [\"a\", \"b\", \"c\", np.nan], \"y\": [1, 2, 3, np.nan]})\n\n def func(s):\n return s + \"1\"\n\n # Verify that applying function causes error\n with pytest.raises(Exception):\n df[\"x\"].apply(func)\n\n result = df[\"x\"].apply(skipna(func))\n assert (\n result.values[:-1] == np.array([\"a1\", \"b1\", \"c1\"])\n ).all() and np.isnan(result.values[-1])\n" ]
[ [ "pandas.DataFrame", "numpy.array", "numpy.isnan" ] ]
erwincoumans/temprepo
[ "bc46dee3355c6a0c0f491f2b0f59cb4ed3f34046" ]
[ "examples/SharedMemory/plugins/eglPlugin/bullet.py" ]
[ "import os \nimport sys\nimport time\nimport subprocess\nimport pybullet as p\nfrom pdb import set_trace\nimport matplotlib.pyplot as plt\nimport numpy as np\n#subprocess.call([\"hardening-check\", p.__file__])\n\n\n\np.connect(p.DIRECT)\nlogId = p.startStateLogging(p.STATE_LOGGING_PROFILE_TIMINGS, \"debugTimings\")\n\nplugin = True\nif plugin:\n plugin_fn = '/home/argusm/lang/bullet3/build/lib.linux-x86_64-3.5/eglRenderer.cpython-35m-x86_64-linux-gnu.so'\n plugin = p.loadPlugin(plugin_fn,\"_tinyRendererPlugin\")\n if plugin < 0:\n print(\"\\nPlugin Failed to load!\\n\")\n sys.exit()\n print(\"plugin =\",plugin)\n\npath = '/home/argusm/lang/bullet3/examples/pybullet/gym/pybullet_data/duck_vhacd.urdf'\np.loadURDF(path,globalScaling=12)\n#path = '/home/argusm/lang/gym-grasping/gym_grasping/robots/models/kuka_iiwa/kuka_weiss_bolt.sdf'\n#p.loadSDF(path)\n\nstart = time.time()\n\ncamTargetPos = [0,0,0]\nupAxisIndex = 2\nnearPlane = 0.01\nfarPlane = 100\ncamDistance = 2\npixelWidth = 128\npixelHeight = 128\nfov = 60\n\nplot = False\nanim = True\nif plot:\n plt.ion()\nif anim:\n import matplotlib.animation as manimation\n FFMpegWriter = manimation.writers['ffmpeg']\n metadata = dict(title='Movie Test', artist='Matplotlib',\n comment='Movie support!')\n writer = FFMpegWriter(fps=15, metadata=metadata)\nif plot or anim:\n fig = plt.figure()\n img = np.random.rand(pixelWidth,pixelHeight)\n image = plt.imshow(img,interpolation='none',animated=True,label=\"blah\")\n ax = plt.gca()\n ax.set_axis_off()\n ax.set_aspect('equal')\n plt.subplots_adjust(wspace=0, hspace=0, left=0, bottom=0, right=1, top=1)\n\n\ntry:\n iter = range(0,360,10)\n with writer.saving(fig, \"debug.mp4\", len(iter)):\n for i,yaw in enumerate(iter):\n viewMatrix = p.computeViewMatrixFromYawPitchRoll(camTargetPos, camDistance, 0, yaw-90, 0, upAxisIndex)\n aspect = pixelWidth / pixelHeight;\n projectionMatrix = p.computeProjectionMatrixFOV(fov, aspect, nearPlane, farPlane);\n\n hight, width, img_arr, deept_arr, obj_arr = p.getCameraImage(pixelWidth,pixelHeight,viewMatrix,projectionMatrix)\n if plot:\n image.set_data(img_arr)#np_img_arr)\n ax.plot([0])\n #plt.draw()\n #plt.show()\n plt.pause(0.01)\n if anim:\n image.set_data(img_arr)#np_img_arr)\n ax.plot([0])\n writer.grab_frame()\n\n if i % 100 == 0 and i > 0:\n print(\"FPS\",100/(time.time()-start))\n start = time.time()\nfinally:\n p.stopStateLogging(logId)\n" ]
[ [ "matplotlib.pyplot.ion", "numpy.random.rand", "matplotlib.pyplot.figure", "matplotlib.pyplot.pause", "matplotlib.pyplot.gca", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.imshow" ] ]
ruchirjain86/professional-services
[ "739ac0f5ffc8237f750804fa9f0f14d4d918a0fa" ]
[ "examples/cloudml-bee-health-detection/trainer/inputs.py" ]
[ "#!/usr/bin/env python\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Takes care of all the input data for the model.\n\nParses columns and generates input specifications for `Estimator`.\n\nTypical usage example:\n\nestimator = model.create_classifier(\n config,\n parameters)\ntrain_spec = inputs.get_train_spec(\n training_path,\n image_path,\n batch_size,\n max_steps)\neval_spec = inputs.get_eval_spec(\n validation_path,\n image_path,\n eval_batch_size)\n\ntf.estimator.train_and_evaluate(\n estimator,\n train_spec,\n eval_spec)\n\"\"\"\n\nimport multiprocessing\n\nimport tensorflow as tf\n\nIMAGE_SIZE = 224\nTARGET_COLUMN = 'unhealthy'\nSHUFFLE_BUFFER_SIZE = 200\n\n\ndef _parse_csv(record):\n \"\"\"Parses columns from comma separated record.\n\n Defines types and column names for columns.\n\n Args:\n record: A Tensor of type string. Each string is a record/row in the csv\n and all records should have the same format.\n\n Returns:\n A dictionary with all column names and values for the record.\n \"\"\"\n column_defaults = [\n tf.constant([], tf.string),\n tf.constant([], tf.string),\n tf.constant([], tf.int32)]\n column_names = ['img_file', 'subspecies', TARGET_COLUMN]\n columns = tf.decode_csv(record, record_defaults=column_defaults)\n return dict(zip(column_names, columns))\n\n\ndef _get_features_target_tuple(features):\n \"\"\"Separates features from target.\n\n Args:\n features: Dictionary with all columns.\n\n Returns:\n A tuple of a dictionary of features and the target.\n \"\"\"\n target = features.pop(TARGET_COLUMN, None)\n return features, target\n\n\ndef _load_image(image_path):\n \"\"\"Loads, encodes, and resizes an image.\n\n Args:\n image_path: String with a path to an image.\n\n Returns:\n tensor representing the image.\n \"\"\"\n image_string = tf.read_file(image_path)\n image_decoded = tf.image.decode_png(image_string, channels=3)\n image_resized = tf.image.resize_images(\n image_decoded,\n [IMAGE_SIZE, IMAGE_SIZE])\n return image_resized\n\n\ndef _create_image_path(image_path, image_id):\n \"\"\"Generates path to a specific image.\n\n Args:\n image_path: String with path to the folder containing training images.\n image_id: String representing name of the file.\n\n Returns:\n String with path to the specific image.\n \"\"\"\n return image_path + image_id\n\n\ndef _process_features(features, image_path):\n \"\"\"Includes processed image in the features.\n\n Folder is expected to contain an image file for each record named the same\n as the row id.\n\n Args:\n features: Dictionary with data features.\n image_path: String with path to the folder containing training images.\n\n Returns:\n a features dict augmented or transformed by this function's processing.\n \"\"\"\n features['image'] = _load_image(\n _create_image_path(\n image_path,\n tf.reshape(features['img_file'], [])))\n return features\n\n\ndef _generate_input_fn(file_path, image_path, shuffle, 
batch_size, num_epochs):\n \"\"\"Generates a data input function.\n\n Args:\n file_path: String with path to the data.\n image_path: String with path to image folder.\n shuffle: Boolean flag specifying if data should be shuffled.\n batch_size: Number of records to be read at a time.\n num_epochs: Number of times to go through all of the records.\n\n Returns:\n A function used by `Estimator` to read data.\n \"\"\"\n def _input_fn():\n \"\"\"Returns features and target from input data.\n\n Defines the input dataset, specifies how to read the data, and reads it.\n\n Returns:\n A tuple os a dictionary containing the features and the target.\n \"\"\"\n num_threads = multiprocessing.cpu_count()\n dataset = tf.data.TextLineDataset(filenames=[file_path])\n dataset = dataset.skip(1)\n dataset = dataset.map(\n lambda x: _parse_csv(tf.expand_dims(x, -1)),\n num_parallel_calls=num_threads)\n dataset = dataset.map(\n lambda x: _process_features(x, image_path),\n num_parallel_calls=num_threads)\n dataset = dataset.map(\n _get_features_target_tuple,\n num_parallel_calls=num_threads)\n if shuffle:\n dataset = dataset.shuffle(SHUFFLE_BUFFER_SIZE)\n dataset = dataset.batch(batch_size)\n dataset = dataset.repeat(num_epochs)\n dataset = dataset.prefetch(1)\n iterator = dataset.make_one_shot_iterator()\n features, target = iterator.get_next()\n return features, target\n return _input_fn\n\n\ndef _get_serving_function(image_path):\n \"\"\"Creates a serving function.\n\n Args:\n image_path: String with path to image folder.\n\n Returns:\n Serving function to be used during inference.\n \"\"\"\n def _csv_serving_input_fn():\n \"\"\"Creates a `ServingInputReceiver` for inference.\n\n Creates a placeholder for the record and specifies how to parse the\n features from that record.\n\n Returns:\n A `ServingInputReceiver`.\n \"\"\"\n csv_row = tf.placeholder(\n shape=[None],\n dtype=tf.string\n )\n\n features = _parse_csv(csv_row)\n features, _ = _get_features_target_tuple(features)\n features['image'] = tf.map_fn(\n _load_image,\n _create_image_path(image_path, features['img_file']),\n dtype=tf.float32)\n\n return tf.estimator.export.ServingInputReceiver(\n features=features,\n receiver_tensors={'csv_row': csv_row})\n\n return _csv_serving_input_fn\n\n\ndef get_train_spec(training_path, image_path, batch_size, max_steps):\n \"\"\"Creates a `TrainSpec` for the `Estimaor`.\n\n Args:\n training_path: String with path to training data.\n image_path: String with path to image folder.\n batch_size: Number of records to be read at a time.\n max_steps: Maximum number of steps to take during training.\n\n Returns:\n A Train Spec.\n \"\"\"\n return tf.estimator.TrainSpec(\n input_fn=_generate_input_fn(\n training_path,\n image_path,\n shuffle=True,\n batch_size=batch_size,\n num_epochs=None),\n max_steps=max_steps)\n\n\ndef get_eval_spec(validation_path, image_path, batch_size):\n \"\"\"Creates an `EvalSpec` for the `Estimaor`.\n\n Args:\n validation_path: String with path to validation data.\n image_path: String with path to image folder.\n batch_size: Number of records to be read at a time.\n\n Returns:\n An Eval Spec.\n \"\"\"\n exporter = tf.estimator.FinalExporter(\n 'estimator',\n _get_serving_function(image_path),\n as_text=False)\n return tf.estimator.EvalSpec(\n input_fn=_generate_input_fn(\n validation_path,\n image_path,\n shuffle=False,\n batch_size=batch_size,\n num_epochs=None),\n exporters=[exporter],\n name='estimator-eval')\n" ]
[ [ "tensorflow.decode_csv", "tensorflow.expand_dims", "tensorflow.read_file", "tensorflow.reshape", "tensorflow.constant", "tensorflow.estimator.export.ServingInputReceiver", "tensorflow.image.decode_png", "tensorflow.data.TextLineDataset", "tensorflow.placeholder", "tensorflow.image.resize_images" ] ]
PacktPublishing/Practical-Convolutional-Neural-Networks
[ "365aa803d38316ed9749e4c8c0f3ae2667788781" ]
[ "Chapter01/code files/tf_basics_5.py" ]
[ "import tensorflow as tf\n\nx = tf.placeholder(\"float\", [None, 3])\ny = x * 2\n\nwith tf.Session() as session:\n input_data = [[1, 2, 3],\n [4, 5, 6],]\n result = session.run(y, feed_dict={x: input_data})\n print(result)\n" ]
[ [ "tensorflow.Session", "tensorflow.placeholder" ] ]
UKPLab/acl2022-structure-batches
[ "d7e116c1254ad00d8b59da3116043424a30f6f64" ]
[ "src/util/training_util.py" ]
[ "import random\n\nimport itertools\nimport numpy\nimport torch\n\nfrom util.strategies import STRATEGIES\nfrom util.evaluators import BiF1Evaluator, CrossF1Evaluator, BiRegressionEvaluator, CrossRegressionEvaluator, \\\n BiAccuracyEvaluator, CrossAccuracyEvaluator\n\n\ndef seed_all(seed = 0):\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.cuda.manual_seed(seed)\n numpy.random.seed(seed)\n random.seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\ndef get_evaluators(dev_samples, test_samples, strategy: STRATEGIES, num_labels:int=2, directed:bool=False, dev_threshold=True, train_loss=None, use_wandb=False):\n if not directed and num_labels == 2 and \"BI\" in strategy.name:\n evaluator_classes = [BiF1Evaluator]\n elif not directed and num_labels == 2 and \"CROSS\" in strategy.name:\n evaluator_classes = [CrossF1Evaluator]\n elif not directed and num_labels == 1 and \"BI\" in strategy.name:\n evaluator_classes = [BiRegressionEvaluator]\n elif not directed and num_labels == 1 and \"CROSS\" in strategy.name:\n evaluator_classes = [CrossRegressionEvaluator]\n elif num_labels > 2 and \"BI\" in strategy.name:\n evaluator_classes = [BiAccuracyEvaluator]\n elif num_labels > 2 and \"CROSS\" in strategy.name:\n evaluator_classes = [CrossAccuracyEvaluator]\n elif directed and num_labels == 2 and \"BI\" in strategy.name:\n evaluator_classes = [BiAccuracyEvaluator]\n elif directed and num_labels == 2 and \"CROSS\" in strategy.name:\n evaluator_classes = [CrossAccuracyEvaluator]\n\n return list(itertools.chain.from_iterable([\n [\n evaluator_class(devs, tests, dev_threshold, test_set, train_loss, use_wandb)\n for evaluator_class in evaluator_classes\n ]\n for (dev_set, devs, test_set, tests) in list(zip(dev_samples.keys(), dev_samples.values(), test_samples.keys(), test_samples.values()))\n ]))\n" ]
[ [ "torch.manual_seed", "torch.cuda.manual_seed", "torch.cuda.manual_seed_all", "numpy.random.seed" ] ]
MaybeS/MOT
[ "bae66c46c0cd74b29a0e66c5af58422ad050977b" ]
[ "utils/image.py" ]
[ "from typing import Tuple\n\nimport cv2\nimport numpy as np\n\n\ndef factor_crop(image: np.ndarray, dest_size,\n factor: int = 32, padding: int = 0, based: str = 'min') \\\n -> Tuple[np.ndarray, float, tuple]:\n\n def closest(num: int) \\\n -> int:\n return int(np.ceil(float(num) / factor)) * factor\n\n base = {\n 'min': np.min(image.shape[0:2]),\n 'max': np.max(image.shape[0:2]),\n 'w': image.shape[1],\n 'h': image.shape[0],\n }\n\n scale = float(dest_size) / base.get(based, base['min'])\n\n # Scale the image\n image = cv2.resize(image, None, fx=scale, fy=scale)\n\n # Compute the padded image shape\n # Ensure it's divisible by factor\n h, w, *_ = image.shape\n nh, nw = closest(h), closest(w)\n new_shape = [nh, nw] if image.ndim < 3 else [nh, nw, image.shape[-1]]\n\n # Pad the image\n padded = np.full(new_shape, fill_value=padding, dtype=image.dtype)\n padded[0:h, 0:w] = image\n\n return padded, scale, image.shape\n" ]
[ [ "numpy.max", "numpy.full", "numpy.min" ] ]
jangedoo/jange
[ "7f6ee5c341f417cae9e60318fb00716b39b02c00" ]
[ "tests/ops/test_neighbors.py" ]
[ "from unittest.mock import ANY\n\nimport numpy as np\nimport pytest\n\nfrom jange import ops, stream\n\n\[email protected](\"metric\", [\"cosine\", \"euclidean\"])\ndef test_nearest_neighbors(metric):\n # create a features vector where 1st and 3rd item are in same direction\n # and are near to each other so that both cosine and euclidean dist work\n # similarly 2nd and 4th vectors are opposite in direction and far from\n # the remaining two so that they are similar to each other based on both\n # cosine and euclidean distance\n features = np.array(\n [[1.0, 1, 1], [-0.1, -0.1, -0.1], [1, 0.9, 0.9], [-0.1, -0.1, -0.2]]\n )\n ds = stream.DataStream(features, context=[\"a\", \"b\", \"c\", \"d\"])\n op = ops.neighbors.NearestNeighborsOperation(n_neighbors=2, metric=metric)\n nbors_ds = ds.apply(op)\n nbors = list(nbors_ds)\n\n # distance does not matter as long as the items we expect to be same are\n # returned as neighbors\n assert nbors[0] == [\n {\"context\": \"a\", \"distance\": ANY, \"item_idx\": 0},\n {\"context\": \"c\", \"distance\": ANY, \"item_idx\": 2},\n ]\n\n assert nbors[1] == [\n {\"context\": \"b\", \"distance\": ANY, \"item_idx\": 1},\n {\"context\": \"d\", \"distance\": ANY, \"item_idx\": 3},\n ]\n\n assert nbors[2] == [\n {\"context\": \"c\", \"distance\": ANY, \"item_idx\": 2},\n {\"context\": \"a\", \"distance\": ANY, \"item_idx\": 0},\n ]\n\n assert nbors[3] == [\n {\"context\": \"d\", \"distance\": ANY, \"item_idx\": 3},\n {\"context\": \"b\", \"distance\": ANY, \"item_idx\": 1},\n ]\n\n\[email protected](\"metric\", [\"cosine\", \"euclidean\"])\ndef test_similar_pairs(metric):\n features = np.array(\n [[1.0, 1, 1], [-0.1, -0.1, -0.1], [1, 0.9, 0.9], [-0.1, -0.1, -0.2]]\n )\n # ds = stream.DataStream(features, context=[{\"A\": 1}, {\"B\": 2}, {\"c\": 3}, {\"d\": 4}])\n ds = stream.DataStream(features, context=[\"a\", \"b\", \"c\", \"d\"])\n op = ops.neighbors.SimilarPairOperation(n_neighbors=2, metric=metric)\n pairs_ds = ds.apply(op)\n pairs = list(pairs_ds)\n\n assert sorted(pairs) == sorted([(\"a\", \"c\", ANY), (\"b\", \"d\", ANY)])\n" ]
[ [ "numpy.array" ] ]
zjuchenll/dataflow-multithreading-on-tia
[ "8ed7238655d2a63c8ca3d730da52fe9fd6edf396" ]
[ "tools/simulator/memory.py" ]
[ "\"\"\"\nMemories and related utilities.\n\"\"\"\n\nfrom enum import IntEnum\n\nimport numpy as np\n\nfrom simulator.interconnect import Packet, SenderChannelBuffer, ReceiverChannelBuffer\n\n\nclass ReadPort:\n \"\"\"\n Read port interface.\n \"\"\"\n\n def __init__(self, name, buffer_depth):\n \"\"\"\n Initialize a read port.\n\n :param name: an English name\n :param buffer_depth: access buffer depth\n \"\"\"\n\n # Derive buffer names.\n self.name = name\n self.addr_in_channel_buffer = ReceiverChannelBuffer(f\"{name} Address-In Channel Buffer\", buffer_depth)\n self.data_out_channel_buffer = SenderChannelBuffer(f\"{name} Data-Out Channel Buffer\", buffer_depth)\n self.pending_read_packet = None\n\n\nclass WritePort:\n \"\"\"\n Write port interface.\n \"\"\"\n\n def __init__(self, name, buffer_depth):\n \"\"\"\n Initialize a write port.\n\n :param name: an English name\n :param buffer_depth: access buffer depth\n \"\"\"\n\n # Derive buffer names.\n self.name = name\n self.addr_in_channel_buffer = ReceiverChannelBuffer(f\"{name} Address-In Channel Buffer\", buffer_depth)\n self.data_in_channel_buffer = ReceiverChannelBuffer(f\"{name} Data-In Channel Buffer\", buffer_depth)\n\n\nclass Memory:\n \"\"\"\n Memory with arbitrary read and write ports and stream ports.\n \"\"\"\n\n def __init__(self, name, size):\n \"\"\"\n Initialize a memory element.\n\n :param name: an English name\n :param size: number of words of storage\n \"\"\"\n\n # Set to values expected upon reset.\n self.name = name\n self.contents = np.zeros(size, dtype=np.uint32)\n self.read_ports = []\n self.write_ports = []\n\n # --- Setup Methods ---\n\n def add_read_port(self, read_port):\n # Accessor method.\n self.read_ports.append(read_port)\n\n def add_write_port(self, write_port):\n # Accessor method.\n self.write_ports.append(write_port)\n\n # --- System Registration Method ---\n\n def _register(self, system):\n \"\"\"\n Register the memory with the system event loop.\n \n :param system: the rest of the system\n \"\"\"\n\n # Register the memory itself and any buffers.\n system.memories.append(self)\n for read_port in self.read_ports:\n system.buffers.append(read_port.addr_in_channel_buffer)\n system.buffers.append(read_port.data_out_channel_buffer)\n for write_port in self.write_ports:\n system.buffers.append(write_port.addr_in_channel_buffer)\n system.buffers.append(write_port.data_in_channel_buffer)\n\n # --- Time-stepping Method ---\n\n def iterate(self, debug):\n \"\"\"\n Perform a single cycle of execution.\n\n :param debug: whether to print out information about internal state\n \"\"\"\n\n # Write out the current contents of memory if debugging.\n if debug:\n print(f\"name: {self.name}\")\n print(\"contents:\")\n i = 0\n while i < 10:\n if i < len(self.contents):\n print(f\"0x{self.contents[i]:08x}\")\n else:\n break\n i += 1\n if len(self.contents) > 10:\n print(\"...\\n\")\n else:\n print(\"bound\\n\")\n\n # Output data packet origination and pending reads.\n for read_port in self.read_ports:\n if read_port.data_out_channel_buffer.peripheral_destination is not None:\n if (not read_port.data_out_channel_buffer.peripheral_destination.full\n and not read_port.data_out_channel_buffer.empty):\n read_data_packet = read_port.data_out_channel_buffer.dequeue()\n read_port.data_out_channel_buffer.peripheral_destination.enqueue(read_data_packet)\n if read_port.pending_read_packet:\n if not read_port.data_out_channel_buffer.full:\n if debug:\n print(f\"{read_port.name} read {read_port.pending_read_packet}.\\n\")\n 
read_port.data_out_channel_buffer.enqueue(read_port.pending_read_packet)\n read_port.pending_read_packet = None\n\n # Serve all valid requests on available ports.\n for read_port in self.read_ports:\n if not read_port.addr_in_channel_buffer.empty and read_port.pending_read_packet is None:\n read_addr_packet = read_port.addr_in_channel_buffer.dequeue()\n read_addr = read_addr_packet.value\n if debug:\n print(f\"{read_port.name} requesting data at address {read_addr_packet}.\\n\")\n read_port.pending_read_packet = Packet(read_addr_packet.tag, self.contents[read_addr])\n\n # Perform all valid write requests on available ports.\n for write_port in self.write_ports:\n if not write_port.addr_in_channel_buffer.empty and not write_port.data_in_channel_buffer.empty:\n write_addr_packet = write_port.addr_in_channel_buffer.dequeue()\n write_addr = write_addr_packet.value\n write_data_packet = write_port.data_in_channel_buffer.dequeue()\n write_data = write_data_packet.value\n if debug:\n print(f\"{write_port.name} writing {write_data_packet} at address {write_addr_packet}.\\n\")\n self.contents[write_addr] = write_data\n\n # --- Reset Method ---\n\n def reset(self):\n \"\"\"\n Reset the memory.\n \"\"\"\n\n # Note: we assume the contents of the memory is persistent.\n\n # Reset any internal state.\n for read_port in self.read_ports:\n read_port.pending_read_packet = None\n for write_port in self.write_ports:\n pass # No internal state for now.\n\n # Reset any buffers.\n for read_port in self.read_ports:\n read_port.addr_in_channel_buffer.reset()\n read_port.data_out_channel_buffer.reset()\n for write_port in self.write_ports:\n write_port.addr_in_channel_buffer.reset()\n write_port.data_in_channel_buffer.reset()\n" ]
[ [ "numpy.zeros" ] ]
nflanner/lumin
[ "9713f5d18cae43f6d3ef5badeca785eb21e00510" ]
[ "lumin/nn/models/blocks/conv_blocks.py" ]
[ "from typing import Callable, Union, Optional, Any\n\nimport torch.nn as nn\nfrom torch.tensor import Tensor\n\nfrom ..initialisations import lookup_normal_init\nfrom ..layers.activations import lookup_act\n\n\n__all__ = ['Conv1DBlock', 'Res1DBlock', 'ResNeXt1DBlock']\n\n\nclass Conv1DBlock(nn.Module):\n r'''\n Basic building block for a building and applying a single 1D convolutional layer.\n\n Arguments:\n in_c: number of input channels (number of features per object / rows in input matrix)\n out_c: number of output channels (number of features / rows in output matrix)\n kernel_sz: width of kernel, i.e. the number of columns to overlay\n padding: amount of padding columns to add at start and end of convolution.\n If left as 'auto', padding will be automatically computed to conserve the number of columns.\n stride: number of columns to move kernel when computing convolutions. Stride 1 = kernel centred on each column,\n stride 2 = kernel centred on ever other column and input size halved, et cetera.\n act: string representation of argument to pass to lookup_act\n bn: whether to use batch normalisation (default order weights->activation->batchnorm)\n lookup_init: function taking choice of activation function, number of inputs, and number of outputs an returning a function to initialise layer weights.\n lookup_act: function taking choice of activation function and returning an activation function layer\n\n Examples::\n >>> conv = Conv1DBlock(in_c=3, out_c=16, kernel_sz=3)\n >>>\n >>> conv = Conv1DBlock(in_c=16, out_c=32, kernel_sz=3, stride=2)\n >>> \n >>> conv = Conv1DBlock(in_c=3, out_c=16, kernel_sz=3, act='swish', bn=True)\n '''\n\n def __init__(self, in_c:int, out_c:int, kernel_sz:int, padding:Union[int,str]='auto', stride:int=1, act:str='relu', bn:bool=False,\n lookup_init:Callable[[str,Optional[int],Optional[int]],Callable[[Tensor],None]]=lookup_normal_init,\n lookup_act:Callable[[str],Any]=lookup_act):\n super().__init__()\n self.in_c,self.out_c,self.ks,self.pad,self.stride,self.act,self.bn = in_c,out_c,kernel_sz,padding,stride,act,bn\n self.lookup_init,self.lookup_act = lookup_init,lookup_act\n if self.pad == 'auto': self.pad = self.get_padding(self.ks)\n self.set_layers()\n \n @staticmethod\n def get_padding(kernel_sz:int) -> int:\n r'''\n Automatically computes the required padding to keep the number of columns equal before and after convolution\n\n Arguments:\n kernel_sz: width of convolutional kernel\n\n Returns:\n size of padding\n '''\n\n return kernel_sz//2\n \n def set_layers(self) -> None:\n r'''\n One of the main function to overload when inheriting from class. By default calls `self.get_conv_layer` once but can be changed to produce more\n complicated architectures. Sets `self.layers` to the constructed architecture.\n '''\n\n self.layers = self.get_conv_layer(in_c=self.in_c, out_c=self.out_c, kernel_sz=self.ks, padding=self.pad, stride=self.stride)\n \n def get_conv_layer(self, in_c:int, out_c:int, kernel_sz:int, padding:Union[int,str]='auto', stride:int=1, pre_act:bool=False, groups:int=1) -> nn.Module:\n r'''\n Builds a sandwich of layers with a single concilutional layer, plus any requested batch norm and activation.\n Also initialises layers to requested scheme.\n\n Arguments:\n in_c: number of input channels (number of features per object / rows in input matrix)\n out_c: number of output channels (number of features / rows in output matrix)\n kernel_sz: width of kernel, i.e. 
the number of columns to overlay\n padding: amount of padding columns to add at start and end of convolution.\n If left as 'auto', padding will be automatically computed to conserve the number of columns.\n stride: number of columns to move kernel when computing convolutions. Stride 1 = kernel centred on each column,\n stride 2 = kernel centred on ever other column and input size halved, et cetera.\n pre_act: whether to apply batchnorm and activation layers prior to the weight layer, or afterwards\n groups: number of blocks of connections from input channels to output channels\n '''\n \n if padding == 'auto': padding = self.get_padding(kernel_sz)\n layers = []\n if pre_act:\n if self.bn: layers.append(nn.BatchNorm1d(in_c))\n if self.act != 'linear': layers.append(self.lookup_act(self.act))\n \n layers.append(nn.Conv1d(in_channels=in_c, out_channels=out_c, kernel_size=kernel_sz, padding=padding, stride=stride, groups=groups))\n self.lookup_init(self.act)(layers[-1].weight)\n nn.init.zeros_(layers[-1].bias)\n \n if not pre_act:\n if self.act != 'linear': layers.append(self.lookup_act(self.act))\n if self.bn: layers.append(nn.BatchNorm1d(out_c))\n return nn.Sequential(*layers)\n\n def forward(self, x:Tensor) -> Tensor:\n r'''\n Passes input through the layers.\n Might need to be overloaded in inheritance, depending on architecture.\n\n Arguments:\n x: input tensor\n \n Returns:\n Resulting tensor\n '''\n\n return self.layers(x)\n\n\nclass Res1DBlock(Conv1DBlock):\n r'''\n Basic building block for a building and applying a pair of residually connected 1D convolutional layers (https://arxiv.org/abs/1512.03385).\n Batchnorm is applied 'pre-activation' as per https://arxiv.org/pdf/1603.05027.pdf, and convolutional shortcuts (again https://arxiv.org/pdf/1603.05027.pdf)\n are used when the stride of the first layer is greater than 1, or the number of input channels does not equal the number of output channels.\n\n Arguments:\n in_c: number of input channels (number of features per object / rows in input matrix)\n out_c: number of output channels (number of features / rows in output matrix)\n kernel_sz: width of kernel, i.e. the number of columns to overlay\n padding: amount of padding columns to add at start and end of convolution.\n If left as 'auto', padding will be automatically computed to conserve the number of columns.\n stride: number of columns to move kernel when computing convolutions. 
Stride 1 = kernel centred on each column,\n stride 2 = kernel centred on ever other column and input size halved, et cetera.\n act: string representation of argument to pass to lookup_act\n bn: whether to use batch normalisation (order is pre-activation: batchnorm->activation->weights)\n lookup_init: function taking choice of activation function, number of inputs, and number of outputs an returning a function to initialise layer weights.\n lookup_act: function taking choice of activation function and returning an activation function layer\n\n Examples::\n >>> conv = Res1DBlock(in_c=16, out_c=16, kernel_sz=3)\n >>>\n >>> conv = Res1DBlock(in_c=16, out_c=32, kernel_sz=3, stride=2)\n >>> \n >>> conv = Res1DBlock(in_c=16, out_c=16, kernel_sz=3, act='swish', bn=True)\n '''\n\n def __init__(self, in_c:int, out_c:int, kernel_sz:int, padding:Union[int,str]='auto', stride:int=1, act:str='relu', bn:bool=False,\n lookup_init:Callable[[str,Optional[int],Optional[int]],Callable[[Tensor],None]]=lookup_normal_init,\n lookup_act:Callable[[str],Any]=lookup_act):\n super().__init__(in_c=in_c, out_c=out_c, kernel_sz=kernel_sz, padding=padding, stride=stride, act=act, bn=bn,\n lookup_init=lookup_init, lookup_act=lookup_act)\n \n def set_layers(self):\n r'''\n Constructs a pair of pre-activation convolutional layers, and a shortcut layer if necessary.\n '''\n\n self.layers = nn.Sequential(self.get_conv_layer(in_c=self.in_c, out_c=self.out_c, kernel_sz=self.ks, padding=self.pad, stride=self.stride,\n pre_act=True),\n self.get_conv_layer(in_c=self.out_c, out_c=self.out_c, kernel_sz=self.ks, padding=self.pad, stride=1,\n pre_act=True))\n if self.stride != 1 or self.in_c != self.out_c:\n self.shortcut = nn.Conv1d(in_channels=self.in_c, out_channels=self.out_c, kernel_size=1, stride=self.stride)\n else:\n self.shortcut = None\n\n def forward(self, x:Tensor) -> Tensor:\n r'''\n Passes input through the pair of layers and then adds the resulting tensor to the original input,\n which may be passed through a shortcut connection is necessary.\n\n Arguments:\n x: input tensor\n \n Returns:\n Resulting tensor\n '''\n\n skip = x if self.shortcut is None else self.shortcut(x)\n return skip + self.layers(x)\n\n\nclass ResNeXt1DBlock(Conv1DBlock):\n r'''\n Basic building block for a building and applying a set of residually connected groups of 1D convolutional layers (https://arxiv.org/abs/1611.05431).\n Batchnorm is applied 'pre-activation' as per https://arxiv.org/pdf/1603.05027.pdf, and convolutional shortcuts (again https://arxiv.org/pdf/1603.05027.pdf)\n are used when the stride of the first layer is greater than 1, or the number of input channels does not equal the number of output channels.\n\n Arguments:\n in_c: number of input channels (number of features per object / rows in input matrix)\n inter_c: number of intermediate channels in groups\n cardinality: number of groups\n out_c: number of output channels (number of features / rows in output matrix)\n kernel_sz: width of kernel, i.e. the number of columns to overlay\n padding: amount of padding columns to add at start and end of convolution.\n If left as 'auto', padding will be automatically computed to conserve the number of columns.\n stride: number of columns to move kernel when computing convolutions. 
Stride 1 = kernel centred on each column,\n stride 2 = kernel centred on ever other column and input size halved, et cetera.\n act: string representation of argument to pass to lookup_act\n bn: whether to use batch normalisation (order is pre-activation: batchnorm->activation->weights)\n lookup_init: function taking choice of activation function, number of inputs, and number of outputs an returning a function to initialise layer weights.\n lookup_act: function taking choice of activation function and returning an activation function layer\n\n Examples::\n >>> conv = ResNeXt1DBlock(in_c=32, inter_c=4, cardinality=4, out_c=32, kernel_sz=3)\n >>>\n >>> conv = ResNeXt1DBlock(in_c=32, inter_c=4, cardinality=4, out_c=32, kernel_sz=3, stride=2)\n >>> \n >>> conv = ResNeXt1DBlock(in_c=32, inter_c=4, cardinality=4, out_c=32, kernel_sz=3, act='swish', bn=True)\n '''\n\n def __init__(self, in_c:int, inter_c:int, cardinality:int, out_c:int, kernel_sz:int, padding:Union[int,str]='auto', stride:int=1, act:str='relu',\n bn:bool=False,\n lookup_init:Callable[[str,Optional[int],Optional[int]],Callable[[Tensor],None]]=lookup_normal_init,\n lookup_act:Callable[[str],Any]=lookup_act):\n self.inter_c,self.cardinality = inter_c,cardinality\n super().__init__(in_c=in_c, out_c=out_c, kernel_sz=kernel_sz, padding=padding, stride=stride, act=act, bn=bn,\n lookup_init=lookup_init, lookup_act=lookup_act)\n \n def set_layers(self):\n r'''\n Constructs a set of grouped pre-activation convolutional layers, and a shortcut layer if necessary.\n '''\n\n self.layers = nn.Sequential(self.get_conv_layer(in_c=self.in_c, out_c=self.inter_c*self.cardinality, kernel_sz=1, stride=self.stride, pre_act=True),\n self.get_conv_layer(in_c=self.inter_c*self.cardinality, out_c=self.inter_c*self.cardinality, kernel_sz=self.ks,\n padding=self.pad, stride=1, groups=self.cardinality, pre_act=True),\n self.get_conv_layer(in_c=self.inter_c*self.cardinality, out_c=self.out_c, kernel_sz=1, stride=1, pre_act=True))\n if self.stride != 1 or self.in_c != self.out_c:\n self.shortcut = nn.Conv1d(in_channels=self.in_c, out_channels=self.out_c, kernel_size=1, stride=self.stride)\n else:\n self.shortcut = None\n\n def forward(self, x:Tensor) -> Tensor:\n r'''\n Passes input through the set of layers and then adds the resulting tensor to the original input,\n which may be passed through a shortcut connection is necessary.\n\n Arguments:\n x: input tensor\n \n Returns:\n Resulting tensor\n '''\n\n skip = x if self.shortcut is None else self.shortcut(x)\n return skip + self.layers(x)\n" ]
[ [ "torch.nn.Sequential", "torch.nn.init.zeros_", "torch.nn.BatchNorm1d", "torch.nn.Conv1d" ] ]
jonaan99/LEDband
[ "8d576817f15074b06d29a620517f4201ca372b31" ]
[ "client/libs/color_service.py" ]
[ "import numpy as np\n\nclass ColorService():\n def __init__(self, config):\n\n self._config = config\n self.full_gradients = {}\n\n def build_gradients(self):\n\n self.full_gradients = {}\n\n for gradient in self._config[\"gradients\"]:\n not_mirrored_gradient = self._easing_gradient_generator(\n self._config[\"gradients\"][gradient], # All colors of the current gradient\n self._config[\"device_config\"][\"LED_Count\"]\n )\n\n # Mirror the gradient to get seemsles transition from start to the end\n # [1,2,3,4]\n # -> [1,2,3,4,4,3,2,1]\n self.full_gradients[gradient] = np.concatenate(\n (not_mirrored_gradient[:, ::-1], not_mirrored_gradient), \n axis = 1\n )\n \n\n\n def _easing_gradient_generator(self, colors, length):\n \"\"\"\n returns np.array of given length that eases between specified colours\n\n parameters:\n colors - list, colours must be in self.config.colour_manager[\"colours\"]\n eg. [\"Red\", \"Orange\", \"Blue\", \"Purple\"]\n length - int, length of array to return. should be from self.config.settings\n eg. self.config.settings[\"devices\"][\"my strip\"][\"configuration\"][\"N_PIXELS\"]\n \"\"\"\n def _easing_func(x, length, slope=2.5):\n # returns a nice eased curve with defined length and curve\n xa = (x/length)**slope\n return xa / (xa + (1 - (x/length))**slope)\n colors = colors[::-1] # needs to be reversed, makes it easier to deal with\n n_transitions = len(colors) - 1\n ease_length = length // n_transitions\n pad = length - (n_transitions * ease_length)\n output = np.zeros((3, length))\n ease = np.array([_easing_func(i, ease_length, slope=2.5) for i in range(ease_length)])\n # for r,g,b\n for i in range(3):\n # for each transition\n for j in range(n_transitions):\n # Starting ease value\n start_value = colors[j][i]\n # Ending ease value\n end_value = colors[j+1][i]\n # Difference between start and end\n diff = end_value - start_value\n # Make array of all start value\n base = np.empty(ease_length)\n base.fill(start_value)\n # Make array of the difference between start and end\n diffs = np.empty(ease_length)\n diffs.fill(diff)\n # run diffs through easing function to make smooth curve\n eased_diffs = diffs * ease\n # add transition to base values to produce curve from start to end value\n base += eased_diffs\n # append this to the output array\n output[i, j*ease_length:(j+1)*ease_length] = base\n # cast to int\n output = np.asarray(output, dtype=int)\n # pad out the ends (bit messy but it works and looks good)\n if pad:\n for i in range(3):\n output[i, -pad:] = output[i, -pad-1]\n return output\n\n def colour(self, colour):\n # returns the values of a given colour. use this function to get colour values.\n if colour in self._config[\"colours\"]:\n return self._config[\"colours\"][colour]\n else:\n print(\"colour {} has not been defined\".format(colour))\n return (0,0,0)" ]
[ [ "numpy.concatenate", "numpy.empty", "numpy.asarray", "numpy.zeros" ] ]
BinhMisfit/PSRMTE
[ "dead57779d56d1ec19eec77b763dbda128be53c6" ]
[ "xlnet-paper/classifier_utils.py" ]
[ "from absl import flags\n\nimport re\nimport numpy as np\n\nimport tensorflow as tf\nfrom data_utils import SEP_ID, CLS_ID\n\nFLAGS = flags.FLAGS\n\nSEG_ID_A = 0\nSEG_ID_B = 1\nSEG_ID_CLS = 2\nSEG_ID_SEP = 3\nSEG_ID_PAD = 4\n\nclass PaddingInputExample(object):\n \"\"\"Fake example so the num input examples is a multiple of the batch size.\n When running eval/predict on the TPU, we need to pad the number of examples\n to be a multiple of the batch size, because the TPU requires a fixed batch\n size. The alternative is to drop the last batch, which is bad because it means\n the entire output data won't be generated.\n We use this class instead of `None` because treating `None` as padding\n battches could cause silent errors.\n \"\"\"\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self,\n input_ids,\n input_mask,\n segment_ids,\n label_id,\n is_real_example=True):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.label_id = label_id\n self.is_real_example = is_real_example\n\n\ndef _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n\n\ndef convert_single_example(ex_index, example, label_list, max_seq_length,\n tokenize_fn):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n\n if isinstance(example, PaddingInputExample):\n return InputFeatures(\n input_ids=[0] * max_seq_length,\n input_mask=[1] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n label_id=0,\n is_real_example=False)\n\n if label_list is not None:\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenize_fn(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenize_fn(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for two [SEP] & one [CLS] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for one [SEP] & one [CLS] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:max_seq_length - 2]\n\n tokens = []\n segment_ids = []\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(SEG_ID_A)\n tokens.append(SEP_ID)\n segment_ids.append(SEG_ID_A)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(SEG_ID_B)\n tokens.append(SEP_ID)\n segment_ids.append(SEG_ID_B)\n\n tokens.append(CLS_ID)\n segment_ids.append(SEG_ID_CLS)\n\n input_ids = tokens\n\n # The mask has 0 for real tokens and 1 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n if len(input_ids) < max_seq_length:\n delta_len = max_seq_length - len(input_ids)\n input_ids = [0] * delta_len + input_ids\n input_mask = [1] * delta_len + input_mask\n segment_ids = [SEG_ID_PAD] * delta_len + segment_ids\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n if label_list is not None:\n label_id = label_map[example.label]\n else:\n label_id = example.label\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"guid: %s\" % (example.guid))\n tf.logging.info(\"tokens_a: %s\" % \" \".join([str(x) for x in tokens_a]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n tf.logging.info(\"label: {} (id = {})\".format(example.label, label_id))\n\n feature = InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id)\n return feature\n\n\n\n" ]
[ [ "tensorflow.logging.info" ] ]
shenw33/generate_CFD
[ "4ce47ab75fa15343712430bcde59eee88f45c293" ]
[ "dataset_creator.py" ]
[ "import torch\nimport numpy as np\nfrom torch.utils import data\n\n\ndef dataset_creater(domain_size, velocity_mag = 0.5, early_time_steps=20, num_samples = 2000):\n \n size = domain_size\n num_nodes = 100\n lidVecList = np.linspace(0, velocity_mag, num_nodes)\n \n\n L = 1 # dimensionless LX / LX\n H = 1 # dimensionless LY / LX\n dx = L / (size - 1)\n dy = H / (size - 1)\n CFL = 0.04\n dt = CFL * min(dx , dy)\n RE = 20\n FlowData = torch.zeros((num_samples,3,size,size))\n \n for k in range(0,num_samples):\n u = np.zeros((size, size))\n v = np.zeros((size, size))\n p = np.zeros((size, size))\n \n U0, V0 = np.random.choice(lidVecList, 2)\n\n# blank_part = np.random.randint(2,domain_size) # From second node to end\n# u0_vector = np.zeros((1,size))\n# u0_vector[0, 0:blank_part] = u0 \n usol, vsol, psol = solve_flow(early_time_steps, size, size, u, v, dt, dx, dy, p, u0=U0, v0=V0)\n FlowData[k,0:1,:,:] = torch.from_numpy(usol) # Channel 0\n FlowData[k,1:2,:,:] = torch.from_numpy(vsol) # Channel 1\n FlowData[k,2:3,:,:] = torch.from_numpy(psol) # Channel 2\n \n torch.save(FlowData, 'FlowData_UV_0130.pt')\n \n return FlowData\n\nclass CavityFlowDataset(data.Dataset):\n \"\"\"Characterizes the cavity flow dataset for training. \"\"\"\n \n def __init__(self, root_dir, flowfile):\n 'Initialization'\n self.flowdata = torch.load(root_dir + flowfile)\n\n def __len__(self):\n 'Denotes the total number of samples'\n return self.flowdata.size()[0]\n\n def __getitem__(self, index):\n 'Generates one sample of data'\n X = self.flowdata[index]\n return X" ]
[ [ "torch.zeros", "numpy.random.choice", "numpy.zeros", "torch.save", "torch.from_numpy", "torch.load", "numpy.linspace" ] ]
xiaochunxin/athena
[ "42a12252a07d9638f8e3bcbba788fe48e25e8548" ]
[ "athena/transform/feats/framepow_test.py" ]
[ "# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.\n# All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"The model tests framepow FE.\"\"\"\n\nimport os\nfrom pathlib import Path\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.framework.ops import disable_eager_execution\nfrom athena.transform.feats.read_wav import ReadWav\nfrom athena.transform.feats.framepow import Framepow\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\n\nclass FramePowTest(tf.test.TestCase):\n \"\"\"Framepow extraction test.\"\"\"\n def test_framepow(self):\n wav_path_16k = str(\n Path(os.environ[\"MAIN_ROOT\"]).joinpath(\"examples/sm1_cln.wav\")\n )\n\n with self.session():\n read_wav = ReadWav.params().instantiate()\n input_data, sample_rate = read_wav(wav_path_16k)\n config = {\"snip_edges\": 1}\n framepow = Framepow.params(config).instantiate()\n framepow_test = framepow(input_data, sample_rate)\n\n real_framepow_feats = np.array(\n [9.819611, 9.328745, 9.247337, 9.26451, 9.266059]\n )\n\n if tf.executing_eagerly():\n self.assertAllClose(\n framepow_test.numpy()[0:5],\n real_framepow_feats,\n rtol=1e-05,\n atol=1e-05,\n )\n print(framepow_test.numpy()[0:5])\n else:\n self.assertAllClose(\n framepow_test.eval()[0:5],\n real_framepow_feats,\n rtol=1e-05,\n atol=1e-05,\n )\n print(framepow_test.eval()[0:5])\n\n\nif __name__ == \"__main__\":\n is_eager = True\n if not is_eager:\n disable_eager_execution()\n else:\n if tf.__version__ < \"2.0.0\":\n tf.compat.v1.enable_eager_execution()\n tf.test.main()\n" ]
[ [ "numpy.array", "tensorflow.executing_eagerly", "tensorflow.test.main", "tensorflow.compat.v1.enable_eager_execution", "tensorflow.python.framework.ops.disable_eager_execution" ] ]
joshloyal/pydata-amazon-products
[ "fdfe4d0cd49b12fa5a74b05f5d862bb13e7bd72d" ]
[ "amazon_products/image_utils.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport glob\nimport os\nimport functools\nimport itertools\n\nimport pandas as pd\nimport numpy as np\nfrom joblib import Parallel, delayed\nfrom PIL import Image as pil_image\n\n\nimage_extensions = {'jpg', 'jpeg', 'png'}\n\n\ndef image_path(image_file, image_dir=''):\n return os.path.join(image_dir, image_file)\n\n\ndef sample_images(seq, n_samples, seed=123):\n random_state = np.random.RandomState(seed)\n return random_state.choice(seq, size=n_samples, replace=False)\n\n\ndef load_image(image_file,\n image_dir='',\n target_size=None,\n dtype=np.uint8,\n as_image=False):\n \"\"\"Loads an image from a file on disk. Support formats are\n `jpg`, `png`, or `gif`.\n\n Parameters\n ----------\n image_file : str\n The image file on disk.\n image_dir : str (default='')\n The directory where the image resides on disk. This string will\n be appended to the beginning of `image_file`.\n target_size : tuple (default=None)\n The target size in pixels. This is a 2-tuple (width, height).\n If None then no resizing is performed.\n dtype : numpy dtype (default=np.uint8)\n The dtype of the output numpy array.\n as_image : bool (default=False)\n Whether to return a PIL Image. If True a PIL Image is returned\n otherwise the output is a numpy array.\n \"\"\"\n image_loc = image_path(image_file, image_dir=image_dir)\n img = pil_image.open(image_loc).convert('RGB')\n\n if target_size:\n img = img.resize((target_size[1], target_size[0]), pil_image.LANCZOS)\n\n if as_image:\n return img\n\n return np.asarray(img, dtype)\n\n\ndef load_images(image_files,\n image_dir='',\n n_samples=None,\n target_size=(128, 128),\n dtype=np.uint8,\n as_image=False,\n random_state=123,\n n_jobs=1):\n if n_samples is not None and n_samples < len(image_files):\n image_files = sample_images(image_files, n_samples, seed=random_state)\n\n # perform this in parallel with joblib\n images = Parallel(n_jobs=n_jobs)(\n delayed(load_image)(img,\n image_dir=image_dir,\n target_size=target_size,\n as_image=as_image,\n dtype=dtype)\n for img in image_files)\n\n if as_image:\n return images\n\n return np.stack(images, axis=0)\n\n\ndef image_glob_pattern(image_directory, ext):\n return os.path.join(image_directory, '*.' + ext)\n\n\ndef image_glob(image_directory, ext):\n return glob.glob(image_glob_pattern(image_directory, ext))\n\n\ndef load_from_directory(image_directory,\n n_samples=None,\n dtype=np.uint8,\n as_image=False,\n random_state=123,\n n_jobs=1,):\n image_files = list(itertools.chain.from_iterable(\n [image_glob(image_directory, ext) for ext in image_extensions]))\n return load_images(image_files,\n n_samples=n_samples,\n dtype=dtype,\n as_image=as_image,\n random_state=random_state,\n n_jobs=n_jobs)\n\n\ndef images_to_sprite(images):\n \"\"\"Creates a sprite image along with any necessary padding.\n\n Parameters\n ----------\n images : list\n A List of PIL Image objects.\n\n Returns\n -------\n A properly shaped NxWx3 PIL Image with any necessary padding.\n \"\"\"\n n_samples = len(images)\n\n #features = hsv_features(images, background='white', n_jobs=-1)\n #image_order = np.argsort(features[:, 0])\n\n if n_samples < 1:\n raise ValueError('Cannot create a sprite image from zero images.')\n\n image_width, image_height = images[0].size\n\n # sprite image should be sqrt(n_samples) x sqrt(n_samples). 
If\n # n_samples is not a perfect square then we pad with white images.\n table_size = int(np.ceil(np.sqrt(n_samples)))\n\n # create the new image. Hard-code the background color to white\n background_color = (255, 255, 255)\n sprite_size = (table_size * image_width, table_size * image_height)\n sprite_image = pil_image.new('RGB', sprite_size, background_color)\n\n # loop through the images and add them to the sprite image\n for index, image in enumerate(images):\n #for index, image_index in enumerate(image_order):\n # image = images[image_index]\n # Determine where we are in the sprite image.\n row_index = int(index / table_size)\n column_index = index % table_size\n\n # determine the bounding box of the image (where it is)\n left = column_index * image_width\n right = left + image_width\n upper = row_index * image_height\n lower = upper + image_height\n bounding_box = (left, upper, right, lower)\n\n sprite_image.paste(image, bounding_box)\n\n return sprite_image\n\n\ndef directory_to_sprites(image_directory,\n n_samples=None,\n random_state=123,\n n_jobs=1):\n \"\"\"Creates a sprite image along with any necessary padding.\n\n Parameters\n ----------\n image_directory : str\n Path to the directory holding the images.\n\n n_samples : int (default=None)\n The number of random sample images to use. If None, then\n all images are loaded. This can be memory expensive.\n\n as_image : bool (default=False)\n Whether to return a PIL image otherwise return a numpy array.\n\n random_state : int (default=123)\n The seed to use for the random sampling.\n\n n_jobs : int (default=1)\n The number of parallel workers to use for loading\n the image files.\n\n Returns\n -------\n A properly shaped NxWx3 image with any necessary padding.\n \"\"\"\n images = load_from_directory(\n image_directory,\n n_samples=n_samples,\n dtype=np.float32,\n as_image=True,\n random_state=random_state,\n n_jobs=n_jobs)\n\n return images_to_sprite(images)\n\n\ndef column_to_sprites(image_column,\n sort_by=None,\n data=None,\n image_directory='',\n n_samples=None,\n random_state=123,\n n_jobs=1):\n \"\"\"Creates a sprite image along with any necessary padding.\n\n Parameters\n ----------\n image_column : str\n Column name corresponding to the images.\n\n sort_by : str\n Column to sort by.\n\n data : pd.DataFrame\n Pandas dataframe holding the dataset.\n\n image_directory : str (default='')\n The location of the image files on disk.\n\n n_samples : int (default=None)\n The number of random sample images to use. If None, then\n all images are loaded. 
This can be memory expensive.\n\n as_image : bool (default=False)\n Whether to return a PIL image otherwise return a numpy array.\n\n random_state : int (default=123)\n The seed to use for the random sampling.\n\n n_jobs : int (default=1)\n The number of parallel workers to use for loading\n the image files.\n\n Returns\n -------\n A properly shaped NxWx3 image with any necessary padding.\n \"\"\"\n if n_samples is not None and n_samples < len(data):\n data = data.sample(n=n_samples,\n replace=False,\n random_state=random_state)\n\n if sort_by is not None:\n data = data.sort_values(by=sort_by, ascending=True)\n\n images = load_images(\n data[image_column],\n image_dir=image_directory,\n as_image=True,\n n_jobs=n_jobs)\n\n return images_to_sprite(images)\n\n\ndef image_histogram(image_column,\n x_column,\n y_column,\n data,\n n_bins=100,\n thumbnail_size=50,\n image_directory='',\n n_samples=None,\n fig_size=(1000, 1000),\n random_state=123):\n \"\"\"Create an image histogram binned by the `x_column`.\n\n Parameters\n ----------\n image_column : str\n Name of the column pointing to the image files\n\n x_column : str\n Name of the column bin the x-axis.\n\n y_column : str\n Name of the column to sort they values. No sorting is performed\n if y_column is None.\n\n data : pandas.DataFrame\n The dataframe where both columns are present.\n\n thumbnail_size : int\n The size of each image in the histogram.\n\n image_directory : str\n Path to the directory holding the images.\n\n n_samples : int (default=None)\n The number of samples do downsample the dataset to.\n\n fig_size : tuple\n The (width_px, height_px) of the final image in pixels.\n\n random_state : int\n The seed to use for the random number generator.\n \"\"\"\n data = data.copy()\n if n_samples is not None and n_samples < len(data):\n data = data.sample(n_samples, replace=True, random_state=random_state)\n\n data['x_bin'] = pd.cut(data[x_column], n_bins, labels=False)\n bin_max = data.groupby('x_bin').size().max()\n\n px_w = thumbnail_size * n_bins\n px_h = thumbnail_size * bin_max\n\n background_color = (50, 50, 50)\n canvas = pil_image.new('RGB', (px_w, px_h), background_color)\n\n thumbnail_px = (thumbnail_size, thumbnail_size)\n bins = list(set(list(data.x_bin)))\n\n for item in bins:\n tmp = data[data.x_bin == item].copy()\n\n # sort y values if present\n if y_column is not None:\n tmp.sort_values(by=y_column, ascending=False, inplace=True)\n\n tmp.reset_index(drop=True, inplace=True)\n\n y_coord = px_h\n x_coord = thumbnail_size * item\n\n for i in range(len(tmp.index)):\n image_loc = image_path(tmp[image_column].iloc[i], image_directory)\n thumbnail = pil_image.open(image_loc)\n thumbnail.thumbnail(thumbnail_px, pil_image.BICUBIC)\n canvas.paste(thumbnail, (x_coord, y_coord))\n y_coord -= thumbnail_size\n\n if fig_size:\n canvas.thumbnail(fig_size, pil_image.BICUBIC)\n\n return canvas\n\n\ndef image_scatter_plot(image_column,\n x_column,\n y_column,\n data,\n thumbnail_size=5,\n image_directory='',\n n_samples=None,\n fig_size=(500, 500),\n random_state=123):\n \"\"\"Create an image scatter plot based on columns `y_column` vs\n `x_column.\n\n Parameters\n ----------\n image_column : str\n Name of the column pointing to the image files\n\n x_column : str\n Name of the column to use for the x-axis.\n\n y_column : str\n Name of the column to use for the y-axis\n\n data : pandas.DataFrame\n The dataframe where both columns are present.\n\n thumbnail_size : int\n The size of each image in the scatter plot.\n\n image_directory : str\n 
Path to the directory holding the images.\n\n n_samples : int (default=None)\n The number of samples do downsample the dataset to.\n\n fig_size : tuple\n The (width_px, height_px) of the final image in pixels.\n\n random_state : int\n The seed to use for the random number generator.\n \"\"\"\n data = data.copy()\n if n_samples is not None and n_samples < len(data):\n data = data.sample(n_samples, replace=True, random_state=random_state)\n\n x_var = data[x_column].values\n y_var = data[y_column].values\n\n # scale the variables between 0-1\n x_var /= np.abs(x_var).max()\n y_var /= np.abs(y_var).max()\n\n # now stretch them to fit the canvas\n fig_width, fig_height = fig_size\n padding = 0\n x_var = np.floor(x_var * (fig_width / 2 - padding) + fig_width / 2)\n y_var = np.floor(y_var * (fig_height / 2 - padding) + fig_height / 2)\n\n background_color = (255, 255, 255)\n canvas = pil_image.new('RGB', fig_size, background_color)\n\n for i in range(len(data[image_column])):\n image_loc = image_path(data[image_column].iloc[i], image_directory)\n point_img = pil_image.open(image_loc).convert('RGB')\n point_img = point_img.resize(\n (thumbnail_size, thumbnail_size), pil_image.LANCZOS)\n point_width, point_height = point_img.size\n\n\n width_pad = 0 if not point_width % 2 else 1\n height_pad = 0 if not point_height % 2 else 1\n bounding_box = (\n int(x_var[i] - point_width // 2),\n int(y_var[i] - point_height // 2),\n int(x_var[i] + point_width // 2 + width_pad),\n int(y_var[i] + point_height // 2 + height_pad)\n )\n canvas.paste(point_img, bounding_box)\n\n return canvas\n" ]
[ [ "pandas.cut", "numpy.asarray", "numpy.random.RandomState", "numpy.stack", "numpy.sqrt", "numpy.abs", "numpy.floor" ] ]
bramreinders97/fairlearn
[ "3335c33117993843252b4adeaeac3ac18683b2ca" ]
[ "test_othermlpackages/test_tensorflow.py" ]
[ "# Copyright (c) Microsoft Corporation and Fairlearn contributors.\n# Licensed under the MIT License.\n\nimport pytest\nfrom . import package_test_common as ptc\n\nfrom fairlearn.reductions import DemographicParity\n\ntf = pytest.importorskip(\"tensorflow\")\nfrom tensorflow.keras.layers import Dense # noqa\nfrom tensorflow.keras.models import Sequential # noqa\nfrom scikeras.wrappers import KerasClassifier # noqa\n\n\ndef create_model():\n # create model\n model = Sequential()\n # 103 is the number of X columns after the get_dummies() call\n model.add(Dense(12, input_dim=103, activation='relu'))\n model.add(Dense(8, activation='relu'))\n model.add(Dense(1, activation='sigmoid'))\n # Compile model\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n\n\ndef test_expgrad_classification():\n estimator = KerasClassifier(build_fn=create_model)\n disparity_moment = DemographicParity()\n\n ptc.run_expgrad_classification(estimator, disparity_moment)\n\n\ndef test_gridsearch_classification():\n estimator = KerasClassifier(build_fn=create_model)\n disparity_moment = DemographicParity()\n\n ptc.run_gridsearch_classification(estimator, disparity_moment)\n\n\ndef test_thresholdoptimizer_classification():\n estimator = KerasClassifier(build_fn=create_model)\n\n ptc.run_thresholdoptimizer_classification(estimator)\n" ]
[ [ "tensorflow.keras.models.Sequential", "tensorflow.keras.layers.Dense" ] ]
edyeung4/scikit-learn
[ "bb547a2646573f6f95673d0a6b471fe947424345" ]
[ "sklearn/utils/validation.py" ]
[ "\"\"\"Utilities for input validation\"\"\"\n\n# Authors: Olivier Grisel\n# Gael Varoquaux\n# Andreas Mueller\n# Lars Buitinck\n# Alexandre Gramfort\n# Nicolas Tresegnie\n# Sylvain Marie\n# License: BSD 3 clause\n\nfrom functools import wraps\nimport warnings\nimport numbers\nimport operator\n\nimport numpy as np\nimport scipy.sparse as sp\nfrom inspect import signature, isclass, Parameter\n\n# mypy error: Module 'numpy.core.numeric' has no attribute 'ComplexWarning'\nfrom numpy.core.numeric import ComplexWarning # type: ignore\nimport joblib\n\nfrom contextlib import suppress\n\nfrom .fixes import _object_dtype_isnan, parse_version\nfrom .. import get_config as _get_config\nfrom ..exceptions import PositiveSpectrumWarning\nfrom ..exceptions import NotFittedError\nfrom ..exceptions import DataConversionWarning\n\nFLOAT_DTYPES = (np.float64, np.float32, np.float16)\n\n\ndef _deprecate_positional_args(func=None, *, version=\"1.1 (renaming of 0.26)\"):\n \"\"\"Decorator for methods that issues warnings for positional arguments.\n\n Using the keyword-only argument syntax in pep 3102, arguments after the\n * will issue a warning when passed as a positional argument.\n\n Parameters\n ----------\n func : callable, default=None\n Function to check arguments on.\n version : callable, default=\"1.1 (renaming of 0.26)\"\n The version when positional arguments will result in error.\n \"\"\"\n\n def _inner_deprecate_positional_args(f):\n sig = signature(f)\n kwonly_args = []\n all_args = []\n\n for name, param in sig.parameters.items():\n if param.kind == Parameter.POSITIONAL_OR_KEYWORD:\n all_args.append(name)\n elif param.kind == Parameter.KEYWORD_ONLY:\n kwonly_args.append(name)\n\n @wraps(f)\n def inner_f(*args, **kwargs):\n extra_args = len(args) - len(all_args)\n if extra_args <= 0:\n return f(*args, **kwargs)\n\n # extra_args > 0\n args_msg = [\n \"{}={}\".format(name, arg)\n for name, arg in zip(kwonly_args[:extra_args], args[-extra_args:])\n ]\n args_msg = \", \".join(args_msg)\n warnings.warn(\n f\"Pass {args_msg} as keyword args. From version \"\n f\"{version} passing these as positional arguments \"\n \"will result in an error\",\n FutureWarning,\n )\n kwargs.update(zip(sig.parameters, args))\n return f(**kwargs)\n\n return inner_f\n\n if func is not None:\n return _inner_deprecate_positional_args(func)\n\n return _inner_deprecate_positional_args\n\n\ndef _assert_all_finite(X, allow_nan=False, msg_dtype=None):\n \"\"\"Like assert_all_finite, but only for ndarray.\"\"\"\n # validation is also imported in extmath\n from .extmath import _safe_accumulator_op\n\n if _get_config()[\"assume_finite\"]:\n return\n X = np.asanyarray(X)\n # First try an O(n) time, O(1) space solution for the common case that\n # everything is finite; fall back to O(n) space np.isfinite to prevent\n # false positives from overflow in sum method. 
The sum is also calculated\n # safely to reduce dtype induced overflows.\n is_float = X.dtype.kind in \"fc\"\n if is_float and (np.isfinite(_safe_accumulator_op(np.sum, X))):\n pass\n elif is_float:\n msg_err = \"Input contains {} or a value too large for {!r}.\"\n if (\n allow_nan\n and np.isinf(X).any()\n or not allow_nan\n and not np.isfinite(X).all()\n ):\n type_err = \"infinity\" if allow_nan else \"NaN, infinity\"\n raise ValueError(\n msg_err.format(\n type_err, msg_dtype if msg_dtype is not None else X.dtype\n )\n )\n # for object dtype data, we only check for NaNs (GH-13254)\n elif X.dtype == np.dtype(\"object\") and not allow_nan:\n if _object_dtype_isnan(X).any():\n raise ValueError(\"Input contains NaN\")\n\n\ndef assert_all_finite(X, *, allow_nan=False):\n \"\"\"Throw a ValueError if X contains NaN or infinity.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix}\n\n allow_nan : bool, default=False\n \"\"\"\n _assert_all_finite(X.data if sp.issparse(X) else X, allow_nan)\n\n\ndef as_float_array(X, *, copy=True, force_all_finite=True):\n \"\"\"Converts an array-like to an array of floats.\n\n The new dtype will be np.float32 or np.float64, depending on the original\n type. The function can create a copy or modify the argument depending\n on the argument copy.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}\n\n copy : bool, default=True\n If True, a copy of X will be created. If False, a copy may still be\n returned if X's dtype is not a floating point type.\n\n force_all_finite : bool or 'allow-nan', default=True\n Whether to raise an error on np.inf, np.nan, pd.NA in X. The\n possibilities are:\n\n - True: Force all values of X to be finite.\n - False: accepts np.inf, np.nan, pd.NA in X.\n - 'allow-nan': accepts only np.nan and pd.NA values in X. Values cannot\n be infinite.\n\n .. versionadded:: 0.20\n ``force_all_finite`` accepts the string ``'allow-nan'``.\n\n .. versionchanged:: 0.23\n Accepts `pd.NA` and converts it into `np.nan`\n\n Returns\n -------\n XT : {ndarray, sparse matrix}\n An array of type float.\n \"\"\"\n if isinstance(X, np.matrix) or (\n not isinstance(X, np.ndarray) and not sp.issparse(X)\n ):\n return check_array(\n X,\n accept_sparse=[\"csr\", \"csc\", \"coo\"],\n dtype=np.float64,\n copy=copy,\n force_all_finite=force_all_finite,\n ensure_2d=False,\n )\n elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:\n return X.copy() if copy else X\n elif X.dtype in [np.float32, np.float64]: # is numpy array\n return X.copy(\"F\" if X.flags[\"F_CONTIGUOUS\"] else \"C\") if copy else X\n else:\n if X.dtype.kind in \"uib\" and X.dtype.itemsize <= 4:\n return_dtype = np.float32\n else:\n return_dtype = np.float64\n return X.astype(return_dtype)\n\n\ndef _is_arraylike(x):\n \"\"\"Returns whether the input is array-like.\"\"\"\n return hasattr(x, \"__len__\") or hasattr(x, \"shape\") or hasattr(x, \"__array__\")\n\n\ndef _num_features(X):\n \"\"\"Return the number of features in an array-like X.\n\n This helper function tries hard to avoid to materialize an array version\n of X unless necessary. 
For instance, if X is a list of lists,\n this function will return the length of the first element, assuming\n that subsequent elements are all lists of the same length without\n checking.\n Parameters\n ----------\n X : array-like\n array-like to get the number of features.\n\n Returns\n -------\n features : int\n Number of features\n \"\"\"\n type_ = type(X)\n if type_.__module__ == \"builtins\":\n type_name = type_.__qualname__\n else:\n type_name = f\"{type_.__module__}.{type_.__qualname__}\"\n message = f\"Unable to find the number of features from X of type {type_name}\"\n if not hasattr(X, \"__len__\") and not hasattr(X, \"shape\"):\n if not hasattr(X, \"__array__\"):\n raise TypeError(message)\n # Only convert X to a numpy array if there is no cheaper, heuristic\n # option.\n X = np.asarray(X)\n\n if hasattr(X, \"shape\"):\n if not hasattr(X.shape, \"__len__\") or len(X.shape) <= 1:\n message += f\" with shape {X.shape}\"\n raise TypeError(message)\n return X.shape[1]\n\n first_sample = X[0]\n\n # Do not consider an array-like of strings or dicts to be a 2D array\n if isinstance(first_sample, (str, bytes, dict)):\n message += f\" where the samples are of type {type(first_sample).__qualname__}\"\n raise TypeError(message)\n\n try:\n # If X is a list of lists, for instance, we assume that all nested\n # lists have the same length without checking or converting to\n # a numpy array to keep this function call as cheap as possible.\n return len(first_sample)\n except Exception as err:\n raise TypeError(message) from err\n\n\ndef _num_samples(x):\n \"\"\"Return number of samples in array-like x.\"\"\"\n message = \"Expected sequence or array-like, got %s\" % type(x)\n if hasattr(x, \"fit\") and callable(x.fit):\n # Don't get num_samples from an ensembles length!\n raise TypeError(message)\n\n if not hasattr(x, \"__len__\") and not hasattr(x, \"shape\"):\n if hasattr(x, \"__array__\"):\n x = np.asarray(x)\n else:\n raise TypeError(message)\n\n if hasattr(x, \"shape\") and x.shape is not None:\n if len(x.shape) == 0:\n raise TypeError(\n \"Singleton array %r cannot be considered a valid collection.\" % x\n )\n # Check that shape is returning an integer or default to len\n # Dask dataframes may not return numeric shape[0] value\n if isinstance(x.shape[0], numbers.Integral):\n return x.shape[0]\n\n try:\n return len(x)\n except TypeError as type_error:\n raise TypeError(message) from type_error\n\n\ndef check_memory(memory):\n \"\"\"Check that ``memory`` is joblib.Memory-like.\n\n joblib.Memory-like means that ``memory`` can be converted into a\n joblib.Memory instance (typically a str denoting the ``location``)\n or has the same interface (has a ``cache`` method).\n\n Parameters\n ----------\n memory : None, str or object with the joblib.Memory interface\n\n Returns\n -------\n memory : object with the joblib.Memory interface\n\n Raises\n ------\n ValueError\n If ``memory`` is not joblib.Memory-like.\n \"\"\"\n\n if memory is None or isinstance(memory, str):\n if parse_version(joblib.__version__) < parse_version(\"0.12\"):\n memory = joblib.Memory(cachedir=memory, verbose=0)\n else:\n memory = joblib.Memory(location=memory, verbose=0)\n elif not hasattr(memory, \"cache\"):\n raise ValueError(\n \"'memory' should be None, a string or have the same\"\n \" interface as joblib.Memory.\"\n \" Got memory='{}' instead.\".format(memory)\n )\n return memory\n\n\ndef check_consistent_length(*arrays):\n \"\"\"Check that all arrays have consistent first dimensions.\n\n Checks whether all objects in 
arrays have the same shape or length.\n\n Parameters\n ----------\n *arrays : list or tuple of input objects.\n Objects that will be checked for consistent length.\n \"\"\"\n\n lengths = [_num_samples(X) for X in arrays if X is not None]\n uniques = np.unique(lengths)\n if len(uniques) > 1:\n raise ValueError(\n \"Found input variables with inconsistent numbers of samples: %r\"\n % [int(l) for l in lengths]\n )\n\n\ndef _make_indexable(iterable):\n \"\"\"Ensure iterable supports indexing or convert to an indexable variant.\n\n Convert sparse matrices to csr and other non-indexable iterable to arrays.\n Let `None` and indexable objects (e.g. pandas dataframes) pass unchanged.\n\n Parameters\n ----------\n iterable : {list, dataframe, ndarray, sparse matrix} or None\n Object to be converted to an indexable iterable.\n \"\"\"\n if sp.issparse(iterable):\n return iterable.tocsr()\n elif hasattr(iterable, \"__getitem__\") or hasattr(iterable, \"iloc\"):\n return iterable\n elif iterable is None:\n return iterable\n return np.array(iterable)\n\n\ndef indexable(*iterables):\n \"\"\"Make arrays indexable for cross-validation.\n\n Checks consistent length, passes through None, and ensures that everything\n can be indexed by converting sparse matrices to csr and converting\n non-interable objects to arrays.\n\n Parameters\n ----------\n *iterables : {lists, dataframes, ndarrays, sparse matrices}\n List of objects to ensure sliceability.\n\n Returns\n -------\n result : list of {ndarray, sparse matrix, dataframe} or None\n Returns a list containing indexable arrays (i.e. NumPy array,\n sparse matrix, or dataframe) or `None`.\n \"\"\"\n\n result = [_make_indexable(X) for X in iterables]\n check_consistent_length(*result)\n return result\n\n\ndef _ensure_sparse_format(\n spmatrix, accept_sparse, dtype, copy, force_all_finite, accept_large_sparse\n):\n \"\"\"Convert a sparse matrix to a given format.\n\n Checks the sparse format of spmatrix and converts if necessary.\n\n Parameters\n ----------\n spmatrix : sparse matrix\n Input to validate and convert.\n\n accept_sparse : str, bool or list/tuple of str\n String[s] representing allowed sparse matrix formats ('csc',\n 'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). If the input is sparse but\n not in the allowed format, it will be converted to the first listed\n format. True allows the input to be any format. False means\n that a sparse matrix input will raise an error.\n\n dtype : str, type or None\n Data type of result. If None, the dtype of the input is preserved.\n\n copy : bool\n Whether a forced copy will be triggered. If copy=False, a copy might\n be triggered by a conversion.\n\n force_all_finite : bool or 'allow-nan'\n Whether to raise an error on np.inf, np.nan, pd.NA in X. The\n possibilities are:\n\n - True: Force all values of X to be finite.\n - False: accepts np.inf, np.nan, pd.NA in X.\n - 'allow-nan': accepts only np.nan and pd.NA values in X. Values cannot\n be infinite.\n\n .. versionadded:: 0.20\n ``force_all_finite`` accepts the string ``'allow-nan'``.\n\n .. 
versionchanged:: 0.23\n Accepts `pd.NA` and converts it into `np.nan`\n\n Returns\n -------\n spmatrix_converted : sparse matrix.\n Matrix that is ensured to have an allowed type.\n \"\"\"\n if dtype is None:\n dtype = spmatrix.dtype\n\n changed_format = False\n\n if isinstance(accept_sparse, str):\n accept_sparse = [accept_sparse]\n\n # Indices dtype validation\n _check_large_sparse(spmatrix, accept_large_sparse)\n\n if accept_sparse is False:\n raise TypeError(\n \"A sparse matrix was passed, but dense \"\n \"data is required. Use X.toarray() to \"\n \"convert to a dense numpy array.\"\n )\n elif isinstance(accept_sparse, (list, tuple)):\n if len(accept_sparse) == 0:\n raise ValueError(\n \"When providing 'accept_sparse' \"\n \"as a tuple or list, it must contain at \"\n \"least one string value.\"\n )\n # ensure correct sparse format\n if spmatrix.format not in accept_sparse:\n # create new with correct sparse\n spmatrix = spmatrix.asformat(accept_sparse[0])\n changed_format = True\n elif accept_sparse is not True:\n # any other type\n raise ValueError(\n \"Parameter 'accept_sparse' should be a string, \"\n \"boolean or list of strings. You provided \"\n \"'accept_sparse={}'.\".format(accept_sparse)\n )\n\n if dtype != spmatrix.dtype:\n # convert dtype\n spmatrix = spmatrix.astype(dtype)\n elif copy and not changed_format:\n # force copy\n spmatrix = spmatrix.copy()\n\n if force_all_finite:\n if not hasattr(spmatrix, \"data\"):\n warnings.warn(\n \"Can't check %s sparse matrix for nan or inf.\" % spmatrix.format,\n stacklevel=2,\n )\n else:\n _assert_all_finite(spmatrix.data, allow_nan=force_all_finite == \"allow-nan\")\n\n return spmatrix\n\n\ndef _ensure_no_complex_data(array):\n if (\n hasattr(array, \"dtype\")\n and array.dtype is not None\n and hasattr(array.dtype, \"kind\")\n and array.dtype.kind == \"c\"\n ):\n raise ValueError(\"Complex data not supported\\n{}\\n\".format(array))\n\n\ndef check_array(\n array,\n accept_sparse=False,\n *,\n accept_large_sparse=True,\n dtype=\"numeric\",\n order=None,\n copy=False,\n force_all_finite=True,\n ensure_2d=True,\n allow_nd=False,\n ensure_min_samples=1,\n ensure_min_features=1,\n estimator=None,\n):\n\n \"\"\"Input validation on an array, list, sparse matrix or similar.\n\n By default, the input is checked to be a non-empty 2D array containing\n only finite values. If the dtype of the array is object, attempt\n converting to float, raising on failure.\n\n Parameters\n ----------\n array : object\n Input object to check / convert.\n\n accept_sparse : str, bool or list/tuple of str, default=False\n String[s] representing allowed sparse matrix formats, such as 'csc',\n 'csr', etc. If the input is sparse but not in the allowed format,\n it will be converted to the first listed format. True allows the input\n to be any format. False means that a sparse matrix input will\n raise an error.\n\n accept_large_sparse : bool, default=True\n If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by\n accept_sparse, accept_large_sparse=False will cause it to be accepted\n only if its indices are stored with a 32-bit dtype.\n\n .. versionadded:: 0.20\n\n dtype : 'numeric', type, list of type or None, default='numeric'\n Data type of result. 
If None, the dtype of the input is preserved.\n If \"numeric\", dtype is preserved unless array.dtype is object.\n If dtype is a list of types, conversion on the first type is only\n performed if the dtype of the input is not in the list.\n\n order : {'F', 'C'} or None, default=None\n Whether an array will be forced to be fortran or c-style.\n When order is None (default), then if copy=False, nothing is ensured\n about the memory layout of the output array; otherwise (copy=True)\n the memory layout of the returned array is kept as close as possible\n to the original array.\n\n copy : bool, default=False\n Whether a forced copy will be triggered. If copy=False, a copy might\n be triggered by a conversion.\n\n force_all_finite : bool or 'allow-nan', default=True\n Whether to raise an error on np.inf, np.nan, pd.NA in array. The\n possibilities are:\n\n - True: Force all values of array to be finite.\n - False: accepts np.inf, np.nan, pd.NA in array.\n - 'allow-nan': accepts only np.nan and pd.NA values in array. Values\n cannot be infinite.\n\n .. versionadded:: 0.20\n ``force_all_finite`` accepts the string ``'allow-nan'``.\n\n .. versionchanged:: 0.23\n Accepts `pd.NA` and converts it into `np.nan`\n\n ensure_2d : bool, default=True\n Whether to raise a value error if array is not 2D.\n\n allow_nd : bool, default=False\n Whether to allow array.ndim > 2.\n\n ensure_min_samples : int, default=1\n Make sure that the array has a minimum number of samples in its first\n axis (rows for a 2D array). Setting to 0 disables this check.\n\n ensure_min_features : int, default=1\n Make sure that the 2D array has some minimum number of features\n (columns). The default value of 1 rejects empty datasets.\n This check is only enforced when the input data has effectively 2\n dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0\n disables this check.\n\n estimator : str or estimator instance, default=None\n If passed, include the name of the estimator in warning messages.\n\n Returns\n -------\n array_converted : object\n The converted and validated array.\n \"\"\"\n if isinstance(array, np.matrix):\n warnings.warn(\n \"np.matrix usage is deprecated in 1.0 and will raise a TypeError \"\n \"in 1.2. Please convert to a numpy array with np.asarray. For \"\n \"more information see: \"\n \"https://numpy.org/doc/stable/reference/generated/numpy.matrix.html\", # noqa\n FutureWarning,\n )\n\n # store reference to original array to check if copy is needed when\n # function returns\n array_orig = array\n\n # store whether originally we wanted numeric dtype\n dtype_numeric = isinstance(dtype, str) and dtype == \"numeric\"\n\n dtype_orig = getattr(array, \"dtype\", None)\n if not hasattr(dtype_orig, \"kind\"):\n # not a data type (e.g. a column named dtype in a pandas DataFrame)\n dtype_orig = None\n\n # check if the object contains several dtypes (typically a pandas\n # DataFrame), and store them. If not, store None.\n dtypes_orig = None\n has_pd_integer_array = False\n if hasattr(array, \"dtypes\") and hasattr(array.dtypes, \"__array__\"):\n # throw warning if columns are sparse. 
If all columns are sparse, then\n # array.sparse exists and sparsity will be preserved (later).\n with suppress(ImportError):\n from pandas.api.types import is_sparse\n\n if not hasattr(array, \"sparse\") and array.dtypes.apply(is_sparse).any():\n warnings.warn(\n \"pandas.DataFrame with sparse columns found.\"\n \"It will be converted to a dense numpy array.\"\n )\n\n dtypes_orig = list(array.dtypes)\n # pandas boolean dtype __array__ interface coerces bools to objects\n for i, dtype_iter in enumerate(dtypes_orig):\n if dtype_iter.kind == \"b\":\n dtypes_orig[i] = np.dtype(object)\n elif dtype_iter.name.startswith((\"Int\", \"UInt\")):\n # name looks like an Integer Extension Array, now check for\n # the dtype\n with suppress(ImportError):\n from pandas import (\n Int8Dtype,\n Int16Dtype,\n Int32Dtype,\n Int64Dtype,\n UInt8Dtype,\n UInt16Dtype,\n UInt32Dtype,\n UInt64Dtype,\n )\n\n if isinstance(\n dtype_iter,\n (\n Int8Dtype,\n Int16Dtype,\n Int32Dtype,\n Int64Dtype,\n UInt8Dtype,\n UInt16Dtype,\n UInt32Dtype,\n UInt64Dtype,\n ),\n ):\n has_pd_integer_array = True\n\n if all(isinstance(dtype, np.dtype) for dtype in dtypes_orig):\n dtype_orig = np.result_type(*dtypes_orig)\n\n if dtype_numeric:\n if dtype_orig is not None and dtype_orig.kind == \"O\":\n # if input is object, convert to float.\n dtype = np.float64\n else:\n dtype = None\n\n if isinstance(dtype, (list, tuple)):\n if dtype_orig is not None and dtype_orig in dtype:\n # no dtype conversion required\n dtype = None\n else:\n # dtype conversion required. Let's select the first element of the\n # list of accepted types.\n dtype = dtype[0]\n\n if has_pd_integer_array:\n # If there are any pandas integer extension arrays,\n array = array.astype(dtype)\n\n if force_all_finite not in (True, False, \"allow-nan\"):\n raise ValueError(\n 'force_all_finite should be a bool or \"allow-nan\". Got {!r} instead'.format(\n force_all_finite\n )\n )\n\n if estimator is not None:\n if isinstance(estimator, str):\n estimator_name = estimator\n else:\n estimator_name = estimator.__class__.__name__\n else:\n estimator_name = \"Estimator\"\n context = \" by %s\" % estimator_name if estimator is not None else \"\"\n\n # When all dataframe columns are sparse, convert to a sparse array\n if hasattr(array, \"sparse\") and array.ndim > 1:\n # DataFrame.sparse only supports `to_coo`\n array = array.sparse.to_coo()\n if array.dtype == np.dtype(\"object\"):\n unique_dtypes = set([dt.subtype.name for dt in array_orig.dtypes])\n if len(unique_dtypes) > 1:\n raise ValueError(\n \"Pandas DataFrame with mixed sparse extension arrays \"\n \"generated a sparse matrix with object dtype which \"\n \"can not be converted to a scipy sparse matrix.\"\n \"Sparse extension arrays should all have the same \"\n \"numeric type.\"\n )\n\n if sp.issparse(array):\n _ensure_no_complex_data(array)\n array = _ensure_sparse_format(\n array,\n accept_sparse=accept_sparse,\n dtype=dtype,\n copy=copy,\n force_all_finite=force_all_finite,\n accept_large_sparse=accept_large_sparse,\n )\n else:\n # If np.array(..) gives ComplexWarning, then we convert the warning\n # to an error. 
This is needed because specifying a non complex\n # dtype to the function converts complex to real dtype,\n # thereby passing the test made in the lines following the scope\n # of warnings context manager.\n with warnings.catch_warnings():\n try:\n warnings.simplefilter(\"error\", ComplexWarning)\n if dtype is not None and np.dtype(dtype).kind in \"iu\":\n # Conversion float -> int should not contain NaN or\n # inf (numpy#14412). We cannot use casting='safe' because\n # then conversion float -> int would be disallowed.\n array = np.asarray(array, order=order)\n if array.dtype.kind == \"f\":\n _assert_all_finite(array, allow_nan=False, msg_dtype=dtype)\n array = array.astype(dtype, casting=\"unsafe\", copy=False)\n else:\n array = np.asarray(array, order=order, dtype=dtype)\n except ComplexWarning as complex_warning:\n raise ValueError(\n \"Complex data not supported\\n{}\\n\".format(array)\n ) from complex_warning\n\n # It is possible that the np.array(..) gave no warning. This happens\n # when no dtype conversion happened, for example dtype = None. The\n # result is that np.array(..) produces an array of complex dtype\n # and we need to catch and raise exception for such cases.\n _ensure_no_complex_data(array)\n\n if ensure_2d:\n # If input is scalar raise error\n if array.ndim == 0:\n raise ValueError(\n \"Expected 2D array, got scalar array instead:\\narray={}.\\n\"\n \"Reshape your data either using array.reshape(-1, 1) if \"\n \"your data has a single feature or array.reshape(1, -1) \"\n \"if it contains a single sample.\".format(array)\n )\n # If input is 1D raise error\n if array.ndim == 1:\n raise ValueError(\n \"Expected 2D array, got 1D array instead:\\narray={}.\\n\"\n \"Reshape your data either using array.reshape(-1, 1) if \"\n \"your data has a single feature or array.reshape(1, -1) \"\n \"if it contains a single sample.\".format(array)\n )\n\n # make sure we actually converted to numeric:\n if dtype_numeric and array.dtype.kind in \"OUSV\":\n warnings.warn(\n \"Arrays of bytes/strings is being converted to decimal \"\n \"numbers if dtype='numeric'. This behavior is deprecated in \"\n \"0.24 and will be removed in 1.1 (renaming of 0.26). Please \"\n \"convert your data to numeric values explicitly instead.\",\n FutureWarning,\n stacklevel=2,\n )\n try:\n array = array.astype(np.float64)\n except ValueError as e:\n raise ValueError(\n \"Unable to convert array of bytes/strings \"\n \"into decimal numbers with dtype='numeric'\"\n ) from e\n if not allow_nd and array.ndim >= 3:\n raise ValueError(\n \"Found array with dim %d. 
%s expected <= 2.\"\n % (array.ndim, estimator_name)\n )\n\n if force_all_finite:\n _assert_all_finite(array, allow_nan=force_all_finite == \"allow-nan\")\n\n if ensure_min_samples > 0:\n n_samples = _num_samples(array)\n if n_samples < ensure_min_samples:\n raise ValueError(\n \"Found array with %d sample(s) (shape=%s) while a\"\n \" minimum of %d is required%s.\"\n % (n_samples, array.shape, ensure_min_samples, context)\n )\n\n if ensure_min_features > 0 and array.ndim == 2:\n n_features = array.shape[1]\n if n_features < ensure_min_features:\n raise ValueError(\n \"Found array with %d feature(s) (shape=%s) while\"\n \" a minimum of %d is required%s.\"\n % (n_features, array.shape, ensure_min_features, context)\n )\n\n if copy and np.may_share_memory(array, array_orig):\n array = np.array(array, dtype=dtype, order=order)\n\n return array\n\n\ndef _check_large_sparse(X, accept_large_sparse=False):\n \"\"\"Raise a ValueError if X has 64bit indices and accept_large_sparse=False\"\"\"\n if not accept_large_sparse:\n supported_indices = [\"int32\"]\n if X.getformat() == \"coo\":\n index_keys = [\"col\", \"row\"]\n elif X.getformat() in [\"csr\", \"csc\", \"bsr\"]:\n index_keys = [\"indices\", \"indptr\"]\n else:\n return\n for key in index_keys:\n indices_datatype = getattr(X, key).dtype\n if indices_datatype not in supported_indices:\n raise ValueError(\n \"Only sparse matrices with 32-bit integer\"\n \" indices are accepted. Got %s indices.\" % indices_datatype\n )\n\n\ndef check_X_y(\n X,\n y,\n accept_sparse=False,\n *,\n accept_large_sparse=True,\n dtype=\"numeric\",\n order=None,\n copy=False,\n force_all_finite=True,\n ensure_2d=True,\n allow_nd=False,\n multi_output=False,\n ensure_min_samples=1,\n ensure_min_features=1,\n y_numeric=False,\n estimator=None,\n):\n \"\"\"Input validation for standard estimators.\n\n Checks X and y for consistent length, enforces X to be 2D and y 1D. By\n default, X is checked to be non-empty and containing only finite values.\n Standard input checks are also applied to y, such as checking that y\n does not have np.nan or np.inf targets. For multi-label y, set\n multi_output=True to allow 2D and sparse y. If the dtype of X is\n object, attempt converting to float, raising on failure.\n\n Parameters\n ----------\n X : {ndarray, list, sparse matrix}\n Input data.\n\n y : {ndarray, list, sparse matrix}\n Labels.\n\n accept_sparse : str, bool or list of str, default=False\n String[s] representing allowed sparse matrix formats, such as 'csc',\n 'csr', etc. If the input is sparse but not in the allowed format,\n it will be converted to the first listed format. True allows the input\n to be any format. False means that a sparse matrix input will\n raise an error.\n\n accept_large_sparse : bool, default=True\n If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by\n accept_sparse, accept_large_sparse will cause it to be accepted only\n if its indices are stored with a 32-bit dtype.\n\n .. versionadded:: 0.20\n\n dtype : 'numeric', type, list of type or None, default='numeric'\n Data type of result. If None, the dtype of the input is preserved.\n If \"numeric\", dtype is preserved unless array.dtype is object.\n If dtype is a list of types, conversion on the first type is only\n performed if the dtype of the input is not in the list.\n\n order : {'F', 'C'}, default=None\n Whether an array will be forced to be fortran or c-style.\n\n copy : bool, default=False\n Whether a forced copy will be triggered. 
If copy=False, a copy might\n be triggered by a conversion.\n\n force_all_finite : bool or 'allow-nan', default=True\n Whether to raise an error on np.inf, np.nan, pd.NA in X. This parameter\n does not influence whether y can have np.inf, np.nan, pd.NA values.\n The possibilities are:\n\n - True: Force all values of X to be finite.\n - False: accepts np.inf, np.nan, pd.NA in X.\n - 'allow-nan': accepts only np.nan or pd.NA values in X. Values cannot\n be infinite.\n\n .. versionadded:: 0.20\n ``force_all_finite`` accepts the string ``'allow-nan'``.\n\n .. versionchanged:: 0.23\n Accepts `pd.NA` and converts it into `np.nan`\n\n ensure_2d : bool, default=True\n Whether to raise a value error if X is not 2D.\n\n allow_nd : bool, default=False\n Whether to allow X.ndim > 2.\n\n multi_output : bool, default=False\n Whether to allow 2D y (array or sparse matrix). If false, y will be\n validated as a vector. y cannot have np.nan or np.inf values if\n multi_output=True.\n\n ensure_min_samples : int, default=1\n Make sure that X has a minimum number of samples in its first\n axis (rows for a 2D array).\n\n ensure_min_features : int, default=1\n Make sure that the 2D array has some minimum number of features\n (columns). The default value of 1 rejects empty datasets.\n This check is only enforced when X has effectively 2 dimensions or\n is originally 1D and ``ensure_2d`` is True. Setting to 0 disables\n this check.\n\n y_numeric : bool, default=False\n Whether to ensure that y has a numeric type. If dtype of y is object,\n it is converted to float64. Should only be used for regression\n algorithms.\n\n estimator : str or estimator instance, default=None\n If passed, include the name of the estimator in warning messages.\n\n Returns\n -------\n X_converted : object\n The converted and validated X.\n\n y_converted : object\n The converted and validated y.\n \"\"\"\n if y is None:\n raise ValueError(\"y cannot be None\")\n\n X = check_array(\n X,\n accept_sparse=accept_sparse,\n accept_large_sparse=accept_large_sparse,\n dtype=dtype,\n order=order,\n copy=copy,\n force_all_finite=force_all_finite,\n ensure_2d=ensure_2d,\n allow_nd=allow_nd,\n ensure_min_samples=ensure_min_samples,\n ensure_min_features=ensure_min_features,\n estimator=estimator,\n )\n\n y = _check_y(y, multi_output=multi_output, y_numeric=y_numeric)\n\n check_consistent_length(X, y)\n\n return X, y\n\n\ndef _check_y(y, multi_output=False, y_numeric=False):\n \"\"\"Isolated part of check_X_y dedicated to y validation\"\"\"\n if multi_output:\n y = check_array(\n y, accept_sparse=\"csr\", force_all_finite=True, ensure_2d=False, dtype=None\n )\n else:\n y = column_or_1d(y, warn=True)\n _assert_all_finite(y)\n _ensure_no_complex_data(y)\n if y_numeric and y.dtype.kind == \"O\":\n y = y.astype(np.float64)\n\n return y\n\n\ndef column_or_1d(y, *, warn=False):\n \"\"\"Ravel column or 1d numpy array, else raises an error.\n\n Parameters\n ----------\n y : array-like\n\n warn : bool, default=False\n To control display of warnings.\n\n Returns\n -------\n y : ndarray\n\n \"\"\"\n y = np.asarray(y)\n shape = np.shape(y)\n if len(shape) == 1:\n return np.ravel(y)\n if len(shape) == 2 and shape[1] == 1:\n if warn:\n warnings.warn(\n \"A column-vector y was passed when a 1d array was\"\n \" expected. 
Please change the shape of y to \"\n \"(n_samples, ), for example using ravel().\",\n DataConversionWarning,\n stacklevel=2,\n )\n return np.ravel(y)\n\n raise ValueError(\n \"y should be a 1d array, got an array of shape {} instead.\".format(shape)\n )\n\n\ndef check_random_state(seed):\n \"\"\"Turn seed into a np.random.RandomState instance\n\n Parameters\n ----------\n seed : None, int or instance of RandomState\n If seed is None, return the RandomState singleton used by np.random.\n If seed is an int, return a new RandomState instance seeded with seed.\n If seed is already a RandomState instance, return it.\n Otherwise raise ValueError.\n \"\"\"\n if seed is None or seed is np.random:\n return np.random.mtrand._rand\n if isinstance(seed, numbers.Integral):\n return np.random.RandomState(seed)\n if isinstance(seed, np.random.RandomState):\n return seed\n raise ValueError(\n \"%r cannot be used to seed a numpy.random.RandomState instance\" % seed\n )\n\n\ndef has_fit_parameter(estimator, parameter):\n \"\"\"Checks whether the estimator's fit method supports the given parameter.\n\n Parameters\n ----------\n estimator : object\n An estimator to inspect.\n\n parameter : str\n The searched parameter.\n\n Returns\n -------\n is_parameter: bool\n Whether the parameter was found to be a named parameter of the\n estimator's fit method.\n\n Examples\n --------\n >>> from sklearn.svm import SVC\n >>> from sklearn.utils.validation import has_fit_parameter\n >>> has_fit_parameter(SVC(), \"sample_weight\")\n True\n\n \"\"\"\n return parameter in signature(estimator.fit).parameters\n\n\ndef check_symmetric(array, *, tol=1e-10, raise_warning=True, raise_exception=False):\n \"\"\"Make sure that array is 2D, square and symmetric.\n\n If the array is not symmetric, then a symmetrized version is returned.\n Optionally, a warning or exception is raised if the matrix is not\n symmetric.\n\n Parameters\n ----------\n array : {ndarray, sparse matrix}\n Input object to check / convert. Must be two-dimensional and square,\n otherwise a ValueError will be raised.\n\n tol : float, default=1e-10\n Absolute tolerance for equivalence of arrays. Default = 1E-10.\n\n raise_warning : bool, default=True\n If True then raise a warning if conversion is required.\n\n raise_exception : bool, default=False\n If True then raise an exception if array is not symmetric.\n\n Returns\n -------\n array_sym : {ndarray, sparse matrix}\n Symmetrized version of the input array, i.e. the average of array\n and array.transpose(). If sparse, then duplicate entries are first\n summed and zeros are eliminated.\n \"\"\"\n if (array.ndim != 2) or (array.shape[0] != array.shape[1]):\n raise ValueError(\n \"array must be 2-dimensional and square. 
shape = {0}\".format(array.shape)\n )\n\n if sp.issparse(array):\n diff = array - array.T\n # only csr, csc, and coo have `data` attribute\n if diff.format not in [\"csr\", \"csc\", \"coo\"]:\n diff = diff.tocsr()\n symmetric = np.all(abs(diff.data) < tol)\n else:\n symmetric = np.allclose(array, array.T, atol=tol)\n\n if not symmetric:\n if raise_exception:\n raise ValueError(\"Array must be symmetric\")\n if raise_warning:\n warnings.warn(\n \"Array is not symmetric, and will be converted \"\n \"to symmetric by average with its transpose.\",\n stacklevel=2,\n )\n if sp.issparse(array):\n conversion = \"to\" + array.format\n array = getattr(0.5 * (array + array.T), conversion)()\n else:\n array = 0.5 * (array + array.T)\n\n return array\n\n\ndef check_is_fitted(estimator, attributes=None, *, msg=None, all_or_any=all):\n \"\"\"Perform is_fitted validation for estimator.\n\n Checks if the estimator is fitted by verifying the presence of\n fitted attributes (ending with a trailing underscore) and otherwise\n raises a NotFittedError with the given message.\n\n If an estimator does not set any attributes with a trailing underscore, it\n can define a ``__sklearn_is_fitted__`` method returning a boolean to specify if the\n estimator is fitted or not.\n\n Parameters\n ----------\n estimator : estimator instance\n estimator instance for which the check is performed.\n\n attributes : str, list or tuple of str, default=None\n Attribute name(s) given as string or a list/tuple of strings\n Eg.: ``[\"coef_\", \"estimator_\", ...], \"coef_\"``\n\n If `None`, `estimator` is considered fitted if there exist an\n attribute that ends with a underscore and does not start with double\n underscore.\n\n msg : str, default=None\n The default error message is, \"This %(name)s instance is not fitted\n yet. Call 'fit' with appropriate arguments before using this\n estimator.\"\n\n For custom messages if \"%(name)s\" is present in the message string,\n it is substituted for the estimator name.\n\n Eg. : \"Estimator, %(name)s, must be fitted before sparsifying\".\n\n all_or_any : callable, {all, any}, default=all\n Specify whether all or any of the given attributes must exist.\n\n Returns\n -------\n None\n\n Raises\n ------\n NotFittedError\n If the attributes are not found.\n \"\"\"\n if isclass(estimator):\n raise TypeError(\"{} is a class, not an instance.\".format(estimator))\n if msg is None:\n msg = (\n \"This %(name)s instance is not fitted yet. 
Call 'fit' with \"\n \"appropriate arguments before using this estimator.\"\n )\n\n if not hasattr(estimator, \"fit\"):\n raise TypeError(\"%s is not an estimator instance.\" % (estimator))\n\n if attributes is not None:\n if not isinstance(attributes, (list, tuple)):\n attributes = [attributes]\n fitted = all_or_any([hasattr(estimator, attr) for attr in attributes])\n elif hasattr(estimator, \"__sklearn_is_fitted__\"):\n fitted = estimator.__sklearn_is_fitted__()\n else:\n fitted = [\n v for v in vars(estimator) if v.endswith(\"_\") and not v.startswith(\"__\")\n ]\n\n if not fitted:\n raise NotFittedError(msg % {\"name\": type(estimator).__name__})\n\n\ndef check_non_negative(X, whom):\n \"\"\"\n Check if there is any negative value in an array.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}\n Input data.\n\n whom : str\n Who passed X to this function.\n \"\"\"\n # avoid X.min() on sparse matrix since it also sorts the indices\n if sp.issparse(X):\n if X.format in [\"lil\", \"dok\"]:\n X = X.tocsr()\n if X.data.size == 0:\n X_min = 0\n else:\n X_min = X.data.min()\n else:\n X_min = X.min()\n\n if X_min < 0:\n raise ValueError(\"Negative values in data passed to %s\" % whom)\n\n\ndef check_scalar(\n x,\n name,\n target_type,\n *,\n min_val=None,\n max_val=None,\n include_boundaries=\"both\",\n):\n \"\"\"Validate scalar parameters type and value.\n\n Parameters\n ----------\n x : object\n The scalar parameter to validate.\n\n name : str\n The name of the parameter to be printed in error messages.\n\n target_type : type or tuple\n Acceptable data types for the parameter.\n\n min_val : float or int, default=None\n The minimum valid value the parameter can take. If None (default) it\n is implied that the parameter does not have a lower bound.\n\n max_val : float or int, default=False\n The maximum valid value the parameter can take. If None (default) it\n is implied that the parameter does not have an upper bound.\n\n include_boundaries : {\"left\", \"right\", \"both\", \"neither\"}, default=\"both\"\n Whether the interval defined by `min_val` and `max_val` should include\n the boundaries. Possible choices are:\n\n - `\"left\"`: only `min_val` is included in the valid interval;\n - `\"right\"`: only `max_val` is included in the valid interval;\n - `\"both\"`: `min_val` and `max_val` are included in the valid interval;\n - `\"neither\"`: neither `min_val` nor `max_val` are included in the\n valid interval.\n\n Returns\n -------\n x : numbers.Number\n The validated number.\n\n Raises\n ------\n TypeError\n If the parameter's type does not match the desired type.\n\n ValueError\n If the parameter's value violates the given bounds.\n \"\"\"\n\n if not isinstance(x, target_type):\n raise TypeError(f\"{name} must be an instance of {target_type}, not {type(x)}.\")\n\n expected_include_boundaries = (\"left\", \"right\", \"both\", \"neither\")\n if include_boundaries not in expected_include_boundaries:\n raise ValueError(\n f\"Unknown value for `include_boundaries`: {repr(include_boundaries)}. 
\"\n f\"Possible values are: {expected_include_boundaries}.\"\n )\n\n comparison_operator = (\n operator.lt if include_boundaries in (\"left\", \"both\") else operator.le\n )\n if min_val is not None and comparison_operator(x, min_val):\n raise ValueError(\n f\"{name} == {x}, must be\"\n f\" {'>=' if include_boundaries in ('left', 'both') else '>'} {min_val}.\"\n )\n\n comparison_operator = (\n operator.gt if include_boundaries in (\"right\", \"both\") else operator.ge\n )\n if max_val is not None and comparison_operator(x, max_val):\n raise ValueError(\n f\"{name} == {x}, must be\"\n f\" {'<=' if include_boundaries in ('right', 'both') else '<'} {max_val}.\"\n )\n\n return x\n\n\ndef _check_psd_eigenvalues(lambdas, enable_warnings=False):\n \"\"\"Check the eigenvalues of a positive semidefinite (PSD) matrix.\n\n Checks the provided array of PSD matrix eigenvalues for numerical or\n conditioning issues and returns a fixed validated version. This method\n should typically be used if the PSD matrix is user-provided (e.g. a\n Gram matrix) or computed using a user-provided dissimilarity metric\n (e.g. kernel function), or if the decomposition process uses approximation\n methods (randomized SVD, etc.).\n\n It checks for three things:\n\n - that there are no significant imaginary parts in eigenvalues (more than\n 1e-5 times the maximum real part). If this check fails, it raises a\n ``ValueError``. Otherwise all non-significant imaginary parts that may\n remain are set to zero. This operation is traced with a\n ``PositiveSpectrumWarning`` when ``enable_warnings=True``.\n\n - that eigenvalues are not all negative. If this check fails, it raises a\n ``ValueError``\n\n - that there are no significant negative eigenvalues with absolute value\n more than 1e-10 (1e-6) and more than 1e-5 (5e-3) times the largest\n positive eigenvalue in double (simple) precision. If this check fails,\n it raises a ``ValueError``. Otherwise all negative eigenvalues that may\n remain are set to zero. This operation is traced with a\n ``PositiveSpectrumWarning`` when ``enable_warnings=True``.\n\n Finally, all the positive eigenvalues that are too small (with a value\n smaller than the maximum eigenvalue multiplied by 1e-12 (2e-7)) are set to\n zero. This operation is traced with a ``PositiveSpectrumWarning`` when\n ``enable_warnings=True``.\n\n Parameters\n ----------\n lambdas : array-like of shape (n_eigenvalues,)\n Array of eigenvalues to check / fix.\n\n enable_warnings : bool, default=False\n When this is set to ``True``, a ``PositiveSpectrumWarning`` will be\n raised when there are imaginary parts, negative eigenvalues, or\n extremely small non-zero eigenvalues. Otherwise no warning will be\n raised. In both cases, imaginary parts, negative eigenvalues, and\n extremely small non-zero eigenvalues will be set to zero.\n\n Returns\n -------\n lambdas_fixed : ndarray of shape (n_eigenvalues,)\n A fixed validated copy of the array of eigenvalues.\n\n Examples\n --------\n >>> from sklearn.utils.validation import _check_psd_eigenvalues\n >>> _check_psd_eigenvalues([1, 2]) # nominal case\n array([1, 2])\n >>> _check_psd_eigenvalues([5, 5j]) # significant imag part\n Traceback (most recent call last):\n ...\n ValueError: There are significant imaginary parts in eigenvalues (1\n of the maximum real part). 
Either the matrix is not PSD, or there was\n an issue while computing the eigendecomposition of the matrix.\n >>> _check_psd_eigenvalues([5, 5e-5j]) # insignificant imag part\n array([5., 0.])\n >>> _check_psd_eigenvalues([-5, -1]) # all negative\n Traceback (most recent call last):\n ...\n ValueError: All eigenvalues are negative (maximum is -1). Either the\n matrix is not PSD, or there was an issue while computing the\n eigendecomposition of the matrix.\n >>> _check_psd_eigenvalues([5, -1]) # significant negative\n Traceback (most recent call last):\n ...\n ValueError: There are significant negative eigenvalues (0.2 of the\n maximum positive). Either the matrix is not PSD, or there was an issue\n while computing the eigendecomposition of the matrix.\n >>> _check_psd_eigenvalues([5, -5e-5]) # insignificant negative\n array([5., 0.])\n >>> _check_psd_eigenvalues([5, 4e-12]) # bad conditioning (too small)\n array([5., 0.])\n\n \"\"\"\n\n lambdas = np.array(lambdas)\n is_double_precision = lambdas.dtype == np.float64\n\n # note: the minimum value available is\n # - single-precision: np.finfo('float32').eps = 1.2e-07\n # - double-precision: np.finfo('float64').eps = 2.2e-16\n\n # the various thresholds used for validation\n # we may wish to change the value according to precision.\n significant_imag_ratio = 1e-5\n significant_neg_ratio = 1e-5 if is_double_precision else 5e-3\n significant_neg_value = 1e-10 if is_double_precision else 1e-6\n small_pos_ratio = 1e-12 if is_double_precision else 2e-7\n\n # Check that there are no significant imaginary parts\n if not np.isreal(lambdas).all():\n max_imag_abs = np.abs(np.imag(lambdas)).max()\n max_real_abs = np.abs(np.real(lambdas)).max()\n if max_imag_abs > significant_imag_ratio * max_real_abs:\n raise ValueError(\n \"There are significant imaginary parts in eigenvalues (%g \"\n \"of the maximum real part). Either the matrix is not PSD, or \"\n \"there was an issue while computing the eigendecomposition \"\n \"of the matrix.\" % (max_imag_abs / max_real_abs)\n )\n\n # warn about imaginary parts being removed\n if enable_warnings:\n warnings.warn(\n \"There are imaginary parts in eigenvalues (%g \"\n \"of the maximum real part). Either the matrix is not\"\n \" PSD, or there was an issue while computing the \"\n \"eigendecomposition of the matrix. Only the real \"\n \"parts will be kept.\" % (max_imag_abs / max_real_abs),\n PositiveSpectrumWarning,\n )\n\n # Remove all imaginary parts (even if zero)\n lambdas = np.real(lambdas)\n\n # Check that there are no significant negative eigenvalues\n max_eig = lambdas.max()\n if max_eig < 0:\n raise ValueError(\n \"All eigenvalues are negative (maximum is %g). \"\n \"Either the matrix is not PSD, or there was an \"\n \"issue while computing the eigendecomposition of \"\n \"the matrix.\" % max_eig\n )\n\n else:\n min_eig = lambdas.min()\n if (\n min_eig < -significant_neg_ratio * max_eig\n and min_eig < -significant_neg_value\n ):\n raise ValueError(\n \"There are significant negative eigenvalues (%g\"\n \" of the maximum positive). Either the matrix is \"\n \"not PSD, or there was an issue while computing \"\n \"the eigendecomposition of the matrix.\" % (-min_eig / max_eig)\n )\n elif min_eig < 0:\n # Remove all negative values and warn about it\n if enable_warnings:\n warnings.warn(\n \"There are negative eigenvalues (%g of the \"\n \"maximum positive). Either the matrix is not \"\n \"PSD, or there was an issue while computing the\"\n \" eigendecomposition of the matrix. 
Negative \"\n \"eigenvalues will be replaced with 0.\" % (-min_eig / max_eig),\n PositiveSpectrumWarning,\n )\n lambdas[lambdas < 0] = 0\n\n # Check for conditioning (small positive non-zeros)\n too_small_lambdas = (0 < lambdas) & (lambdas < small_pos_ratio * max_eig)\n if too_small_lambdas.any():\n if enable_warnings:\n warnings.warn(\n \"Badly conditioned PSD matrix spectrum: the largest \"\n \"eigenvalue is more than %g times the smallest. \"\n \"Small eigenvalues will be replaced with 0.\"\n \"\" % (1 / small_pos_ratio),\n PositiveSpectrumWarning,\n )\n lambdas[too_small_lambdas] = 0\n\n return lambdas\n\n\ndef _check_sample_weight(\n sample_weight, X, dtype=None, copy=False, only_non_negative=False\n):\n \"\"\"Validate sample weights.\n\n Note that passing sample_weight=None will output an array of ones.\n Therefore, in some cases, you may want to protect the call with:\n if sample_weight is not None:\n sample_weight = _check_sample_weight(...)\n\n Parameters\n ----------\n sample_weight : {ndarray, Number or None}, shape (n_samples,)\n Input sample weights.\n\n X : {ndarray, list, sparse matrix}\n Input data.\n\n only_non_negative : bool, default=False,\n Whether or not the weights are expected to be non-negative.\n\n .. versionadded:: 1.0\n\n dtype : dtype, default=None\n dtype of the validated `sample_weight`.\n If None, and the input `sample_weight` is an array, the dtype of the\n input is preserved; otherwise an array with the default numpy dtype\n is be allocated. If `dtype` is not one of `float32`, `float64`,\n `None`, the output will be of dtype `float64`.\n\n copy : bool, default=False\n If True, a copy of sample_weight will be created.\n\n Returns\n -------\n sample_weight : ndarray of shape (n_samples,)\n Validated sample weight. It is guaranteed to be \"C\" contiguous.\n \"\"\"\n n_samples = _num_samples(X)\n\n if dtype is not None and dtype not in [np.float32, np.float64]:\n dtype = np.float64\n\n if sample_weight is None:\n sample_weight = np.ones(n_samples, dtype=dtype)\n elif isinstance(sample_weight, numbers.Number):\n sample_weight = np.full(n_samples, sample_weight, dtype=dtype)\n else:\n if dtype is None:\n dtype = [np.float64, np.float32]\n sample_weight = check_array(\n sample_weight,\n accept_sparse=False,\n ensure_2d=False,\n dtype=dtype,\n order=\"C\",\n copy=copy,\n )\n if sample_weight.ndim != 1:\n raise ValueError(\"Sample weights must be 1D array or scalar\")\n\n if sample_weight.shape != (n_samples,):\n raise ValueError(\n \"sample_weight.shape == {}, expected {}!\".format(\n sample_weight.shape, (n_samples,)\n )\n )\n\n if only_non_negative:\n check_non_negative(sample_weight, \"`sample_weight`\")\n\n return sample_weight\n\n\ndef _allclose_dense_sparse(x, y, rtol=1e-7, atol=1e-9):\n \"\"\"Check allclose for sparse and dense data.\n\n Both x and y need to be either sparse or dense, they\n can't be mixed.\n\n Parameters\n ----------\n x : {array-like, sparse matrix}\n First array to compare.\n\n y : {array-like, sparse matrix}\n Second array to compare.\n\n rtol : float, default=1e-7\n Relative tolerance; see numpy.allclose.\n\n atol : float, default=1e-9\n absolute tolerance; see numpy.allclose. 
Note that the default here is\n more tolerant than the default for numpy.testing.assert_allclose, where\n atol=0.\n \"\"\"\n if sp.issparse(x) and sp.issparse(y):\n x = x.tocsr()\n y = y.tocsr()\n x.sum_duplicates()\n y.sum_duplicates()\n return (\n np.array_equal(x.indices, y.indices)\n and np.array_equal(x.indptr, y.indptr)\n and np.allclose(x.data, y.data, rtol=rtol, atol=atol)\n )\n elif not sp.issparse(x) and not sp.issparse(y):\n return np.allclose(x, y, rtol=rtol, atol=atol)\n raise ValueError(\n \"Can only compare two sparse matrices, not a sparse matrix and an array\"\n )\n\n\ndef _check_fit_params(X, fit_params, indices=None):\n \"\"\"Check and validate the parameters passed during `fit`.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Data array.\n\n fit_params : dict\n Dictionary containing the parameters passed at fit.\n\n indices : array-like of shape (n_samples,), default=None\n Indices to be selected if the parameter has the same size as `X`.\n\n Returns\n -------\n fit_params_validated : dict\n Validated parameters. We ensure that the values support indexing.\n \"\"\"\n from . import _safe_indexing\n\n fit_params_validated = {}\n for param_key, param_value in fit_params.items():\n if not _is_arraylike(param_value) or _num_samples(param_value) != _num_samples(\n X\n ):\n # Non-indexable pass-through (for now for backward-compatibility).\n # https://github.com/scikit-learn/scikit-learn/issues/15805\n fit_params_validated[param_key] = param_value\n else:\n # Any other fit_params should support indexing\n # (e.g. for cross-validation).\n fit_params_validated[param_key] = _make_indexable(param_value)\n fit_params_validated[param_key] = _safe_indexing(\n fit_params_validated[param_key], indices\n )\n\n return fit_params_validated\n\n\ndef _get_feature_names(X):\n \"\"\"Get feature names from X.\n\n Support for other array containers should place its implementation here.\n\n Parameters\n ----------\n X : {ndarray, dataframe} of shape (n_samples, n_features)\n Array container to extract feature names.\n\n - pandas dataframe : The columns will be considered to be feature\n names. If the dataframe contains non-string feature names, `None` is\n returned.\n - All other array containers will return `None`.\n\n Returns\n -------\n names: ndarray or None\n Feature names of `X`. Unrecognized array containers will return `None`.\n \"\"\"\n feature_names = None\n\n # extract feature names for support array containers\n if hasattr(X, \"columns\"):\n feature_names = np.asarray(X.columns, dtype=object)\n\n if feature_names is None or len(feature_names) == 0:\n return\n\n types = sorted(t.__qualname__ for t in set(type(v) for v in feature_names))\n\n # Warn when types are mixed.\n # ints and strings do not warn\n if len(types) > 1 or not (types[0].startswith(\"int\") or types[0] == \"str\"):\n # TODO: Convert to an error in 1.2\n warnings.warn(\n \"Feature names only support names that are all strings. \"\n f\"Got feature names with dtypes: {types}. An error will be raised \"\n \"in 1.2.\",\n FutureWarning,\n )\n return\n\n # Only feature names of all strings are supported\n if types[0] == \"str\":\n return feature_names\n\n\ndef _check_feature_names_in(estimator, input_features=None):\n \"\"\"Get output feature names for transformation.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Input features.\n\n - If `input_features` is `None`, then `feature_names_in_` is\n used as feature names in. 
If `feature_names_in_` is not defined,\n then names are generated: `[x0, x1, ..., x(n_features_in_)]`.\n - If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined.\n\n Returns\n -------\n feature_names_in : ndarray of str\n Feature names in.\n \"\"\"\n\n feature_names_in_ = getattr(estimator, \"feature_names_in_\", None)\n n_features_in_ = getattr(estimator, \"n_features_in_\", None)\n\n if input_features is not None:\n input_features = np.asarray(input_features, dtype=object)\n if feature_names_in_ is not None and not np.array_equal(\n feature_names_in_, input_features\n ):\n raise ValueError(\"input_features is not equal to feature_names_in_\")\n\n if n_features_in_ is not None and len(input_features) != n_features_in_:\n raise ValueError(\n \"input_features should have length equal to number of \"\n f\"features ({n_features_in_}), got {len(input_features)}\"\n )\n return input_features\n\n if feature_names_in_ is not None:\n return feature_names_in_\n\n # Generates feature names if `n_features_in_` is defined\n if n_features_in_ is None:\n raise ValueError(\"Unable to generate feature names without n_features_in_\")\n\n return np.asarray([f\"x{i}\" for i in range(n_features_in_)], dtype=object)\n" ]
[ [ "numpy.array_equal", "numpy.imag", "numpy.dtype", "numpy.full", "numpy.isreal", "numpy.isfinite", "scipy.sparse.issparse", "numpy.array", "numpy.real", "numpy.shape", "numpy.allclose", "numpy.isinf", "numpy.result_type", "numpy.asarray", "numpy.random.RandomState", "numpy.ones", "numpy.may_share_memory", "numpy.ravel", "numpy.asanyarray", "numpy.unique" ] ]