repo_name (string, length 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list) |
---|---|---|---|---|
ModelEngineering/topics-course
|
[
"cd0d73e4056663d170465669ecd699e8e74e35a0"
] |
[
"archived_lectures/Fall_2019/common_python/common_python/statistics/density.py"
] |
[
"\"\"\"Creation, Analysis, and Manipulation of Discrete Distributions.\"\"\"\n\n\"\"\"\nTODO\n1. Bar plot of density\n\"\"\"\n\n\nimport common_python.constants as cn\nfrom common_python.plots import util_plots\n\nimport collections\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nINDEX_MULT = 100\n\n\nclass Density(object):\n \"\"\"\n self.ser_density is a Series whose index is the variate\n and the value is its density.\n \"\"\"\n\n def __init__(self, ser, variates=None):\n \"\"\"\n :param pd.Series: variate values for which a density is created\n :param list-object: expected values in the density\n \"\"\"\n if variates is None:\n variates = ser.unique().tolist()\n self.variates = variates\n self.ser_density = self.__class__._makeDensity(ser, self.variates)\n\n @staticmethod\n def _makeDensity(ser, variates):\n \"\"\"\n :param pd.Series ser:\n :param list-object variates: required to be present in result\n :return pd.Series:\n \"\"\"\n counter = dict(collections.Counter(ser))\n length = len(ser)\n counter = {k: v/length for k, v in counter.items()}\n for variate in variates:\n if not variate in counter.keys():\n counter[variate] = 0\n values = list(counter.values())\n keys = list(counter.keys())\n return pd.Series(values, index=keys)\n\n def get(self):\n return self.ser_density\n\n # TODO: Write tests\n def isLessEqual(self, other):\n \"\"\"\n Determines if lower values have higher probabilities.\n :param Density other:\n :return bool:\n \"\"\"\n is_less = True\n for key in self.ser_density.keys():\n if is_less:\n if self.ser_density.loc[key][0] > \\\n other.ser_density.loc[key][0]:\n is_less = False\n else:\n if self.ser_density.loc[key][0] < \\\n other.ser_density.loc[key][0]:\n return False\n return True\n\n def plot(self, is_plot=True, **kwds):\n \"\"\"\n Creates a bar plot of the density.\n \"\"\"\n self.ser_density.plot.bar(**kwds)\n if is_plot:\n plt.show()\n"
] |
[
[
"matplotlib.pyplot.show",
"pandas.Series"
]
] |
EmreHakguder/SL_VSMs
|
[
"5f44ea4a760407465d0966e73f54961dfff3f715",
"5f44ea4a760407465d0966e73f54961dfff3f715"
] |
[
"preprocess/funcs.py",
"semantics/funcs.py"
] |
[
"import csv\nimport pandas as pd\nimport os\n\ndef clear_phonology_df(somePhonDF_Path):\n intersect = intersect_phon_sem(somePhonDF_Path)\n \n #Identifying the language\n language = somePhonDF_Path.split(\"/\")[-1].split(\"_\")[0]\n phon_df = pd.read_csv(somePhonDF_Path).set_index(\"video\")\n \n phon_df = phon_df[phon_df.index.isin(intersect)]\n phon_df = phon_df.sort_index()\n phon_df = phon_df.reset_index()\n \n masterPath = \"data/output/phonologyData/\"\n if not os.path.exists(masterPath):\n os.makedirs(masterPath)\n \n phon_df.to_csv(masterPath+language+\"_Phonology_clean.csv.gz\", index=False)\n del phon_df\n \ndef clear_semantics_df(somePhonDF_Path, some_glove_path):\n #Identifying the language\n language = somePhonDF_Path.split(\"/\")[-1].split(\"_\")[0]\n \n intersect = intersect_phon_sem(somePhonDF_Path)\n dim = some_glove_path.split(\".\")[-2]\n\n sem_df = pd.read_table(some_glove_path, sep=\" \", header=None, quoting=csv.QUOTE_NONE)\n sem_df.columns = [\"video\"]+[i for i in range(len(sem_df.columns)-1)]\n sem_df[\"video\"] = sem_df[\"video\"].str.lower()\n\n sem_df = sem_df.set_index(\"video\")\n sem_df = sem_df[sem_df.index.isin(intersect)]\n \n #Splitting df into unique rows and non-unique rows // keeping only the first occurrence of duplicate rows\n index = sem_df.index\n is_duplicate = index.duplicated(keep=\"first\")\n not_duplicate = ~is_duplicate\n \n #Keeping only the first of duplicate signs\n sem_df = sem_df[not_duplicate].copy()\n sem_df = sem_df.sort_index()\n sem_df = sem_df.reset_index()\n \n masterPath = \"data/output/semanticsData/\"\n if not os.path.exists(masterPath):\n os.makedirs(masterPath)\n \n sem_df.to_csv(masterPath+language+\"_Semantics_\"+str(dim)+\"_clean.csv.gz\", index=False)\n del sem_df\n \ndef intersect_phon_sem(somePhonDF_Path, glovePath = \"../../../Downloads/glove/glove.6B.50d.txt\"):\n #Identifying the language\n language = somePhonDF_Path.split(\"/\")[-1].split(\"_\")[0]\n \n \"\"\"PHONOLOGY DATA\"\"\"\n phon_df = pd.read_csv(somePhonDF_Path).set_index(\"video\")\n phon_signs = list(phon_df.index)\n \n \"\"\"SEMANTICS DATA\"\"\"\n sem_df = pd.read_table(glovePath, sep=\" \", header=None, quoting=csv.QUOTE_NONE)\n sem_df[0] = sem_df[0].str.lower()\n sem_df = sem_df.set_index(0)\n sem_signs = list(sem_df.index)\n \n \"\"\"OVERLAPPING PHONOLOGY AND SEMANICS DATA\"\"\"\n intersect = list_intersect(phon_signs, sem_signs)\n \n return intersect\n\ndef list_intersect(list1, list2):\n return list(set(list1) & set(list2))\n\ndef remove_phonology_duplicate_videos(somePhonDF_Path):\n #Identifying the language\n language = somePhonDF_Path.split(\"/\")[-1].split(\"_\")[0]\n \n #Reading in the phonology excel file \n phon_df = pd.read_excel(somePhonDF_Path)\n \n #making sure all cells are of data type 'string'\n phon_df = phon_df.astype(str) \n \n #Lowering case of video names\n phon_df.video = phon_df.video.str.lower()\n \n #Removing numbers and parentheses that identify duplicate signs\n phon_df.video = phon_df.video.str.rstrip(\"()0123456789\")\n \n #Setting the 'video' column as dataframe index\n phon_df = phon_df.set_index(\"video\")\n \n #Splitting df into unique rows and non-unique rows // keeping only the first occurrence of duplicate rows\n index = phon_df.index\n is_duplicate = index.duplicated(keep=\"first\")\n not_duplicate = ~is_duplicate\n \n #Keeping only the first of duplicate signs\n phon_df = phon_df[not_duplicate].copy()\n phon_df = phon_df.reset_index()\n \n #Lower-casing certain column values\n phon_df = 
lower_column_values(phon_df)\n \n phon_df.to_csv(\"data/transforming/phonologyData/unique_signs/\"+language+\"_unique.csv.gz\", index=False, compression='gzip')\n \n del phon_df\n\n\ndef lower_column_values(somePhonDF):\n #Lower-casing values of phonology columns if lower-upper case distinction in the transcription does not make a difference\n #This is necessary because there are some irregularities in transcriptions\n lowerCols = [col for col in somePhonDF.columns if (\"HandShape\" not in col) and (\"Thumb\" not in col) and (\"JointConfiguration\" not in col) and (\"SelectedFingers\" not in col)]\n\n for lowerCol in lowerCols:\n somePhonDF[lowerCol] = somePhonDF[lowerCol].str.lower()\n \n return somePhonDF\n",
"import numpy as np\nimport os\nimport pandas as pd\nfrom itertools import combinations\n\nfrom scipy.spatial.distance import cosine\n\ndef find_semantics_cosine_similarity_pairwise(someGlovePath):\n dim = someGlovePath.split(\"_\")[-2]\n language = someGlovePath.split(\"/\")[-1].split(\"_\")[0]\n \n print(dim, language)\n sem_df = pd.read_csv(someGlovePath).set_index(\"video\")\n \n sign_pairs = list(combinations(sem_df.index, r=2))\n print(len(sign_pairs))\n #!#!#!#!\n \"\"\"CREATING A DATAFRAME FOR SEM COSINE SIMILARITY\"\"\"\n index = pd.MultiIndex.from_tuples(sign_pairs, names=[\"s1\", \"s2\"])\n cosSim_col = \"sem_cosineSim\"\n sem_output_df = pd.DataFrame(columns=[cosSim_col] , index=index)\n\n masterPath = \"data/output/SemSim/\"+language+\"/\"+dim+\"/\"\n if not os.path.exists(masterPath):\n os.makedirs(masterPath)\n\n k = 0\n for i, (a, b) in enumerate(sign_pairs): \n sem_output_df.loc(axis=0)[a,b][cosSim_col] = round(1 - cosine(sem_df.loc[a], sem_df.loc[b]), 4)\n\n if ( i%10000 == 0 ) or ( i == ( len(sign_pairs)-1 ) ):\n sem_output_df = sem_output_df.dropna()\n sem_output_df = sem_output_df.reset_index()\n \n path = masterPath+language+\"_\"+dim+\"_SemSim_part\"+str(k).zfill(3)+\".csv.gz\"\n sem_output_df.to_csv(path, compression=\"gzip\", index=False)\n\n #Progress\n print(\"Progress...\", round(i/len(sign_pairs),3))\n \n k+=1\n sem_output_df = pd.DataFrame(columns = [cosSim_col], index = index)\n\n if ( i == ( len(sign_pairs)-1 ) ):\n del sem_output_df\n\n del sem_df\n \n"
] |
[
[
"pandas.read_table",
"pandas.read_excel",
"pandas.read_csv"
],
[
"pandas.read_csv",
"scipy.spatial.distance.cosine",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame"
]
] |
jxcbecker/trappist1
|
[
"cf99216928c92ffa308ecf9482d3cdf91db37d43"
] |
[
"VPlanetRuns/run03/makeplot.py"
] |
[
"import matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport numpy as np\nimport sys\ntry:\n import vplot as vpl\nexcept:\n print('Cannot import vplot. Please install vplot.')\n\n# Check correct number of arguments\nif (len(sys.argv) != 3):\n print('ERROR: Incorrect number of arguments.')\n print('Usage: '+sys.argv[0]+' <pdf | png>')\n exit(1)\nif (sys.argv[1] != 'pdf' and sys.argv[1] != 'png'):\n print('ERROR: Unknown file format: '+sys.argv[1])\n print('Options are: pdf, png')\n exit(1)\nif (sys.argv[2] != 'linear' and sys.argv[2] != 'log'):\n print ('ERROR: unknown x axis scale argument:' + sys.argv[2])\n print ('Choose \"linear\" or \"log\"')\n exit(1)\nelse:\n xaxscale = sys.argv[2]\n\n\n#Typical plot parameters that make for pretty plot\nmpl.rcParams['figure.figsize'] = (6.5,9)\nmpl.rcParams['font.size'] = 12.0\n\n# Load data\noutput = vpl.GetOutput()\ntime=output.star.Time\n\n# Plot\nfig, axes = plt.subplots(nrows=3, ncols=2)\n#plt.figure(figsize=(4,9))\ncolor = \"k\"\n\n## Top Left: Luminosity ##\naxes[0,0].plot(time, output.star.Luminosity,'k')\n\n# Format\naxes[0,0].set_xlim(1e6,time.max())\naxes[0,0].set_ylim(1e-4,0.06)\naxes[0,0].set_xlabel(\"Time [yr]\")\naxes[0,0].set_ylabel(r\"Luminosity (L$_\\odot$)\")\naxes[0,0].set_xscale(xaxscale)\naxes[0,0].set_yscale('log')\n\n## Top Right: XUV Luminosity ##\naxes[0,1].plot(time, output.star.LXUVTot,'k')\n\n# Format\naxes[0,1].set_xlim(1e6,time.max())\naxes[0,1].set_ylim(1e-8,1.1*output.star.LXUVTot.max())\naxes[0,1].set_xlabel(\"Time [yr]\")\naxes[0,1].set_ylabel(r\"L$_{XUV}$ (L$_\\odot$)\")\naxes[0,1].set_xscale(xaxscale)\naxes[0,1].set_yscale('log')\n\n## Middle Left: Effective Temperature ##\naxes[1,0].plot(time, output.star.Temperature,'k')\n\n# Format\naxes[1,0].set_xlim(1e6,time.max())\naxes[1,0].set_ylim(0.9*output.star.Temperature.min(),1.1*output.star.Temperature.max())\naxes[1,0].set_xlabel(\"Time [yr]\")\naxes[1,0].set_ylabel(r\"T$_{eff}$ (K)\")\naxes[1,0].set_xscale(xaxscale)\n\n## Middle Right: Habitable Zone ##\naxes[1,1].plot(time, output.star.HZLimRecVenus,'k',linestyle='dashed')\naxes[1,1].plot(time, output.star.HZLimRunaway,'k')\naxes[1,1].plot(time, output.star.HZLimMaxGreenhouse,'k')\naxes[1,1].plot(time, output.star.HZLimEarlyMars,'k',linestyle='dashed')\n\n# Format\naxes[1,1].set_xlim(1e6,time.max())\naxes[1,1].set_ylim(0,0.5)\naxes[1,1].set_xlabel(\"Time [yr]\")\naxes[1,1].set_ylabel(\"Habitable Zone (AU)\")\naxes[1,1].set_xscale(xaxscale)\n\n## Bottom Left: Surface water ##\naxes[2,0].plot(time, output.b.SurfWaterMass,'k',label='b')\naxes[2,0].plot(time, output.c.SurfWaterMass,color=vpl.colors.red,label='c')\naxes[2,0].plot(time, output.d.SurfWaterMass,color=vpl.colors.orange,label='d')\naxes[2,0].plot(time, output.e.SurfWaterMass,color=vpl.colors.pale_blue,label='e')\naxes[2,0].plot(time, output.f.SurfWaterMass,color=vpl.colors.purple,label='f')\naxes[2,0].plot(time, output.g.SurfWaterMass,color=vpl.colors.red,linestyle=\"--\",label='g')\naxes[2,0].plot(time, output.h.SurfWaterMass,color=vpl.colors.orange,linestyle=\"--\",label='h')\n\n# Format\naxes[2,0].set_xlim(1e6,time.max())\naxes[2,0].set_ylim(0,11)\naxes[2,0].set_xlabel(\"Time [yr]\")\naxes[2,0].set_ylabel(\"Surface Water (TO)\")\naxes[2,0].set_xscale(xaxscale)\naxes[2,0].legend(loc='best')\n\n## Bottom Right: Abiotic Oxygen ##\naxes[2,1].plot(time, output.b.OxygenMass,'k')\naxes[2,1].plot(time, output.c.OxygenMass,color=vpl.colors.red)\naxes[2,1].plot(time, output.d.OxygenMass,color=vpl.colors.orange)\naxes[2,1].plot(time, 
output.e.OxygenMass,color=vpl.colors.pale_blue)\naxes[2,1].plot(time, output.f.OxygenMass,color=vpl.colors.purple)\naxes[2,1].plot(time, output.g.OxygenMass,color=vpl.colors.red, linestyle=\"--\")\naxes[2,1].plot(time, output.h.OxygenMass,color=vpl.colors.orange, linestyle=\"--\")\n# Format\naxes[2,1].set_xlim(1e6,time.max())\naxes[2,1].set_ylim(0,2000)\naxes[2,1].set_xlabel(\"Time [yr]\")\naxes[2,1].set_ylabel(\"Atm. Oxygen (bar)\")\naxes[2,1].set_xscale(xaxscale)\n\n# Final formating\nfig.tight_layout()\n\nif (sys.argv[1] == 'pdf'):\n fig.savefig('Trappist1.atmesc.pdf', bbox_inches=\"tight\", dpi=600)\nif (sys.argv[1] == 'png'):\n fig.savefig('Trappist1.atmesc.png', bbox_inches=\"tight\", dpi=600)\n"
] |
[
[
"matplotlib.pyplot.subplots"
]
] |
ryanaustincarlson/great_expectations
|
[
"0af627e763d9dd17be7e548b767aba0d7af0e670"
] |
[
"great_expectations/execution_engine/sqlalchemy_execution_engine.py"
] |
[
"import copy\nimport datetime\nimport logging\nimport uuid\nfrom pathlib import Path\nfrom typing import Any, Dict, Iterable, List, Optional, Tuple, Union\nfrom urllib.parse import urlparse\n\nimport pandas as pd\n\nfrom great_expectations.core import IDDict\nfrom great_expectations.core.batch import Batch, BatchMarkers\nfrom great_expectations.core.util import convert_to_json_serializable\nfrom great_expectations.exceptions import (\n DatasourceKeyPairAuthBadPassphraseError,\n GreatExpectationsError,\n InvalidConfigError,\n)\nfrom great_expectations.execution_engine import ExecutionEngine\nfrom great_expectations.execution_engine.execution_engine import MetricDomainTypes\nfrom great_expectations.expectations.row_conditions import parse_condition_to_sqlalchemy\nfrom great_expectations.util import (\n filter_properties_dict,\n get_currently_executing_function_call_arguments,\n import_library_module,\n)\nfrom great_expectations.validator.validation_graph import MetricConfiguration\n\nlogger = logging.getLogger(__name__)\n\ntry:\n import sqlalchemy as sa\nexcept ImportError:\n sa = None\n\ntry:\n from sqlalchemy.engine import reflection\n from sqlalchemy.engine.default import DefaultDialect\n from sqlalchemy.sql import Select\n from sqlalchemy.sql.elements import TextClause, quoted_name\nexcept ImportError:\n reflection = None\n DefaultDialect = None\n Select = None\n TextClause = None\n quoted_name = None\n\n\ntry:\n import psycopg2\n import sqlalchemy.dialects.postgresql.psycopg2 as sqlalchemy_psycopg2\nexcept (ImportError, KeyError):\n sqlalchemy_psycopg2 = None\n\ntry:\n import sqlalchemy_redshift.dialect\nexcept ImportError:\n sqlalchemy_redshift = None\n\ntry:\n import snowflake.sqlalchemy.snowdialect\n\n # Sometimes \"snowflake-sqlalchemy\" fails to self-register in certain environments, so we do it explicitly.\n # (see https://stackoverflow.com/questions/53284762/nosuchmoduleerror-cant-load-plugin-sqlalchemy-dialectssnowflake)\n sa.dialects.registry.register(\"snowflake\", \"snowflake.sqlalchemy\", \"dialect\")\nexcept (ImportError, KeyError):\n snowflake = None\n\ntry:\n import pybigquery.sqlalchemy_bigquery\n\n # Sometimes \"pybigquery.sqlalchemy_bigquery\" fails to self-register in certain environments, so we do it explicitly.\n # (see https://stackoverflow.com/questions/53284762/nosuchmoduleerror-cant-load-plugin-sqlalchemy-dialectssnowflake)\n sa.dialects.registry.register(\n \"bigquery\", \"pybigquery.sqlalchemy_bigquery\", \"BigQueryDialect\"\n )\n try:\n getattr(pybigquery.sqlalchemy_bigquery, \"INTEGER\")\n bigquery_types_tuple = None\n except AttributeError:\n # In older versions of the pybigquery driver, types were not exported, so we use a hack\n logger.warning(\n \"Old pybigquery driver version detected. Consider upgrading to 0.4.14 or later.\"\n )\n from collections import namedtuple\n\n BigQueryTypes = namedtuple(\n \"BigQueryTypes\", sorted(pybigquery.sqlalchemy_bigquery._type_map)\n )\n bigquery_types_tuple = BigQueryTypes(**pybigquery.sqlalchemy_bigquery._type_map)\nexcept ImportError:\n bigquery_types_tuple = None\n pybigquery = None\n\n\ndef _get_dialect_type_module(dialect):\n \"\"\"Given a dialect, returns the dialect type, which is defines the engine/system that is used to communicates\n with the database/database implementation. 
Currently checks for RedShift/BigQuery dialects\"\"\"\n if dialect is None:\n logger.warning(\n \"No sqlalchemy dialect found; relying in top-level sqlalchemy types.\"\n )\n return sa\n try:\n # Redshift does not (yet) export types to top level; only recognize base SA types\n if isinstance(dialect, sqlalchemy_redshift.dialect.RedshiftDialect):\n return dialect.sa\n except (TypeError, AttributeError):\n pass\n\n # Bigquery works with newer versions, but use a patch if we had to define bigquery_types_tuple\n try:\n if (\n isinstance(dialect, pybigquery.sqlalchemy_bigquery.BigQueryDialect,)\n and bigquery_types_tuple is not None\n ):\n return bigquery_types_tuple\n except (TypeError, AttributeError):\n pass\n\n return dialect\n\n\nclass SqlAlchemyBatchData(object):\n \"\"\"A class which represents a SQL alchemy batch, with properties including the construction of the batch itself\n and several getters used to access various properties.\"\"\"\n\n def __init__(\n self,\n engine,\n record_set_name: str = None,\n # Option 1\n schema_name: str = None,\n table_name: str = None,\n # Option 2\n query: str = None,\n # Option 3\n selectable=None,\n create_temp_table: bool = True,\n temp_table_name: str = None,\n temp_table_schema_name: str = None,\n use_quoted_name: bool = False,\n ):\n \"\"\"A Constructor used to initialize and SqlAlchemy Batch, create an id for it, and verify that all necessary\n parameters have been provided. If a Query is given, also builds a temporary table for this query\n\n Args:\n engine (SqlAlchemy Engine): \\\n A SqlAlchemy Engine or connection that will be used to access the data\n record_set_name: (string or None): \\\n The name of the record set available as a domain kwarg for Great Expectations validations. record_set_name\n can usually be None, but is required when there are multiple record_sets in the same Batch.\n schema_name (string or None): \\\n The name of the schema_name in which the databases lie\n table_name (string or None): \\\n The name of the table that will be accessed. Either this parameter or the query parameter must be\n specified. Default is 'None'.\n query (string or None): \\\n A query string representing a domain, which will be used to create a temporary table\n selectable (Sqlalchemy Selectable or None): \\\n A SqlAlchemy selectable representing a domain, which will be used to create a temporary table\n create_temp_table (bool): \\\n When building the batch data object from a query, this flag determines whether a temporary table should\n be created against which to validate data from the query. If False, a subselect statement will be used\n in each validation.\n temp_table_name (str or None): \\\n The name to use for a temporary table if one should be created. If None, a default name will be generated.\n temp_table_schema_name (str or None): \\\n The name of the schema in which a temporary table should be created. If None, the default schema will be\n used if a temporary table is requested.\n use_quoted_name (bool): \\\n If true, names should be quoted to preserve case sensitivity on databases that usually normalize them\n\n The query that will be executed against the DB can be determined in any of three ways:\n\n 1. Specify a `schema_name` and `table_name`. This will query the whole table as a record_set. If schema_name is None, then the default schema will be used.\n 2. Specify a `query`, which will be executed as-is to fetch the record_set. NOTE Abe 20201118 : This functionality is currently untested.\n 3. 
Specify a `selectable`, which will be to fetch the record_set. This is the primary path used by DataConnectors.\n\n In the case of (2) and (3) you have the option to execute the query either as a temporary table, or as a subselect statement.\n\n In general, temporary tables invite more optimization from the query engine itself. Subselect statements may sometimes be preffered, because they do not require write access on the database.\n\n\n \"\"\"\n self._engine = engine\n self._record_set_name = record_set_name or \"great_expectations_sub_selection\"\n if not isinstance(self._record_set_name, str):\n raise TypeError(\n f\"record_set_name should be of type str, not {type(record_set_name)}\"\n )\n\n self._schema_name = schema_name\n self._use_quoted_name = use_quoted_name\n\n if sum(bool(x) for x in [table_name, query, selectable is not None]) != 1:\n raise ValueError(\n \"Exactly one of table_name, query, or selectable must be specified\"\n )\n elif (query and schema_name) or (selectable is not None and schema_name):\n raise ValueError(\n \"schema_name can only be used with table_name. Use temp_table_schema_name to provide a target schema for creating a temporary table.\"\n )\n\n if table_name:\n # Suggestion: pull this block out as its own _function\n if use_quoted_name:\n table_name = quoted_name(table_name, quote=True)\n if engine.dialect.name.lower() == \"bigquery\":\n if schema_name is not None:\n logger.warning(\n \"schema_name should not be used when passing a table_name for biquery. Instead, include the schema name in the table_name string.\"\n )\n # In BigQuery the table name is already qualified with its schema name\n self._selectable = sa.Table(\n table_name, sa.MetaData(), schema_name=None,\n )\n else:\n self._selectable = sa.Table(\n table_name, sa.MetaData(), schema_name=schema_name,\n )\n\n elif create_temp_table:\n if temp_table_name:\n generated_table_name = temp_table_name\n else:\n # Suggestion: Pull this into a separate \"_generate_temporary_table_name\" method\n generated_table_name = f\"ge_tmp_{str(uuid.uuid4())[:8]}\"\n # mssql expects all temporary table names to have a prefix '#'\n if engine.dialect.name.lower() == \"mssql\":\n generated_table_name = f\"#{generated_table_name}\"\n if engine.dialect.name.lower() == \"bigquery\":\n raise ValueError(\n \"No BigQuery dataset specified. Include bigquery_temp_table in \"\n \"batch_spec_passthrough or a specify a default dataset in engine url\"\n )\n if selectable is not None:\n # compile selectable to sql statement\n query = selectable.compile(\n dialect=self.sql_engine_dialect,\n compile_kwargs={\"literal_binds\": True},\n )\n self._create_temporary_table(\n generated_table_name,\n query,\n temp_table_schema_name=temp_table_schema_name,\n )\n self._selectable = sa.Table(\n generated_table_name, sa.MetaData(), schema_name=temp_table_schema_name,\n )\n else:\n if query:\n self._selectable = sa.text(query)\n else:\n self._selectable = selectable.alias(self._record_set_name)\n\n @property\n def sql_engine_dialect(self) -> DefaultDialect:\n \"\"\"Returns the Batches' current engine dialect\"\"\"\n return self._engine.dialect\n\n @property\n def record_set_name(self):\n return self._record_set_name\n\n @property\n def selectable(self):\n return self._selectable\n\n @property\n def use_quoted_name(self):\n return self._use_quoted_name\n\n def _create_temporary_table(\n self, temp_table_name, query, temp_table_schema_name=None\n ):\n \"\"\"\n Create Temporary table based on sql query. 
This will be used as a basis for executing expectations.\n :param query:\n \"\"\"\n if self.sql_engine_dialect.name.lower() == \"bigquery\":\n stmt = \"CREATE OR REPLACE TABLE `{temp_table_name}` AS {query}\".format(\n temp_table_name=temp_table_name, query=query\n )\n elif self.sql_engine_dialect.name.lower() == \"snowflake\":\n if temp_table_schema_name is not None:\n temp_table_name = temp_table_schema_name + \".\" + temp_table_name\n stmt = \"CREATE OR REPLACE TEMPORARY TABLE {temp_table_name} AS {query}\".format(\n temp_table_name=temp_table_name, query=query\n )\n elif self.sql_engine_dialect.name == \"mysql\":\n # Note: We can keep the \"MySQL\" clause separate for clarity, even though it is the same as the\n # generic case.\n stmt = \"CREATE TEMPORARY TABLE {temp_table_name} AS {query}\".format(\n temp_table_name=temp_table_name, query=query\n )\n elif self.sql_engine_dialect.name == \"mssql\":\n # Insert \"into #{temp_table_name}\" in the custom sql query right before the \"from\" clause\n # Split is case sensitive so detect case.\n # Note: transforming query to uppercase/lowercase has unintended consequences (i.e.,\n # changing column names), so this is not an option!\n query = query.string # extracting string from MSSQLCompiler object\n if \"from\" in query:\n strsep = \"from\"\n else:\n strsep = \"FROM\"\n querymod = query.split(strsep, maxsplit=1)\n stmt = (querymod[0] + \"into {temp_table_name} from\" + querymod[1]).format(\n temp_table_name=temp_table_name\n )\n else:\n stmt = 'CREATE TEMPORARY TABLE \"{temp_table_name}\" AS {query}'.format(\n temp_table_name=temp_table_name, query=query\n )\n self._engine.execute(stmt)\n\n def head(self, n=5, fetch_all=False):\n \"\"\"Fetches the head of the table\"\"\"\n\n if fetch_all:\n result_object = self._engine.execute(\n sa.select(\"*\").select_from(self._selectable)\n )\n else:\n result_object = self._engine.execute(\n sa.select(\"*\").limit(n).select_from(self._selectable)\n )\n\n rows = result_object.fetchall()\n\n # Note: Abe 20201119: This should be a GE type\n head_df = pd.DataFrame(rows, columns=result_object._metadata.keys)\n\n return head_df\n\n def row_count(self):\n \"\"\"Gets the number of rows\"\"\"\n\n result_object = self._engine.execute(\n sa.select([sa.func.count()]).select_from(self._selectable)\n )\n rows = result_object.fetchall()\n\n return rows[0][0]\n\n\nclass SqlAlchemyExecutionEngine(ExecutionEngine):\n def __init__(\n self,\n name=None,\n credentials=None,\n data_context=None,\n engine=None,\n connection_string=None,\n url=None,\n batch_data_dict=None,\n **kwargs, # These will be passed as optional parameters to the SQLAlchemy engine, **not** the ExecutionEngine\n ):\n \"\"\"Builds a SqlAlchemyExecutionEngine, using a provided connection string/url/engine/credentials to access the\n desired database. Also initializes the dialect to be used and configures usage statistics.\n\n Args:\n name (str): \\\n The name of the SqlAlchemyExecutionEngine\n credentials: \\\n If the Execution Engine is not provided, the credentials can be used to build the Execution\n Engine. If the Engine is provided, it will be used instead\n data_context (DataContext): \\\n An object representing a Great Expectations project that can be used to access Expectation\n Suites and the Project Data itself\n engine (Engine): \\\n A SqlAlchemy Engine used to set the SqlAlchemyExecutionEngine being configured, useful if an\n Engine has already been configured and should be reused. 
Will override Credentials\n if provided.\n connection_string (string): \\\n If neither the engines nor the credentials have been provided, a connection string can be used\n to access the data. This will be overridden by both the engine and credentials if those are\n provided.\n url (string): \\\n If neither the engines, the credentials, nor the connection_string have been provided,\n a url can be used to access the data. This will be overridden by all other configuration\n options if any are provided.\n \"\"\"\n super().__init__(name=name, batch_data_dict=batch_data_dict) # , **kwargs)\n self._name = name\n\n self._credentials = credentials\n self._connection_string = connection_string\n self._url = url\n\n if engine is not None:\n if credentials is not None:\n logger.warning(\n \"Both credentials and engine were provided during initialization of SqlAlchemyExecutionEngine. \"\n \"Ignoring credentials.\"\n )\n self.engine = engine\n elif credentials is not None:\n self.engine = self._build_engine(credentials=credentials, **kwargs)\n elif connection_string is not None:\n self.engine = sa.create_engine(connection_string, **kwargs)\n elif url is not None:\n self.drivername = urlparse(url).scheme\n self.engine = sa.create_engine(url, **kwargs)\n else:\n raise InvalidConfigError(\n \"Credentials or an engine are required for a SqlAlchemyExecutionEngine.\"\n )\n\n # Get the dialect **for purposes of identifying types**\n if self.engine.dialect.name.lower() in [\n \"postgresql\",\n \"mysql\",\n \"sqlite\",\n \"oracle\",\n \"mssql\",\n \"oracle\",\n ]:\n # These are the officially included and supported dialects by sqlalchemy\n self.dialect = import_library_module(\n module_name=\"sqlalchemy.dialects.\" + self.engine.dialect.name\n )\n\n elif self.engine.dialect.name.lower() == \"snowflake\":\n self.dialect = import_library_module(\n module_name=\"snowflake.sqlalchemy.snowdialect\"\n )\n elif self.engine.dialect.name.lower() == \"redshift\":\n self.dialect = import_library_module(\n module_name=\"sqlalchemy_redshift.dialect\"\n )\n elif self.engine.dialect.name.lower() == \"bigquery\":\n self.dialect = import_library_module(\n module_name=\"pybigquery.sqlalchemy_bigquery\"\n )\n else:\n self.dialect = None\n\n if self.engine and self.engine.dialect.name.lower() in [\n \"sqlite\",\n \"mssql\",\n \"snowflake\",\n ]:\n # sqlite/mssql temp tables only persist within a connection so override the engine\n self.engine = self.engine.connect()\n\n # Send a connect event to provide dialect type\n if data_context is not None and getattr(\n data_context, \"_usage_statistics_handler\", None\n ):\n handler = data_context._usage_statistics_handler\n handler.send_usage_message(\n event=\"execution_engine.sqlalchemy.connect\",\n event_payload={\n \"anonymized_name\": handler._execution_engine_anonymizer.anonymize(\n self.name\n ),\n \"sqlalchemy_dialect\": self.engine.name,\n },\n success=True,\n )\n\n # Gather the call arguments of the present function (and add the \"class_name\"), filter out the Falsy values,\n # and set the instance \"_config\" variable equal to the resulting dictionary.\n self._config = get_currently_executing_function_call_arguments(\n **{\"class_name\": self.__class__.__name__}\n )\n filter_properties_dict(\n properties=self._config, inplace=True,\n )\n\n @property\n def credentials(self):\n return self._credentials\n\n @property\n def connection_string(self):\n return self._connection_string\n\n @property\n def url(self):\n return self._url\n\n def _build_engine(self, credentials, **kwargs) 
-> \"sa.engine.Engine\":\n \"\"\"\n Using a set of given credentials, constructs an Execution Engine , connecting to a database using a URL or a\n private key path.\n \"\"\"\n # Update credentials with anything passed during connection time\n drivername = credentials.pop(\"drivername\")\n schema_name = credentials.pop(\"schema_name\", None)\n if schema_name is not None:\n logger.warning(\n \"schema_name specified creating a URL with schema is not supported. Set a default \"\n \"schema on the user connecting to your database.\"\n )\n\n create_engine_kwargs = kwargs\n connect_args = credentials.pop(\"connect_args\", None)\n if connect_args:\n create_engine_kwargs[\"connect_args\"] = connect_args\n\n if \"private_key_path\" in credentials:\n options, create_engine_kwargs = self._get_sqlalchemy_key_pair_auth_url(\n drivername, credentials\n )\n else:\n options = sa.engine.url.URL(drivername, **credentials)\n\n self.drivername = drivername\n engine = sa.create_engine(options, **create_engine_kwargs)\n return engine\n\n def _get_sqlalchemy_key_pair_auth_url(\n self, drivername: str, credentials: dict\n ) -> Tuple[str, dict]:\n \"\"\"\n Utilizing a private key path and a passphrase in a given credentials dictionary, attempts to encode the provided\n values into a private key. If passphrase is incorrect, this will fail and an exception is raised.\n\n Args:\n drivername(str) - The name of the driver class\n credentials(dict) - A dictionary of database credentials used to access the database\n\n Returns:\n a tuple consisting of a url with the serialized key-pair authentication, and a dictionary of engine kwargs.\n \"\"\"\n from cryptography.hazmat.backends import default_backend\n from cryptography.hazmat.primitives import serialization\n\n private_key_path = credentials.pop(\"private_key_path\")\n private_key_passphrase = credentials.pop(\"private_key_passphrase\")\n\n with Path(private_key_path).expanduser().resolve().open(mode=\"rb\") as key:\n try:\n p_key = serialization.load_pem_private_key(\n key.read(),\n password=private_key_passphrase.encode()\n if private_key_passphrase\n else None,\n backend=default_backend(),\n )\n except ValueError as e:\n if \"incorrect password\" in str(e).lower():\n raise DatasourceKeyPairAuthBadPassphraseError(\n datasource_name=\"SqlAlchemyDatasource\",\n message=\"Decryption of key failed, was the passphrase incorrect?\",\n ) from e\n else:\n raise e\n pkb = p_key.private_bytes(\n encoding=serialization.Encoding.DER,\n format=serialization.PrivateFormat.PKCS8,\n encryption_algorithm=serialization.NoEncryption(),\n )\n\n credentials_driver_name = credentials.pop(\"drivername\", None)\n create_engine_kwargs = {\"connect_args\": {\"private_key\": pkb}}\n return (\n sa.engine.url.URL(drivername or credentials_driver_name, **credentials),\n create_engine_kwargs,\n )\n\n def get_compute_domain(\n self,\n domain_kwargs: Dict,\n domain_type: Union[str, \"MetricDomainTypes\"],\n accessor_keys: Optional[Iterable[str]] = None,\n ) -> Tuple[\"sa.sql.Selectable\", dict, dict]:\n \"\"\"Uses a given batch dictionary and domain kwargs to obtain a SqlAlchemy column object.\n\n Args:\n domain_kwargs (dict) - A dictionary consisting of the domain kwargs specifying which data to obtain\n domain_type (str or \"MetricDomainTypes\") - an Enum value indicating which metric domain the user would\n like to be using, or a corresponding string value representing it. String types include \"identity\", \"column\",\n \"column_pair\", \"table\" and \"other\". 
Enum types include capitalized versions of these from the class\n MetricDomainTypes.\n accessor_keys (str iterable) - keys that are part of the compute domain but should be ignored when describing\n the domain and simply transferred with their associated values into accessor_domain_kwargs.\n\n Returns:\n SqlAlchemy column\n \"\"\"\n # Extracting value from enum if it is given for future computation\n domain_type = MetricDomainTypes(domain_type)\n batch_id = domain_kwargs.get(\"batch_id\")\n if batch_id is None:\n # We allow no batch id specified if there is only one batch\n if self.active_batch_data:\n data_object = self.active_batch_data\n else:\n raise GreatExpectationsError(\n \"No batch is specified, but could not identify a loaded batch.\"\n )\n else:\n if batch_id in self.loaded_batch_data_dict:\n data_object = self.loaded_batch_data_dict[batch_id]\n else:\n raise GreatExpectationsError(\n f\"Unable to find batch with batch_id {batch_id}\"\n )\n\n compute_domain_kwargs = copy.deepcopy(domain_kwargs)\n accessor_domain_kwargs = dict()\n if \"table\" in domain_kwargs and domain_kwargs[\"table\"] is not None:\n if domain_kwargs[\"table\"] != data_object.record_set_name:\n raise ValueError(\"Unrecognized table name.\")\n else:\n selectable = data_object.selectable\n elif \"query\" in domain_kwargs:\n raise ValueError(\n \"query is not currently supported by SqlAlchemyExecutionEngine\"\n )\n else:\n selectable = data_object.selectable\n\n if (\n \"row_condition\" in domain_kwargs\n and domain_kwargs[\"row_condition\"] is not None\n ):\n condition_parser = domain_kwargs[\"condition_parser\"]\n if condition_parser == \"great_expectations__experimental__\":\n parsed_condition = parse_condition_to_sqlalchemy(\n domain_kwargs[\"row_condition\"]\n )\n selectable = sa.select(\n \"*\", from_obj=selectable, whereclause=parsed_condition\n )\n\n else:\n raise GreatExpectationsError(\n \"SqlAlchemyExecutionEngine only supports the great_expectations condition_parser.\"\n )\n\n # Warning user if accessor keys are in any domain that is not of type table, will be ignored\n if (\n domain_type != MetricDomainTypes.TABLE\n and accessor_keys is not None\n and len(accessor_keys) > 0\n ):\n logger.warning(\n \"Accessor keys ignored since Metric Domain Type is not 'table'\"\n )\n\n if domain_type == MetricDomainTypes.TABLE:\n if accessor_keys is not None and len(accessor_keys) > 0:\n for key in accessor_keys:\n accessor_domain_kwargs[key] = compute_domain_kwargs.pop(key)\n if len(domain_kwargs.keys()) > 0:\n for key in compute_domain_kwargs.keys():\n # Warning user if kwarg not \"normal\"\n if key not in [\n \"batch_id\",\n \"table\",\n \"row_condition\",\n \"condition_parser\",\n ]:\n logger.warning(\n f\"Unexpected key {key} found in domain_kwargs for domain type {domain_type.value}\"\n )\n return selectable, compute_domain_kwargs, accessor_domain_kwargs\n\n # If user has stated they want a column, checking if one is provided, and\n elif domain_type == MetricDomainTypes.COLUMN:\n if \"column\" in compute_domain_kwargs:\n # Checking if case- sensitive and using appropriate name\n if self.active_batch_data.use_quoted_name:\n accessor_domain_kwargs[\"column\"] = quoted_name(\n compute_domain_kwargs.pop(\"column\")\n )\n else:\n accessor_domain_kwargs[\"column\"] = compute_domain_kwargs.pop(\n \"column\"\n )\n else:\n # If column not given\n raise GreatExpectationsError(\n \"Column not provided in compute_domain_kwargs\"\n )\n\n # Else, if column pair values requested\n elif domain_type == 
MetricDomainTypes.COLUMN_PAIR:\n # Ensuring column_A and column_B parameters provided\n if (\n \"column_A\" in compute_domain_kwargs\n and \"column_B\" in compute_domain_kwargs\n ):\n if self.active_batch_data.use_quoted_name:\n # If case matters...\n accessor_domain_kwargs[\"column_A\"] = quoted_name(\n compute_domain_kwargs.pop(\"column_A\")\n )\n accessor_domain_kwargs[\"column_B\"] = quoted_name(\n compute_domain_kwargs.pop(\"column_B\")\n )\n else:\n accessor_domain_kwargs[\"column_A\"] = compute_domain_kwargs.pop(\n \"column_A\"\n )\n accessor_domain_kwargs[\"column_B\"] = compute_domain_kwargs.pop(\n \"column_B\"\n )\n else:\n raise GreatExpectationsError(\n \"column_A or column_B not found within compute_domain_kwargs\"\n )\n\n # Checking if table or identity or other provided, column is not specified. If it is, warning the user\n elif domain_type == MetricDomainTypes.MULTICOLUMN:\n if \"columns\" in compute_domain_kwargs:\n # If columns exist\n accessor_domain_kwargs[\"columns\"] = compute_domain_kwargs.pop(\"columns\")\n\n # Filtering if identity\n elif domain_type == MetricDomainTypes.IDENTITY:\n # If we would like our data to become a single column\n if \"column\" in compute_domain_kwargs:\n if self.active_batch_data.use_quoted_name:\n selectable = sa.select(\n [sa.column(quoted_name(compute_domain_kwargs[\"column\"]))]\n ).select_from(selectable)\n else:\n selectable = sa.select(\n [sa.column(compute_domain_kwargs[\"column\"])]\n ).select_from(selectable)\n\n # If we would like our data to now become a column pair\n elif (\"column_A\" in compute_domain_kwargs) and (\n \"column_B\" in compute_domain_kwargs\n ):\n if self.active_batch_data.use_quoted_name:\n selectable = sa.select(\n [\n sa.column(quoted_name(compute_domain_kwargs[\"column_A\"])),\n sa.column(quoted_name(compute_domain_kwargs[\"column_B\"])),\n ]\n ).select_from(selectable)\n else:\n selectable = sa.select(\n [\n sa.column(compute_domain_kwargs[\"column_A\"]),\n sa.column(compute_domain_kwargs[\"column_B\"]),\n ]\n ).select_from(selectable)\n else:\n # If we would like our data to become a multicolumn\n if \"columns\" in compute_domain_kwargs:\n if self.active_batch_data.use_quoted_name:\n # Building a list of column objects used for sql alchemy selection\n to_select = [\n sa.column(quoted_name(col))\n for col in compute_domain_kwargs[\"columns\"]\n ]\n selectable = sa.select(to_select).select_from(selectable)\n else:\n to_select = [\n sa.column(col) for col in compute_domain_kwargs[\"columns\"]\n ]\n selectable = sa.select(to_select).select_from(selectable)\n\n # Letting selectable fall through\n return selectable, compute_domain_kwargs, accessor_domain_kwargs\n\n def resolve_metric_bundle(\n self, metric_fn_bundle: Iterable[Tuple[MetricConfiguration, Any, dict, dict]],\n ) -> dict:\n \"\"\"For every metrics in a set of Metrics to resolve, obtains necessary metric keyword arguments and builds a\n bundles the metrics into one large query dictionary so that they are all executed simultaneously. 
Will fail if\n bundling the metrics together is not possible.\n\n Args:\n metric_fn_bundle (Iterable[Tuple[MetricConfiguration, Callable, dict]): \\\n A Dictionary containing a MetricProvider's MetricConfiguration (its unique identifier), its metric provider function\n (the function that actually executes the metric), and the arguments to pass to the metric provider function.\n metrics (Dict[Tuple, Any]): \\\n A dictionary of metrics defined in the registry and corresponding arguments\n\n Returns:\n A dictionary of metric names and their corresponding now-queried values.\n \"\"\"\n resolved_metrics = dict()\n\n # We need a different query for each domain (where clause).\n queries: Dict[Tuple, dict] = dict()\n for (\n metric_to_resolve,\n engine_fn,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n metric_provider_kwargs,\n ) in metric_fn_bundle:\n if not isinstance(compute_domain_kwargs, IDDict):\n compute_domain_kwargs = IDDict(compute_domain_kwargs)\n domain_id = compute_domain_kwargs.to_id()\n if domain_id not in queries:\n queries[domain_id] = {\n \"select\": [],\n \"ids\": [],\n \"domain_kwargs\": compute_domain_kwargs,\n }\n queries[domain_id][\"select\"].append(\n engine_fn.label(metric_to_resolve.metric_name)\n )\n queries[domain_id][\"ids\"].append(metric_to_resolve.id)\n for query in queries.values():\n selectable, compute_domain_kwargs, _ = self.get_compute_domain(\n query[\"domain_kwargs\"], domain_type=\"identity\"\n )\n assert len(query[\"select\"]) == len(query[\"ids\"])\n res = self.engine.execute(\n sa.select(query[\"select\"]).select_from(selectable)\n ).fetchall()\n logger.debug(\n f\"SqlAlchemyExecutionEngine computed {len(res[0])} metrics on domain_id {IDDict(compute_domain_kwargs).to_id()}\"\n )\n assert (\n len(res) == 1\n ), \"all bundle-computed metrics must be single-value statistics\"\n assert len(query[\"ids\"]) == len(\n res[0]\n ), \"unexpected number of metrics returned\"\n for idx, id in enumerate(query[\"ids\"]):\n resolved_metrics[id] = convert_to_json_serializable(res[0][idx])\n\n # Convert metrics to be serializable\n return resolved_metrics\n\n ### Splitter methods for partitioning tables ###\n\n def _split_on_whole_table(\n self,\n table_name: str,\n # column_name: str,\n partition_definition: dict,\n ):\n \"\"\"'Split' by returning the whole table\"\"\"\n\n # return sa.column(column_name) == partition_definition[column_name]\n return 1 == 1\n\n def _split_on_column_value(\n self, table_name: str, column_name: str, partition_definition: dict,\n ):\n \"\"\"Split using the values in the named column\"\"\"\n\n return sa.column(column_name) == partition_definition[column_name]\n\n def _split_on_converted_datetime(\n self,\n table_name: str,\n column_name: str,\n partition_definition: dict,\n date_format_string: str = \"%Y-%m-%d\",\n ):\n \"\"\"Convert the values in the named column to the given date_format, and split on that\"\"\"\n\n return (\n sa.func.strftime(date_format_string, sa.column(column_name),)\n == partition_definition[column_name]\n )\n\n def _split_on_divided_integer(\n self,\n table_name: str,\n column_name: str,\n divisor: int,\n partition_definition: dict,\n ):\n \"\"\"Divide the values in the named column by `divisor`, and split on that\"\"\"\n\n return (\n sa.cast(sa.column(column_name) / divisor, sa.Integer)\n == partition_definition[column_name]\n )\n\n def _split_on_mod_integer(\n self, table_name: str, column_name: str, mod: int, partition_definition: dict,\n ):\n \"\"\"Divide the values in the named column by `divisor`, and 
split on that\"\"\"\n\n return sa.column(column_name) % mod == partition_definition[column_name]\n\n def _split_on_multi_column_values(\n self, table_name: str, column_names: List[str], partition_definition: dict,\n ):\n \"\"\"Split on the joint values in the named columns\"\"\"\n\n return sa.and_(\n *[\n sa.column(column_name) == column_value\n for column_name, column_value in partition_definition.items()\n ]\n )\n\n def _split_on_hashed_column(\n self,\n table_name: str,\n column_name: str,\n hash_digits: int,\n partition_definition: dict,\n ):\n \"\"\"Split on the hashed value of the named column\"\"\"\n\n return (\n sa.func.right(sa.func.md5(sa.column(column_name)), hash_digits)\n == partition_definition[column_name]\n )\n\n ### Sampling methods ###\n\n # _sample_using_limit\n # _sample_using_random\n # _sample_using_mod\n # _sample_using_a_list\n # _sample_using_md5\n\n def _sample_using_random(\n self, p: float = 0.1,\n ):\n \"\"\"Take a random sample of rows, retaining proportion p\n\n Note: the Random function behaves differently on different dialects of SQL\n \"\"\"\n return sa.func.random() < p\n\n def _sample_using_mod(\n self, column_name, mod: int, value: int,\n ):\n \"\"\"Take the mod of named column, and only keep rows that match the given value\"\"\"\n return sa.column(column_name) % mod == value\n\n def _sample_using_a_list(\n self, column_name: str, value_list: list,\n ):\n \"\"\"Match the values in the named column against value_list, and only keep the matches\"\"\"\n return sa.column(column_name).in_(value_list)\n\n def _sample_using_md5(\n self, column_name: str, hash_digits: int = 1, hash_value: str = \"f\",\n ):\n \"\"\"Hash the values in the named column, and split on that\"\"\"\n return (\n sa.func.right(\n sa.func.md5(sa.cast(sa.column(column_name), sa.Text)), hash_digits\n )\n == hash_value\n )\n\n def _build_selectable_from_batch_spec(self, batch_spec):\n table_name = batch_spec[\"table_name\"]\n table_name: str = batch_spec[\"table_name\"]\n\n if \"splitter_method\" in batch_spec:\n splitter_fn = getattr(self, batch_spec[\"splitter_method\"])\n split_clause = splitter_fn(\n table_name=table_name,\n partition_definition=batch_spec[\"partition_definition\"],\n **batch_spec[\"splitter_kwargs\"],\n )\n\n else:\n split_clause = True\n\n if \"sampling_method\" in batch_spec:\n if batch_spec[\"sampling_method\"] == \"_sample_using_limit\":\n # SQLalchemy's semantics for LIMIT are different than normal WHERE clauses,\n # so the business logic for building the query needs to be different.\n\n return (\n sa.select(\"*\")\n .select_from(sa.text(table_name))\n .where(split_clause)\n .limit(batch_spec[\"sampling_kwargs\"][\"n\"])\n )\n\n else:\n\n sampler_fn = getattr(self, batch_spec[\"sampling_method\"])\n return (\n sa.select(\"*\")\n .select_from(sa.text(table_name))\n .where(\n sa.and_(\n split_clause, sampler_fn(**batch_spec[\"sampling_kwargs\"]),\n )\n )\n )\n return sa.select(\"*\").select_from(sa.text(table_name)).where(split_clause)\n\n def get_batch_data_and_markers(\n self, batch_spec\n ) -> Tuple[SqlAlchemyBatchData, BatchMarkers]:\n\n selectable = self._build_selectable_from_batch_spec(batch_spec=batch_spec)\n if \"bigquery_temp_table\" in batch_spec:\n temp_table_name = batch_spec.get(\"bigquery_temp_table\")\n else:\n temp_table_name = None\n batch_data = SqlAlchemyBatchData(\n engine=self.engine, selectable=selectable, temp_table_name=temp_table_name\n )\n\n batch_markers = BatchMarkers(\n {\n \"ge_load_time\": 
datetime.datetime.now(datetime.timezone.utc).strftime(\n \"%Y%m%dT%H%M%S.%fZ\"\n )\n }\n )\n\n return batch_data, batch_markers\n"
] |
[
[
"pandas.DataFrame"
]
] |
dohyeoklee/Control-Furuta-Pendulum
|
[
"00fbcfdf43c517f8d76ca7e664cbc3598e744af4"
] |
[
"qube_ppo_balancing.py"
] |
[
"import os\nimport torch\nimport numpy as np\nimport torch.optim as optim\nfrom collections import deque\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\n\nimport matplotlib.pyplot as plt\nimport pickle\nimport time\n\nimport gym\nfrom gym_brt.envs import QubeBeginDownEnv\nfrom gym_brt.envs import QubeBeginUprightEnv\nfrom random import *\n\nhidden_size = 128\ngamma = 0.99\nlamda = 0.98\nbatch_size = 64\nclip_param = 0.2\nactor_lr = 1e-3\ncritic_lr = 1e-4\nl2_rate = 0.001\nrender = False\n\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n\nclass RunningStat(object):\n def __init__(self, shape):\n self._n = 0\n self._M = np.zeros(shape)\n self._S = np.zeros(shape)\n\n def push(self, x):\n x = np.asarray(x)\n assert x.shape == self._M.shape\n self._n += 1\n if self._n == 1:\n self._M[...] = x\n else:\n oldM = self._M.copy()\n self._M[...] = oldM + (x - oldM) / self._n\n self._S[...] = self._S + (x - oldM) * (x - self._M)\n\n @property\n def n(self):\n return self._n\n\n @n.setter\n def n(self, n):\n self._n = n\n\n @property\n def mean(self):\n return self._M\n\n @mean.setter\n def mean(self, M):\n self._M = M\n\n @property\n def var(self):\n return self._S / (self._n - 1) if self._n > 1 else np.square(self._M)\n\n @property\n def std(self):\n return np.sqrt(self.var)\n\n @property\n def shape(self):\n return self._M.shape\n\n\nclass ZFilter:\n \n\n def __init__(self, shape, demean=True, destd=True, clip=10.0):\n self.demean = demean\n self.destd = destd\n self.clip = clip\n\n self.rs = RunningStat(shape)\n\n def __call__(self, x, update=True):\n if update: self.rs.push(x)\n if self.demean:\n x = x - self.rs.mean\n if self.destd:\n x = x / (self.rs.std + 1e-8)\n if self.clip:\n x = np.clip(x, -self.clip, self.clip)\n return x\n\n def output_shape(self, input_space):\n return input_space.shape\n\nclass Actor(nn.Module):\n def __init__(self, num_inputs, num_outputs,init_w=3e-3):\n self.num_inputs = num_inputs\n self.num_outputs = num_outputs\n super(Actor, self).__init__()\n self.fc1 = nn.Linear(num_inputs, hidden_size)\n self.fc2 = nn.Linear(hidden_size, hidden_size)\n self.fc3 = nn.Linear(hidden_size, num_outputs)\n self.fc3.weight.data.uniform_(-init_w,init_w)\n self.fc3.bias.data.uniform_(-init_w, init_w)\n\n def forward(self, x):\n x = torch.tanh(self.fc1(x))\n x = torch.tanh(self.fc2(x))\n mu = self.fc3(x)\n logstd = torch.zeros_like(mu)\n std = torch.exp(logstd)\n return mu, std, logstd\n\n\nclass Critic(nn.Module):\n def __init__(self, num_inputs,init_w=3e-3):\n super(Critic, self).__init__()\n self.fc1 = nn.Linear(num_inputs, hidden_size)\n self.fc2 = nn.Linear(hidden_size, hidden_size)\n self.fc3 = nn.Linear(hidden_size, 1)\n self.fc3.weight.data.uniform_(-init_w, init_w)\n self.fc3.bias.data.uniform_(-init_w, init_w)\n\n def forward(self, x):\n x = torch.tanh(self.fc1(x))\n x = torch.tanh(self.fc2(x))\n v = self.fc3(x)\n return v\n\n\ndef get_action(mu, std):\n action = torch.normal(mu, std)\n action = action.data.numpy()\n return action\n\n\ndef log_density(x, mu, std, logstd):\n var = std.pow(2)\n log_density = -(x - mu).pow(2) / (2 * var) \\\n - 0.5 * math.log(2 * math.pi) - logstd\n return log_density.sum(1, keepdim=True)\n\n\ndef get_gae(rewards, masks, values):\n rewards = torch.Tensor(rewards)\n masks = torch.Tensor(masks)\n returns = torch.zeros_like(rewards)\n advants = torch.zeros_like(rewards)\n\n running_returns = 0\n previous_value = 0\n running_advants = 0\n\n for t in 
reversed(range(0, len(rewards))):\n running_returns = rewards[t] + gamma * running_returns * masks[t]\n running_tderror = rewards[t] + gamma * previous_value * masks[t] - \\\n values.data[t]\n running_advants = running_tderror + gamma * lamda * \\\n running_advants * masks[t]\n\n returns[t] = running_returns\n previous_value = values.data[t]\n advants[t] = running_advants\n\n advants = (advants - advants.mean()) / advants.std()\n return returns, advants\n\n\ndef surrogate_loss(actor, advants, states, old_policy, actions, index):\n mu, std, logstd = actor(torch.Tensor(states))\n new_policy = log_density(actions, mu, std, logstd)\n old_policy = old_policy[index]\n\n ratio = torch.exp(new_policy - old_policy)\n surrogate = ratio * advants\n return surrogate, ratio\n\n\ndef train_model(actor, critic, memory, actor_optim, critic_optim):\n memory = np.array(memory)\n states = np.vstack(memory[:, 0])\n actions = list(memory[:, 1])\n rewards = list(memory[:, 2])\n masks = list(memory[:, 3])\n values = critic(torch.Tensor(states))\n\n \n returns, advants = get_gae(rewards, masks, values)\n mu, std, logstd = actor(torch.Tensor(states))\n old_policy = log_density(torch.Tensor(actions), mu, std, logstd)\n old_values = critic(torch.Tensor(states))\n\n criterion = torch.nn.MSELoss()\n n = len(states)\n arr = np.arange(n)\n\n losses = torch.zeros(int(n // batch_size))\n\n \n for epoch in range(10):\n np.random.shuffle(arr)\n\n for i in range(n // batch_size):\n batch_index = arr[batch_size * i: batch_size * (i + 1)]\n batch_index = torch.LongTensor(batch_index)\n inputs = torch.Tensor(states)[batch_index]\n returns_samples = returns.unsqueeze(1)[batch_index]\n advants_samples = advants.unsqueeze(1)[batch_index]\n actions_samples = torch.Tensor(actions)[batch_index]\n oldvalue_samples = old_values[batch_index].detach()\n\n loss, ratio = surrogate_loss(actor, advants_samples, inputs,\n old_policy.detach(), actions_samples,\n batch_index)\n\n values = critic(inputs)\n clipped_values = oldvalue_samples + \\\n torch.clamp(values - oldvalue_samples,\n -clip_param,\n clip_param)\n critic_loss1 = criterion(clipped_values, returns_samples)\n critic_loss2 = criterion(values, returns_samples)\n critic_loss = torch.max(critic_loss1, critic_loss2).mean()\n\n clipped_ratio = torch.clamp(ratio,\n 1.0 - clip_param,\n 1.0 + clip_param)\n clipped_loss = clipped_ratio * advants_samples\n actor_loss = -torch.min(loss, clipped_loss).mean()\n\n loss = actor_loss + 0.5 * critic_loss\n losses[i] = loss\n critic_optim.zero_grad()\n loss.backward(retain_graph=True)\n critic_optim.step()\n\n actor_optim.zero_grad()\n loss.backward()\n actor_optim.step()\n return losses\n\nif __name__==\"__main__\":\n env = QubeBeginUprightEnv() \n env.seed(500)\n torch.manual_seed(500)\n num_inputs = env.observation_space.shape[0]\n num_actions = env.action_space.shape[0]\n\n print('state size:', num_inputs)\n print('action size:', num_actions)\n\n actor = Actor(num_inputs, num_actions).to(device)\n critic = Critic(num_inputs).to(device)\n\n running_state = ZFilter((num_inputs,), clip=5)\n\n actor_optim = optim.Adam(actor.parameters(), lr=actor_lr)\n critic_optim = optim.Adam(critic.parameters(), lr=critic_lr,\n weight_decay=l2_rate)\n\n episodes = 0\n reward_list = []\n losses = []\n plt.ion()\n for iteration in range(1,15000):\n actor.eval(), critic.eval()\n memory = deque()\n\n steps = 0\n scores = []\n episode_score = 0\n while steps < 2000:\n episodes += 1\n state = env.reset()\n state = running_state(state)\n \n start_vect=time.time()\n for 
_ in range(2000):\n steps += 1\n mu, std, _ = actor(torch.Tensor(state).unsqueeze(0))\n action = get_action(mu, std)[0]\n print(action)\n next_state, reward, done, _ = env.step(action) \n next_state = running_state(next_state)\n\n if done:\n mask = 0\n else:\n mask = 1\n\n memory.append([state, action, reward, mask])\n\n episode_score += reward\n state = next_state\n\n if done:\n break\n print(\"training Runtime: %0.2f seconds\"%(time.time() - start_vect))\n env.step([0.0])\n print('{} episode score is {:.2f}'.format(episodes, episode_score))\n actor.train(), critic.train()\n loss = train_model(actor, critic, memory, actor_optim, critic_optim)\n losses.append(loss.mean().detach().numpy())\n reward_list.append(episode_score)\n plt.figure(1,figsize=(20,5))\n plt.subplot(1,2,1)\n plt.title('iteration %s. reward: %s' % (iteration, reward_list[-1]))\n plt.plot(list(range(1,iteration+1)),reward_list,'b')\n\n plt.subplot(1,2,2)\n plt.title('iteration %s. loss: %s' % (iteration, losses[-1]))\n plt.plot(list(range(1,iteration+1)),losses,'darkorange')\n\n plt.pause(0.001) \n plt.show()\n\n if iteration % 50 == 0:\n plt.savefig('/home/ctrllab/19_urp/code_test/qube_servo/result_img/bal_ppo_img'+str(iteration)+'.png')\n with open('/home/ctrllab/19_urp/code_test/qube_servo/result_data/bal_ppo_result_reward_'+str(iteration)+'.pickle','wb') as savedata:\n pickle.dump([iteration,reward_list],savedata)\n with open('/home/ctrllab/19_urp/code_test/qube_servo/result_data/bal_ppo_result_loss_'+str(iteration)+'.pickle','wb') as savedata:\n pickle.dump([iteration,losses],savedata)\n torch.save(actor.state_dict(),'./ppo_real_'+str(iteration)+'.pth')\n env.close() \n plt.waitforbuttonpress(0)\n plt.close()\n"
] |
[
[
"numpy.sqrt",
"torch.max",
"numpy.asarray",
"torch.cuda.is_available",
"torch.device",
"numpy.square",
"matplotlib.pyplot.pause",
"numpy.clip",
"numpy.arange",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.waitforbuttonpress",
"numpy.zeros",
"matplotlib.pyplot.figure",
"torch.normal",
"torch.LongTensor",
"matplotlib.pyplot.title",
"torch.min",
"torch.zeros_like",
"torch.exp",
"torch.nn.Linear",
"matplotlib.pyplot.ion",
"numpy.array",
"matplotlib.pyplot.show",
"torch.Tensor",
"torch.manual_seed",
"numpy.random.shuffle",
"torch.clamp",
"torch.nn.MSELoss",
"numpy.vstack"
]
] |
dorinapetra/probing
|
[
"41a31f343b6cb8122bc19cba7651f967f48de021"
] |
[
"probing/models/contextual_embedding_classifier.py"
] |
[
"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2019 Judit Acs <[email protected]>\n#\n# Distributed under terms of the MIT license.\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport logging\nfrom transformers import AutoModel, AutoConfig\n\nfrom probing.models.base import BaseModel\nfrom probing.models.mlp import MLP\n\nuse_cuda = torch.cuda.is_available()\n\n\ndef to_cuda(var):\n if use_cuda:\n return var.cuda()\n return var\n\n\nclass Embedder(nn.Module):\n def __init__(self, model_name, layer_pooling,\n randomize_embedding_weights=False,\n train_base_model=False):\n super().__init__()\n if train_base_model:\n logging.info(f\"Loading {model_name}. Model caching is not \"\n \"supported when finetuning.\")\n self.load_base_model(model_name, randomize_embedding_weights)\n else:\n global_key = (f'{model_name}_model', randomize_embedding_weights)\n if global_key not in globals():\n self.load_base_model(model_name, randomize_embedding_weights)\n globals()[global_key] = self.embedder\n self.embedder = globals()[global_key]\n for p in self.embedder.parameters():\n p.requires_grad = False\n self.train_base_model = train_base_model\n self.get_sizes()\n try:\n layer_pooling = int(layer_pooling)\n except ValueError:\n pass\n self.layer_pooling = layer_pooling\n if self.layer_pooling == 'weighted_sum':\n self.weights = nn.Parameter(\n torch.ones(self.n_layer, dtype=torch.float))\n self.softmax = nn.Softmax(0)\n\n def load_base_model(self, model_name, randomize_embedding_weights):\n self.config = AutoConfig.from_pretrained(\n model_name, output_hidden_states=True)\n if randomize_embedding_weights:\n logging.info(f\"Loading {model_name} with random weights.\")\n self.embedder = AutoModel.from_config(self.config)\n else:\n logging.info(f\"Loading {model_name}.\")\n self.embedder = AutoModel.from_pretrained(\n model_name, config=self.config)\n\n def forward(self, sentences, sentence_lens):\n if self.train_base_model:\n self.embedder.train(True)\n mask = torch.arange(sentences.size(1)) < \\\n torch.LongTensor(sentence_lens).unsqueeze(1)\n mask = to_cuda(mask.long())\n out = self.embedder(sentences, attention_mask=mask)[-1]\n else:\n self.embedder.train(False)\n with torch.no_grad():\n mask = torch.arange(sentences.size(1)) < \\\n torch.LongTensor(sentence_lens).unsqueeze(1)\n mask = to_cuda(mask.long())\n out = self.embedder(sentences, attention_mask=mask)[-1]\n if self.layer_pooling == 'weighted_sum':\n w = self.softmax(self.weights)\n return (w[:, None, None, None] * torch.stack(out)).sum(0).detach()\n if self.layer_pooling == 'all':\n return torch.stack(out)\n if self.layer_pooling == 'sum':\n return torch.sum(torch.stack(out), axis=0)\n if self.layer_pooling == 'last':\n return out[-1]\n if self.layer_pooling == 'first':\n return out[0]\n if isinstance(self.layer_pooling, int):\n return out[self.layer_pooling]\n raise ValueError(f\"Unknown pooling mechanism: {self.layer_pooling}\")\n\n def get_sizes(self):\n with torch.no_grad():\n d = self.embedder.dummy_inputs\n if next(self.parameters()).is_cuda:\n for param in d:\n if isinstance(d[param], torch.Tensor):\n d[param] = d[param].cuda()\n out = self.embedder(**d)[-1]\n self.n_layer = len(out)\n self.hidden_size = out[0].size(-1)\n\n def state_dict(self, *args, **kwargs):\n if self.train_base_model:\n return super().state_dict(*args, **kwargs)\n if self.layer_pooling == 'weighted_sum':\n args[0]['{}weights'.format(args[1])] = self.weights\n return args[0]\n\n\nclass 
SentenceRepresentationProber(BaseModel):\n def __init__(self, config, dataset):\n super().__init__(config)\n self.dataset = dataset\n randweights = self.config.randomize_embedding_weights\n self.embedder = Embedder(self.config.model_name,\n layer_pooling='all',\n randomize_embedding_weights=randweights,\n train_base_model=self.config.train_base_model,\n )\n self.output_size = len(dataset.vocabs.label)\n self.dropout = nn.Dropout(self.config.dropout)\n self.criterion = nn.CrossEntropyLoss()\n\n mlp_input_size = self.embedder.hidden_size\n if self.config.subword_pooling == 'f+l':\n self.subword_w = nn.Parameter(torch.ones(1, dtype=torch.float) / 2)\n elif self.config.subword_pooling == 'lstm':\n sw_lstm_size = getattr(self.config, 'subword_lstm_size',\n self.embedder.hidden_size)\n mlp_input_size = sw_lstm_size\n self.pool_lstm = nn.LSTM(\n self.embedder.hidden_size,\n sw_lstm_size // 2,\n num_layers=1,\n batch_first=True,\n bidirectional=True,\n )\n elif self.config.subword_pooling == 'attn':\n self.subword_mlp = MLP(\n self.embedder.hidden_size,\n layers=[self.config.subword_mlp_size],\n nonlinearity='ReLU',\n output_size=1\n )\n self.softmax = nn.Softmax(dim=0)\n elif self.config.subword_pooling == 'last2':\n mlp_input_size *= 2\n self.mlp = MLP(\n input_size=mlp_input_size,\n layers=self.config.mlp_layers,\n nonlinearity=self.config.mlp_nonlinearity,\n output_size=self.output_size,\n )\n self.pooling_func = {\n 'first': self._forward_first_last,\n 'last': self._forward_first_last,\n 'max': self._forward_elementwise_pool,\n 'sum': self._forward_elementwise_pool,\n 'avg': self._forward_elementwise_pool,\n 'last2': self._forward_last2,\n 'f+l': self._forward_first_plus_last,\n 'lstm': self._forward_lstm,\n 'attn': self._forward_mlp,\n }\n self.layer_pooling = config.layer_pooling\n if self.layer_pooling == 'weighted_sum':\n self.weights = nn.Parameter(\n torch.ones(self.embedder.n_layer, dtype=torch.float))\n self.softmax = nn.Softmax(0)\n self._cache = {}\n\n def check_params(self):\n if self.config.shift_target != 0:\n if self.config.subword_pooling not in ('first', 'last'):\n raise ValueError(\n \"Shift target is only supported for first and \"\n \"last subword pooling.\"\n )\n if self.config.subword_pooling not in ('first', 'last') and \\\n self.config.layer_pooling == 'weighted_sum':\n raise ValueError(\n \"Weighted sum of layers is only supported for first and \"\n \"last subword pooling.\"\n )\n\n def _forward_elementwise_pool(self, embedded, batch):\n subword_pooling = self.config.subword_pooling\n batch_size = embedded.size(0)\n helper = np.arange(batch_size)\n target_idx = np.array(batch.probe_target_idx)\n last = batch.token_starts[helper, target_idx + 1]\n first = batch.token_starts[helper, target_idx]\n target_vecs = []\n for wi in range(batch_size):\n if subword_pooling == 'max':\n o = embedded[wi, first[wi]:last[wi]].max(axis=0).values\n if subword_pooling == 'sum':\n o = embedded[wi, first[wi]:last[wi]].sum(axis=0)\n else:\n o = embedded[wi, first[wi]:last[wi]].mean(axis=0)\n target_vecs.append(o)\n return torch.stack(target_vecs)\n\n def _forward_last2(self, embedded, batch):\n target_vecs = []\n batch_size = embedded.size(0)\n helper = np.arange(batch_size)\n target_idx = np.array(batch.probe_target_idx)\n last = batch.token_starts[helper, target_idx + 1] - 1\n first = batch.token_starts[helper, target_idx]\n for wi in range(batch_size):\n last1 = embedded[wi, last[wi]]\n if first[wi] == last[wi]:\n last2 = to_cuda(torch.zeros_like(last1))\n else:\n last2 = embedded[wi, 
last[wi]-1]\n target_vecs.append(torch.cat((last1, last2), 0))\n return torch.stack(target_vecs)\n\n def _forward_first_plus_last(self, embedded, batch):\n batch_size = embedded.size(0)\n helper = np.arange(batch_size)\n w = self.subword_w\n target_idx = np.array(batch.probe_target_idx)\n last_idx = batch.token_starts[helper, target_idx + 1] - 1\n first_idx = batch.token_starts[helper, target_idx]\n first = embedded[helper, first_idx]\n last = embedded[helper, last_idx]\n target_vecs = w * first + (1 - w) * last\n return target_vecs\n\n def _forward_lstm(self, embedded, batch):\n batch_size = embedded.size(0)\n helper = np.arange(batch_size)\n target_vecs = []\n\n target_idx = np.array(batch.probe_target_idx)\n last_idx = batch.token_starts[helper, target_idx + 1]\n first_idx = batch.token_starts[helper, target_idx]\n\n for wi in range(batch_size):\n lstm_in = embedded[wi, first_idx[wi]:last_idx[wi]].unsqueeze(0)\n _, (h, c) = self.pool_lstm(lstm_in)\n h = torch.cat((h[0], h[1]), dim=-1)\n target_vecs.append(h[0])\n return torch.stack(target_vecs)\n\n def _forward_mlp(self, embedded, batch):\n batch_size = embedded.size(0)\n helper = np.arange(batch_size)\n\n target_idx = np.array(batch.probe_target_idx)\n last_idx = batch.token_starts[helper, target_idx + 1]\n first_idx = batch.token_starts[helper, target_idx]\n\n target_vecs = []\n for wi in range(batch_size):\n mlp_in = embedded[wi, first_idx[wi]:last_idx[wi]]\n weights = self.subword_mlp(mlp_in)\n sweights = self.softmax(weights).transpose(0, 1)\n target = sweights.mm(mlp_in).squeeze(0)\n target_vecs.append(target)\n return torch.stack(target_vecs)\n\n def forward(self, batch):\n subword_pooling = self.config.subword_pooling\n if subword_pooling in ('first', 'last'):\n target_vecs = self.pooling_func[subword_pooling](batch)\n else:\n # caching not supported\n X = torch.LongTensor(batch.input)\n X = to_cuda(X)\n embedded = self.embedder(X, batch.input_len)\n if self.layer_pooling == 'sum':\n embedded = embedded.sum(0)\n else:\n embedded = embedded[self.layer_pooling]\n target_vecs = self.pooling_func[subword_pooling](embedded, batch)\n mlp_out = self.mlp(target_vecs)\n return mlp_out\n\n def _get_first_last_tensors(self, batch):\n input = torch.LongTensor(batch.input)\n input = to_cuda(input)\n embedded = self.embedder(input, batch.input_len)\n batch_size = embedded.size(1)\n helper = np.arange(batch_size)\n target_idx = np.array(batch.probe_target_idx)\n subword_pooling = self.config.subword_pooling\n if subword_pooling == 'first':\n idx = batch.token_starts[helper, target_idx]\n elif subword_pooling == 'last':\n idx = batch.token_starts[helper, target_idx + 1] - 1\n else:\n raise ValueError(f\"Subword pooling {subword_pooling} \"\n \"with caching is not supported.\")\n if self.config.shift_target:\n shift_max = np.array(batch.input_len) - 1\n idx += self.config.shift_target\n idx = np.minimum(idx, shift_max)\n idx = np.clip(idx, 0, shift_max.max())\n\n target_vecs = embedded[:, helper, idx]\n return target_vecs\n\n def _get_layer_pooled(self, target_vecs):\n if self.layer_pooling == 'weighted_sum':\n return target_vecs\n elif self.layer_pooling == 'sum':\n return target_vecs.sum(0)\n else:\n return target_vecs[self.layer_pooling]\n\n def _forward_first_last(self, batch):\n\n if self.config.train_base_model:\n target_vecs = self._get_first_last_tensors(batch)\n target_vecs = self._get_layer_pooled(target_vecs)\n else:\n cache_target_idx = np.array(batch.probe_target_idx)\n cache_key = (\n tuple(np.array(batch.input).flat),\n 
tuple(cache_target_idx.flat))\n if cache_key not in self._cache:\n target_vecs = self._get_first_last_tensors(batch)\n target_vecs = self._get_layer_pooled(target_vecs)\n self._cache[cache_key] = target_vecs\n target_vecs = self._cache[cache_key]\n\n if self.layer_pooling == 'weighted_sum':\n w = self.softmax(self.weights)\n target_vecs = (w[:, None, None] * target_vecs).sum(0)\n return target_vecs\n\n def compute_loss(self, target, output):\n target = to_cuda(torch.LongTensor(target.label)).view(-1)\n loss = self.criterion(output, target)\n return loss\n\n\nclass TransformerForSequenceTagging(BaseModel):\n def __init__(self, config, dataset):\n super().__init__(config)\n self.dataset = dataset\n randweights = self.config.randomize_embedding_weights\n self.embedder = Embedder(\n self.config.model_name,\n layer_pooling=self.config.layer_pooling,\n randomize_embedding_weights=randweights,\n train_base_model=config.train_base_model)\n self.output_size = len(dataset.vocabs.labels)\n self.dropout = nn.Dropout(self.config.dropout)\n mlp_input_size = self.embedder.hidden_size\n if self.config.subword_pooling == 'lstm':\n sw_lstm_size = self.config.subword_lstm_size\n mlp_input_size = sw_lstm_size\n self.subword_lstm = nn.LSTM(\n self.embedder.hidden_size,\n sw_lstm_size // 2,\n num_layers=1,\n batch_first=True,\n bidirectional=True,\n )\n elif self.config.subword_pooling == 'attn':\n self.subword_mlp = MLP(\n input_size=self.embedder.hidden_size,\n layers=[self.config.subword_mlp_size],\n nonlinearity='ReLU',\n output_size=1\n )\n self.softmax = nn.Softmax(dim=0)\n elif self.config.subword_pooling == 'last2':\n mlp_input_size *= 2\n self.mlp = MLP(\n input_size=mlp_input_size,\n layers=self.config.mlp_layers,\n nonlinearity=self.config.mlp_nonlinearity,\n output_size=self.output_size,\n )\n if self.config.subword_pooling == 'f+l':\n self.subword_w = nn.Parameter(torch.ones(1, dtype=torch.float) / 2)\n self._cache = {}\n self.criterion = nn.CrossEntropyLoss()\n self.pooling_func = {\n 'first': self._forward_with_cache,\n 'last': self._forward_with_cache,\n 'max': self._forward_with_cache,\n 'sum': self._forward_with_cache,\n 'avg': self._forward_with_cache,\n 'last2': self._forward_last2,\n 'f+l': self._forward_first_plus_last,\n 'lstm': self._forward_lstm,\n 'attn': self._forward_mlp,\n }\n\n def forward(self, batch):\n subword_pooling = self.config.subword_pooling\n out = self.pooling_func[subword_pooling](batch)\n out = self.dropout(out)\n pred = self.mlp(out)\n return pred\n\n def _forward_lstm(self, batch):\n X = torch.LongTensor(batch.input)\n X = to_cuda(X)\n embedded = self.embedder(X, batch.sentence_subword_len)\n batch_size, seqlen, hidden_size = embedded.size()\n token_lens = batch.token_starts[:, 1:] - batch.token_starts[:, :-1]\n token_maxlen = token_lens.max()\n pad = to_cuda(torch.zeros((1, hidden_size)))\n all_token_vectors = []\n all_token_lens = []\n for bi in range(batch_size):\n for ti in range(batch.sentence_len[bi]):\n first = batch.token_starts[bi][ti+1]\n last = batch.token_starts[bi][ti+2]\n tok_vecs = embedded[bi, first:last]\n this_size = tok_vecs.size(0)\n if this_size < token_maxlen:\n this_pad = pad.repeat((token_maxlen - this_size, 1))\n tok_vecs = torch.cat((tok_vecs, this_pad))\n all_token_vectors.append(tok_vecs)\n all_token_lens.append(this_size)\n lstm_in = torch.stack(all_token_vectors)\n seq = torch.nn.utils.rnn.pack_padded_sequence(\n lstm_in, all_token_lens, enforce_sorted=False, batch_first=True)\n _, (h, c) = self.subword_lstm(seq)\n h = torch.cat((h[0], 
h[1]), dim=-1)\n return h\n\n def _forward_mlp(self, batch):\n X = torch.LongTensor(batch.input)\n X = to_cuda(X)\n embedded = self.embedder(X, batch.sentence_subword_len)\n batch_size, seqlen, hidden = embedded.size()\n mlp_weights = self.subword_mlp(embedded).view(batch_size, seqlen)\n outputs = []\n for bi in range(batch_size):\n for ti in range(batch.sentence_len[bi]):\n first = batch.token_starts[bi][ti+1]\n last = batch.token_starts[bi][ti+2]\n if last - 1 == first:\n outputs.append(embedded[bi, first])\n else:\n weights = mlp_weights[bi][first:last]\n weights = self.softmax(weights).unsqueeze(1)\n v = weights * embedded[bi, first:last]\n v = v.sum(axis=0)\n outputs.append(v)\n return torch.stack(outputs)\n\n def _forward_first_plus_last(self, batch):\n X = torch.LongTensor(batch.input)\n X = to_cuda(X)\n embedded = self.embedder(X, batch.sentence_subword_len)\n batch_size, seqlen, hidden = embedded.size()\n w = self.subword_w\n outputs = []\n for bi in range(batch_size):\n for ti in range(batch.sentence_len[bi]):\n first = batch.token_starts[bi][ti+1]\n last = batch.token_starts[bi][ti+2] - 1\n f = embedded[bi, first]\n la = embedded[bi, last]\n outputs.append(w * f + (1-w) * la)\n return torch.stack(outputs)\n\n def _forward_last2(self, batch):\n X = torch.LongTensor(batch.input)\n X = to_cuda(X)\n embedded = self.embedder(X, batch.sentence_subword_len)\n batch_size, seqlen, hidden_size = embedded.size()\n outputs = []\n pad = to_cuda(torch.zeros(hidden_size))\n for bi in range(batch_size):\n for ti in range(batch.sentence_len[bi]):\n first = batch.token_starts[bi][ti+1]\n last = batch.token_starts[bi][ti+2] - 1\n if first == last:\n vec = torch.cat((embedded[bi, last], pad), 0)\n else:\n vec = torch.cat(\n (embedded[bi, last], embedded[bi, last-1]), 0)\n outputs.append(vec)\n return torch.stack(outputs)\n\n def _get_first_last_tensors(self, batch):\n subword_pooling = self.config.subword_pooling\n X = torch.LongTensor(batch.input)\n batch_size = X.size(0)\n batch_ids = []\n token_ids = []\n X = to_cuda(X)\n embedded = self.embedder(X, batch.sentence_subword_len)\n for bi in range(batch_size):\n sentence_len = batch.sentence_len[bi]\n batch_ids.append(np.repeat(bi, sentence_len))\n if subword_pooling == 'first':\n token_ids.append(batch.token_starts[bi][1:sentence_len + 1])\n elif subword_pooling == 'last':\n token_ids.append(\n np.array(batch.token_starts[bi][2:sentence_len + 2]) - 1)\n batch_ids = np.concatenate(batch_ids)\n token_ids = np.concatenate(token_ids)\n return embedded[batch_ids, token_ids]\n\n def _get_elementwise_pooled(self, batch):\n subword_pooling = self.config.subword_pooling\n X = torch.LongTensor(batch.input)\n batch_size = X.size(0)\n batch_ids = []\n token_ids = []\n X = to_cuda(X)\n embedded = self.embedder(X, batch.sentence_subword_len)\n outs = []\n for bi in range(batch_size):\n for ti in range(batch.sentence_len[bi]):\n first = batch.token_starts[bi][ti+1]\n last = batch.token_starts[bi][ti+2]\n if subword_pooling == 'sum':\n vec = embedded[bi, first:last].sum(axis=0)\n elif subword_pooling == 'avg':\n vec = embedded[bi, first:last].mean(axis=0)\n elif subword_pooling == 'max':\n vec = embedded[bi, first:last].max(axis=0).values\n outs.append(vec)\n return torch.stack(outs)\n\n def _forward_with_cache(self, batch):\n\n subword_pooling = self.config.subword_pooling\n\n if self.config.train_base_model:\n if subword_pooling in ('first', 'last'):\n out = self._get_first_last_tensors(batch)\n elif subword_pooling in ('max', 'sum', 'avg'):\n out = 
self._get_elementwise_pooled(batch)\n else:\n cache_key = tuple(np.array(batch.input).flat)\n if cache_key not in self._cache:\n if subword_pooling in ('first', 'last'):\n out = self._get_first_last_tensors(batch)\n elif subword_pooling in ('max', 'sum', 'avg'):\n out = self._get_elementwise_pooled(batch)\n self._cache[cache_key] = out\n out = self._cache[cache_key]\n return out\n\n def compute_loss(self, target, output):\n target = to_cuda(torch.LongTensor(target.labels)).view(-1)\n loss = self.criterion(output, target)\n return loss\n\n\nclass Word2vecEmbeddingClassifier(BaseModel):\n\n def __init__(self, config, dataset):\n super().__init__(config)\n self.dataset = dataset\n self.output_size = len(dataset.vocabs.label)\n self.dropout = nn.Dropout(self.config.dropout)\n self.mlp = MLP(\n input_size=self.dataset.embedding_size,\n layers=self.config.mlp_layers,\n nonlinearity=self.config.mlp_nonlinearity,\n output_size=self.output_size,\n )\n self.criterion = nn.CrossEntropyLoss()\n\n def forward(self, batch):\n mlp_in = to_cuda(torch.FloatTensor(batch.input))\n mlp_in = self.dropout(mlp_in)\n return self.mlp(mlp_in)\n\n def compute_loss(self, target, output):\n target = to_cuda(torch.LongTensor(target.label)).view(-1)\n loss = self.criterion(output, target)\n return loss\n"
] |
[
[
"torch.nn.Softmax",
"numpy.minimum",
"torch.cat",
"torch.zeros",
"numpy.concatenate",
"torch.no_grad",
"torch.FloatTensor",
"torch.cuda.is_available",
"torch.nn.CrossEntropyLoss",
"torch.nn.Dropout",
"torch.ones",
"numpy.arange",
"torch.nn.utils.rnn.pack_padded_sequence",
"numpy.repeat",
"torch.LongTensor",
"torch.zeros_like",
"torch.stack",
"numpy.array",
"torch.nn.LSTM"
]
] |
asonnino/fastpay
|
[
"d5e07a7ff1bd25174071f454872927bf58865fff"
] |
[
"scripts/latency_with_crash.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates.\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\nimport re\nimport sys\nimport os.path\nimport os, fnmatch\n\n\ndef parse(row_log_file, parsed_log_file):\n\tfname = os.path.abspath(row_log_file)\n\tdata = open(fname).read()\n\n\tlatency = ''.join(re.findall(r'Received certificate after [0-9]* us', data))\n\tlatency = re.findall(r'\\d+',latency)\n\tlatency = [int(v)/1000 for v in latency]\n\n\tprint(row_log_file)\n\tprint('%d ms (average), %d ms (std)' % (np.mean(latency), np.std(latency)))\n\tprint('\\n')\n\ndef find(pattern, path):\n result = []\n for root, dirs, files in os.walk(path):\n for name in files:\n if fnmatch.fnmatch(name, pattern):\n result.append(os.path.join(root, name))\n return result\n\n'''\nExperiment stes:\n\t1. Run a testnet with 10 authorities:\n\t\tfab set_hosts reset deploy\n\n\t2. Submit transactions:\n\t\tfab set_hosts quick_transfer\n\n\t3. Kill one node, and go at step 2; then repeat.\n'''\nif __name__== '__main__':\n\traw_logs = find('raw_log_latency_with_crash-*.txt', '.')\n\tparsed_logs = ['parsed_%s' % os.path.basename(raw_log) for raw_log in raw_logs]\n\t[parse(raw_log, parsed_log) for (raw_log, parsed_log) in zip(raw_logs, parsed_logs)]\n"
] |
[
[
"numpy.std",
"numpy.mean"
]
] |
matewszz/Python
|
[
"18b7fc96d3ed294d2002ed484941a0ee8cf18108"
] |
[
"OpenCV/Histogramas/h3.py"
] |
[
"import cv2 as cv\nimport numpy as np\n\nimg = cv.imread(cv.samples.findFile(\"everest.jpg\"))\nbgr_planes = cv.split(img)\nhistSize = 256\nhistRange = (0, 256)\naccumulate = False\nb_hist = cv.calcHist(bgr_planes, [0], None, [histSize], histRange, accumulate=accumulate)\ng_hist = cv.calcHist(bgr_planes, [1], None, [histSize], histRange, accumulate=accumulate)\nr_hist = cv.calcHist(bgr_planes, [2], None, [histSize], histRange, accumulate=accumulate)\n\nhist_w = 512\nhist_h = 400\nbin_w = int(round(hist_w/histSize))\nhistImage = np.zeros((hist_h, hist_w, 3), dtype=np.uint8)\n\ncv.normalize(b_hist, b_hist, alpha=0, beta=hist_h, norm_type=cv.NORM_MINMAX)\ncv.normalize(g_hist, g_hist, alpha=0, beta=hist_h, norm_type=cv.NORM_MINMAX)\ncv.normalize(r_hist, r_hist, alpha=0, beta=hist_h, norm_type=cv.NORM_MINMAX)\nfor i in range(1, histSize):\n\tcv.line(histImage, (bin_w * (i - 1), hist_h - int(np.round(b_hist[i - 1]))),\n\t\t\t(bin_w * (i), hist_h - int(np.round(b_hist[i]))),\n\t\t\t(255, 0, 0), thickness=1)\n\tcv.line(histImage, (bin_w * (i - 1), hist_h - int(np.round(g_hist[i - 1]))),\n\t\t\t(bin_w * (i), hist_h - int(np.round(g_hist[i]))),\n\t\t\t(0, 255, 0), thickness=1)\n\tcv.line(histImage, (bin_w * (i - 1), hist_h - int(np.round(r_hist[i - 1]))),\n\t\t\t(bin_w * (i), hist_h - int(np.round(r_hist[i]))),\n\t\t\t(0, 0, 255), thickness=1)\n\ncv.imshow('Source image', img)\ncv.imshow('calcHist Demo', histImage)\ncv.waitKey()"
] |
[
[
"numpy.round",
"numpy.zeros"
]
] |
swenkel/mish-cuda-dummy
|
[
"4b621fb90ff75e77adc185ddba74687f5dcd4794"
] |
[
"mish_cuda/__init__.py"
] |
[
"import torch\nimport torch.nn.functional as F\n\n\nclass MishCuda(torch.nn.Module):\n def __init__(self):\n super().__init__()\n \n def foward(self,x):\n return x * torch.tanh(F.softplus(x))"
] |
[
[
"torch.nn.functional.softplus"
]
] |
nocdoggo/Random-Data-Visualization
|
[
"6b67419e50411c65d1e70ce2c6d0b7c1cf8a13d4"
] |
[
"coddeC/data_analyze.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\n# ___\n#\n# <a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a>\n# ___\n# # Logistic Regression with Python\n#\n# For this lecture we will be working with the [Titanic Data Set from Kaggle](https://www.kaggle.com/c/titanic). This is a very famous data set and very often is a student's first step in machine learning!\n#\n# We'll be trying to predict a classification- survival or deceased.\n# Let's begin our understanding of implementing Logistic Regression in Python for classification.\n#\n# We'll use a \"semi-cleaned\" version of the titanic data set, if you use the data set hosted directly on Kaggle, you may need to do some additional cleaning not shown in this lecture notebook.\n#\n# ## Import Libraries\n# Let's import some libraries to get started!\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport re\nimport os\nimport multiprocessing\nimport sys\n\n#validates the date\ndef validate(date_text):\n try:\n datetime.datetime.strptime(date_text, '%Y-%m-%d')\n return True\n except ValueError:\n return False\n\n# output: pandas dataframe\ndef readfile_noh(filename):\n #performing task 1\n trash_offset = 25\n trash_index = 0\n train = pd.read_csv(filename, skiprows= range(0,7), dtype = str)\n train = train.loc[:, ~train.columns.str.contains('^Unnamed')]\n nrows = train.shape[0]\n #print(nrows)\n for x in range(nrows-trash_offset,nrows):\n if type(train.loc[x]['TMAX']) != str:\n trash_index = x\n break\n train.drop(range(trash_index,nrows), inplace = True)\n\n # performing task 2\n # check if the date data is in the right form\n date_pattern = re.compile(r'\\d\\d\\d\\d-\\d\\d-\\d\\d')\n searchObj = re.search(date_pattern, train['Date'][0])\n if not searchObj:\n nrows = train.shape[0]\n for x in range(0,nrows):\n train.at[x,'Date'] = datetime.datetime.strptime(train.at[x,'Date'], \"%m/%d/%Y\").strftime(\"%Y-%m-%d\")\n\n\n return train\n\n#train_1958\n\n# this function reads a csv file and process it by\n# 1. removing the trash\n# 2. get date into the same format\n# 3. get time into the same format\n# 4. 
fix the wind speed (change into string)\n# input: filename str ---eg.'2011-2018ord.csv'\n# output: pandas dataframe\ndef readfile_ord(filename):\n\n # performing task 1\n trash_offset = 25\n trash_index = 0\n train = pd.read_csv(filename, skiprows= range(0,8), dtype = {'Temp ('+'F)':str, 'Dewpt ('+'F)':str, 'Wind Spd ('+'mph)':str, 'Wind Direction ('+'deg)':str, 'Peak Wind Gust('+'mph)':str, 'Atm Press ('+'hPa)':str, 'Sea Lev Press ('+'hPa)':str, 'Precip ('+'in)':str} )\n train = train.loc[:, ~train.columns.str.contains('^Unnamed')]\n nrows = train.shape[0]\n #print(nrows)\n for x in range(nrows-trash_offset,nrows):\n if type(train.loc[x]['Time']) != str:\n trash_index = x\n break\n train.drop(range(trash_index,nrows), inplace = True)\n\n # performing task 2\n # check if the date data is in the right form\n date_pattern = re.compile(r'\\d\\d\\d\\d-\\d\\d-\\d\\d')\n searchObj = re.search(date_pattern, train['Date'][0])\n if not searchObj:\n nrows = train.shape[0]\n for x in range(0,nrows):\n train.at[x,'Date'] = datetime.datetime.strptime(train.at[x,'Date'], \"%m/%d/%Y\").strftime(\"%Y-%m-%d\")\n\n # performing task 3\n # check if time data is in the right form\n time_pattern = re.compile(r'^\\d:\\d\\d')\n searchObj = re.search(time_pattern, train['Time'][0])\n if searchObj:\n nrows = train.shape[0]\n for x in range(0,nrows):\n # task 3\n searchObj = re.search(time_pattern, train['Time'][x])\n if searchObj:\n train.at[x,'Time'] = '0' + train.at[x,'Time']\n\n # performing task 4\n train = train.astype({train.columns[4]:'str'})\n\n return train\n\n\n# In[ ]:\n\n\n# this function takes in a date and calculate the mean min max for the features\n# input: date -- string in the form of 'yyyy-mm-dd' eg:'1958-11-01'\n# train -- the main datafram to analyze\n# output-- list containing:\n# mean_result -- datafram for mean of this feature\n# min_result -- datafram of min of this feature\n# max_result -- datafram of max of this feature\n# invalid_feature\ndef analyze_by_day(date, train):\n #initialize\n mean_result = float('nan')\n min_result = float('nan')\n max_result = float('nan')\n invalid_feature = 0\n #readin feature data\n\n train_found = train[train['Date'] == date]\n\n #print(train_found)\n\n #train_found.shape[0]\n # calculate how many 'm' there are for each feature out of 24 days\n m_count = 0\n for x in range(0, train_found.shape[0]):\n # count the number of 'm'\n if train_found.iloc[x,2].lower() == 'm':\n m_count += 1\n # if there are total of 6 or more 'm' make this feature invalid\n if m_count >= 6:\n invalid_feature = 1\n\n #print(invalid_feature)\n if invalid_feature != 1:\n # now we caculate the info from this legit feature\n df2 = train_found.drop(columns =['Date','Time'])\n df1 = df2.apply(pd.to_numeric, errors='coerce')\n df1.fillna(value=df1.mean(), inplace = True)\n\n mean_result = df1.mean()[0]\n min_result = df1.min()[0]\n max_result = df1.max()[0]\n\n\n\n return mean_result,min_result,max_result,invalid_feature\n\n# this function takes in a date and calculate the mean min max for the features\n# input: date -- string in the form of 'yyyy-mm-dd' eg:'1958-11-01'\n# train -- the main datafram to analyze\n# output-- list containing:\n# sum_result -- datafram for sum of each day\n# invalid_feature\ndef analyze_by_day_precip(date, train):\n #initialize\n sum_result = float('nan')\n min_result = float('nan')\n max_result = float('nan')\n invalid_feature = 0\n #readin feature data\n\n train_found = train[train['Date'] == date]\n\n #print(train_found)\n\n #train_found.shape[0]\n # 
calculate how many 'm' there are for each feature out of 24 days\n m_count = 0\n for x in range(0, train_found.shape[0]):\n # count the number of 'm'\n if train_found.iloc[x,2].lower() == 'm':\n m_count += 1\n # if there are total of 6 or more 'm' make this feature invalid\n if m_count >= 6:\n invalid_feature = 1\n\n #print(invalid_feature)\n if invalid_feature != 1:\n # now we caculate the info from this legit feature\n df2 = train_found.drop(columns =['Date','Time'])\n df1 = df2.apply(pd.to_numeric, errors='coerce')\n df1.fillna(value=0, inplace = True)\n\n sum_result = df1.sum()[0]\n\n return sum_result,min_result,max_result,invalid_feature\n\n\ndef analyze_by_feature_1(feature):\n mean_temp = []\n min_temp =[]\n max_temp = []\n invalid_temp = []\n train_feature = pd.read_csv(feature+'.csv', dtype = str)\n\n\n train_index = pd.unique(train_feature['Date'])\n train_index = list(train_index)\n #print(train_index)\n for i in range(len(train_index)):\n #print(feature)\n #print(train_index[i])\n temp = analyze_by_day(train_index[i], train_feature)\n\n mean_temp.append(temp[0])\n min_temp.append(temp[1])\n max_temp.append(temp[2])\n invalid_temp.append(temp[3])\n\n '''\n print('this is the ')\n print(i)\n print('\\n')\n print(mean_temp)\n print('\\n')\n print(min_temp)\n print('\\n')\n print(max_temp)\n print('\\n')\n print(invalid_temp)\n '''\n # group them together\n\n\n mean_df = pd.DataFrame(mean_temp)\n min_df = pd.DataFrame(min_temp)\n max_df = pd.DataFrame(max_temp)\n invalid_df = pd.DataFrame(invalid_temp)\n\n\n # calculate mean and other stuff\n mean_df.fillna(value=mean_df.mean(), inplace = True)\n min_df.fillna(value=min_df.mean(), inplace = True)\n max_df.fillna(value=mean_df.mean(), inplace = True)\n\n mean_final = mean_df.mean()\n min_final = min_df.min()\n max_final = max_df.max()\n invalid_final = invalid_df.sum()\n\n\n\n\n return mean_final[0],min_final[0],max_final[0],invalid_final[0]\n\n\"\"\"\ntakes the feature, read the data from feature.csv, for each day, calculate the daily mean/max/min and invalid days\n\n\"\"\"\ndef analyze_by_feature_2(feature):\n\n print('im here')\n sum_temp = []\n mean_temp = []\n min_temp =[]\n max_temp = []\n invalid_temp = []\n train_feature = pd.read_csv(feature+'.csv', dtype = str)\n\n\n train_index = pd.unique(train_feature['Date'])\n train_index = list(train_index)\n #print(train_index)\n if feature == 'precip':\n\n #if current feature is precip, calculate the sum for each day and invalid\n for i in range(len(train_index)):\n #print(feature)\n #print(train_index[i])\n temp = analyze_by_day_precip(train_index[i], train_feature)\n\n sum_temp.append(temp[0])\n min_temp.append(temp[1])\n max_temp.append(temp[2])\n invalid_temp.append(temp[3])\n\n #group these days together\n train_df = pd.DataFrame(train_index)\n sum_df = pd.DataFrame(sum_temp)\n min_df = pd.DataFrame(min_temp)\n max_df = pd.DataFrame(max_temp)\n invalid_df = pd.DataFrame(invalid_temp)\n\n #write the feature out into another file\n temp_write = pd.concat([train_df,max_df,min_df,sum_df],axis = 1)\n temp_write.columns = ['Date','max','min','sum']\n temp_write.to_csv(feature+'_test.csv',index = False, na_rep = float('nan'))\n return\n\n else:\n # for other features, do the same except get max/min/mean\n for i in range(len(train_index)):\n #print(feature)\n #print(train_index[i])\n temp = analyze_by_day(train_index[i], train_feature)\n\n mean_temp.append(temp[0])\n min_temp.append(temp[1])\n max_temp.append(temp[2])\n invalid_temp.append(temp[3])\n\n '''\n print('this is the ')\n 
print(i)\n print('\\n')\n print(mean_temp)\n print('\\n')\n print(min_temp)\n print('\\n')\n print(max_temp)\n print('\\n')\n print(invalid_temp)\n '''\n # group them together\n train_df = pd.DataFrame(train_index)\n mean_df = pd.DataFrame(mean_temp)\n min_df = pd.DataFrame(min_temp)\n max_df = pd.DataFrame(max_temp)\n invalid_df = pd.DataFrame(invalid_temp)\n\n\n #write the feature out into another file\n temp_write = pd.concat([train_df,max_df,min_df,mean_df],axis = 1)\n temp_write.columns = ['Date','max','min','mean']\n temp_write.to_csv(feature+'_test.csv',index = False, na_rep = float('nan'))\n return\n\ndef main():\n multiprocessing.freeze_support()\n flag = input('Please input what you want to do( 1 for outputting a range and 2 for outputting daily result)')\n path = input('Please type in the path of your data folder:')\n # read all the csv files\n file_selection = ''\n while 1==1:\n file_selection = input('Please input the location of data you want to select '+ '(ugn, ord, or noh'+'):')\n\n if file_selection == 'ugn' or file_selection == 'ord' or file_selection == 'noh':\n break\n listOfFiles = os.listdir(path)\n listOfFiles.sort()\n file_pattern_ord = re.compile(r'\\d\\d\\d\\dord.csv')\n file_pattern_ugn = re.compile(r'\\d\\d\\d\\dugn.csv')\n file_pattern_noh = re.compile(r'\\d\\d\\d\\ddugn.csv')\n if file_selection == 'ugn':\n file_pattern = file_pattern_ugn\n elif file_selection == 'ord':\n file_pattern = file_pattern_ord\n else:\n file_pattern = file_pattern_noh\n train_temp = pd.DataFrame()\n for x in range(0,len(listOfFiles)):\n searchObj = re.search(file_pattern, listOfFiles[x])\n if searchObj:\n print (listOfFiles[x] )\n if file_selection == 'ugn' or file_selection == 'ord':\n train_temp = pd.concat([train_temp,readfile_ord(path+'/'+listOfFiles[x])], axis = 0, ignore_index=True)\n else:\n train_temp = pd.concat([train_temp,readfile_noh(path+'/'+listOfFiles[x])], axis = 0, ignore_index=True)\n if train_temp.empty:\n print('Cannot find any file please check your file name again.')\n return\n #print(train_temp)\n # check ord time span\n while file_selection == 'ord':\n first_date = input(\"From 1958-11-01 to 2018-12-30, please input a valid starting date as in yyyy-mm-dd: \")\n while validate(first_date) == False:\n first_date = input(\"Wrong input! From 1958-11-01 to 2018-12-30, please input a valid starting date as in yyyy-mm-dd: \")\n\n d1 = datetime.datetime.strptime(first_date, \"%Y-%m-%d\").date()\n if d1 >=datetime.date(1958,11,1) and d1 <=datetime.date(2018,12,30):\n break\n\n while file_selection == 'ord':\n second_date = input(\"From 1958-11-02 to 2018-12-31, please input the ending date as in yyyy-mm-dd: \")\n while validate(second_date) == False:\n second_date = input(\"Wrong input! From 1958-11-02 to 2018-12-31, please input the ending date as in yyyy-mm-dd: \")\n d2 = datetime.datetime.strptime(second_date, \"%Y-%m-%d\").date()\n if d2 >=datetime.date(1958,11,2) and d2 <=datetime.date(2018,12,31):\n break\n\n # check ugn time span\n while file_selection == 'ugn':\n first_date = input(\"From 1989-04-21 to 2018-12-30, please input a valid starting date as in yyyy-mm-dd: \")\n while validate(first_date) == False:\n first_date = input(\"Wrong input! 
From 1989-04-21 to 2018-12-30, please input a valid starting date as in yyyy-mm-dd: \")\n d1 = datetime.datetime.strptime(first_date, \"%Y-%m-%d\").date()\n if d1 >=datetime.date(1989,4,21) and d1 <=datetime.date(2018,12,30):\n break\n\n while file_selection == 'ugn':\n second_date = input(\"From 1989-04-22 to 2018-12-31, please input the ending date as in yyyy-mm-dd: \")\n while validate(second_date) == False:\n second_date = input(\"Wrong input! From 1989-04-22 to 2018-12-31, please input the ending date as in yyyy-mm-dd: \")\n d2 = datetime.datetime.strptime(second_date, \"%Y-%m-%d\").date()\n if d2 >=datetime.date(1989,4,22) and d2 <=datetime.date(2018,12,31):\n break\n\n # check noh time span\n while file_selection == 'noh':\n first_date = input(\"From 1923-01-01 to 2002-07-30, please input a valid starting date as in yyyy-mm-dd: \")\n while validate(first_date) == False:\n first_date = input(\"Wrrong input! From 1923-01-01 to 2002-07-30, please input a valid starting date as in yyyy-mm-dd: \")\n d1 = datetime.datetime.strptime(first_date, \"%Y-%m-%d\").date()\n if d1 >=datetime.date(1923,1,1) and d1 <=datetime.date(2002,7,30):\n break\n\n while file_selection == 'noh':\n second_date = input(\"From 1923-01-02 to 2002-07-31, please input the ending date as in yyyy-mm-dd: \")\n while validate(second_date) == False:\n second_date = input(\"Wrong input! From 1923-01-02 to 2002-07-31, please input the ending date as in yyyy-mm-dd: \")\n d2 = datetime.datetime.strptime(second_date, \"%Y-%m-%d\").date()\n if d2 >=datetime.date(1923,1,2) and d2 <=datetime.date(2002,7,31):\n break\n\n delta = d2-d1\n\n while delta.days <= 0:\n print('Your starting date is later than your ending date, try again please')\n first_date = input(\"Please input a valid starting date as in yyyy-mm-dd: \")\n d1 = datetime.datetime.strptime(first_date, \"%Y-%m-%d\").date()\n second_date = input(\"Please input a valid ending date as in yyyy-mm-dd: \")\n d2 = datetime.datetime.strptime(second_date, \"%Y-%m-%d\").date()\n delta = d2-d1\n\n if delta.days >0:\n\n first_index_list = train_temp.index[train_temp['Date'] == first_date].tolist()\n second_index_list = train_temp.index[train_temp['Date'] == second_date].tolist()\n\n while(len(first_index_list) == 0):\n d1 = d1 + datetime.timedelta(days=1)\n first_date = d1.strftime('%Y-%m-%d')\n #print(second_date)\n first_index_list = train_temp.index[train_temp['Date'] == first_date].tolist()\n first_index = first_index_list[0]\n\n while(len(second_index_list) == 0):\n d2 = d2 - datetime.timedelta(days=1)\n second_date = d2.strftime('%Y-%m-%d')\n #print(second_date)\n second_index_list = train_temp.index[train_temp['Date'] == second_date].tolist()\n second_index = second_index_list[-1]\n\n if d1>d2:\n print('there is no data in your time span')\n sys.exit()\n #print(second_index)\n\n else:\n train_temp = train_temp.iloc[first_index:second_index+1]\n\n\n #print(train_temp)\n # if the file is noh, simply loop through and replace T with 0.001 and M with nan\n if file_selection == 'noh':\n train_temp.replace({'t':'0.001','T':'0.001','m':'nan','M':'nan' },inplace = True)\n train_temp.rename(columns = {'PRCP':'precip','TMAX':'Tmax','TMIN':'Tmin','MEAN':'Tmean'},inplace = True)\n train_temp = train_temp.astype({'precip':'float','Tmax':'float','Tmin':'float','Tmean':'float'})\n\n\n else:\n #split the data into 8 different files\n file_col = ['temp','dewpt','windS','windD','peak','atm','sea','precip']\n train_1 = train_temp.iloc[:,[0,1,2]]\n train_1.to_csv(file_col[0]+'.csv',encoding = 
'utf-8',index = False)\n train_2 = train_temp.iloc[:,[0,1,3]]\n train_2.to_csv(file_col[1]+'.csv',encoding = 'utf-8',index = False)\n train_3 = train_temp.iloc[:,[0,1,4]]\n train_3.to_csv(file_col[2]+'.csv',encoding = 'utf-8',index = False)\n train_4 = train_temp.iloc[:,[0,1,5]]\n train_4.to_csv(file_col[3]+'.csv',encoding = 'utf-8',index = False)\n train_5 = train_temp.iloc[:,[0,1,6]]\n train_5.to_csv(file_col[4]+'.csv',encoding = 'utf-8',index = False)\n train_6 = train_temp.iloc[:,[0,1,7]]\n train_6.to_csv(file_col[5]+'.csv',encoding = 'utf-8',index = False)\n train_7 = train_temp.iloc[:,[0,1,8]]\n train_7.to_csv(file_col[6]+'.csv',encoding = 'utf-8',index = False)\n train_8 = train_temp.iloc[:,[0,1,9]]\n train_8.to_csv(file_col[7]+'.csv',encoding = 'utf-8',index = False)\n\n #parallel process each feature\n pool = multiprocessing.Pool(2)\n\n #write out output\n if flag == '1':\n if file_selection == 'ord' or file_selection == 'ugn':\n\n result = pool.map(analyze_by_feature_1, file_col)\n result_index = train_temp.columns[2:11]\n final_result = pd.DataFrame(result, index =result_index, columns = ['mean','min','max','No. of invalid'],dtype=float)\n #final_result = pd.DataFrame(result, columns = ['mean','min','max','No. of invalid'],dtype=float)\n final_result.to_csv( first_date+'-'+second_date+file_selection+'.csv',encoding='utf-8',na_rep = float('nan'))\n\n if file_selection == 'noh':\n train_cal = train_temp[['precip','Tmax','Tmin','Tmean']]\n result_mean = train_cal.mean()\n result_max = train_cal.max()\n result_min = train_cal.min()\n result_inv = train_cal.isnull().sum()\n final_result = pd.DataFrame(list(zip(result_mean,result_max,result_min,result_inv)),columns =['mean', 'max', 'min', 'no.invalid'] , index = ['precip','Tmax','Tmin','Tmean'])\n final_result.to_csv(first_date+'-'+second_date+file_selection+'.csv',encoding='utf-8')\n\n if flag =='2':\n if file_selection == 'ord' or file_selection == 'ugn':\n result = pool.map(analyze_by_feature_2, file_col)\n #get the daily result into a single file\n daily_output = pd.DataFrame()\n for i in range(0,8):\n feature_daily = pd.read_csv(file_col[i]+'_test.csv')\n temp_result = feature_daily.iloc[:,3]\n if i==0:\n daily_output = feature_daily\n else:\n daily_output.insert(i+3,file_col[i],temp_result)\n daily_output.to_csv( first_date+'-'+second_date+file_selection+'.csv',encoding='utf-8',na_rep = float('nan'))\n\n for i in range(0,8):\n os.remove(file_col[i]+\"_test.csv\")\n\n if file_selection == 'noh':\n train_temp.to_csv( first_date+'-'+second_date+file_selection+'.csv',encoding='utf-8',na_rep = float('nan'),index = False)\n\n if file_selection == 'ord' or file_selection == 'ugn':\n for i in range(0,8):\n os.remove(file_col[i]+\".csv\")\n\n\n return\nif __name__ == '__main__':\n main()\n restart = input('Would you like to restart the program? (y for yes, anything else for no)')\n while restart == 'y':\n main()\n restart = input('Would you like to restart the program? (y for yes, anything else for no)')\n\n sys.exit()\n"
] |
[
[
"pandas.concat",
"pandas.read_csv",
"pandas.unique",
"pandas.DataFrame"
]
] |
soraxas/tensorboard-termplot
|
[
"e5c9632c5935d0cef438339bdef6845b91929ee9"
] |
[
"termplot/backend/matplotlib_plot.py"
] |
[
"import io\nimport os\nimport sys\nfrom functools import lru_cache\nfrom shutil import which\nfrom subprocess import Popen, PIPE, STDOUT\n\nimport matplotlib.colors as mcolors\nimport matplotlib.pyplot as plt\n\nfrom .base_plotter import Plotter\n\n\nclass MatplotlibPlot(Plotter):\n @property\n def unsupported_options(self):\n return []\n\n def plot(self, *args, label=\"\", **kwargs):\n self.cur_ax.plot(*args, label=label, **kwargs)\n\n def scatter(self, *args, label=\"\", **kwargs):\n self.cur_ax.scatter(*args, label=label, **kwargs)\n\n def xlim(self, row, col, limits):\n self.cur_ax.set_xlim(limits)\n\n def ylim(self, row, col, limits):\n self.cur_ax.set_ylim(limits)\n\n def xlog(self):\n self.cur_ax.set_xscale(\"log\")\n\n def ylog(self):\n self.cur_ax.set_yscale(\"log\")\n\n def xsymlog(self):\n self.cur_ax.set_xscale(\"symlog\")\n\n def ysymlog(self):\n self.cur_ax.set_yscale(\"symlog\")\n\n def legend(self):\n self.cur_ax.legend()\n\n def xlabel(self, xlabel, **kwargs):\n # only add xlabel to the bottom subplot\n if self.match_subplot(\n [(self.n_row, i) for i in range(1, self.n_col + 1)],\n kwargs[\"cur_row\"],\n kwargs[\"cur_col\"],\n ):\n self.cur_ax.set_xlabel(xlabel)\n\n def ylabel(self, ylabel, **kwargs):\n self.cur_ax.set_ylabel(ylabel)\n\n def canvas_color(self):\n self.fig.set_facecolor(self.args.canvas_color)\n\n def axes_color(self):\n self.cur_ax.set_facecolor(self.args.axes_color)\n\n def ticks_color(self):\n self.cur_ax.tick_params(colors=self.args.ticks_color)\n self.cur_ax.xaxis.label.set_color(self.args.ticks_color)\n self.cur_ax.yaxis.label.set_color(self.args.ticks_color)\n for spine in self.cur_ax.spines.values():\n spine.set_edgecolor(self.args.ticks_color)\n\n def plotsize(self):\n pass\n\n def target_subplot(self, row, col):\n if self.n_row == 1 and self.n_col == 1:\n self.cur_ax = self.axs\n elif self.n_row == 1:\n self.cur_ax = self.axs[col - 1]\n elif self.n_col == 1:\n self.cur_ax = self.axs[row - 1]\n else:\n self.cur_ax = self.axs[row - 1][col - 1]\n\n def create_subplot(self, row, col):\n super().create_subplot(row, col)\n self.fig, self.axs = plt.subplots(row, col, figsize=self.args.plotsize)\n\n def set_title(self, title):\n kwargs = {}\n if self.args.ticks_color is not None:\n kwargs[\"color\"] = self.args.ticks_color\n self.cur_ax.set_title(title, fontsize=10, **kwargs)\n\n def clear_current_figure(self):\n pass\n # self.cur_ax.clf()\n\n def clear_terminal_printed_lines(self):\n pass\n # self.fig.clear()\n\n def show(self):\n self.fig.tight_layout()\n plt.show()\n\n def _get_image_raw_bytes(self):\n self.fig.tight_layout()\n string_io_bytes = io.BytesIO()\n plt.savefig(string_io_bytes, format=\"png\")\n string_io_bytes.seek(0)\n return string_io_bytes.read()\n\n def as_image_raw_bytes(self):\n if self.args.timg:\n size = os.get_terminal_size()\n my_env = os.environ.copy()\n popen_args = [\"timg\", \"-\", f\"-g{size.columns}x{size.lines}\"]\n if my_env[\"TERM\"] == \"xterm-kitty\":\n popen_args += [\"-pkitty\"]\n p = Popen(popen_args, stdout=PIPE, stdin=PIPE, stderr=STDOUT, env=my_env)\n grep_stdout = p.communicate(input=self._get_image_raw_bytes())[0]\n sys.stdout.write(grep_stdout.decode())\n else:\n sys.stdout.buffer.write(self._get_image_raw_bytes())\n\n @property\n def fixed_color_seq(self):\n return mcolors.TABLEAU_COLORS\n\n @property\n def generator_color_seq(self):\n while True:\n yield from mcolors.TABLEAU_COLORS\n\n def close(self):\n plt.close(self.fig)\n\n\nclass MatplotlibPlotTerminal(MatplotlibPlot):\n def __init__(self, args):\n 
super().__init__(args)\n self.backend_program_cmds = self.get_supported_backend()\n if self.backend_program_cmds is None:\n raise RuntimeError(\"No supported program found (e.g. timg, .\")\n\n @classmethod\n @lru_cache()\n def get_supported_backend(cls):\n \"\"\"Determine if the system has necessary binary to support this plotter.\"\"\"\n if which(\"timg\"):\n return [\"timg\", \"-\"]\n elif which(\"kitty\"):\n return [\"kitty\", \"+kitten\", \"icat\"]\n\n def show(self):\n program = Popen(\n self.backend_program_cmds,\n stdin=PIPE,\n bufsize=-1,\n )\n # pipe image data to program\n self.fig.savefig(program.stdin)\n\n program.stdin.close() # done (no more input)\n"
] |
[
[
"matplotlib.pyplot.close",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
]
] |
1e-to/dpnp
|
[
"4a347d2d93a890a5435629c971ef6f05ae0c1d30"
] |
[
"dpnp/dpnp_iface_sorting.py"
] |
[
"# cython: language_level=3\n# distutils: language = c++\n# -*- coding: utf-8 -*-\n# *****************************************************************************\n# Copyright (c) 2016-2020, Intel Corporation\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# - Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# - Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n# THE POSSIBILITY OF SUCH DAMAGE.\n# *****************************************************************************\n\n\"\"\"\nInterface of the sorting function of the dpnp\n\nNotes\n-----\nThis module is a face or public interface file for the library\nit contains:\n - Interface functions\n - documentation for the functions\n - The functions parameters check\n\n\"\"\"\n\n\nimport numpy\n\nfrom dpnp.dpnp_algo import *\nfrom dpnp.dparray import dparray\nfrom dpnp.dpnp_utils import *\n\n\n__all__ = [\n 'argsort',\n 'sort'\n]\n\n\ndef argsort(in_array1, axis=-1, kind=None, order=None):\n \"\"\"\n Returns the indices that would sort an array.\n\n For full documentation refer to :obj:`numpy.argsort`.\n\n Limitations\n -----------\n Input array is supported as :obj:`dpnp.ndarray`.\n Otherwise the function will be executed sequentially on CPU.\n Prameters ``axis`` is supported only with default value ``-1``.\n Prameters ``kind`` is supported only with default value ``None``.\n Prameters ``order`` is supported only with default value ``None``.\n Input array data types are limited by supported DPNP :ref:`Data types`.\n\n See Also\n --------\n :obj:`dpnp.sort` : Describes sorting algorithms used.\n :obj:`dpnp.lexsort` : Indirect stable sort with multiple keys.\n :obj:`dpnp.argpartition` : Indirect partial sort.\n :obj:`dpnp.take_along_axis` : Apply ``index_array`` from argsort to\n an array as if by calling sort.\n\n Examples\n --------\n >>> import dpnp as np\n >>> x = np.array([3, 1, 2])\n >>> out = np.argsort(x)\n >>> [i for i in out]\n [1, 2, 0]\n\n \"\"\"\n\n is_dparray1 = isinstance(in_array1, dparray)\n\n if (not use_origin_backend(in_array1) and is_dparray1):\n if axis != -1:\n checker_throw_value_error(\"argsort\", \"axis\", axis, -1)\n if kind is not None:\n checker_throw_value_error(\"argsort\", \"kind\", type(kind), None)\n if order is not None:\n checker_throw_value_error(\"argsort\", \"order\", type(order), None)\n\n return dpnp_argsort(in_array1)\n\n return numpy.argsort(in_array1, axis, kind, order)\n\n\ndef sort(x1, 
**kwargs):\n \"\"\"\n Return a sorted copy of an array.\n\n For full documentation refer to :obj:`numpy.sort`.\n\n Limitations\n -----------\n Input array is supported as :obj:`dpnp.ndarray`.\n Keyword arguments ``kwargs`` are currently unsupported.\n Dimension of input array is supported to be equal to ``1``.\n Otherwise the function will be executed sequentially on CPU.\n Input array data types are limited by supported DPNP :ref:`Data types`.\n\n See Also\n --------\n :obj:`dpnp.argsort` : Indirect sort.\n :obj:`dpnp.lexsort` : Indirect stable sort on multiple keys.\n :obj:`dpnp.searchsorted` : Find elements in a sorted array.\n :obj:`dpnp.partition` : Partial sort.\n\n Examples\n --------\n >>> import dpnp as np\n >>> a = np.array([1, 4, 3, 1])\n >>> out = np.sort(a)\n >>> [i for i in out]\n [1, 1, 3, 4]\n\n \"\"\"\n if not use_origin_backend(x1) and not kwargs:\n if not isinstance(x1, dparray):\n pass\n elif x1.ndim != 1:\n pass\n else:\n return dpnp_sort(x1)\n\n return call_origin(numpy.sort, x1, **kwargs)\n"
] |
[
[
"numpy.argsort"
]
] |
YinAoXiong/ZCls
|
[
"8aeea3640f8456937db35d043e37cf2c03ac9017"
] |
[
"zcls/model/recognizers/resnet/official_resnest.py"
] |
[
"# -*- coding: utf-8 -*-\n\n\"\"\"\n@date: 2021/1/7 下午7:51\n@file: official_resnest.py\n@author: zj\n@description: \n\"\"\"\nfrom abc import ABC\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.modules.module import T\nfrom resnest.torch.resnest import resnest50, resnest101, resnest200, resnest269\nfrom resnest.torch.resnet import ResNet, Bottleneck\n\nfrom zcls.config.key_word import KEY_OUTPUT\nfrom zcls.model import registry\nfrom zcls.model.norm_helper import freezing_bn\n\n\nclass OfficialResNeSt(nn.Module, ABC):\n\n def __init__(self,\n arch='resnest50_2s2x40d',\n dropout_rate=0.,\n num_classes=1000,\n fix_bn=False,\n partial_bn=False,\n pretrained=\"\",\n pretrained_num_classes=1000,\n ):\n super(OfficialResNeSt, self).__init__()\n\n self.num_classes = num_classes\n self.fix_bn = fix_bn\n self.partial_bn = partial_bn\n\n if arch == 'resnest50':\n self.model = resnest50(num_classes=pretrained_num_classes,\n final_drop=dropout_rate\n )\n elif arch == 'resnest50_2s2x40d':\n radix = 2\n groups = 2\n width_per_group = 40\n avg_first = False\n self.model = ResNet(Bottleneck,\n [3, 4, 6, 3],\n radix=radix,\n groups=groups,\n bottleneck_width=width_per_group,\n deep_stem=True,\n stem_width=32,\n avg_down=True,\n avd=True,\n avd_first=avg_first,\n final_drop=dropout_rate,\n num_classes=pretrained_num_classes\n )\n elif arch == 'resnest50_fast_2s2x40d':\n radix = 2\n groups = 2\n width_per_group = 40\n avg_first = True\n self.model = ResNet(Bottleneck,\n [3, 4, 6, 3],\n radix=radix,\n groups=groups,\n bottleneck_width=width_per_group,\n deep_stem=True,\n stem_width=32,\n avg_down=True,\n avd=True,\n avd_first=avg_first,\n final_drop=dropout_rate,\n num_classes=pretrained_num_classes\n )\n elif arch == 'resnest50_fast_2s1x64d':\n radix = 2\n groups = 1\n width_per_group = 64\n avg_first = True\n self.model = ResNet(Bottleneck,\n [3, 4, 6, 3],\n radix=radix,\n groups=groups,\n bottleneck_width=width_per_group,\n deep_stem=True,\n stem_width=32,\n avg_down=True,\n avd=True,\n avd_first=avg_first,\n final_drop=dropout_rate,\n num_classes=pretrained_num_classes\n )\n elif arch == 'resnest101':\n self.model = resnest101(num_classes=pretrained_num_classes,\n final_drop=dropout_rate\n )\n elif arch == 'resnest200':\n self.model = resnest200(num_classes=pretrained_num_classes,\n final_drop=dropout_rate\n )\n elif arch == 'resnest269':\n self.model = resnest269(num_classes=pretrained_num_classes,\n final_drop=dropout_rate\n )\n else:\n raise ValueError('no such value')\n\n self.init_weights(pretrained,\n pretrained_num_classes,\n num_classes)\n\n def init_weights(self, pretrained, pretrained_num_classes, num_classes):\n if pretrained != \"\":\n self.model.load_state_dict(torch.load(pretrained))\n if num_classes != pretrained_num_classes:\n fc = self.model.fc\n fc_features = fc.in_features\n\n fc = nn.Linear(fc_features, num_classes)\n nn.init.normal_(fc.weight, 0, 0.01)\n nn.init.zeros_(fc.bias)\n\n self.model.fc = fc\n\n def train(self, mode: bool = True) -> T:\n super(OfficialResNeSt, self).train(mode=mode)\n\n if mode and (self.partial_bn or self.fix_bn):\n freezing_bn(self, partial_bn=self.partial_bn)\n\n return self\n\n def forward(self, x):\n x = self.model(x)\n\n return {KEY_OUTPUT: x}\n\n\[email protected]('OfficialResNeSt')\ndef build_official_resnest(cfg):\n # for recognizer\n pretrained = cfg.MODEL.RECOGNIZER.PRETRAINED\n pretrained_num_classes = cfg.MODEL.RECOGNIZER.PRETRAINED_NUM_CLASSES\n fix_bn = cfg.MODEL.NORM.FIX_BN\n partial_bn = cfg.MODEL.NORM.PARTIAL_BN\n # for backbone\n arch 
= cfg.MODEL.BACKBONE.ARCH\n # for head\n dropout_rate = cfg.MODEL.HEAD.DROPOUT_RATE\n num_classes = cfg.MODEL.HEAD.NUM_CLASSES\n\n return OfficialResNeSt(arch=arch,\n dropout_rate=dropout_rate,\n num_classes=num_classes,\n fix_bn=fix_bn,\n partial_bn=partial_bn,\n pretrained=pretrained,\n pretrained_num_classes=pretrained_num_classes\n )\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.init.normal_",
"torch.load",
"torch.nn.init.zeros_"
]
] |
tangqi/deepxde
|
[
"86fae8f971154557c775899d8389f514b6f09aa6"
] |
[
"deepxde/losses.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\ndef mean_squared_error(y_true, y_pred):\n # Warning:\n # - Do not use ``tf.losses.mean_squared_error``, which casts `y_true` and `y_pred` to ``float32``.\n # - Do not use ``tf.keras.losses.MSE``, which computes the mean value over the last dimension.\n # - Do not use ``tf.keras.losses.MeanSquaredError()``, which casts loss to ``float32``\n # when calling ``compute_weighted_loss()`` calling ``scale_losses_by_sample_weight()``,\n # although it finally casts loss back to the original type.\n return tf.reduce_mean(tf.math.square(y_true - y_pred))\n\n\ndef mean_absolute_percentage_error(y_true, y_pred):\n return 100 * tf.reduce_mean(tf.abs(y_true - y_pred) / y_true)\n\n\ndef softmax_cross_entropy(y_true, y_pred):\n return tf.losses.softmax_cross_entropy(y_true, y_pred)\n\n\ndef get(identifier):\n loss_identifier = {\n \"mean squared error\": mean_squared_error,\n \"MSE\": mean_squared_error,\n \"mse\": mean_squared_error,\n \"mean absolute percentage error\": mean_absolute_percentage_error,\n \"MAPE\": mean_absolute_percentage_error,\n \"mape\": mean_absolute_percentage_error,\n \"softmax cross entropy\": softmax_cross_entropy,\n }\n\n if isinstance(identifier, str):\n return loss_identifier[identifier]\n elif callable(identifier):\n return identifier\n else:\n raise ValueError(\"Could not interpret loss function identifier:\", identifier)\n"
] |
[
[
"tensorflow.losses.softmax_cross_entropy",
"tensorflow.abs",
"tensorflow.math.square"
]
] |
david-a-parry/muver
|
[
"db7ffcc873ed8d7cb6f9abdd898576eed8c3cb08"
] |
[
"muver/depth_distribution.py"
] |
[
"import numpy\r\nfrom scipy.stats import norm\r\nfrom scipy.optimize import curve_fit\r\nfrom utils import read_cnv_bedgraph\r\n\r\nfrom fitting import gaussian\r\n\r\n\r\ndef calculate_depth_distribution(depths, output):\r\n '''\r\n For a list of depths, create a histogram of depth values. Then\r\n fit those values using a normal distribution. Return fit parameters. Output\r\n fit parameters and the histogram in a TXT file.\r\n '''\r\n depth_max = max(depths)\r\n hist = numpy.histogram(depths, bins=range(1, depth_max + 2), density=True)\r\n\r\n p0_mu, p0_sigma = norm.fit(depths)\r\n\r\n popt, pcov = curve_fit(gaussian, hist[1][:-1], hist[0], p0=[p0_mu, p0_sigma])\r\n mu, sigma = popt\r\n sigma = abs(sigma)\r\n\r\n # TODO: this is largely copied from calculate_bias_distribution\r\n with open(output, 'w') as OUT:\r\n\r\n OUT.write('Average depth per copy: {}\\n'.format(str(mu)))\r\n OUT.write('Standard deviation of depths per copy: {}\\n\\n'.format(str(sigma)))\r\n\r\n OUT.write('Depth distribution:\\n\\n')\r\n OUT.write('\\t'.join(['Depth', 'Frequency', 'Fit value']) +\r\n '\\n')\r\n\r\n for hist_value, _bin in zip(hist[0], hist[1]):\r\n OUT.write('\\t'.join((\r\n str(_bin),\r\n str(hist_value),\r\n str(norm.pdf(_bin, mu, sigma)),\r\n )) + '\\n')\r\n\r\n return mu, sigma\r\n\r\n\r\ndef calculate_depth_distribution_bedgraph(in_bedgraph, output, ploidy=2,\r\n cnv_bedgraph_file=None):\r\n '''\r\n Read depths from a bedGraph file, determine coverage per copy,\r\n and pass to calculate_depth_distribution.\r\n '''\r\n if cnv_bedgraph_file is not None:\r\n cnv_regions = read_cnv_bedgraph(cnv_bedgraph_file)\r\n else:\r\n cnv_regions = dict()\r\n\r\n depths = []\r\n\r\n with open(in_bedgraph) as f:\r\n for line in f:\r\n\r\n chromosome, start, end, coverage = line.strip().split()\r\n for i in range(int(start), int(end)):\r\n if (chromosome, i) in cnv_regions:\r\n depths.append(int(float(coverage) / \\\r\n cnv_regions[(chromosome, i)]))\r\n else:\r\n depths.append(int(float(coverage) / ploidy))\r\n\r\n return calculate_depth_distribution(depths, output)\r\n\r\n\r\ndef calculate_depth_distribution_mpileup(input_mpileup, output, ploidy,\r\n cnv_regions):\r\n '''\r\n Read depths from a mpileup TXT file, determine coverage per copy,\r\n and pass to calculate_depth_distribution.\r\n '''\r\n depths = []\r\n\r\n with open(input_mpileup) as f:\r\n for line in f:\r\n\r\n line_split = line.strip().split()\r\n\r\n chromosome, position, reference_base, coverage = line_split[:4]\r\n position = int(position)\r\n coverage = float(coverage)\r\n if int(coverage) > 0:\r\n bases = line_split[4]\r\n else:\r\n bases = ''\r\n\r\n i = 0\r\n while i < len(bases):\r\n if bases[i] == '^':\r\n i += 1\r\n elif bases[i] == '*':\r\n coverage += 1\r\n i += 1\r\n\r\n if coverage > 0:\r\n if (chromosome, position) in cnv_regions:\r\n depths.append(int(coverage / \\\r\n cnv_regions[(chromosome, position)]))\r\n else:\r\n depths.append(int(coverage / ploidy))\r\n\r\n return calculate_depth_distribution(depths, output)\r\n\r\n\r\ndef process_chromosome_values(chromosome, chromosome_values, mu, sigma, OUT,\r\n p_threshold=0.0001, merge_window=1000, window=51):\r\n '''\r\n Go over depth values for a given chromosome in an input list and write\r\n to a list of filtered positions if a position is less or greater than\r\n threshhold values derived the cummulative distribution function of a\r\n normal distribution.\r\n\r\n chromosome -- The name of the chromosome. 
Used only in printing.\r\n chromosome_values -- list of chromosome depths at every position.\r\n mu -- Describes normal distribution, used to filter abnormal depths.\r\n sigma -- Describes normal distribution, used to filter abnormal depths.\r\n OUT -- File handle to write filtered positions with abnormal depths.\r\n window -- Window to smooth depth values.\r\n p_threshold -- Probability applied to the CDF of the normal distribution\r\n to generate depth thresholds for filtering.\r\n '''\r\n def write_interval_to_filter(chromosome, start, end):\r\n OUT.write('{}\\t{}\\t{}\\n'.format(\r\n chromosome,\r\n str(start),\r\n str(end + 1),\r\n ))\r\n\r\n d = int((window - 1) / 2)\r\n norm_dist = norm(mu, sigma)\r\n\r\n keep_threshold = [mu, mu]\r\n filter_threshold = [float('-inf'), float('inf')]\r\n\r\n first = float('inf')\r\n last = float('-inf')\r\n side = 0\r\n last_side = 0\r\n\r\n max = len(chromosome_values)\r\n\r\n for i in range(0, max):\r\n\r\n if i < d:\r\n window_start = 0\r\n window_end = i + d + 1\r\n elif i >= (max - d):\r\n window_start = i - d\r\n window_end = max\r\n else:\r\n window_start = i - d\r\n window_end = i + d + 1\r\n\r\n window_depth = numpy.mean(chromosome_values[window_start:window_end])\r\n\r\n if not (\r\n window_depth >= keep_threshold[0] and\r\n window_depth <= keep_threshold[1]\r\n ):\r\n if (\r\n window_depth <= filter_threshold[0] or\r\n window_depth >= filter_threshold[1]\r\n ):\r\n if window_depth < mu:\r\n side = -1\r\n else:\r\n side = 1\r\n if i - last > merge_window or last_side * side == -1:\r\n if last - first > 0:\r\n write_interval_to_filter(\r\n chromosome,\r\n first,\r\n last,\r\n )\r\n first = i\r\n last = i\r\n last_side = side\r\n else:\r\n if window_depth < mu:\r\n side = -1\r\n p = norm_dist.cdf(window_depth)\r\n\r\n if p >= p_threshold:\r\n keep_threshold[0] = window_depth\r\n else:\r\n filter_threshold[0] = window_depth\r\n if i - last > merge_window or last_side * side == -1:\r\n if last - first > 0:\r\n write_interval_to_filter(\r\n chromosome,\r\n first,\r\n last,\r\n )\r\n first = i\r\n last = i\r\n last_side = side\r\n\r\n elif window_depth > mu:\r\n side = 1\r\n p = 1. - norm_dist.cdf(window_depth)\r\n\r\n if p >= p_threshold:\r\n keep_threshold[1] = window_depth\r\n else:\r\n filter_threshold[1] = window_depth\r\n if i - last > merge_window or last_side * side == -1:\r\n if last - first > 0:\r\n write_interval_to_filter(\r\n chromosome,\r\n first,\r\n last,\r\n )\r\n first = i\r\n last = i\r\n last_side = side\r\n if last - first > 0:\r\n write_interval_to_filter(\r\n chromosome,\r\n first,\r\n last,\r\n )\r\n\r\n\r\ndef filter_regions_by_depth(depths, chrom_sizes, mu, sigma,\r\n filtered_regions_output, p_threshold=0.0001,\r\n merge_window=1000):\r\n '''\r\n Filter positions by depth observing a normal distribution. 
See\r\n process_chromosome_values for additional details.\r\n '''\r\n with open(filtered_regions_output, 'w') as OUT:\r\n\r\n for chromosome in sorted(depths.keys()):\r\n\r\n process_chromosome_values(\r\n chromosome, depths[chromosome], mu, sigma, OUT, \\\r\n p_threshold, merge_window)\r\n\r\n\r\ndef filter_regions_by_depth_bedgraph(bedgraph_file, chrom_sizes, mu,\r\n sigma, filtered_regions_output,\r\n ploidy=2, cnv_bedgraph_file=None,\r\n p_threshold=0.0001, merge_window=1000):\r\n '''\r\n Pass depths read from a bedGraph file to filter_regions_by_depth.\r\n '''\r\n depths = dict()\r\n\r\n if cnv_bedgraph_file is not None:\r\n cnv_regions = read_cnv_bedgraph(cnv_bedgraph_file)\r\n else:\r\n cnv_regions = dict()\r\n\r\n with open(bedgraph_file) as f:\r\n for line in f:\r\n\r\n chromosome, start, end, coverage = line.strip().split()\r\n start = int(start) + 1 # Convert from zero-based\r\n end = int(end)\r\n coverage = float(coverage)\r\n\r\n if chromosome not in depths:\r\n depths[chromosome] = numpy.zeros(chrom_sizes[chromosome], \\\r\n dtype=numpy.int32)\r\n\r\n for position in range(start, end + 1):\r\n if (chromosome, position) in cnv_regions:\r\n depths[chromosome][position - 1] = int(coverage / \\\r\n cnv_regions[(chromosome, position)])\r\n else:\r\n depths[chromosome][position - 1] = int(coverage / ploidy)\r\n\r\n filter_regions_by_depth(depths, chrom_sizes, mu, sigma, \\\r\n filtered_regions_output, p_threshold, merge_window)\r\n\r\n\r\ndef filter_regions_by_depth_mpileup(mpileup_file, chrom_sizes, mu,\r\n sigma, filtered_regions_output,\r\n ploidy, cnv_regions, p_threshold=0.0001,\r\n merge_window=1000):\r\n '''\r\n Pass depths read from a mplieup TXT file to filter_regions_by_depth.\r\n '''\r\n depths = dict()\r\n\r\n with open(mpileup_file) as f:\r\n for line in f:\r\n\r\n line_split = line.strip().split()\r\n\r\n chromosome, position, reference_base, coverage = line_split[:4]\r\n position = int(position)\r\n coverage = float(coverage)\r\n\r\n if chromosome not in depths:\r\n depths[chromosome] = numpy.zeros(chrom_sizes[chromosome], \\\r\n dtype=numpy.int32)\r\n\r\n if int(coverage) > 0:\r\n bases = line_split[4]\r\n else:\r\n bases = ''\r\n\r\n i = 0\r\n while i < len(bases):\r\n if bases[i] == '^':\r\n i += 1\r\n elif bases[i] == '*':\r\n coverage += 1\r\n i += 1\r\n\r\n if coverage > 0:\r\n if (chromosome, position) in cnv_regions:\r\n depths[chromosome][position - 1] = int(coverage / \\\r\n cnv_regions[(chromosome, position)])\r\n else:\r\n depths[chromosome][position - 1] = int(coverage / ploidy)\r\n\r\n filter_regions_by_depth(depths, chrom_sizes, mu, sigma,\r\n filtered_regions_output, p_threshold, merge_window)\r\n"
] |
[
[
"scipy.stats.norm.pdf",
"scipy.stats.norm.fit",
"scipy.stats.norm",
"numpy.mean",
"scipy.optimize.curve_fit",
"numpy.zeros"
]
] |
qzzhang/vcontact
|
[
"ae21b0d59d7189e34da9b57843c24622310f4a2a"
] |
[
"lib/vConTACT/vConTACT_utils/vConTACTUtils.py"
] |
[
"import time\nimport os\nimport subprocess\nimport csv\nimport uuid\nfrom collections import OrderedDict\nfrom Bio import SeqIO\nfrom Bio.Seq import Seq\nfrom Bio.SeqRecord import SeqRecord\nfrom string import Template\nfrom pyparsing import Literal, SkipTo\nimport pandas as pd\n\nfrom KBaseReport.KBaseReportClient import KBaseReport\nfrom Workspace.WorkspaceClient import Workspace\nfrom GenomeAnnotationAPI.GenomeAnnotationAPIClient import GenomeAnnotationAPI\nfrom DataFileUtil.DataFileUtilClient import DataFileUtil as dfu\nfrom installed_clients.AssemblyUtilClient import AssemblyUtil\n# from GenomeFileUtil.GenomeFileUtilClient import GenomeFileUtil as gfu\nfrom KBaseDataObjectToFileUtils.KBaseDataObjectToFileUtilsClient import KBaseDataObjectToFileUtils as ofu\n\n\ndef log(message, prefix_newline=False):\n \"\"\"\n Logging function, provides a hook to suppress or redirect log messages.\n \"\"\"\n print(('\\n' if prefix_newline else '') + '{0:.2f}'.format(time.time()) + ': ' + str(message))\n\n\nhtml_template = Template(\"\"\"<!DOCTYPE html>\n<html lang=\"en\">\n <head>\n \n <link href=\"https://netdna.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css\" rel=\"stylesheet\">\n <link href=\"https://cdn.datatables.net/1.10.22/css/jquery.dataTables.min.css\" rel=\"stylesheet\">\n <link href=\"https://cdn.datatables.net/buttons/1.5.2/css/buttons.dataTables.min.css\" rel=\"stylesheet\">\n\n <link href=\"https://cdn.datatables.net/searchpanes/1.2.0/css/searchPanes.dataTables.min.css\" rel=\"stylesheet\">\n <link href=\"https://cdn.datatables.net/select/1.3.1/css/select.dataTables.min.css\" rel=\"stylesheet\">\n\n <script src=\"https://code.jquery.com/jquery-3.5.1.js\" type=\"text/javascript\"></script>\n <script src=\"https://cdn.datatables.net/1.10.22/js/jquery.dataTables.min.js\" type=\"text/javascript\"></script>\n <script src=\"https://cdn.datatables.net/buttons/1.6.4/js/dataTables.buttons.min.js\" type=\"text/javascript\"></script>\n <script src=\"https://cdn.datatables.net/buttons/1.6.4/js/buttons.flash.min.js\" type=\"text/javascript\"></script>\n <script src=\"https://cdnjs.cloudflare.com/ajax/libs/jszip/3.1.3/jszip.min.js\" type=\"text/javascript\"></script>\n <script src=\"https://cdnjs.cloudflare.com/ajax/libs/pdfmake/0.1.53/pdfmake.min.js\" type=\"text/javascript\"></script>\n <script src=\"https://cdnjs.cloudflare.com/ajax/libs/pdfmake/0.1.53/vfs_fonts.js\" type=\"text/javascript\"></script>\n <script src=\"https://cdn.datatables.net/buttons/1.6.4/js/buttons.html5.min.js\" type=\"text/javascript\"></script>\n <script src=\"https://cdn.datatables.net/buttons/1.6.4/js/buttons.print.min.js\" type=\"text/javascript\"></script>\n\n <script src=\"https://cdn.datatables.net/searchpanes/1.2.0/js/dataTables.searchPanes.min.js\" type=\"text/javascript\"></script>\n <script src=\"https://cdn.datatables.net/select/1.3.1/js/dataTables.select.min.js\" type=\"text/javascript\"></script>\n \n <style>\n tfoot input {\n width: 100%;\n padding: 3px;\n box-sizing: border-box;\n }\n </style>\n \n </head>\n \n <body>\n \n <div class=\"container\">\n <div>\n ${html_table}\n </div>\n </div>\n\n <script type=\"text/javascript\">\n $$(document).ready(function() {\n $$('#my_id tfoot th').each( function () {\n var title = $$(this).text();\n $$(this).html( '<input type=\"text\" placeholder=\"Search '+title+'\" />' );\n });\n\n var table = $$('#my_id').DataTable({\n buttons: [\n 'copy', 'csv', 'excel', 'pdf', 'print'],\n scrollX: true,\n dom: 'lPfrtip' //Necessary for buttons to work\n });\n\n 
table.columns().every( function () {\n var that = this;\n\n $$( 'input', this.footer() ).on( 'keyup change', function () {\n if ( that.search() !== this.value ) {\n that\n .search( this.value )\n .draw();\n }\n });\n } );\n } );\n </script>\n \n </body>\n</html>\"\"\")\n\n\nclass vConTACTUtils:\n\n def __init__(self, config):\n self.scratch = os.path.abspath(config['scratch'])\n self.callback_url = os.environ['SDK_CALLBACK_URL']\n self.token = os.environ['KB_AUTH_TOKEN']\n self.scratch = os.path.abspath(config['scratch'])\n self.ws = Workspace(config['workspace-url'], token=self.token)\n self.genome_api = GenomeAnnotationAPI(self.callback_url)\n self.au = AssemblyUtil(self.callback_url)\n\n def vcontact_help(self):\n command = \"vcontact --help\"\n self._run_command(command)\n\n def execute(self, command: list):\n \"\"\"\n :param command: Command suitable for running in subprocess, must use a ['ls', '-l'] format\n :return: Response from command\n \"\"\"\n # logger.info('Running command: {}'.format(command))\n print('Running command: {}'.format(' '.join(command)))\n res = subprocess.run(command, shell=False, encoding='utf-8', check=True)\n\n return res\n\n def run_vcontact(self, params):\n\n # Determine KBase \"inputs\" for vConTACT2\n genome = params['genome']\n\n obj_type = self.ws.get_object_info3({'objects': [{'ref': genome}]})['infos'][0][2]\n\n if 'assembly' in obj_type.lower(): # If KBaseGenomeAnnotations.Assembly\n\n # Assembly requires annotation\n genome_fp = self.au.get_assembly_as_fasta({'ref': genome})['path']\n proteins_fp = os.path.join(self.scratch, 'proteins.faa')\n proteins_gbk = os.path.join(self.scratch, 'proteins.gbk')\n gene2genome_fp = os.path.join(self.scratch, 'gene2genome.csv')\n\n prodigal_cmd = ['prodigal', '-a', proteins_fp, '-o', proteins_gbk, '-f', 'gbk',\n '-i', genome_fp, '-p', 'meta']\n res = self.execute(prodigal_cmd)\n\n records = {}\n with open(proteins_fp, 'r') as proteins_fh:\n for record in SeqIO.parse(proteins_fh, 'fasta'):\n\n records[len(records)] = {\n 'protein_id': record.id,\n 'contig_id': record.id.rsplit('_', 1)[0],\n 'keywords': 'None'\n }\n\n g2g_df = pd.DataFrame.from_dict(records, orient='index')\n g2g_df.to_csv(gene2genome_fp, index=False)\n\n # Pass filepaths to the app and run\n params['gene2genome'] = gene2genome_fp\n params['sequences'] = proteins_fp\n\n elif 'kbasegenomes' in obj_type.lower(): # If KBaseGenomes.Genome\n genome_data = self.genome_api.get_genome_v1({\"genomes\": [{\"ref\": genome}]})\n\n # Convert genome data into \"reasonable\" parse form and write to scratch filesystem\n gene2genome, sequences = self.genome_to_inputs(genome_data)\n gene2genome_fp, sequences_fp = self.write_inputs(gene2genome, sequences)\n\n # Pass filepaths to the app and run\n params['gene2genome'] = gene2genome_fp\n params['sequences'] = sequences_fp\n\n elif 'binnedcontigs' in obj_type.lower(): # If KBaseMetagenomes.BinnedContigs\n print('KBaseMetagenomes.BinnedContigs hasnt been enabled. 
Check back later.')\n exit(1)\n else:\n print('Unknown error in identifying object types')\n\n print('Available database files')\n print(os.listdir('/miniconda/lib/python3.7/site-packages/vcontact2/data/'))\n\n # Just iterate through all parameters\n mappings = {\n 'gene2genome': '--proteins-fp',\n 'sequences': '--raw-proteins',\n 'db': '--db',\n 'pcs_mode': '--pcs-mode',\n 'vcs_mode': '--vcs-mode',\n 'blast_evalue': '--blast-evalue',\n 'pc_max_overlap': '--max-overlap',\n 'pc_penalty': '--penalty',\n 'pc_haircut': '--haircut',\n 'pc_inflation': '--pc-inflation',\n 'vc_inflation': '--vc-inflation',\n 'vc_density': '--min-density',\n 'vc_min_size': '--min-size',\n 'vc_max_overlap': '--vc-overlap',\n 'vc_penalty': '--vc-penalty',\n 'vc_haircut': '--vc-haircut',\n 'merge_method': '--merge-method',\n 'similarity': '--similarity',\n 'seed_method': '--seed-method',\n 'min_significance': '--sig',\n 'max_significance': '--max-sig',\n 'module_inflation': '--mod-inflation',\n 'mod_significance': '--mod-sig',\n 'module_min_shared': '--mod-shared-min',\n 'link_significance': '--link-sig',\n 'link_proportion': '--link-prop'\n }\n\n bool_args = ['optimize', 'permissive']\n\n # Should create build_command?\n command = 'vcontact2 --output-dir outdir'\n # Binaries\n command += ' --diamond-bin /usr/local/bin/diamond --c1-bin /usr/local/bin/cluster_one-1.0.jar'\n\n for param, cmd in mappings.items():\n command += ' {} {}'.format(cmd, params[param])\n\n self._run_command(command)\n\n report = self._generate_report(params)\n\n return report\n\n def _run_command(self, command):\n \"\"\"\n _run_command: run command and print result\n \"\"\"\n\n log('Start executing command:\\n{}'.format(command))\n pipe = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)\n output = pipe.communicate()[0]\n exitCode = pipe.returncode\n\n if (exitCode == 0):\n log('Executed command:\\n{}\\n'.format(command) +\n 'Exit Code: {}\\nOutput:\\n{}'.format(exitCode, output))\n else:\n error_msg = 'Error running command:\\n{}\\n'.format(command)\n error_msg += 'Exit Code: {}\\nOutput:\\n{}'.format(exitCode, output)\n raise ValueError(error_msg)\n\n def genome_to_inputs(self, genome):\n \"\"\"\n genome_to_inputs: convert genome annotation data (~json) to file inputs required by vConTACT\n :param genome:\n :return:\n \"\"\"\n\n records = []\n gene2genome = OrderedDict()\n\n genome_data = genome['genomes'][0]\n\n for item in genome_data['data']['features']:\n if 'id' not in item:\n continue\n print('This feature does not have a valid id')\n elif 'dna_sequence' not in item or 'protein_translation' not in item:\n continue\n print('This feature {} does not have a valid DNA sequence.'.format(item['id']))\n else:\n # Create FASTA file\n if item['type'] == 'gene':\n desc = (item['functions'] if item.get('functions', None)\n else item.get('function', ''))\n gene_record = SeqRecord(Seq(item['protein_translation']), id=item['id'],\n description=desc)\n records.append(gene_record)\n\n # Build gene2genome\n gene2genome.update({\n item['id']: {\n # 'contig_id': genome_data['data']['contig_ids'][0],\n 'contig_id': item['location'][0][0],\n 'protein_id': item['id'],\n 'keywords': item['function']\n }\n })\n\n return gene2genome, records\n\n def write_inputs(self, mapping, sequences):\n\n fasta_for_proteins_fp = os.path.join(self.scratch, 'vConTACT_proteins.fasta')\n with open(fasta_for_proteins_fp, 'w') as fasta_for_proteins_fh:\n SeqIO.write(sequences, fasta_for_proteins_fh, 'fasta')\n\n genes_to_genomes_mapping_fp = os.path.join(self.scratch, 
'vConTACT_gene2genome.csv')\n with open(genes_to_genomes_mapping_fp, 'w') as genes_to_genomes_mapping_fh:\n fields = ['contig_id', 'protein_id', 'keywords']\n writer = csv.DictWriter(genes_to_genomes_mapping_fh, fieldnames=fields)\n writer.writeheader()\n\n for gene in mapping.keys():\n writer.writerow(mapping[gene])\n\n return genes_to_genomes_mapping_fp, fasta_for_proteins_fp\n\n def _generate_report(self, params):\n \"\"\"\n _generate_report: generate summary report\n\n This will contain ALL the logic to generate the report, including areas that should/will be re-factored later\n\n \"\"\"\n\n # Get\n self.dfu = dfu(self.callback_url)\n\n # Get filepath of summary file\n summary_fp = os.path.join(os.getcwd(), 'outdir', 'genome_by_genome_overview.csv')\n\n summary_df = pd.read_csv(summary_fp, header=0, index_col=0)\n html = summary_df.to_html(index=False, classes='my_class table-striped\" id = \"my_id')\n\n # Need to file write below\n direct_html = html_template.substitute(html_table=html)\n\n # Find header so it can be copied to footer, as dataframe.to_html doesn't include footer\n start_header = Literal(\"<thead>\")\n end_header = Literal(\"</thead>\")\n\n text = start_header + SkipTo(end_header)\n\n new_text = ''\n for data, start_pos, end_pos in text.scanString(direct_html):\n new_text = ''.join(data).replace(' style=\"text-align: right;\"', '').replace('thead>',\n 'tfoot>\\n ') + '\\n</tfoot>'\n\n # Get start and end positions to insert new text\n end_tbody = Literal(\"</tbody>\")\n end_table = Literal(\"</table>\")\n\n insertion_pos = end_tbody + SkipTo(end_table)\n\n final_html = ''\n for data, start_pos, end_pos in insertion_pos.scanString(direct_html):\n final_html = direct_html[:start_pos + 8] + '\\n' + new_text + direct_html[start_pos + 8:]\n\n output_dir = os.path.join(self.scratch, str(uuid.uuid4()))\n self._mkdir_p(output_dir)\n result_fp = os.path.join(output_dir, 'index.html')\n\n with open(result_fp, 'w') as result_fh:\n result_fh.write(final_html)\n\n report_shock_id = self.dfu.file_to_shock({\n 'file_path': output_dir,\n 'pack': 'zip'\n })['shock_id']\n\n html_report = [{\n 'shock_id': report_shock_id,\n 'name': os.path.basename(result_fp),\n 'label': os.path.basename(result_fp),\n 'description': 'HTML summary report for vConTACT2'\n }]\n\n report_params = {'message': 'Basic message to show in the report',\n 'workspace_name': params['workspace_name'],\n 'html_links': html_report,\n 'direct_html_link_index': 0,\n 'report_object_name': 'vConTACT_report_{}'.format(str(uuid.uuid4())),\n # Don't use until have files to attach to report\n # 'file_links': [{}],\n # Don't use until data objects that are created as result of running app\n # 'objects_created': [{'ref': matrix_obj_ref,\n # 'description': 'Imported Matrix'}],\n }\n\n kbase_report_client = KBaseReport(self.callback_url, token=self.token)\n output = kbase_report_client.create_extended_report(report_params)\n\n report_output = {'report_name': output['name'], 'report_ref': output['ref']}\n\n return report_output\n\n def _mkdir_p(self, path):\n \"\"\"\n _mkdir_p: make directory for given path\n \"\"\"\n # https://stackoverflow.com/a/600612/643675\n if not path:\n return\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n"
] |
[
[
"pandas.read_csv",
"pandas.DataFrame.from_dict"
]
] |
green-s/ESRGAN
|
[
"fe9d5a8b613325f7b318d8f036123e3e2cc713c5"
] |
[
"trace.py"
] |
[
"import os\nimport cv2\nimport numpy as np\nimport torch\nimport architecture as arch\nimport argparse\nfrom pathlib import Path\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Convert models to TorchScript\")\n parser.add_argument(\n \"models\",\n nargs=\"*\",\n type=Path,\n help=\"The models to process. Defaults to all in models directory.\",\n )\n parser.add_argument(\n \"-o\",\n \"--out-dir\",\n type=Path,\n required=False,\n help=\"The directory to write output to. Defaults to jit_models in ESRGAN directory.\",\n )\n parser.add_argument(\n \"-d\",\n \"--device\",\n default=\"cuda\",\n choices=[\"cuda\", \"cpu\"],\n help=\"The device to use for upscaling. Defaults to cuda.\",\n )\n parser.add_argument(\n \"-s\",\n \"--suffix\",\n default=\"_jit.pth\",\n help=\"The suffix to add to output model name.\",\n )\n parser.add_argument(\"-f\", \"--force\", help=\"Whether to overwrite existing files.\")\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n script_dir = Path(__file__).parent\n model_dir = script_dir / \"models\"\n out_dir = args.out_dir if args.out_dir else script_dir / \"jit_models\"\n out_dir.mkdir(parents=True, exist_ok=True)\n\n if args.models:\n models = args.models\n else:\n models = model_dir.rglob(\"*.pth\")\n\n device = torch.device(args.device)\n\n # read image\n img_path = script_dir / \"LR/baboon.png\"\n img = cv2.imread(str(img_path), cv2.IMREAD_COLOR)\n img = img * 1.0 / 255\n img = torch.from_numpy(np.transpose(img[:, :, [2, 1, 0]], (2, 0, 1))).float()\n img_LR = img.unsqueeze(0)\n img_LR = img_LR.to(device)\n\n for model_path in models:\n if not model_path.is_file():\n print(f\"{str(model_path)} does not exist, skipping...\")\n continue\n\n out_path = out_dir / (model_path.stem + args.suffix)\n if not args.force and out_path.is_file():\n print(f\"{str(out_path)} already exists, skipping...\")\n continue\n\n print(f\"Tracing: {str(model_path)}\")\n\n state_dict = torch.load(model_path)\n if \"conv_first.weight\" in state_dict:\n print(\"Error: Attempted to load a new-format model\")\n return 1\n\n # Extract model information\n scale2 = 0\n max_part = 0\n in_nc = 3\n out_nc = 3\n nf = 64\n nb = 23\n for part in list(state_dict):\n parts = part.split(\".\")\n n_parts = len(parts)\n if n_parts == 5 and parts[2] == \"sub\":\n nb = int(parts[3])\n elif n_parts == 3:\n part_num = int(parts[1])\n if part_num > 6 and parts[2] == \"weight\":\n scale2 += 1\n if part_num > max_part:\n max_part = part_num\n out_nc = state_dict[part].shape[0]\n upscale = 2 ** scale2\n in_nc = state_dict[\"model.0.weight\"].shape[1]\n nf = state_dict[\"model.0.weight\"].shape[0]\n\n device = torch.device(args.device)\n net = arch.RRDB_Net(\n in_nc,\n out_nc,\n nf,\n nb,\n gc=32,\n upscale=upscale,\n norm_type=None,\n act_type=\"leakyrelu\",\n mode=\"CNA\",\n res_scale=1,\n upsample_mode=\"upconv\",\n )\n net.load_state_dict(state_dict, strict=True)\n del state_dict\n net.eval()\n\n for k, v in net.named_parameters():\n v.requires_grad = False\n net = net.to(device)\n\n with torch.jit.optimized_execution(should_optimize=True):\n # traced_script_module = torch.jit.trace(net, img_LR)\n traced_script_module = torch.jit.script(net)\n print(f\"Saving to: {str(out_path)}\")\n try:\n with out_path.open(\"wb\") as out_file:\n torch.jit.save(traced_script_module, out_file)\n except:\n os.remove(out_path)\n raise\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"torch.jit.script",
"torch.jit.save",
"torch.jit.optimized_execution",
"torch.load",
"numpy.transpose",
"torch.device"
]
] |
PyDL/sunkit-image
|
[
"bdfe7b402f204dc204d5e78bf9ca12ad81e1e686"
] |
[
"sunkit_image/tests/test_asda.py"
] |
[
"import numpy as np\nimport pytest\n\nfrom sunkit_image import asda\nfrom sunkit_image.data.test import get_test_filepath\n\n\ndef test_asda_artificial():\n \"\"\"\n Generate an artificial vortex using the Lamb_Oseen class in asda, then\n perform the vortex detection.\n \"\"\"\n # Generate an artificial vortex\n vmax = 2.0 # rotating speed\n rmax = 50 # radius\n ratio = 0.2 # ratio of expanding speed over rotating speed\n with pytest.raises(ValueError, match=\"Keyword 'r' must be an integer\"):\n lo = asda.Lamb_Oseen(vmax=vmax, rmax=rmax, ratio_vradial=ratio, factor=1, r=1.2)\n\n with pytest.raises(ValueError, match=\"Keyword 'factor' must be an integer\"):\n lo = asda.Lamb_Oseen(vmax=vmax, rmax=rmax, ratio_vradial=ratio, factor=1.2, r=1)\n\n with pytest.raises(ValueError, match=\"Keyword 'factor' must be an integer\"):\n lo = asda.Lamb_Oseen(vmax=vmax, rmax=rmax, ratio_vradial=ratio, factor=1.2, r=1)\n\n with pytest.warns(\n UserWarning, match=\"One of the input parameters is missing,\" + \"setting both to 'None'\"\n ):\n lo = asda.Lamb_Oseen(vmax=vmax, rmax=rmax, gamma=0.5, ratio_vradial=ratio, factor=1)\n\n lo = asda.Lamb_Oseen(vmax=vmax, rmax=rmax, ratio_vradial=ratio, factor=1)\n # Generate vx and vy\n with pytest.warns(\n UserWarning, match=\"One of the input parameters is missing, setting \" + \" both to 'None'\"\n ):\n vx, vy = lo.get_vxvy(x_range=[-100, 100, 200], y_range=[-100, 100, 200], x=np.meshgrid)\n\n vx, vy = lo.get_vxvy(x_range=[-100, 100, 200], y_range=[-100, 100, 200])\n\n # detect vortices\n vortex = lo.get_vortex()\n ve = vortex['ve']\n vr = vortex['vr']\n vc = vortex['vc']\n ia = vortex['ia']\n\n np.testing.assert_almost_equal(ve[0], 0.39996991917753405)\n np.testing.assert_almost_equal(vr[0], 1.999849595887626)\n assert vc == ([0.0, 0.0],)\n assert ia == (None,)\n assert len(vortex) == 9\n np.testing.assert_allclose(vortex[\"center\"], np.array([[100.0, 100.0]]))\n np.testing.assert_almost_equal(vortex[\"peak\"], 0.9605688248523583)\n np.testing.assert_almost_equal(vortex[\"radius\"], 50.0732161286822)\n assert len(vortex[\"points\"][0]) == 7877\n assert len(vortex[\"edge\"][0]) == 280\n\n np.testing.assert_allclose(vortex[\"center\"][0][0], 100)\n np.testing.assert_allclose(vortex[\"center\"][0][1], 100)\n\n np.testing.assert_allclose(vmax, vr[0], atol=0.001)\n np.testing.assert_allclose(vmax * ratio, ve[0], atol=0.001)\n\n np.testing.assert_allclose(vc[0][0], 0.0)\n np.testing.assert_allclose(vc[0][1], 0.0)\n np.testing.assert_allclose(rmax, vortex[\"radius\"][0], atol=0.1)\n\n\ndef test_real_data():\n \"\"\"\n run the test on real data and compare with the correct answer.\n\n Notes:\n Input velocity field and image (if there is any) are all stored in\n default Python order (i.e. 
[y, x] of the data).\n\n Output gamma values are in the same order, thus the same shape as\n velocity field.\n\n other outputs are in the order of [x, y], i.e., vc = [vx, vy],\n edge = [[x1, y1], [x2, y2],...], points = [[x1, y1], [x2, y2],...]\n in units of pixel\n \"\"\"\n # file which stores the velocity field data\n vel_file = get_test_filepath(\"asda_vxvy.npz\")\n # file that stores the correct detection result\n cor_file = get_test_filepath(\"asda_correct.npz\")\n # load velocity field and data\n vxvy = np.load(vel_file, allow_pickle=True)\n vx = vxvy[\"vx\"]\n vy = vxvy[\"vy\"]\n data = vxvy[\"data\"]\n\n # Perform swirl detection\n factor = 1\n # Initialise class\n lo = asda.Asda(vx, vy, factor=factor)\n # detect vortices\n vortex = lo.get_vortex(image=data)\n ve = vortex['ve']\n vr = vortex['vr']\n vc = vortex['vc']\n ia = vortex['ia']\n\n # load correct detect results\n correct = dict(np.load(cor_file, allow_pickle=True))\n\n # compare between detection result and correct detection result\n # number of swirls\n n = len(ve)\n nc = len(correct[\"ve\"])\n assert n == nc\n\n # find correspondences\n pos = []\n i = 0\n for cen in vortex[\"center\"]:\n cen = [int(cen[0]), int(cen[1])]\n idx = np.where(correct[\"center\"] == cen)\n assert not np.size(idx[0]) < 2\n pos.append(np.bincount(idx[0]).argmax())\n\n # perform comparison\n peak_diff = []\n radius_diff = []\n vr_diff = []\n ve_diff = []\n vc_diff = []\n ia_diff = []\n for i in np.arange(n):\n idx = pos[i]\n peak_diff.append((vortex[\"peak\"][i] - correct[\"peak\"][idx]) / correct[\"peak\"][idx] * 100)\n radius_diff.append((vortex[\"radius\"][i] - correct[\"radius\"][idx]) / correct[\"radius\"][idx] * 100)\n vr_diff.append((vr[i] - correct[\"vr\"][idx]) / correct[\"vr\"][idx] * 100)\n ve_diff.append((ve[i] - correct[\"ve\"][idx]) / correct[\"ve\"][idx] * 100)\n vc_diff.append((vc[i] - correct[\"vc\"][idx]) / correct[\"vc\"][idx] * 100)\n ia_diff.append((ia[i] - correct[\"ia\"][idx]) / correct[\"ia\"][idx] * 100)\n\n # Should be no differences\n assert (\n np.mean(ia_diff)\n == np.mean(peak_diff)\n == np.mean(peak_diff)\n == np.mean(radius_diff)\n == np.mean(vr_diff)\n == np.mean(ve_diff)\n == 0.0\n )"
] |
[
[
"numpy.arange",
"numpy.testing.assert_almost_equal",
"numpy.size",
"numpy.mean",
"numpy.bincount",
"numpy.testing.assert_allclose",
"numpy.load",
"numpy.array",
"numpy.where"
]
] |
zhichul/pi-bob
|
[
"b9ed7cb3614f50c51c6273665d8cd2b62dcab886"
] |
[
"src/training/preprocessing_divert.py"
] |
[
"import sys\nimport os\nimport numpy as np\nimport cv2\n\ndef parseData(path):\n\t# res = {\"l\":[],\"r\":[],\"s\":[]}\n\tres = {\"l\":[],\"r\":[],\"s\":[],\"n\":[],\"d\":[]}\n\tfor file in os.listdir(path):\n\t\timg = cv2.imread(os.path.join(path,file),0)\n\t\tdecision = file.split(\"-\")[0]\n\t\tt = [0]\n\t\t# t = [0] * 4\n\t\tif decision == \"left\":\n\t\t\tpass\n\t\telif decision == \"right\":\n\t\t\tpass\n\t\telif decision == \"straight\":\n\t\t\tpass\n\t\telif decision == \"none\":\n\t\t\tpass\n\t\telif decision == \"divert\":\n\t\t\tt[0] = 1\n\t\telse:\n\t\t\tprint(\"Unidentified training example: %s\" % file)\n\t\tif decision[0] in res:\n\t\t\tres[decision[0]].append((tuple(np.multiply(1/255,np.ndarray.flatten(img)).tolist()),tuple(t)))\n\treturn res\n\ndef main():\n\tindir = sys.argv[1]\n\toutdir = sys.argv[2]\n\tassert(os.path.isdir(indir))\n\tassert(os.path.isdir(outdir))\n\tfull = np.float32(np.full((480,640),255))\n\tzero = np.float32(np.full((480,640),0))\n\tfor file in os.listdir(indir):\n\t\tsfile = file.strip(\".jpg\")\n\t\tfor i in range(255//4,255*3//4,10):\n\t\t\timg = np.float32(cv2.imread(os.path.join(indir,file),0))\n\t\t\timg = np.minimum(img+i,full)\n\t\t\timg = cv2.resize(img,(16,12))\n\t\t\timg = cv2.GaussianBlur(img,(3,3),0)\n\t\t\tcv2.imwrite(os.path.join(outdir,sfile+(\"(+%.2f)\"%(i/255))+\".jpg\"),img)\n\t\t\t# img = np.float32(cv2.imread(os.path.join(indir,file),0))\n\t\t\t# img = np.maximum(img-i,zero)\n\t\t\t# img = cv2.GaussianBlur(img,(5,5),0)\n\t\t\t# img = cv2.resize(img,(16,12))\n\t\t\t# cv2.imwrite(os.path.join(outdir,sfile+(\"(-%.2f)\"%(i/255))+\".jpg\"),img)\n\n\n\twith open(sys.argv[3],\"wt\") as f:\n\t\tf.write(str(parseData(sys.argv[2])))\n\treturn 0\n\nmain()"
] |
[
[
"numpy.ndarray.flatten",
"numpy.minimum",
"numpy.full"
]
] |
njcuk9999/matplotlib_select
|
[
"f36546805f0b511b558bf1cae491c0e699f60c4c"
] |
[
"Add_buttons.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 17/02/17 at 11:48 PM\n\n@author: neil\n\nProgram description here\n\nVersion 0.0.1\n\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Button\nimport sys\n# detect python version\n# if python 3 do this:\nif (sys.version_info > (3, 0)):\n import tkinter\n import tkinter.simpledialog as tksimpledialog\nelse:\n import Tkinter as tkinter\n import tkSimpleDialog as tksimpledialog\n\n# =============================================================================\n# Define Class. Methods and Functions\n# =============================================================================\nclass Add_Buttons(object):\n def __init__(self, ax=None, **kwargs):\n \"\"\"\n Adds a select rectangle feature to any matplotlib axis, with select,\n clear all, and finish buttons\n\n :param ax: matplotlib axis, the frame to add the selector to\n :param kwargs: kwargs passed to the rectangle selector\n\n Current allowed kwargs are:\n\n button_labels - list of strings\n defines the name of each button to be displayed\n Must be of length 1 or greater\n \n button_actions - list of strings\n defines the action of each button. Must be same\n length as button_labels\n\n currently supported actions are:\n \n \"NEXT\" - sends a return statement to move to\n next plot \n self.result set to 1\n \n \"PREVIOUS\" - sends a return statement to move to\n previous plot\n self.result set to -1\n \n \"CLOSE\" - closes the plot\n \n \"OPTION\" - sends the button_label string\n self.result set to button_label\n \n \"UINPUT\" - asks user for an input\n\n button_params - list of dictionaries (optional)\n if defined must be same length as button_labels\n \n a dictionary for each button\n \n keywords of each dictionary:\n \n \"close\" - when used with \"OPTION\" action will\n close the plot after OPTION is clicked\n\n \"\"\"\n # set supported actions (and link to function)\n self.actions = dict(NEXT=self.next,\n PREVIOUS=self.previous,\n CLOSE=self.end,\n OPTION=self.option,\n UINPUT=self.uinput)\n self.supported_actions = list(self.actions.keys())\n # current button params\n self.buttons = []\n self.regions = []\n # result (1, 0, -1, or string)\n self.result = 0\n # storage\n self.data = dict()\n # Deal with having no matplotlib axis\n if ax is None:\n self.ax = plt.gca()\n else:\n self.ax = ax\n # load keyword arguments\n if kwargs is None:\n kwargs = dict()\n self.button_labels = kwargs.get('button_labels', ['Close'])\n self.num_buttons = len(self.button_labels)\n self.button_actions = kwargs.get('button_actions', ['CLOSE'])\n dparams = [dict()]*self.num_buttons\n self.button_params = kwargs.get('button_params', dparams)\n # check inputs are correct\n self.validate_inputs()\n # create buttons\n self.create_buttons()\n\n def validate_inputs(self):\n # Make sure button labels is in correct format\n try:\n self.button_labels = list(self.button_labels)\n for it in self.button_labels:\n if type(it) != str:\n raise TypeError()\n except TypeError:\n raise TypeError(\"Button labels must be a list of strings\")\n # Make sure button actions is in correct format\n try:\n self.button_actions = list(self.button_actions)\n for it in self.button_labels:\n if type(it) != str:\n raise TypeError()\n except TypeError:\n raise TypeError(\"Button labels must be a list of strings\")\n # Make sure button actions is in correct format\n try:\n self.button_actions = list(self.button_actions)\n for it in self.button_params:\n if type(it) != dict:\n raise TypeError()\n except 
TypeError:\n raise TypeError(\"Button params must be a dictionary\")\n # Make sure list are not empty and same length\n if len(self.button_labels) < 1:\n raise ValueError(\"'button_labels' Must have at least one button \"\n \"label in list.\")\n if len(self.button_actions) != len(self.button_labels):\n raise ValueError(\"'button_actions' must be the same length \"\n \"as 'button_labels\")\n self.num_buttons = len(self.button_labels)\n # Make sure all button actions are supported\n sstr = self.supported_actions[0]\n for it in range(len(self.supported_actions)):\n if it > 0:\n sstr += ', {0}'.format(self.supported_actions[it])\n for it in range(len(self.button_actions)):\n e1 = \"Action '{0}' not currently\".format(self.button_actions[it])\n e2 = \"supported. \\n Currently supported actions are: \\n\"\n if self.button_actions[it] not in self.supported_actions:\n raise ValueError(e1 + e2 + sstr)\n\n def create_buttons(self, width=0.2):\n \"\"\"\n Create a set of buttons along the bottom axis of the figure\n\n Need to re-write this to be generic based on used input\n (might not be possible as user need to define events)\n\n :param N: int, Number of buttons, default 3\n :param width: float, width of the buttons in x, must be less than\n 1.0/N\n :return:\n \"\"\"\n b_N, b_length = self.num_buttons, width\n b_sep = (1. / (b_N + 1)) * (1 - b_N * b_length)\n for b in range(b_N):\n start = (b + 1) * b_sep + b * b_length\n r = [start, 0.05, b_length, 0.075]\n self.regions.append(r)\n\n # adjust the figure\n plt.subplots_adjust(bottom=0.25)\n # populate buttons\n for b in range(b_N):\n axbutton = plt.axes(self.regions[b])\n button = Button(axbutton, self.button_labels[b])\n button.on_clicked(self.actions[self.button_actions[b]])\n self.buttons.append(button)\n\n def next(self, event):\n \"\"\"\n Event for clicking a button with action \"NEXT\"\n \n Sets self.result to 1\n \n :param event: \n :return: \n \"\"\"\n self.result = 1\n\n def previous(self, event):\n \"\"\"\n Event for clicking a button with action \"PREVIOUS\"\n\n Sets self.result to -1\n\n :param event: \n :return: \n \"\"\"\n self.result = -1\n\n def option(self, event):\n \"\"\"\n Event for clicking a button with action \"OPTION\"\n\n Sets self.result to button_label[i] where i is the position in\n button_label and button_action of the button clicked\n\n :param event: \n :return: \n \"\"\"\n pos = self.button_region(event)\n if pos is not None:\n self.result = self.button_labels[pos]\n\n close = self.button_params[pos].get('close', False)\n func = self.button_params[pos].get('func', None)\n if func is not None:\n func()\n if close:\n plt.close()\n\n def uinput(self, event):\n pos = self.button_region(event)\n if pos is not None:\n props = self.button_params[pos]\n title = props.get('title', 'Enter a Value')\n startvalue = props.get('comment', 'Message')\n name = props.get('name', 'x')\n fmt = props.get('fmt', None)\n minval = props.get('minval', None)\n maxval = props.get('maxval', None)\n\n root = tkinter.Tk()\n root.withdraw()\n if fmt == int:\n value = tksimpledialog.askinteger(title, startvalue,\n minvalue=minval,\n maxvalue=maxval)\n elif fmt == float:\n value = tksimpledialog.askfloat(title, startvalue,\n minvalue=minval,\n maxvalue=maxval)\n else:\n value = tksimpledialog.askstring(title, startvalue)\n self.data[name] = value\n root.destroy()\n\n\n def end(self, event):\n \"\"\"\n Event for clicking the finish button - closes the graph\n\n :param event: event passed to function\n :return:\n \"\"\"\n plt.close()\n\n def 
button_region(self, event):\n if len(self.regions) == 0:\n return None\n # get mouse click location in pixels\n x, y = event.x, event.y\n # get the current canvas width and height (in pixels)\n width = event.canvas.geometry().width()\n height = event.canvas.geometry().height()\n # loop round each button region\n for r, rn in enumerate(self.regions):\n # convert region to pixels\n rn1 = [rn[0]*width, rn[1]*height,\n (rn[0] + rn[2])*width, (rn[1] + rn[3])*height]\n # test whether x, y are in region\n cond1 = (x > rn1[0]) & (x < rn1[2])\n cond2 = (y > rn1[1]) & (y < rn1[3])\n if cond1 and cond2:\n return r\n return None\n\n\n# =============================================================================\n# Start of code\n# =============================================================================\n# Main code to test the rectangle selector\nif __name__ == '__main__':\n import numpy as np\n # plt.close()\n # fig, frame = plt.subplots(ncols=1, nrows=1)\n # x = np.random.rand(100)\n # y = np.random.rand(100)\n # plt.scatter(x, y, color='k', marker='o', s=20)\n # odict = dict(close=True)\n # a = Add_Buttons(ax=frame,\n # button_labels=['A', 'B'],\n # button_actions=['OPTION', 'OPTION'],\n # button_params=[odict, odict])\n # plt.show()\n # plt.close()\n\n plt.close()\n fig, frame = plt.subplots(ncols=1, nrows=1)\n x = np.random.rand(100)\n y = np.random.rand(100)\n plt.scatter(x, y, color='k', marker='o', s=20)\n odict = dict(close=True)\n udict = dict(name='x', fmt=int, title='Enter value',\n comment='Please enter x in meters.', minval=4, maxval=10)\n a = Add_Buttons(ax=frame,\n button_labels=['Enter value', 'Close'],\n button_actions=['UINPUT', 'OPTION'],\n button_params=[udict, odict])\n plt.show()\n plt.close()\n\n# =============================================================================\n# End of code\n# =============================================================================\n"
] |
[
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.scatter",
"matplotlib.widgets.Button",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.axes",
"numpy.random.rand",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show"
]
] |
pairlab/ocean
|
[
"f416c836e26fe8813c1d345cc391b56d948d22f0"
] |
[
"rlkit/torch/sac/agent.py"
] |
[
"import numpy as np\n\nimport torch\nfrom torch import nn as nn\nimport torch.nn.functional as F\n\nimport rlkit.torch.pytorch_util as ptu\n\nimport pdb\nimport os\n\neps = 1e-11\n\ndef _product_of_categorical_all(z_means):\n z_means = torch.log(z_means+eps)\n z_mean = torch.sum(z_means, dim=-2)\n cc = torch.max(z_mean).detach()\n z_mean -= cc\n z_mean = torch.exp(z_mean)\n return F.normalize(z_mean, p=1, dim=-1)\n\ndef _weighted_product_of_categorical_all(z_means):\n z_means = torch.log(z_means+eps)\n z_mean = torch.mean(z_means, dim=-2)\n cc = torch.max(z_mean).detach()\n z_mean -= cc\n z_mean = torch.exp(z_mean)\n return F.normalize(z_mean, p=1, dim=-1)\n\ndef _product_of_gaussians_all(mus, sigmas_squared):\n sigmas_squared = torch.clamp(sigmas_squared, min=1e-7)\n sigma_squared = 1. / torch.sum(torch.reciprocal(sigmas_squared), dim=-2)\n mu = sigma_squared * torch.sum(mus / sigmas_squared, dim=-2)\n return mu, sigma_squared\n\ndef _weighted_product_of_gaussians_all(mus, sigmas_squared):\n n = mus.shape[-2]\n sigmas_squared = torch.clamp(sigmas_squared, min=1e-7)\n sigma_squared = n / torch.sum(torch.reciprocal(sigmas_squared), dim=-2)\n mu = sigma_squared * torch.sum(mus / sigmas_squared, dim=-2) / n\n return mu, sigma_squared\n\ndef _weighted_product_of_dirichlet_all(alphas):\n return torch.mean(alphas, dim=-2)\n\ndef read_dim(s):\n a, b, c, d, e = s.split('.')\n return [int(a), int(b), int(c), int(d), int(e)]\n\nclass PEARLAgent(nn.Module):\n\n def __init__(self,\n global_context_encoder,\n recurrent_context_encoder,\n global_latent,\n vrnn_latent,\n policy,\n temperature,\n unitkl,\n alpha,\n g_constraint,\n r_constraint,\n var,\n r_alpha,\n r_var,\n rnn,\n temp_res,\n rnn_sample,\n weighted_sample,\n **kwargs\n ):\n super().__init__()\n self.g_cont_dim, self.g_n_cat, self.g_cat_dim, self.g_n_dir, self.g_dir_dim = read_dim(global_latent)\n if recurrent_context_encoder != None:\n self.r_cont_dim, self.r_n_cat, self.r_cat_dim, self.r_n_dir, self.r_dir_dim = read_dim(vrnn_latent)\n\n self.global_context_encoder = global_context_encoder\n self.recurrent_context_encoder = recurrent_context_encoder\n self.policy = policy\n self.temperature = temperature\n self.unitkl = unitkl\n\n self.g_constraint = g_constraint # global dirichlet type\n self.r_constraint = r_constraint # local dirichlet type\n self.g_alpha = alpha\n self.g_var = var\n self.r_alpha = r_alpha\n self.r_var = r_var\n self.rnn = rnn\n\n self.weighted_sample = weighted_sample\n\n self.temp_res = temp_res\n self.rnn_sample = rnn_sample\n self.n_global, self.n_local, self.n_infer = 0, 0, 0\n\n self.recurrent = kwargs['recurrent']\n self.glob = kwargs['glob']\n self.use_ib = kwargs['use_information_bottleneck']\n self.sparse_rewards = kwargs['sparse_rewards']\n self.use_next_obs = kwargs['use_next_obs']\n # initialize buffers for z dist and z\n # use buffers so latent context can be saved along with model weights\n if self.glob:\n self.register_buffer('z', torch.zeros(1, self.g_cont_dim + self.g_cat_dim * self.g_n_cat + self.g_dir_dim * self.g_n_dir))\n if self.g_cat_dim > 0:\n self.register_buffer('z_means', torch.zeros(1, self.g_cat_dim))\n if self.g_cont_dim > 0:\n self.register_buffer('z_c_means', torch.zeros(1, self.g_cont_dim))\n self.register_buffer('z_c_vars', torch.ones(1, self.g_cont_dim))\n if self.g_dir_dim > 0:\n if self.g_constraint == 'logitnormal':\n self.register_buffer('z_d_means', torch.zeros(1, self.g_dir_dim))\n self.register_buffer('z_d_vars', torch.ones(1, self.g_dir_dim))\n elif self.g_constraint == 
'dirichlet':\n self.register_buffer('z_d_means', torch.zeros(1, self.g_dir_dim))\n \n if self.recurrent:\n self.register_buffer('seq_z', torch.zeros(1, self.r_cont_dim + self.r_cat_dim * self.r_n_cat + self.r_dir_dim * self.r_n_dir))\n z_cat_prior, z_cont_prior, z_dir_prior = ptu.FloatTensor(), ptu.FloatTensor(), ptu.FloatTensor()\n if self.r_cat_dim > 0:\n self.register_buffer('seq_z_cat', torch.zeros(1, self.r_cat_dim))\n self.seq_z_next_cat = None\n z_cat_prior = ptu.ones(self.r_cat_dim * self.r_n_cat) / self.r_cat_dim\n if self.r_dir_dim > 0:\n if self.r_constraint == 'logitnormal':\n self.register_buffer('seq_z_dir_mean', torch.zeros(1, self.r_dir_dim))\n self.register_buffer('seq_z_dir_var', torch.ones(1, self.r_dir_dim))\n self.seq_z_next_dir_mean = None\n self.seq_z_next_dir_var = None\n z_dir_prior_mean = ptu.zeros(self.r_n_dir * self.r_dir_dim)\n z_dir_prior_var = ptu.ones(self.r_n_dir * self.r_dir_dim) * self.r_var\n z_dir_prior = torch.cat([z_dir_prior_mean, z_dir_prior_var])\n elif self.r_constraint == 'dirichlet':\n self.register_buffer('seq_z_dir', torch.zeros(1, self.r_dir_dim))\n self.seq_z_next_dir = None\n z_dir_prior = ptu.ones(self.r_n_dir * self.r_dir_dim) * self.r_alpha\n if self.r_cont_dim > 0:\n self.register_buffer('seq_z_cont_mean', torch.zeros(1, self.r_cont_dim))\n self.register_buffer('seq_z_cont_var', torch.zeros(1, self.r_cont_dim))\n self.seq_z_next_cont_mean = None\n self.seq_z_next_cont_var = None\n z_cont_prior = torch.cat([ptu.zeros(self.r_cont_dim), ptu.ones(self.r_cont_dim)])\n self.seq_z_prior = torch.cat([z_cat_prior, z_cont_prior, z_dir_prior])\n\n self.clear_z()\n \n\n def clear_z(self, num_tasks=1, batch_size=1, traj_batch_size=1): \n '''\n reset q(z|c) to the prior\n sample a new z from the prior\n '''\n if self.glob:\n if self.g_cat_dim > 0:\n self.z_means = ptu.ones(num_tasks * self.g_n_cat, self.g_cat_dim)/self.g_cat_dim\n if self.g_cont_dim > 0:\n self.z_c_means = ptu.zeros(num_tasks, self.g_cont_dim)\n self.z_c_vars = ptu.ones(num_tasks, self.g_cont_dim)\n if self.g_dir_dim > 0:\n if self.g_constraint == 'logitnormal':\n self.z_d_means = ptu.zeros(num_tasks * self.g_n_dir, self.g_dir_dim)\n self.z_d_vars = ptu.ones(num_tasks * self.g_n_dir, self.g_dir_dim)*self.g_var\n else:\n self.z_d_means = ptu.ones(num_tasks * self.g_n_dir, self.g_dir_dim)*self.g_alpha \n\n self.sample_z()\n\n if self.recurrent:\n if self.r_cat_dim > 0:\n self.seq_z_cat = ptu.ones(num_tasks * batch_size * self.r_n_cat, self.r_cat_dim) / self.r_cat_dim\n self.seq_z_next_cat = None\n if self.r_cont_dim > 0:\n self.seq_z_cont_mean = ptu.zeros(num_tasks * batch_size, self.r_cont_dim)\n self.seq_z_cont_var = ptu.ones(num_tasks * batch_size, self.r_cont_dim)\n self.seq_z_next_cont_mean = None\n self.seq_z_next_cont_var = None\n if self.r_dir_dim > 0:\n if self.r_constraint == 'logitnormal':\n self.seq_z_dir_mean = ptu.zeros(num_tasks * batch_size * self.r_n_dir, self.r_dir_dim)\n self.seq_z_dir_var = ptu.ones(num_tasks * batch_size * self.r_n_dir, self.r_dir_dim) * self.r_var\n self.seq_z_next_dir_mean = None\n self.seq_z_next_dir_var = None\n elif self.r_constraint == 'dirichlet':\n self.seq_z_dir = ptu.ones(num_tasks * batch_size * self.r_n_dir, self.r_dir_dim) * self.r_alpha\n self.seq_z_next_dir = None\n\n self.sample_sequence_z()\n\n\n # reset the context collected so far\n self.context = None\n # reset any hidden state in the encoder network (relevant for RNN)\n if self.global_context_encoder != None:\n self.global_context_encoder.reset(num_tasks)\n if 
self.recurrent_context_encoder != None:\n self.recurrent_context_encoder.reset(num_tasks*traj_batch_size)\n\n def detach_z(self):\n ''' disable backprop through z '''\n if self.glob:\n self.z = self.z.detach()\n if self.recurrent:\n self.recurrent_context_encoder.hn = self.recurrent_context_encoder.hn.detach()\n self.recurrent_context_encoder.cn = self.recurrent_context_encoder.cn.detach()\n self.seq_z = self.seq_z.detach()\n\n def update_context(self, inputs):\n ''' append single transition to the current context '''\n o, a, r, no, d, info = inputs\n if self.sparse_rewards:\n r = info['sparse_reward']\n o = ptu.from_numpy(o[None, None, ...])\n a = ptu.from_numpy(a[None, None, ...])\n r = ptu.from_numpy(np.array([r])[None, None, ...])\n no = ptu.from_numpy(no[None, None, ...])\n\n if self.use_next_obs:\n data = torch.cat([o, a, r, no], dim=2)\n else:\n data = torch.cat([o, a, r], dim=2)\n if self.context is None:\n self.context = data\n else:\n self.context = torch.cat([self.context, data], dim=1)\n\n def compute_kl_div(self): \n ''' compute KL( q(z|c) || r(z) ) '''\n kl_div_cont, kl_div_disc, kl_div_dir = ptu.FloatTensor([0.]).mean(), ptu.FloatTensor([0.]).mean(), ptu.FloatTensor([0.]).mean()\n kl_div_seq_cont, kl_div_seq_disc, kl_div_seq_dir = ptu.FloatTensor([0.]).mean(), ptu.FloatTensor([0.]).mean(), ptu.FloatTensor([0.]).mean()\n\n if self.glob:\n if self.g_cat_dim > 0:\n if self.unitkl:\n kl_div_disc = torch.sum(self.z_means_all*torch.log((self.z_means_all+eps)*self.g_cat_dim))\n else:\n kl_div_disc = torch.sum(self.z_means*torch.log((self.z_means+eps)*self.g_cat_dim))\n if self.g_dir_dim > 0:\n if self.g_constraint == 'dirichlet':\n prior = torch.distributions.Dirichlet(ptu.ones(self.g_dir_dim)*self.g_alpha)\n if self.unitkl:\n posteriors = torch.distributions.Dirichlet(self.z_d_means_all)\n else:\n posteriors = torch.distributions.Dirichlet(self.z_d_means)\n kl_div_dir = torch.sum(torch.distributions.kl.kl_divergence(posteriors, prior))\n elif self.g_constraint == 'logitnormal':\n prior = torch.distributions.Normal(ptu.zeros(self.g_dir_dim), ptu.ones(self.g_dir_dim)*np.sqrt(self.g_var))\n if self.unitkl:\n posteriors = torch.distributions.Normal(self.z_d_means_all, torch.sqrt(self.z_d_vars_all))\n else:\n posteriors = torch.distributions.Normal(self.z_d_means, torch.sqrt(self.z_d_vars))\n kl_div_dir = torch.sum(torch.distributions.kl.kl_divergence(posteriors, prior))\n if self.g_cont_dim > 0:\n if self.unitkl:\n kl_div_cont = torch.sum(0.5*(-torch.log(self.z_c_vars_all)+self.z_c_vars_all+self.z_c_means_all*self.z_c_means_all-1)) \n else:\n kl_div_cont = torch.sum(0.5*(-torch.log(self.z_c_vars)+self.z_c_vars+self.z_c_means*self.z_c_means-1)) \n\n if self.recurrent:\n if self.rnn == 'rnn':\n if self.r_cat_dim > 0:\n assert type(self.seq_z_next_cat) != type(None)\n kl_div_seq_disc = torch.sum(self.seq_z_cat * torch.log((self.seq_z_cat + eps) * self.r_cat_dim)) \\\n + torch.sum(self.seq_z_next_cat * torch.log((self.seq_z_next_cat + eps) * self.r_cat_dim))\n if self.r_dir_dim > 0:\n if self.r_constraint == 'dirichlet':\n assert type(self.seq_z_next_dir) != type(None)\n prior = torch.distributions.Dirichlet(ptu.ones(self.r_dir_dim) * self.r_alpha)\n posteriors = torch.distributions.Dirichlet(self.seq_z_dir)\n posteriors_next = torch.distributions.Dirichlet(self.seq_z_next_dir)\n kl_div_seq_dir = torch.sum(torch.distributions.kl.kl_divergence(posteriors, prior)) \\\n + torch.sum(torch.distributions.kl.kl_divergence(posteriors_next, prior))\n elif self.r_constraint == 'logitnormal':\n 
assert type(self.seq_z_next_dir_mean) != type(None)\n prior = torch.distributions.Normal(ptu.zeros(self.r_dir_dim), ptu.ones(self.r_dir_dim)*np.sqrt(self.r_var))\n posteriors = torch.distributions.Normal(self.seq_z_dir_mean, torch.sqrt(self.seq_z_dir_var))\n posteriors_next = torch.distributions.Normal(self.seq_z_next_dir_mean, torch.sqrt(self.seq_z_next_dir_var))\n kl_div_seq_dir = torch.sum(torch.distributions.kl.kl_divergence(posteriors, prior)) \\\n + torch.sum(torch.distributions.kl.kl_divergence(posteriors_next, prior))\n if self.r_cont_dim > 0:\n kl_div_seq_cont = torch.sum(0.5*(-torch.log(self.seq_z_cont_var)+self.seq_z_cont_var+self.seq_z_cont_mean*self.seq_z_cont_mean-1)) \\\n + torch.sum(0.5*(-torch.log(self.seq_z_next_cont_var)+self.seq_z_next_cont_var+self.seq_z_next_cont_mean*self.seq_z_next_cont_mean-1)) \n elif self.rnn == 'vrnn':\n kl_div_seq_disc, kl_div_seq_cont, kl_div_seq_dir = self.recurrent_context_encoder.compute_kl_div()\n\n\n return kl_div_disc, kl_div_cont, kl_div_dir, kl_div_seq_disc, kl_div_seq_cont, kl_div_seq_dir\n\n def infer_posterior(self, context, ff=False): \n ''' compute q(z|c) as a function of input context and sample new z from it'''\n params = self.global_context_encoder(context)\n if self.g_dir_dim > 0 and self.g_constraint == 'dirichlet':\n params = params.view(context.size(0), -1, self.g_n_cat*self.g_cat_dim + 2*self.g_cont_dim + self.g_n_dir*self.g_dir_dim)\n else: \n params = params.view(context.size(0), -1, self.g_n_cat*self.g_cat_dim + 2*self.g_cont_dim + self.g_n_dir*self.g_dir_dim*2)\n\n if self.g_cat_dim > 0:\n params_disc = params[..., :self.g_n_cat*self.g_cat_dim]\n params_disc = params_disc.view(context.size(0), -1, self.g_n_cat, self.g_cat_dim)\n params_disc = params_disc.transpose(1, 2)\n mu = F.softmax(params_disc, dim=-1)\n if self.unitkl:\n self.z_means_all = torch.reshape(mu, [-1, self.g_cat_dim])\n if self.weighted_sample:\n self.z_means = _weighted_product_of_categorical_all(mu).view(-1, self.g_cat_dim)\n else:\n self.z_means = _product_of_categorical_all(mu).view(-1, self.g_cat_dim)\n \n if self.g_cont_dim > 0:\n params_cont = params[..., self.g_n_cat*self.g_cat_dim:self.g_n_cat*self.g_cat_dim+2*self.g_cont_dim]\n mu_c = params_cont[..., :self.g_cont_dim]\n sigma_squared_c = F.softplus(params_cont[..., self.g_cont_dim:])\n if self.unitkl:\n self.z_c_means_all = torch.reshape(mu_c, [-1, self.g_cont_dim])\n self.z_c_vars_all = torch.reshape(sigma_squared_c, [-1, self.g_cont_dim])\n if self.weighted_sample:\n self.z_c_means, self.z_c_vars = _weighted_product_of_gaussians_all(mu_c, sigma_squared_c)\n else:\n self.z_c_means, self.z_c_vars = _product_of_gaussians_all(mu_c, sigma_squared_c)\n\n if self.g_dir_dim > 0 and self.g_constraint == 'logitnormal':\n params_dir = params[..., self.g_n_cat*self.g_cat_dim+2*self.g_cont_dim:]\n params_dir = params_dir.view(context.size(0), -1, self.g_n_dir, self.g_dir_dim*2)\n params_dir = params_dir.transpose(1, 2)\n mu_d = params_dir[..., :self.g_dir_dim]\n sigma_squared_d = F.softplus(params_dir[..., self.g_dir_dim:])\n if self.unitkl:\n self.z_d_means_all = torch.reshape(mu_d, [-1, self.g_dir_dim])\n self.z_d_vars_all = torch.reshape(sigma_squared_d, [-1, self.g_dir_dim])\n if self.weighted_sample:\n self.z_d_means, self.z_d_vars = _weighted_product_of_gaussians_all(mu_d, sigma_squared_d)\n else:\n self.z_d_means, self.z_d_vars = _product_of_gaussians_all(mu_d, sigma_squared_d)\n self.z_d_means = self.z_d_means.view(-1, self.g_dir_dim)\n self.z_d_vars = self.z_d_vars.view(-1, self.g_dir_dim)\n\n 
if self.g_dir_dim > 0 and self.g_constraint == 'dirichlet':\n params_dir = params[..., self.g_n_cat*self.g_cat_dim+2*self.g_cont_dim:]\n params_dir = params_dir.view(context.size(0), -1, self.g_n_dir, self.g_dir_dim)\n params_dir = F.softplus(params_dir.transpose(1, 2))\n if self.unitkl:\n # self.z_d_means_all = params_dir.view(-1, self.g_dir_dim)\n self.z_d_means_all = torch.reshape(params_dir, [-1, self.g_dir_dim])\n if self.weighted_sample:\n self.z_d_means = _weighted_product_of_dirichlet_all(params_dir)\n else:\n assert False, \"Global dirichlet parameterization must be weighted sample\"\n self.z_d_means = self.z_d_means.view(-1, self.g_dir_dim)\n\n self.sample_z()\n\n def sample_z(self):\n z, z_c, z_d = ptu.FloatTensor(), ptu.FloatTensor(), ptu.FloatTensor()\n if self.g_cat_dim > 0:\n gumbel = torch.distributions.Gumbel(ptu.FloatTensor([0]), ptu.FloatTensor([1.0])).sample(self.z_means.size()).squeeze(-1)\n log_z = torch.log(self.z_means+eps)\n logit = (log_z + gumbel) / self.temperature\n z = F.softmax(logit, dim=1).view(-1, self.g_n_cat, self.g_cat_dim).view(-1, self.g_n_cat * self.g_cat_dim)\n if self.g_cont_dim > 0:\n normal = torch.distributions.Normal(ptu.FloatTensor([0.]), ptu.FloatTensor([1.])).sample(self.z_c_means.size()).squeeze(-1)\n z_c = self.z_c_means + torch.sqrt(self.z_c_vars)*normal\n if self.g_dir_dim > 0:\n if self.g_constraint == 'dirichlet':\n z_d = torch.distributions.Dirichlet(self.z_d_means).rsample()\\\n .view(-1, self.g_n_dir, self.g_dir_dim).view(-1, self.g_n_dir * self.g_dir_dim)\n elif self.g_constraint == 'logitnormal':\n normal = torch.distributions.Normal(ptu.FloatTensor([0.]), ptu.FloatTensor([1.])).sample(self.z_d_means.size()).squeeze(-1)\n z_d = F.softmax(self.z_d_means + torch.sqrt(self.z_d_vars)*normal, dim=-1)\\\n .view(-1, self.g_n_dir, self.g_dir_dim).view(-1, self.g_n_dir * self.g_dir_dim)\n\n self.z = torch.cat([z, z_c, z_d], dim=-1)\n\n def get_action(self, obs, deterministic=False):\n ''' sample action from the policy, conditioned on the task embedding '''\n z, seq_z = ptu.FloatTensor(), ptu.FloatTensor()\n if self.glob:\n z = self.z\n if self.recurrent:\n seq_z = self.seq_z\n obs = ptu.from_numpy(obs[None])\n in_ = torch.cat([obs, z, seq_z], dim=1)\n\n return self.policy.get_action(in_, deterministic=deterministic)\n\n def set_num_steps_total(self, n):\n self.policy.set_num_steps_total(n)\n\n def forward(self, obs, context, trajectories, indices_in_trajs, do_inference, compute_for_next, is_next):\n ''' given context, get statistics under the current policy of a set of observations '''\n t, b, _ = obs.size()\n obs = obs.view(t * b, -1)\n\n task_z, seq_z = ptu.FloatTensor(), ptu.FloatTensor()\n # self.n_infer += 1\n if do_inference:\n if self.recurrent:\n assert type(self.recurrent_context_encoder) != type(None) and type(trajectories) != type(None) and type(indices_in_trajs) != type(None)\n self.infer_sequence_posterior(trajectories, indices_in_trajs, compute_for_next = compute_for_next)\n\n if self.glob:\n self.infer_posterior(context)\n\n if self.recurrent:\n if is_next:\n seq_z = self.seq_z_next\n else:\n seq_z = self.seq_z\n\n if self.glob:\n task_z = self.z\n task_z = [z.repeat(b, 1) for z in task_z]\n task_z = torch.cat(task_z, dim=0)\n\n in_ = torch.cat([obs, task_z.detach(), seq_z.detach()], dim=1)\n policy_outputs = self.policy(in_, reparameterize=True, return_log_prob=True) \n\n return policy_outputs, task_z, seq_z\n\n def log_diagnostics(self, eval_statistics): \n pass\n\n @property\n def networks(self):\n network_list = []\n if 
self.glob:\n network_list.append(self.global_context_encoder)\n network_list.append(self.policy)\n if self.recurrent:\n network_list.append(self.recurrent_context_encoder)\n return network_list\n\n def infer_sequence_posterior(self, trajectories, indices_in_trajs, compute_for_next): \n ''' compute q(z|c) as a function of input context and sample new z from it'''\n num_tasks, traj_batch, eps_len, input_dim = trajectories.size()\n self.clear_sequence_z(num_tasks=num_tasks, batch_size=traj_batch * indices_in_trajs.size(2), traj_batch_size=traj_batch)\n if self.rnn_sample == 'full':\n params = self.recurrent_context_encoder(trajectories.view(-1, eps_len, input_dim))\n elif self.rnn_sample == 'full_wo_sampling':\n params = self.recurrent_context_encoder(trajectories.view(-1, eps_len, input_dim))\n elif self.rnn_sample == 'single_sampling':\n traj_ranges = [i for i in range(eps_len) if i % self.temp_res == (self.temp_res - 1)]\n tmp_trajectories = trajectories[:, :, traj_ranges, :]\n params = self.recurrent_context_encoder(tmp_trajectories.view(-1, len(traj_ranges), input_dim))\n eps_len = len(traj_ranges)\n elif self.rnn_sample == 'batch_sampling':\n max_len = int(eps_len//self.temp_res*self.temp_res)\n tmp_trajectories = trajectories[:, :, :max_len, :]\n tmp_trajectories = tmp_trajectories.view(num_tasks * traj_batch, max_len // self.temp_res, self.temp_res * input_dim)\n params = self.recurrent_context_encoder(tmp_trajectories)\n eps_len = max_len // self.temp_res\n\n if self.rnn_sample == 'full':\n if compute_for_next:\n indices_in_trajs_next = indices_in_trajs + 1\n else:\n if compute_for_next:\n indices_in_trajs_next = (indices_in_trajs + 1) // self.temp_res\n indices_in_trajs = indices_in_trajs // self.temp_res\n \n if self.r_constraint == 'logitnormal':\n params = params.view(num_tasks, traj_batch, eps_len, self.r_cont_dim * 2 + self.r_n_cat * self.r_cat_dim + self.r_n_dir * self.r_dir_dim * 2)\n else:\n params = params.view(num_tasks, traj_batch, eps_len, self.r_cont_dim * 2 + self.r_n_cat * self.r_cat_dim + self.r_n_dir * self.r_dir_dim)\n\n if self.rnn_sample == 'full_wo_sampling':\n traj_ranges = [i for i in range(eps_len) if i % self.temp_res == (self.temp_res - 1)]\n params = params[:, :, traj_ranges, :]\n\n batch_per_traj = indices_in_trajs.size(2)\n params = torch.cat([self.seq_z_prior.expand(num_tasks, traj_batch, 1, params.size(3)), params], dim=2)\n\n if self.r_cat_dim > 0:\n params_disc = params[..., :self.r_n_cat * self.r_cat_dim]\n seq_z_cat = torch.gather(params_disc, 2, indices_in_trajs.unsqueeze(-1).expand(num_tasks, traj_batch, batch_per_traj, self.r_n_cat * self.r_cat_dim))\n self.seq_z_cat = F.softmax(seq_z_cat.view(num_tasks * traj_batch * batch_per_traj * self.r_n_cat, self.r_cat_dim), dim=-1)\n if compute_for_next:\n seq_z_next_cat = torch.gather(params_disc, 2, indices_in_trajs_next.unsqueeze(-1).expand(num_tasks, traj_batch, batch_per_traj, self.r_n_cat * self.r_cat_dim)) \n self.seq_z_next_cat = F.softmax(seq_z_next_cat.view(num_tasks * traj_batch * batch_per_traj * self.r_n_cat, self.r_cat_dim), dim=-1)\n else:\n self.seq_z_next_cat = None\n\n if self.r_cont_dim > 0:\n params_cont = params[..., self.r_n_cat * self.r_cat_dim : self.r_n_cat * self.r_cat_dim + 2 * self.r_cont_dim]\n mu_c = params_cont[..., :self.r_cont_dim]\n sigma_squared_c = F.softplus(params_cont[..., self.r_cont_dim:])\n seq_z_cont_mean = torch.gather(mu_c, 2, indices_in_trajs.unsqueeze(-1).expand(num_tasks, traj_batch, batch_per_traj, self.r_cont_dim))\n seq_z_cont_var = 
torch.gather(sigma_squared_c, 2, indices_in_trajs.unsqueeze(-1).expand(num_tasks, traj_batch, batch_per_traj, self.r_cont_dim))\n self.seq_z_cont_mean = seq_z_cont_mean.view(num_tasks * traj_batch * batch_per_traj, self.r_cont_dim)\n self.seq_z_cont_var = seq_z_cont_var.view(num_tasks * traj_batch * batch_per_traj, self.r_cont_dim)\n if compute_for_next:\n seq_z_next_cont_mean = torch.gather(mu_c, 2, indices_in_trajs_next.unsqueeze(-1).expand(num_tasks, traj_batch, batch_per_traj, self.r_cont_dim))\n seq_z_next_cont_var = torch.gather(sigma_squared_c, 2, indices_in_trajs_next.unsqueeze(-1).expand(num_tasks, traj_batch, batch_per_traj, self.r_cont_dim))\n self.seq_z_next_cont_mean = seq_z_next_cont_mean.view(num_tasks * traj_batch * batch_per_traj, self.r_cont_dim)\n self.seq_z_next_cont_var = seq_z_next_cont_var.view(num_tasks * traj_batch * batch_per_traj, self.r_cont_dim)\n else:\n self.seq_z_next_cont_var = None\n self.seq_z_next_cont_mean = None\n\n if self.r_dir_dim > 0 and self.r_constraint == 'logitnormal':\n params_dir = params[..., self.r_n_cat * self.r_cat_dim + self.r_cont_dim * 2:]\n mu_d = params_dir[..., :self.r_n_dir * self.r_dir_dim]\n sigma_squared_d = F.softplus(params_dir[..., self.r_dir_dim * self.r_n_dir:])\n seq_z_dir_mean = torch.gather(mu_d, 2, indices_in_trajs.unsqueeze(-1).expand(num_tasks, traj_batch, batch_per_traj, self.r_n_dir * self.r_dir_dim))\n seq_z_dir_var = torch.gather(sigma_squared_d, 2, indices_in_trajs.unsqueeze(-1).expand(num_tasks, traj_batch, batch_per_traj, self.r_n_dir * self.r_dir_dim))\n self.seq_z_dir_mean = seq_z_dir_mean.view(num_tasks * traj_batch * batch_per_traj * self.r_n_dir, self.r_dir_dim)\n self.seq_z_dir_var = seq_z_dir_var.view(num_tasks * traj_batch * batch_per_traj * self.r_n_dir, self.r_dir_dim)\n if compute_for_next:\n seq_z_next_dir_mean = torch.gather(mu_d, 2, indices_in_trajs_next.unsqueeze(-1).expand(num_tasks, traj_batch, batch_per_traj, self.r_n_dir * self.r_dir_dim))\n seq_z_next_dir_var = torch.gather(sigma_squared_d, 2, indices_in_trajs_next.unsqueeze(-1).expand(num_tasks, traj_batch, batch_per_traj, self.r_n_dir * self.r_dir_dim))\n self.seq_z_next_dir_mean = seq_z_next_dir_mean.view(num_tasks * traj_batch * batch_per_traj * self.r_n_dir, self.r_dir_dim)\n self.seq_z_next_dir_var = seq_z_next_dir_var.view(num_tasks * traj_batch * batch_per_traj * self.r_n_dir, self.r_dir_dim)\n else:\n self.seq_z_next_dir_mean = None\n self.seq_z_next_dir_var = None\n\n if self.r_dir_dim > 0 and self.r_constraint == 'dirichlet':\n params_dir = params[..., self.r_n_cat * self.r_cat_dim + self.r_cont_dim * 2:]\n seq_z_dir = torch.gather(params_dir, 2, indices_in_trajs.unsqueeze(-1).expand(num_tasks, traj_batch, batch_per_traj, self.r_n_dir * self.r_dir_dim))\n self.seq_z_dir = F.softplus(seq_z_dir.view(num_tasks * traj_batch * batch_per_traj * self.r_n_dir, self.r_dir_dim))\n if compute_for_next:\n seq_z_next_dir = torch.gather(params_dir, 2, indices_in_trajs_next.unsqueeze(-1).expand(num_tasks, traj_batch, batch_per_traj, self.r_n_dir * self.r_dir_dim))\n self.seq_z_next_dir = F.softplus(seq_z_next_dir.view(num_tasks * traj_batch * batch_per_traj * self.r_n_dir, self.r_dir_dim))\n else:\n self.seq_z_next = None\n\n self.sample_sequence_z(compute_for_next)\n\n def sample_sequence_z(self, compute_for_next=False):\n z, z_c, z_d = ptu.FloatTensor(), ptu.FloatTensor(), ptu.FloatTensor()\n if self.r_cat_dim > 0:\n gumbel = torch.distributions.Gumbel(ptu.FloatTensor([0]), ptu.FloatTensor([1.0])).sample(self.seq_z_cat.size()).squeeze(-1)\n 
log_z = torch.log(self.seq_z_cat + eps)\n logit = (log_z + gumbel) / self.temperature\n z = F.softmax(logit, dim=1).view(-1, self.r_n_cat * self.r_cat_dim)\n if self.r_cont_dim > 0:\n normal = torch.distributions.Normal(ptu.FloatTensor([0.]), ptu.FloatTensor([1.])).sample(self.seq_z_cont_mean.size()).squeeze(-1)\n z_c = self.seq_z_cont_mean + torch.sqrt(self.seq_z_cont_var) * normal\n if self.r_dir_dim > 0:\n if self.r_constraint == 'dirichlet':\n z_d = torch.distributions.Dirichlet(self.seq_z_dir).rsample().view(-1, self.r_n_dir * self.r_dir_dim)\n elif self.r_constraint == 'logitnormal':\n normal = torch.distributions.Normal(ptu.FloatTensor([0.]), ptu.FloatTensor([1.])).sample(self.seq_z_dir_mean.size()).squeeze(-1)\n z_d = F.softmax(self.seq_z_dir_mean + torch.sqrt(self.seq_z_dir_var) * normal, dim=-1).view(-1, self.r_n_dir * self.r_dir_dim)\n\n self.seq_z = torch.cat([z, z_c, z_d], dim=-1)\n\n if compute_for_next:\n z, z_c, z_d = ptu.FloatTensor(), ptu.FloatTensor(), ptu.FloatTensor()\n if self.r_cat_dim > 0:\n gumbel = torch.distributions.Gumbel(ptu.FloatTensor([0]), ptu.FloatTensor([1.0])).sample(self.seq_z_next_cat.size()).squeeze(-1)\n log_z = torch.log(self.seq_z_next_cat + eps)\n logit = (log_z + gumbel) / self.temperature\n z = F.softmax(logit, dim=1).view(-1, self.r_n_cat * self.r_cat_dim)\n if self.r_cont_dim > 0:\n normal = torch.distributions.Normal(ptu.FloatTensor([0.]), ptu.FloatTensor([1.])).sample(self.seq_z_next_cont_mean.size()).squeeze(-1)\n z_c = self.seq_z_next_cont_mean + torch.sqrt(self.seq_z_next_cont_var) * normal\n if self.r_dir_dim > 0:\n if self.r_constraint == 'dirichlet':\n z_d = torch.distributions.Dirichlet(self.seq_z_next_dir).rsample().view(-1, self.r_n_dir * self.r_dir_dim)\n elif self.r_constraint == 'logitnormal':\n normal = torch.distributions.Normal(ptu.FloatTensor([0.]), ptu.FloatTensor([1.])).sample(self.seq_z_next_dir_mean.size()).squeeze(-1)\n z_d = F.softmax(self.seq_z_next_dir_mean + torch.sqrt(self.seq_z_next_dir_var) * normal, dim=-1).view(-1, self.r_n_dir * self.r_dir_dim)\n\n self.seq_z_next = torch.cat([z, z_c, z_d], dim=-1)\n else:\n self.seq_z_next = None\n\n def infer_step_posterior(self, step, resample): \n num_tasks = 1\n traj_batch = step.shape[0]\n params = self.recurrent_context_encoder(step.view(num_tasks, traj_batch, -1))\n if resample:\n if self.r_constraint == 'logitnormal':\n params = params.view(num_tasks, traj_batch, self.r_cont_dim * 2 + self.r_n_cat * self.r_cat_dim + self.r_n_dir * self.r_dir_dim * 2)\n else:\n params = params.view(num_tasks, traj_batch, self.r_cont_dim * 2 + self.r_n_cat * self.r_cat_dim + self.r_n_dir * self.r_dir_dim)\n\n if self.r_cat_dim > 0:\n # params_disc = params[..., :self.r_n_cat * self.r_cat_dim]\n seq_z_cat = params[..., :self.r_n_cat * self.r_cat_dim]\n self.seq_z_cat = F.softmax(seq_z_cat.view(num_tasks * traj_batch * self.r_n_cat, self.r_cat_dim), dim=-1)\n\n if self.r_cont_dim > 0:\n params_cont = params[..., self.r_n_cat*self.r_cat_dim:self.r_n_cat*self.r_cat_dim+2*self.r_cont_dim]\n seq_z_cont_mean = params_cont[..., :self.r_cont_dim]\n seq_z_cont_var = F.softplus(params_cont[..., self.r_cont_dim:])\n self.seq_z_cont_mean = seq_z_cont_mean.view(num_tasks * traj_batch, self.r_cont_dim)\n self.seq_z_cont_var = seq_z_cont_var.view(num_tasks * traj_batch, self.r_cont_dim)\n\n if self.r_dir_dim > 0 and self.r_constraint == 'logitnormal':\n params_dir = params[..., self.r_n_cat * self.r_cat_dim + self.r_cont_dim * 2:]\n seq_z_dir_mean = params_dir[..., :self.r_n_dir * self.r_dir_dim]\n 
seq_z_dir_var = F.softplus(params_dir[..., self.r_dir_dim * self.r_n_dir:])\n self.seq_z_dir_mean = seq_z_dir_mean.view(num_tasks * traj_batch * self.r_n_dir, self.r_dir_dim)\n self.seq_z_dir_var = seq_z_dir_var.view(num_tasks * traj_batch * self.r_n_dir, self.r_dir_dim)\n\n if self.r_dir_dim > 0 and self.r_constraint == 'dirichlet':\n seq_z_dir = params[..., self.r_n_cat * self.r_cat_dim + self.r_cont_dim * 2:]\n self.seq_z_dir = F.softplus(seq_z_dir.view(num_tasks * traj_batch * self.r_n_dir, self.r_dir_dim))\n\n self.sample_sequence_z()\n\n def clear_sequence_z(self, num_tasks=1, batch_size=1, traj_batch_size=1): \n assert self.recurrent_context_encoder != None\n if self.r_cat_dim > 0:\n self.seq_z_cat = ptu.ones(num_tasks * batch_size * self.r_n_cat, self.r_cat_dim) / self.r_cat_dim\n self.seq_z_next_cat = None\n if self.r_cont_dim > 0:\n self.seq_z_cont_mean = ptu.zeros(num_tasks * batch_size, self.r_cont_dim)\n self.seq_z_cont_var = ptu.ones(num_tasks * batch_size, self.r_cont_dim)\n self.seq_z_next_cont_mean = None\n self.seq_z_next_cont_var = None\n if self.r_dir_dim > 0:\n if self.r_constraint == 'logitnormal':\n self.seq_z_dir_mean = ptu.zeros(num_tasks * batch_size * self.r_n_dir, self.r_dir_dim)\n self.seq_z_dir_var = ptu.ones(num_tasks * batch_size * self.r_n_dir, self.r_dir_dim) * self.r_var\n self.seq_z_next_dir_mean = None\n self.seq_z_next_dir_var = None\n elif self.r_constraint == 'dirichlet':\n self.seq_z_dir = ptu.ones(num_tasks * batch_size * self.r_n_dir, self.r_dir_dim) * self.r_alpha\n self.seq_z_next_dir = None\n\n self.sample_sequence_z()\n self.recurrent_context_encoder.reset(num_tasks*traj_batch_size)\n"
] |
[
[
"torch.mean",
"torch.nn.functional.softmax",
"torch.max",
"numpy.sqrt",
"torch.cat",
"torch.zeros",
"torch.sum",
"torch.ones",
"torch.sqrt",
"torch.reshape",
"torch.reciprocal",
"torch.distributions.Dirichlet",
"torch.nn.functional.softplus",
"torch.exp",
"torch.log",
"numpy.array",
"torch.nn.functional.normalize",
"torch.distributions.kl.kl_divergence",
"torch.clamp"
]
] |
leokb24/BIMPM-pytorch
|
[
"0f3267e33b26a63732f5a7487a74570cf9df9aed"
] |
[
"model/backup.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom pdb import set_trace as bp\n\nclass BiMPM(nn.Module):\n\n def __init__(self, args, data):\n super(BiMPM, self).__init__()\n\n self.args = args\n self.d = self.args.word_dim + int(self.args.use_char_emb) * self.args.char_hidden_size\n self.l = self.args.num_perspective\n\n # self.c_embed_size = int(args['--char-embed-size'])\n # self.w_embed_size = int(args['--embed-size'])\n # self.l = int(args['--perspective'])\n # self.dropout_val = float(args['--dropout'])\n # self.bi_hidden = int(args['--bi-hidden-size'])\n # self.char_hidden = int(args['--char-hidden-size'])\n # self.rnn_type = args['--rnn-type']\n # self.char_layer_size = int(args['--char-lstm-layers'])\n # self.context_layer_size = int(args['--bilstm-layers'])\n # self.char_inp = vocab + 100\n # self.classes = class_size\n # self.char_use = args['--char']\n\n self.wembeddings = nn.Embedding(num_embeddings=args.word_vocab_size,\n embedding_dim=args.word_dim)\n\n self.wembeddings.weight.data.copy_(data.TEXT.vocab.vectors)\n self.dropout = nn.Dropout(p=self.args.dropout)\n\n if self.char_use:\n self.char_embedding = nn.Embedding(args.char_vocab_size, args.char_dim, padding_idx=0)\n\n self.char_lstm = nn.LSTM(input_size=self.args.char_dim,\n hidden_size=self.args.char_hidden_size,\n num_layers=1,\n bidirectional=False,\n batch_first=True,\n dropout=self.args.dropout)\n\n self.context_lstm = nn.LSTM(input_size=self.d,\n hidden_size=self.args.hidden_size,\n num_layers=1,\n bidirectional=True,\n batch_first=True,\n dropout=self.args.dropout)\n\n # ----- Matching Layer -----\n # w_i: (1, 1, hidden_size, l)\n for i in range(1, 9):\n setattr(self, f'w{i}', nn.Parameter(torch.rand(self.l, self.args.hidden_size)))\n\n # ----- Aggregation Layer -----\n self.aggregation_lstm = nn.LSTM(input_size=self.l * 8,\n hidden_size=self.args.hidden_size,\n num_layers=1,\n bidirectional=True,\n batch_first=True,\n dropout=self.args.dropout)\n\n # ----- Prediction Layer -----\n self.ff1 = nn.Linear(self.args.hidden_size * 4, self.args.hidden_size * 2)\n self.ff2 = nn.Linear(self.args.hidden_size * 2, self.class_size)\n\n self.init_weights()\n\n def init_weights(self):\n for param in list(self.parameters()):\n nn.init.uniform_(param, -0.01, 0.01)\n\n def init_char_embed(self, c1, c2):\n c1_embed = self.char_embedding(c1)\n char_p1 = self.char_lstm(c1_embed)\n c2_embed = self.char_embedding(c2)\n char_p2 = self.char_lstm(c2_embed)\n\n # (batch, char_hidden_size * num_directions)\n return char_p1[0][:, -1], char_p2[0][:, -1]\n\n def cosine_similarity(self, prod, norm):\n # As set in PyTorch documentation\n eps = 1e-8\n norm = norm * (norm > eps).float() + eps * (norm <= eps).float()\n\n return prod / norm\n\n def full_matching(self, p1, p2, w_matrix):\n \"\"\"\n :param p1: (batch, seq_len, hidden_size)\n :param p2: (batch, hidden_size)\n :param w_matrix: (l, hidden_size)\n :return: (batch, seq_len, l)\n \"\"\"\n # (1, 1, hidden_size, l)\n w_matrix = w_matrix.transpose(1, 0).unsqueeze(0).unsqueeze(0)\n\n # (batch, seq_len, hidden_size, l)\n p1 = torch.stack([p1] * self.l, dim=3)\n p1 = w_matrix * p1\n\n p1_seq_len = p1.size(1)\n p2 = torch.stack([p2] * p1_seq_len, dim=1)\n p2 = torch.stack([p2] * self.l, dim=3)\n p2 = w_matrix * p2\n result = F.cosine_similarity(p1, p2, dim=2)\n return result\n\n def maxpool_matching(self, p1, p2, w_matrix):\n \"\"\"\n :param p1: (batch, seq_len, hidden_size)\n :param p2: (batch, seq_len, hidden_size)\n :param w_matrix: (l, hidden_size)\n :return: (batch, 
seq, l)\n \"\"\"\n\n # (1, l, 1, hidden_size)\n w_matrix = w_matrix.unsqueeze(0).unsqueeze(2)\n # (batch, l, seq_len, hidden_size)\n p1 = torch.stack([p1] * self.l, dim=1)\n p1 = w_matrix * p1\n\n p2 = torch.stack([p2] * self.l, dim=1)\n p2 = w_matrix * p2\n\n # (batch, l, seq_len, 1)\n p1_norm = p1.norm(p=2, dim=3, keepdim=True)\n p2_norm = p2.norm(p=2, dim=2, keepdim=True)\n\n # (batch, l, seq1_len, seq2_len)\n full_mat = torch.matmul(p1, p2.transpose(2, 3))\n deno_mat = torch.matmul(p1_norm, p2_norm.transpose(2, 3))\n\n # (batch, seq1, seq2, l)\n result = self.cosine_similarity(full_mat, deno_mat).permute(0, 2, 3, 1)\n return result\n\n def attentive_matching(self, p1, p2, w_matrix_att, w_matrix_max):\n # Perform both attentive types of matching together\n p1_norm = p1.norm(p=2, dim=2, keepdim=True)\n p2_norm = p2.norm(p=2, dim=2, keepdim=True)\n\n full_mat = torch.matmul(p1.permute(1, 0, 2), p2.permute(1, 2, 0))\n deno_mat = torch.matmul(p1_norm.permute(1, 0, 2), p2_norm.permute(1, 2, 0))\n alpha_mat = self.cosine_similarity(full_mat, deno_mat)\n\n _, max_index = torch.max(alpha_mat, dim=2)\n max_index = torch.stack([max_index] * self.bi_hidden, dim=2)\n\n h_mat = torch.bmm(alpha_mat, p2.transpose(1, 0))\n alpha_mat = alpha_mat.sum(dim=2, keepdim=True)\n resultant = h_mat / alpha_mat\n\n v1 = resultant.transpose(1, 0).unsqueeze(-1) * w_matrix_att\n v2 = p1.unsqueeze(-1) * w_matrix_att\n result_match = F.cosine_similarity(v1, v2, dim=2)\n\n out_mat = torch.gather(p2.transpose(1, 0), 1, max_index)\n v1 = out_mat.transpose(1, 0).unsqueeze(-1) * w_matrix_max\n v2 = p1.unsqueeze(-1) * w_matrix_max\n result_max = F.cosine_similarity(v1, v2, dim=2)\n\n return result_match, result_max\n\n def forward(self, **kwargs):\n\n p1_input = self.wembeddings(kwargs['p'])\n p2_input = self.wembeddings(kwargs['h'])\n\n if self.args.use_char_emb:\n char_p1, char_p2 = self.init_char_embed(kwargs['char_p'], kwargs['char_h'])\n dim1, dim2 = kwargs['p'].size()\n char_p1 = char_p1.view(dim1, dim2, -1)\n dim1, dim2 = kwargs['h'].size()\n char_p2 = char_p2.view(dim1, dim2, -1)\n p1_input = torch.cat((p1_input, char_p1), -1)\n p2_input = torch.cat((p2_input, char_p2), -1)\n\n context1_full, (context1_lh, _) = self.context_lstm(p1_input)\n context2_full, (context2_lh, _) = self.context_lstm(p2_input)\n\n else:\n context1_full, (context1_lh, _) = self.context_lstm(p1_input)\n context2_full, (context2_lh, _) = self.context_lstm(p2_input)\n\n # (batch, seq_len, hidden_size)\n context1_forw, context1_back = torch.split(context1_full, self.args.hidden_size, -1)\n # (batch, hidden_size)\n # context1_lh_forw, context1_lh_back = context1_lh[0], context1_lh[1]\n context1_lh_forw, context1_lh_back = context1_forw[:, -1], context1_back[:, -1]\n\n context2_forw, context2_back = torch.split(context2_full, self.args.hidden_size, -1)\n context2_lh_forw, context2_lh_back = context2_forw[:, -1], context2_lh[:, -1]\n\n # 4 tensors from forward and backward matching (full matching)\n match_p1_forw = self.full_matching(context1_forw, context2_lh_forw, self.w1)\n match_p1_back = self.full_matching(context1_back, context2_lh_back, self.w2)\n match_p2_forw = self.full_matching(context2_forw, context1_lh_forw, self.w1)\n match_p2_back = self.full_matching(context2_back, context1_lh_back, self.w2)\n\n # 4 tensors from forward and backward matching (max-pooling matching)\n maxm_forw = self.maxpool_matching(context1_forw, context2_forw, self.w3)\n maxm_back = self.maxpool_matching(context1_back, context2_back, self.w4)\n maxm_p1_forw, _ = 
maxm_forw.max(dim=2)\n maxm_p1_back, _ = maxm_back.max(dim=2)\n maxm_p2_forw, _ = maxm_forw.max(dim=1)\n maxm_p2_back, _ = maxm_back(dim=1)\n\n # 8 tensors from the forward and backward attentive matching and attentive max\n att_p1_forw, attm_p1_forw = self.attentive_matching(context1_forw, context2_forw, self.w5, self.w7)\n att_p1_back, attm_p1_back = self.attentive_matching(context1_back, context2_back, self.w6, self.w8)\n att_p2_forw, attm_p2_forw = self.attentive_matching(context2_forw, context1_forw, self.w5, self.w7)\n att_p2_back, attm_p2_back = self.attentive_matching(context2_back, context1_back, self.w6, self.w8)\n\n aggr_p1 = torch.cat([match_p1_forw, match_p1_back, maxm_p1_forw, maxm_p1_back,\n att_p1_forw, att_p1_back, attm_p1_forw, attm_p1_back], dim=2)\n\n aggr_p2 = torch.cat([match_p2_forw, match_p2_back, maxm_p2_forw, maxm_p2_back,\n att_p2_forw, att_p2_back, attm_p2_forw, attm_p2_back], dim=2)\n\n aggr_p1 = self.dropout(aggr_p1)\n aggr_p2 = self.dropout(aggr_p2)\n\n _, (p1_output, _) = self.aggregation_lstm(aggr_p1)\n _, (p2_output, _) = self.aggregation_lstm(aggr_p2)\n\n output = torch.cat([torch.cat([p1_output[0, :, :], p1_output[1, :, :]], dim=-1),\n torch.cat([p2_output[0, :, :], p2_output[1, :, :]], dim=-1)], dim=-1)\n\n output = self.dropout(output)\n output = torch.tanh(self.ff1(output))\n output = self.dropout(output)\n output = self.ff2(output)\n\n return output\n"
] |
[
[
"torch.nn.Dropout",
"torch.nn.init.uniform_",
"torch.max",
"torch.nn.LSTM",
"torch.cat",
"torch.nn.Embedding",
"torch.nn.Linear",
"torch.nn.functional.cosine_similarity",
"torch.rand",
"torch.split",
"torch.stack"
]
] |
googleinterns/deep-3d-reconstruction
|
[
"cd4e1ddb5a410b3629341fad21582c9d7ce07de8"
] |
[
"scannet/utils/pc_utils.py"
] |
[
"\"\"\" Utility functions for processing point clouds.\n\nAuthor: Charles R. Qi, Hao Su\nDate: November 2016\n\"\"\"\n\nimport os\nimport sys\nimport warnings\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(BASE_DIR)\n\n# Draw point cloud\nfrom eulerangles import euler2mat\n\n# Point cloud IO\nimport numpy as np\nfrom plyfile import PlyData, PlyElement\n\n \n# ----------------------------------------\n# Point Cloud/Volume Conversions\n# ----------------------------------------\n\ndef point_cloud_to_volume_batch(point_clouds, vsize=12, radius=1.0, flatten=True):\n \"\"\" Input is BxNx3 batch of point cloud\n Output is Bx(vsize^3)\n \"\"\"\n vol_list = []\n for b in range(point_clouds.shape[0]):\n vol = point_cloud_to_volume(np.squeeze(point_clouds[b,:,:]), vsize, radius)\n if flatten:\n vol_list.append(vol.flatten())\n else:\n vol_list.append(np.expand_dims(np.expand_dims(vol, -1), 0))\n if flatten:\n return np.vstack(vol_list)\n else:\n return np.concatenate(vol_list, 0)\n\n\ndef point_cloud_to_volume(points, vsize, radius=1.0):\n \"\"\" input is Nx3 points.\n output is vsize*vsize*vsize\n assumes points are in range [-radius, radius]\n \"\"\"\n vol = np.zeros((vsize,vsize,vsize))\n voxel = 2*radius/float(vsize)\n locations = (points + radius)/voxel\n locations = locations.astype(int)\n vol[locations[:,0],locations[:,1],locations[:,2]] = 1.0\n return vol\n\n#a = np.zeros((16,1024,3))\n#print point_cloud_to_volume_batch(a, 12, 1.0, False).shape\n\ndef volume_to_point_cloud(vol):\n \"\"\" vol is occupancy grid (value = 0 or 1) of size vsize*vsize*vsize\n return Nx3 numpy array.\n \"\"\"\n vsize = vol.shape[0]\n assert(vol.shape[1] == vsize and vol.shape[1] == vsize)\n points = []\n for a in range(vsize):\n for b in range(vsize):\n for c in range(vsize):\n if vol[a,b,c] == 1:\n points.append(np.array([a,b,c]))\n if len(points) == 0:\n return np.zeros((0,3))\n points = np.vstack(points)\n return points\n\ndef point_cloud_to_volume_v2_batch(point_clouds, vsize=12, radius=1.0, num_sample=128):\n \"\"\" Input is BxNx3 a batch of point cloud\n Output is BxVxVxVxnum_samplex3\n Added on Feb 19\n \"\"\"\n vol_list = []\n for b in range(point_clouds.shape[0]):\n vol = point_cloud_to_volume_v2(point_clouds[b,:,:], vsize, radius, num_sample)\n vol_list.append(np.expand_dims(vol, 0))\n return np.concatenate(vol_list, 0)\n\ndef point_cloud_to_volume_v2(points, vsize, radius=1.0, num_sample=128):\n \"\"\" input is Nx3 points\n output is vsize*vsize*vsize*num_sample*3\n assumes points are in range [-radius, radius]\n samples num_sample points in each voxel, if there are less than\n num_sample points, replicate the points\n Added on Feb 19\n \"\"\"\n vol = np.zeros((vsize,vsize,vsize,num_sample,3))\n voxel = 2*radius/float(vsize)\n locations = (points + radius)/voxel\n locations = locations.astype(int)\n loc2pc = {}\n for n in range(points.shape[0]):\n loc = tuple(locations[n,:])\n if loc not in loc2pc:\n loc2pc[loc] = []\n loc2pc[loc].append(points[n,:])\n #print loc2pc\n\n for i in range(vsize):\n for j in range(vsize):\n for k in range(vsize):\n if (i,j,k) not in loc2pc:\n vol[i,j,k,:,:] = np.zeros((num_sample,3))\n else:\n pc = loc2pc[(i,j,k)] # a list of (3,) arrays\n pc = np.vstack(pc) # kx3\n # Sample/pad to num_sample points\n if pc.shape[0]>num_sample:\n choices = np.random.choice(pc.shape[0], num_sample, replace=False)\n pc = pc[choices,:]\n elif pc.shape[0]<num_sample:\n pc = np.lib.pad(pc, ((0,num_sample-pc.shape[0]),(0,0)), 'edge')\n # Normalize\n pc_center = 
(np.array([i,j,k])+0.5)*voxel - radius\n #print 'pc center: ', pc_center\n pc = (pc - pc_center) / voxel # shift and scale\n vol[i,j,k,:,:] = pc \n #print (i,j,k), vol[i,j,k,:,:]\n return vol\n\ndef point_cloud_to_image_batch(point_clouds, imgsize, radius=1.0, num_sample=128):\n \"\"\" Input is BxNx3 a batch of point cloud\n Output is BxIxIxnum_samplex3\n Added on Feb 19\n \"\"\"\n img_list = []\n for b in range(point_clouds.shape[0]):\n img = point_cloud_to_image(point_clouds[b,:,:], imgsize, radius, num_sample)\n img_list.append(np.expand_dims(img, 0))\n return np.concatenate(img_list, 0)\n\n\ndef point_cloud_to_image(points, imgsize, radius=1.0, num_sample=128):\n \"\"\" input is Nx3 points\n output is imgsize*imgsize*num_sample*3\n assumes points are in range [-radius, radius]\n samples num_sample points in each pixel, if there are less than\n num_sample points, replicate the points\n Added on Feb 19\n \"\"\"\n img = np.zeros((imgsize, imgsize, num_sample, 3))\n pixel = 2*radius/float(imgsize)\n locations = (points[:,0:2] + radius)/pixel # Nx2\n locations = locations.astype(int)\n loc2pc = {}\n for n in range(points.shape[0]):\n loc = tuple(locations[n,:])\n if loc not in loc2pc:\n loc2pc[loc] = []\n loc2pc[loc].append(points[n,:])\n for i in range(imgsize):\n for j in range(imgsize):\n if (i,j) not in loc2pc:\n img[i,j,:,:] = np.zeros((num_sample,3))\n else:\n pc = loc2pc[(i,j)]\n pc = np.vstack(pc)\n if pc.shape[0]>num_sample:\n choices = np.random.choice(pc.shape[0], num_sample, replace=False)\n pc = pc[choices,:]\n elif pc.shape[0]<num_sample:\n pc = np.lib.pad(pc, ((0,num_sample-pc.shape[0]),(0,0)), 'edge')\n pc_center = (np.array([i,j])+0.5)*pixel - radius\n pc[:,0:2] = (pc[:,0:2] - pc_center)/pixel\n img[i,j,:,:] = pc\n return img\n\ndef surface_normal_area(face, vertex):\n normals = list()\n areas = list()\n vertex_to_face = [[] for i in range(len(vertex))]\n for fid, f in enumerate(face):\n f = f[0]\n va, vb, vc = f[0], f[1], f[2]\n vertex_to_face[va].append(fid)\n vertex_to_face[vb].append(fid)\n vertex_to_face[vc].append(fid)\n\n a = vertex[vb] - vertex[va]\n b = vertex[vc] - vertex[va]\n normal = np.cross(a, b)\n area = np.dot(normal, normal) / 2.0\n normalized_normal = normal / np.linalg.norm(normal)\n normals.append(normalized_normal)\n areas.append(area)\n return np.array(normals), np.array(areas), vertex_to_face\n\n\ndef vertex_normal(vertex_to_face, normal, areas):\n vertex_normals = list()\n num_vertex = len(vertex_to_face)\n for vid in range(num_vertex):\n adj_faces = vertex_to_face[vid]\n if len(adj_faces)==0: # single point with no adjancy points\n vertex_normals.append([0,0,1])\n continue\n adj_faces_area = np.expand_dims(np.array(areas[adj_faces]), axis=-1)\n adj_faces_normal = np.array(normal[adj_faces])\n avg_normal = (adj_faces_normal * adj_faces_area) / np.sum(adj_faces_area)\n avg_normal = np.sum(avg_normal, axis=0)\n normalized_normal = avg_normal / np.linalg.norm(avg_normal)\n #if np.isclose(np.linalg.norm(avg_normal), 0.0):\n # print('-------------------')\n # print(len(adj_faces))\n # print('-------------------')\n # print('-------------------')\n # print(adj_faces_area.shape, adj_faces_normal.shape, adj_faces_area, adj_faces_normal) \n # print(adj_faces_normal * adj_faces_area)\n # print(np.sum(adj_faces_area))\n # print((adj_faces_normal * adj_faces_area) / np.sum(adj_faces_area))\n # print(avg_normal, np.linalg.norm(avg_normal), adj_faces_area, adj_faces_normal) \n # print('-------------------')\n vertex_normals.append(normalized_normal)\n return 
np.array(vertex_normals)\n\n\n\n \n \n# ----------------------------------------\n# Point cloud IO\n# ----------------------------------------\n\ndef read_ply(filename):\n \"\"\" read XYZ point cloud from filename PLY file \"\"\"\n plydata = PlyData.read(filename)\n pc = plydata['vertex'].data\n pc_array = np.array([[x, y, z] for x,y,z in pc])\n return pc_array\n\ndef read_ply_rgba(filename):\n \"\"\" read XYZRGBA point cloud from filename PLY file \"\"\"\n plydata = PlyData.read(filename)\n pc = plydata['vertex'].data\n pc_array = np.array([[x, y, z,r,g,b,a] for x,y,z,r,g,b,a in pc])\n return pc_array\n\ndef read_ply_rgba_normal(filename):\n \"\"\" read XYZRGBA and NxNyNz point cloud from filename PLY file \"\"\"\n plydata = PlyData.read(filename)\n pc = plydata['vertex'].data\n pc_array = np.array([[x, y, z,r,g,b,a] for x,y,z,r,g,b,a in pc])\n face = plydata['face'].data\n f_n, f_a, v_f = surface_normal_area(face, pc_array[:, 0:3])\n v_n = vertex_normal(v_f, f_n, f_a)\n pc_array = np.concatenate((pc_array, v_n), axis=-1)\n return pc_array\n\ndef write_ply(points, filename, text=True):\n \"\"\" input: Nx3, write points to filename as PLY format. \"\"\"\n points = [(points[i,0], points[i,1], points[i,2]) for i in range(points.shape[0])]\n vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4')])\n el = PlyElement.describe(vertex, 'vertex', comments=['vertices'])\n PlyData([el], text=text).write(filename)\n\ndef write_ply_rgb(points, colors, filename, text=True):\n \"\"\" input: Nx3, Nx3 write points and colors to filename as PLY format. \"\"\"\n num_points = len(points)\n assert len(colors) == num_points\n\n points = [(points[i,0], points[i,1], points[i,2]) for i in range(points.shape[0])]\n colors = [(colors[i,0], colors[i,1], colors[i,2]) for i in range(colors.shape[0])]\n vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4')])\n color = np.array(colors, dtype=[('red', 'u1'), ('green', 'u1'),('blue', 'u1')])\n \n \n vertex_all = np.empty(num_points, vertex.dtype.descr + color.dtype.descr)\n \n for prop in vertex.dtype.names:\n vertex_all[prop] = vertex[prop]\n \n for prop in color.dtype.names:\n vertex_all[prop] = color[prop]\n\n el = PlyElement.describe(vertex_all, 'vertex', comments=['vertices'])\n PlyData([el], text=text).write(filename)\n\ndef write_ply_rgb_normal(points, colors, normals, filename, text=True):\n \"\"\" input: Nx3, Nx3, Nx3 write points and colors to filename as PLY format. 
\"\"\"\n num_points = len(points)\n assert len(colors) == num_points\n\n points = [(points[i,0], points[i,1], points[i,2]) for i in range(points.shape[0])]\n colors = [(colors[i,0], colors[i,1], colors[i,2]) for i in range(colors.shape[0])]\n normals = [(normals[i,0], normals[i,1], normals[i,2]) for i in range(normals.shape[0])]\n vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4')])\n color = np.array(colors, dtype=[('red', 'u1'), ('green', 'u1'),('blue', 'u1')])\n normal = np.array(normals, dtype=[('nx', 'f4'), ('ny', 'f4'),('nz', 'f4')])\n \n \n vertex_all = np.empty(num_points, vertex.dtype.descr + color.dtype.descr + normal.dtype.descr)\n \n for prop in vertex.dtype.names:\n vertex_all[prop] = vertex[prop]\n \n for prop in color.dtype.names:\n vertex_all[prop] = color[prop]\n\n for prop in normal.dtype.names:\n vertex_all[prop] = normal[prop]\n\n el = PlyElement.describe(vertex_all, 'vertex', comments=['vertices'])\n PlyData([el], text=text).write(filename)\n# ----------------------------------------\n# Simple Point cloud and Volume Renderers\n# ----------------------------------------\n\ndef draw_point_cloud(input_points, canvasSize=500, space=200, diameter=25,\n xrot=0, yrot=0, zrot=0, switch_xyz=[0,1,2], normalize=True):\n \"\"\" Render point cloud to image with alpha channel.\n Input:\n points: Nx3 numpy array (+y is up direction)\n Output:\n gray image as numpy array of size canvasSizexcanvasSize\n \"\"\"\n image = np.zeros((canvasSize, canvasSize))\n if input_points is None or input_points.shape[0] == 0:\n return image\n\n points = input_points[:, switch_xyz]\n M = euler2mat(zrot, yrot, xrot)\n points = (np.dot(M, points.transpose())).transpose()\n\n # Normalize the point cloud\n # We normalize scale to fit points in a unit sphere\n if normalize:\n centroid = np.mean(points, axis=0)\n points -= centroid\n furthest_distance = np.max(np.sqrt(np.sum(abs(points)**2,axis=-1)))\n points /= furthest_distance\n\n # Pre-compute the Gaussian disk\n radius = (diameter-1)/2.0\n disk = np.zeros((diameter, diameter))\n for i in range(diameter):\n for j in range(diameter):\n if (i - radius) * (i-radius) + (j-radius) * (j-radius) <= radius * radius:\n disk[i, j] = np.exp((-(i-radius)**2 - (j-radius)**2)/(radius**2))\n mask = np.argwhere(disk > 0)\n dx = mask[:, 0]\n dy = mask[:, 1]\n dv = disk[disk > 0]\n \n # Order points by z-buffer\n zorder = np.argsort(points[:, 2])\n points = points[zorder, :]\n points[:, 2] = (points[:, 2] - np.min(points[:, 2])) / (np.max(points[:, 2] - np.min(points[:, 2])))\n max_depth = np.max(points[:, 2])\n \n for i in range(points.shape[0]):\n j = points.shape[0] - i - 1\n x = points[j, 0]\n y = points[j, 1]\n xc = canvasSize/2 + (x*space)\n yc = canvasSize/2 + (y*space)\n xc = int(np.round(xc))\n yc = int(np.round(yc))\n \n px = dx + xc\n py = dy + yc\n \n image[px, py] = image[px, py] * 0.7 + dv * (max_depth - points[j, 2]) * 0.3\n \n image = image / np.max(image)\n return image\n\ndef point_cloud_three_views(points):\n \"\"\" input points Nx3 numpy array (+y is up direction).\n return an numpy array gray image of size 500x1500. 
\"\"\" \n # +y is up direction\n # xrot is azimuth\n # yrot is in-plane\n # zrot is elevation\n img1 = draw_point_cloud(points, zrot=110/180.0*np.pi, xrot=45/180.0*np.pi, yrot=0/180.0*np.pi)\n img2 = draw_point_cloud(points, zrot=70/180.0*np.pi, xrot=135/180.0*np.pi, yrot=0/180.0*np.pi)\n img3 = draw_point_cloud(points, zrot=180.0/180.0*np.pi, xrot=90/180.0*np.pi, yrot=0/180.0*np.pi)\n image_large = np.concatenate([img1, img2, img3], 1)\n return image_large\n\n\ndef point_cloud_three_views_demo():\n \"\"\" Demo for draw_point_cloud function \"\"\"\n from PIL import Image\n points = read_ply('../third_party/mesh_sampling/piano.ply')\n im_array = point_cloud_three_views(points)\n img = Image.fromarray(np.uint8(im_array*255.0))\n img.save('piano.jpg')\n\nif __name__==\"__main__\":\n point_cloud_three_views_demo()\n\n\ndef pyplot_draw_point_cloud(points, output_filename):\n \"\"\" points is a Nx3 numpy array \"\"\"\n import matplotlib.pyplot as plt\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(points[:,0], points[:,1], points[:,2])\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n #savefig(output_filename)\n\ndef pyplot_draw_volume(vol, output_filename):\n \"\"\" vol is of size vsize*vsize*vsize\n output an image to output_filename\n \"\"\"\n points = volume_to_point_cloud(vol)\n pyplot_draw_point_cloud(points, output_filename)\n\ndef write_ply_color(points, labels, out_filename, num_classes=None, colors=None):\n \"\"\" Color (N,3) points with labels (N) within range 0 ~ num_classes-1 as OBJ file \"\"\"\n import matplotlib.pyplot as pyplot\n labels = labels.astype(int)\n N = points.shape[0]\n if num_classes is None:\n num_classes = np.max(labels)+1\n print(num_classes)\n else:\n assert(num_classes>np.max(labels))\n if colors is None:\n #colors = [pyplot.cm.hsv(i/float(num_classes)) for i in range(num_classes)]\n colors = [pyplot.cm.jet(i/float(num_classes)) for i in range(num_classes)]\n fout = open(out_filename, 'w')\n for i in range(N):\n c = colors[labels[i]]\n fout.write('v %f %f %f %d %d %d\\n' % (points[i,0],points[i,1],points[i,2],c[0],c[1],c[2]))\n fout.close()\n"
] |
[
[
"numpy.dot",
"numpy.expand_dims",
"numpy.squeeze",
"numpy.concatenate",
"numpy.max",
"numpy.round",
"numpy.mean",
"numpy.cross",
"numpy.exp",
"numpy.uint8",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.lib.pad",
"numpy.min",
"numpy.random.choice",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"numpy.linalg.norm",
"numpy.empty",
"numpy.argwhere",
"numpy.vstack"
]
] |
KrishPro/Reverse-Seqs
|
[
"1729170c93693a4b0b96d55a573b722772c9f8af"
] |
[
"model.py"
] |
[
"\"\"\"\nWritten by KrishPro @ KP\n\"\"\"\n\nimport torch.nn as nn\nimport torch\nimport math\n\n\nclass PositionalEncoding(nn.Module):\n def __init__(self,\n emb_size: int,\n dropout: float,\n maxlen: int = 5000):\n super(PositionalEncoding, self).__init__()\n den = torch.exp(- torch.arange(0, emb_size, 2)* math.log(10000) / emb_size)\n pos = torch.arange(0, maxlen).reshape(maxlen, 1)\n pos_embedding = torch.zeros((maxlen, emb_size))\n pos_embedding[:, 0::2] = torch.sin(pos * den)\n pos_embedding[:, 1::2] = torch.cos(pos * den)\n pos_embedding = pos_embedding.unsqueeze(-2)\n\n self.dropout = nn.Dropout(dropout)\n self.register_buffer('pos_embedding', pos_embedding)\n\n def forward(self, token_embedding: torch.Tensor):\n return self.dropout(token_embedding + self.pos_embedding[:token_embedding.size(0), :])\n\nclass EmbeddingLayer(nn.Module):\n def __init__(self, vocab_size: int, emb_size: int, dropout: float) -> None:\n super().__init__()\n \n self.emb_size = emb_size\n self.embedding_layer = nn.Embedding(vocab_size, self.emb_size)\n self.positional_encoding = PositionalEncoding(self.emb_size, dropout)\n \n def forward(self, indices: torch.Tensor):\n assert indices.dtype == torch.long, f\"Indices to embedding layer must be of dtype torch.long, Currently dtype is {indices.dtype}\"\n embeddings = self.embedding_layer(indices) * math.sqrt(self.emb_size)\n return self.positional_encoding(embeddings)\n\nclass Transformer(nn.Module):\n def __init__(self, d_model: int, vocab_size: int, nhead: int, num_encoder_layers: int, num_decoder_layers: int, dim_feedforward: int, dropout: float) -> None:\n super().__init__()\n self.embedding_layer = EmbeddingLayer(vocab_size, d_model, dropout)\n self.transformer = nn.Transformer(d_model, nhead, num_encoder_layers, num_decoder_layers, dim_feedforward, dropout=dropout)\n self.classifier = nn.Linear(d_model, vocab_size)\n\n @staticmethod\n def generate_square_subsequent_mask(sz: int) -> torch.Tensor:\n r\"\"\"Generate a square mask for the sequence. The masked positions are filled with float('-inf').\n Unmasked positions are filled with float(0.0).\n \"\"\"\n return torch.triu(torch.full((sz, sz), float('-inf')), diagonal=1)\n\n def forward(self, src: torch.Tensor, tar: torch.Tensor):\n T, _ = tar.shape\n\n tar_mask: torch.Tensor = self.generate_square_subsequent_mask(T).to(tar.device)\n\n src = self.embedding_layer(src)\n tar = self.embedding_layer(tar)\n\n transformer_out: torch.Tensor = self.transformer(src, tar, tgt_mask=tar_mask)\n\n classifier_out: torch.Tensor = self.classifier(transformer_out)\n return classifier_out\n"
] |
[
[
"torch.nn.Dropout",
"torch.sin",
"torch.zeros",
"torch.nn.Transformer",
"torch.nn.Embedding",
"torch.nn.Linear",
"torch.arange",
"torch.cos"
]
] |
DehuiYan/tumorDetection
|
[
"58258c0ee769dff3db4c3577215de6197b3463c7"
] |
[
"src/detection.py"
] |
[
"#!/usr/bin/env python\n# coding=utf-8\n\nimport numpy as np\nimport os\nimport sys\nimport tensorflow as tf\n\nfrom collections import defaultdict\nfrom io import StringIO\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\n\n#matplotlib inline\n#sys.path.append(\"..\")\n\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import visualization_utils as vis_util\n\ndef load_image_into_numpy_array(image):\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)\n\n\ndef region_detection(PATH_TO_CKPT, PATH_TO_LABELS, NUM_CLASSES, image, score_thresh=0.5):\n detection_graph = tf.Graph()\n with detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n label_map = label_map_util.load_labelmap(PATH_TO_LABELS)\n categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\n category_index = label_map_util.create_category_index(categories)\n with detection_graph.as_default():\n with tf.Session(graph=detection_graph) as sess:\n #image = Image.open(PATH_TO_IMAGE)\n image_np = load_image_into_numpy_array(image)\n image_np_expanded = np.expand_dims(image_np, axis=0)\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n scores = detection_graph.get_tensor_by_name('detection_scores:0')\n classes = detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n (boxes, scores, classes, num_detections) = sess.run(\n [boxes, scores, classes, num_detections],\n feed_dict = {image_tensor: image_np_expanded}\n )\n vis_util.visualize_boxes_and_labels_on_image_array(\n image_np,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n category_index,\n min_score_thresh=score_thresh,\n use_normalized_coordinates=True,\n line_thickness=8\n )\n #plt.figure(figsize=(12,8))\n #plt.imshow(image_np)\n #im = Image.fromarray(image_np)\n #im.save('../testdata/'+str(cnt+1)+'.jpg')\n if np.squeeze(scores)[0] > score_thresh:\n flag = True\n else:\n flag = False\n #print np.squeeze(boxes), np.squeeze(scores), classes, num_detections\n return flag, image_np, np.squeeze(boxes), np.squeeze(scores)\n\nif __name__ == '__main__':\n PATH_TO_CKPT = '../../mydata/tfmodels/ssd_inception_v2/train/output_inference_graph.pb'\n PATH_TO_LABELS = '../../mydata/tfdata/pascal_label_map.pbtxt'\n PATH_TO_IMAGE = []\n rootdir = '../../mydata/VOCdevkit/VOC2007/JPEGImages'\n lists = os.listdir(rootdir)\n for i in range(len(lists)):\n path = os.path.join(rootdir, lists[i])\n PATH_TO_IMAGE.append(path)\n NUM_CLASSES = 1\n for image_file in PATH_TO_IMAGE:\n image = Image.open(image_file)\n region_detection(PATH_TO_CKPT, PATH_TO_LABELS, NUM_CLASSES, image, 0.5)\n\n"
] |
[
[
"tensorflow.Graph",
"tensorflow.import_graph_def",
"numpy.expand_dims",
"tensorflow.gfile.GFile",
"numpy.squeeze",
"tensorflow.Session",
"tensorflow.GraphDef"
]
] |
mdhillmancmcl/TheWorldAvatar-CMCL-Fork
|
[
"011aee78c016b76762eaf511c78fabe3f98189f4"
] |
[
"web/CO2WEB/agents/WHR_network_optimization_trim.py"
] |
[
"\n# coding: utf-8\n\n# In[26]:\n\nfrom rdflib import Graph\n\nfrom rdflib import URIRef, BNode, Literal\n\nimport numpy as np\n\nimport scipy as sp\n\nfrom scipy.optimize import linprog\n\nimport networkx as nx\n\nimport matplotlib.pyplot as plt\nimport json\n\nimport os\ndir = os.path.dirname(__file__)\nimport sys\n\nfor arg in sys.argv:\n print (arg)\n\nprint (sys.argv[1])\n\n# In[27]:\n\n#g = Graph()\n#g.parse(sys.argv[1], format=\"xml\")\n#C:/irp3-WebJPS-git/CO2WEB/testFiles/wasteheatnetwork.owl\nResultJSON = {}\ndef add2Results (name, value):\n ResultJSON[name] = value\n\n# In[28]:\n\n#print (\"Ontology parsing begins, we find there are %s total tuples in your ontology\" %(len(g)))\n#add2Results(\"tupleNumber\", len(g))\n\n# In[29]:\n\n#input(\"Press Enter to continue...\")\n\n\n# In[98]:\n\nA = np.zeros((5,2))\nB = np.zeros((5,2))\n\n\n#SourcePlantQualities\nA[0,0] = float(sys.argv[1])\nA[1,0] = float(sys.argv[2])\nA[2,0] = float(sys.argv[3])\nA[3,0] = float(sys.argv[4])\nA[4,0] = float(sys.argv[5])\n\n#SinkPlantQualities\nA[0,1] = float(sys.argv[6])\nA[1,1] = float(sys.argv[7])\nA[2,1] = float(sys.argv[8])\nA[3,1] = float(sys.argv[9])\nA[4,1] = float(sys.argv[10])\n\n\n\nB[0,0] = float(sys.argv[11])\nB[1,0] = float(sys.argv[12])\nB[2,0] = float(sys.argv[13])\nB[3,0] = float(sys.argv[14])\nB[4,0] = float(sys.argv[15])\n\nB[0,1] = float(sys.argv[16])\nB[1,1] = float(sys.argv[17])\nB[2,1] = float(sys.argv[18])\nB[3,1] = float(sys.argv[19])\nB[4,1] = float(sys.argv[20])\n\n\nSourcePlantQuantities = [B[0,0],B[1,0],B[2,0],B[3,0],B[4,0]]\n\nSinkPlantQuantities = [B[0,1],B[1,1],B[2,1],B[3,1],B[4,1]]\nSourcePlantQualities = [A[0,0],A[1,0],A[2,0],A[3,0],A[4,0]]\n\nSinkPlantQualities = [A[0,1],A[1,1],A[2,1],A[3,1],A[4,1]]\n\n\n#print (\"Ontology parsing has successfully finished.\")\n\n\n# In[102]:\n\n#input(\"Press Enter to continue...\")\n\n\n# In[ ]:\n\n\n\n\n# In[103]:\n\nst1 = 'sc'\nst2 = 'sk'\n\n\n# In[104]:\n\ndef optimization (x, y):\n SourceT = x[:,0]\n SinkT = x[:,1]\n SourceH = y[:,0]\n SinkH = y[:,1]\n count = 0\n for i in range(SourceT.shape[0]):\n for j in range(SourceT.shape[0]): \n if (j!=i): \n #print (\"source temperature is \" + str(SinkT[i,0]) + \" sink temperature is \" + str(SinkT[j,0]) )\n criteria = SourceT[i] - SinkT[j]\n if criteria >= 0:\n if count == 0: M = np.matrix([SourceH[i], SinkH[j], i+1, j+1])\n else: \n newrow = np.matrix([SourceH[i], SinkH[j], i+1, j+1])\n M = np.vstack([M, newrow])\n #print (\"waste heat recovery from \" + str(i+1) + \" to \" + str(j+1) + \" is possible\")\n #print (' ')\n count = count + 1\n #else:\n #print (\"waste heat recovery from \" + str(i+1) + \" to \" + str(j+1) + \" is impossible\")\n #print (' ')\n #print (M) \n #print (count)\n #print ('After transportation network modelling, we find that:\\n There are ' + str(count) + \n # ' total possible waste heat recovery energy flows in the network')\n #print ('')\n #print ('There possible energy flows are:\\n')\n\n add2Results(\"flowNumber\", str(count))\n flowList = []\n for i in range(count):\n #print (\"From plant %s to plant %s\" %(M[i,2],M[i,3]))\n flowList.append([M[i,2],M[i,3]])\n add2Results(\"flowList\", flowList)\n\n\n \n # optimization of energy network, formulation and solution of the otpimization problem (step 3 in the paper)\n c = [-1, -1, -1, -1, -1, -1, -1]\n AA = [[1,0,0,0,0,0,0],[0,1,0,0,0,0,0],[0,0,1,1,1,1,0],[0,0,0,0,0,0,1],[1,1,0,1,0,0,1],[0,0,1,0,0,0,0],[0,0,0,0,1,0,0],[0,0,0,0,0,1,0]]\n bb = [5766,2164,2150,1467,895,1512,1645,994]\n x0_bounds = (0, None)\n 
x1_bounds = (0, None)\n x2_bounds = (0, None)\n x3_bounds = (0, None)\n x4_bounds = (0, None)\n x5_bounds = (0, None)\n x6_bounds = (0, None)\n \n res = linprog(c, A_ub=AA, b_ub=bb, bounds=(x0_bounds, x1_bounds,x2_bounds, x3_bounds, x4_bounds,x5_bounds, x6_bounds),\n options={\"disp\": True})\n #print (res)\n \n #print ('')\n #print ('After network optimization, we find that the optimal waste heat recovery network is:\\n')\n modifyList = []\n for i in range(count):\n if i < len(res.x) and res.x[i] != 0:\n modifyList.append([M[i,2],M[i,3],res.x[i]])\n #print (\"%s kW waste heat from plant %s to plant %s\" %(res.x[i],M[i,2],M[i,3])) \\\n add2Results(\"modifyList\", modifyList)\n\n #print ('')\n #print ('Under such a design configuration, the whole eco-industrial park can save %s kW energy' %(-res.fun))\n add2Results(\"saveNumber\", -res.fun)\n print (\"JSON\"+str(json.dumps(ResultJSON)))\n\n\n\n\n\n\n\n# In[109]:\n\nA1=A\nB1=B\n\nran1 = np.random.rand(5,2)\n\nran2 = np.random.rand(5,2)\n\nA1 = A + A * ran1 * 0.5\n\nB1 = B + B * ran2 * 0.5\n\n\n# In[110]:\n\noptimization(A1, B1)\n\n\n# In[ ]:\n\n\n\n\n# In[ ]:\n\n\n\n\n# In[ ]:\n\n\n\n"
] |
[
[
"numpy.matrix",
"scipy.optimize.linprog",
"numpy.random.rand",
"numpy.zeros",
"numpy.vstack"
]
] |
kktsubota/CompressAI
|
[
"158fd64b74d50b5d7373ec6fe97fab7744b959aa"
] |
[
"compressai/models/priors.py"
] |
[
"# Copyright 2020 InterDigital Communications, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nimport warnings\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom compressai.ans import BufferedRansEncoder, RansDecoder\nfrom compressai.entropy_models import EntropyBottleneck, GaussianConditional\nfrom compressai.layers import GDN, MaskedConv2d\n\nfrom .utils import conv, deconv, update_registered_buffers\n\n__all__ = [\n \"CompressionModel\",\n \"FactorizedPrior\",\n \"ScaleHyperprior\",\n \"MeanScaleHyperprior\",\n \"JointAutoregressiveHierarchicalPriors\",\n]\n\n\nclass CompressionModel(nn.Module):\n \"\"\"Base class for constructing an auto-encoder with at least one entropy\n bottleneck module.\n\n Args:\n entropy_bottleneck_channels (int): Number of channels of the entropy\n bottleneck\n \"\"\"\n\n def __init__(self, entropy_bottleneck_channels, init_weights=True):\n super().__init__()\n self.entropy_bottleneck = EntropyBottleneck(entropy_bottleneck_channels)\n\n if init_weights:\n self._initialize_weights()\n\n def aux_loss(self):\n \"\"\"Return the aggregated loss over the auxiliary entropy bottleneck\n module(s).\n \"\"\"\n aux_loss = sum(\n m.loss() for m in self.modules() if isinstance(m, EntropyBottleneck)\n )\n return aux_loss\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):\n nn.init.kaiming_normal_(m.weight)\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n\n def forward(self, *args):\n raise NotImplementedError()\n\n def update(self, force=False):\n \"\"\"Updates the entropy bottleneck(s) CDF values.\n\n Needs to be called once after training to be able to later perform the\n evaluation with an actual entropy coder.\n\n Args:\n force (bool): overwrite previous values (default: False)\n\n Returns:\n updated (bool): True if one of the EntropyBottlenecks was updated.\n\n \"\"\"\n updated = False\n for m in self.children():\n if not isinstance(m, EntropyBottleneck):\n continue\n rv = m.update(force=force)\n updated |= rv\n return updated\n\n def load_state_dict(self, state_dict):\n # Dynamically update the entropy bottleneck buffers related to the CDFs\n update_registered_buffers(\n self.entropy_bottleneck,\n \"entropy_bottleneck\",\n [\"_quantized_cdf\", \"_offset\", \"_cdf_length\"],\n state_dict,\n )\n super().load_state_dict(state_dict)\n\n\nclass FactorizedPrior(CompressionModel):\n r\"\"\"Factorized Prior model from J. Balle, D. Minnen, S. Singh, S.J. Hwang,\n N. Johnston: `\"Variational Image Compression with a Scale Hyperprior\"\n <https://arxiv.org/abs/1802.01436>`_, Int Conf. 
on Learning Representations\n (ICLR), 2018.\n\n Args:\n N (int): Number of channels\n M (int): Number of channels in the expansion layers (last layer of the\n encoder and last layer of the hyperprior decoder)\n \"\"\"\n\n def __init__(self, N, M, **kwargs):\n super().__init__(entropy_bottleneck_channels=M, **kwargs)\n\n self.g_a = nn.Sequential(\n conv(3, N),\n GDN(N),\n conv(N, N),\n GDN(N),\n conv(N, N),\n GDN(N),\n conv(N, M),\n )\n\n self.g_s = nn.Sequential(\n deconv(M, N),\n GDN(N, inverse=True),\n deconv(N, N),\n GDN(N, inverse=True),\n deconv(N, N),\n GDN(N, inverse=True),\n deconv(N, 3),\n )\n\n self.N = N\n self.M = M\n\n @property\n def downsampling_factor(self) -> int:\n return 2 ** 4\n\n def forward(self, x):\n y = self.g_a(x)\n y_hat, y_likelihoods = self.entropy_bottleneck(y)\n x_hat = self.g_s(y_hat)\n\n return {\n \"x_hat\": x_hat,\n \"likelihoods\": {\n \"y\": y_likelihoods,\n },\n }\n\n @classmethod\n def from_state_dict(cls, state_dict):\n \"\"\"Return a new model instance from `state_dict`.\"\"\"\n N = state_dict[\"g_a.0.weight\"].size(0)\n M = state_dict[\"g_a.6.weight\"].size(0)\n net = cls(N, M)\n net.load_state_dict(state_dict)\n return net\n\n def compress(self, x):\n y = self.g_a(x)\n y_strings = self.entropy_bottleneck.compress(y)\n return {\"strings\": [y_strings], \"shape\": y.size()[-2:]}\n\n def decompress(self, strings, shape):\n assert isinstance(strings, list) and len(strings) == 1\n y_hat = self.entropy_bottleneck.decompress(strings[0], shape)\n x_hat = self.g_s(y_hat).clamp_(0, 1)\n return {\"x_hat\": x_hat}\n\n\n# From Balle's tensorflow compression examples\nSCALES_MIN = 0.11\nSCALES_MAX = 256\nSCALES_LEVELS = 64\n\n\ndef get_scale_table(min=SCALES_MIN, max=SCALES_MAX, levels=SCALES_LEVELS):\n return torch.exp(torch.linspace(math.log(min), math.log(max), levels))\n\n\nclass ScaleHyperprior(CompressionModel):\n r\"\"\"Scale Hyperprior model from J. Balle, D. Minnen, S. Singh, S.J. Hwang,\n N. Johnston: `\"Variational Image Compression with a Scale Hyperprior\"\n <https://arxiv.org/abs/1802.01436>`_ Int. Conf. 
on Learning Representations\n (ICLR), 2018.\n\n Args:\n N (int): Number of channels\n M (int): Number of channels in the expansion layers (last layer of the\n encoder and last layer of the hyperprior decoder)\n \"\"\"\n\n def __init__(self, N, M, **kwargs):\n super().__init__(entropy_bottleneck_channels=N, **kwargs)\n\n self.g_a = nn.Sequential(\n conv(3, N),\n GDN(N),\n conv(N, N),\n GDN(N),\n conv(N, N),\n GDN(N),\n conv(N, M),\n )\n\n self.g_s = nn.Sequential(\n deconv(M, N),\n GDN(N, inverse=True),\n deconv(N, N),\n GDN(N, inverse=True),\n deconv(N, N),\n GDN(N, inverse=True),\n deconv(N, 3),\n )\n\n self.h_a = nn.Sequential(\n conv(M, N, stride=1, kernel_size=3),\n nn.ReLU(inplace=True),\n conv(N, N),\n nn.ReLU(inplace=True),\n conv(N, N),\n )\n\n self.h_s = nn.Sequential(\n deconv(N, N),\n nn.ReLU(inplace=True),\n deconv(N, N),\n nn.ReLU(inplace=True),\n conv(N, M, stride=1, kernel_size=3),\n nn.ReLU(inplace=True),\n )\n\n self.gaussian_conditional = GaussianConditional(None)\n self.N = int(N)\n self.M = int(M)\n\n @property\n def downsampling_factor(self) -> int:\n return 2 ** (4 + 2)\n\n def forward(self, x):\n y = self.g_a(x)\n z = self.h_a(torch.abs(y))\n z_hat, z_likelihoods = self.entropy_bottleneck(z)\n scales_hat = self.h_s(z_hat)\n y_hat, y_likelihoods = self.gaussian_conditional(y, scales_hat)\n x_hat = self.g_s(y_hat)\n\n return {\n \"x_hat\": x_hat,\n \"likelihoods\": {\"y\": y_likelihoods, \"z\": z_likelihoods},\n }\n\n def load_state_dict(self, state_dict):\n update_registered_buffers(\n self.gaussian_conditional,\n \"gaussian_conditional\",\n [\"_quantized_cdf\", \"_offset\", \"_cdf_length\", \"scale_table\"],\n state_dict,\n )\n super().load_state_dict(state_dict)\n\n @classmethod\n def from_state_dict(cls, state_dict):\n \"\"\"Return a new model instance from `state_dict`.\"\"\"\n N = state_dict[\"g_a.0.weight\"].size(0)\n M = state_dict[\"g_a.6.weight\"].size(0)\n net = cls(N, M)\n net.load_state_dict(state_dict)\n return net\n\n def update(self, scale_table=None, force=False):\n if scale_table is None:\n scale_table = get_scale_table()\n updated = self.gaussian_conditional.update_scale_table(scale_table, force=force)\n updated |= super().update(force=force)\n return updated\n\n def compress(self, x):\n y = self.g_a(x)\n z = self.h_a(torch.abs(y))\n\n z_strings = self.entropy_bottleneck.compress(z)\n z_hat = self.entropy_bottleneck.decompress(z_strings, z.size()[-2:])\n\n scales_hat = self.h_s(z_hat)\n indexes = self.gaussian_conditional.build_indexes(scales_hat)\n y_strings = self.gaussian_conditional.compress(y, indexes)\n return {\"strings\": [y_strings, z_strings], \"shape\": z.size()[-2:]}\n\n def decompress(self, strings, shape):\n assert isinstance(strings, list) and len(strings) == 2\n z_hat = self.entropy_bottleneck.decompress(strings[1], shape)\n scales_hat = self.h_s(z_hat)\n indexes = self.gaussian_conditional.build_indexes(scales_hat)\n y_hat = self.gaussian_conditional.decompress(strings[0], indexes, z_hat.dtype)\n x_hat = self.g_s(y_hat).clamp_(0, 1)\n return {\"x_hat\": x_hat}\n\n\nclass MeanScaleHyperprior(ScaleHyperprior):\n r\"\"\"Scale Hyperprior with non zero-mean Gaussian conditionals from D.\n Minnen, J. Balle, G.D. Toderici: `\"Joint Autoregressive and Hierarchical\n Priors for Learned Image Compression\" <https://arxiv.org/abs/1809.02736>`_,\n Adv. 
in Neural Information Processing Systems 31 (NeurIPS 2018).\n\n Args:\n N (int): Number of channels\n M (int): Number of channels in the expansion layers (last layer of the\n encoder and last layer of the hyperprior decoder)\n \"\"\"\n\n def __init__(self, N, M, **kwargs):\n super().__init__(N, M, **kwargs)\n\n self.h_a = nn.Sequential(\n conv(M, N, stride=1, kernel_size=3),\n nn.LeakyReLU(inplace=True),\n conv(N, N),\n nn.LeakyReLU(inplace=True),\n conv(N, N),\n )\n\n self.h_s = nn.Sequential(\n deconv(N, M),\n nn.LeakyReLU(inplace=True),\n deconv(M, M * 3 // 2),\n nn.LeakyReLU(inplace=True),\n conv(M * 3 // 2, M * 2, stride=1, kernel_size=3),\n )\n\n def forward(self, x):\n y = self.g_a(x)\n z = self.h_a(y)\n z_hat, z_likelihoods = self.entropy_bottleneck(z)\n gaussian_params = self.h_s(z_hat)\n scales_hat, means_hat = gaussian_params.chunk(2, 1)\n y_hat, y_likelihoods = self.gaussian_conditional(y, scales_hat, means=means_hat)\n x_hat = self.g_s(y_hat)\n\n return {\n \"x_hat\": x_hat,\n \"likelihoods\": {\"y\": y_likelihoods, \"z\": z_likelihoods},\n }\n\n def compress(self, x):\n y = self.g_a(x)\n z = self.h_a(y)\n\n z_strings = self.entropy_bottleneck.compress(z)\n z_hat = self.entropy_bottleneck.decompress(z_strings, z.size()[-2:])\n\n gaussian_params = self.h_s(z_hat)\n scales_hat, means_hat = gaussian_params.chunk(2, 1)\n indexes = self.gaussian_conditional.build_indexes(scales_hat)\n y_strings = self.gaussian_conditional.compress(y, indexes, means=means_hat)\n return {\"strings\": [y_strings, z_strings], \"shape\": z.size()[-2:]}\n\n def decompress(self, strings, shape):\n assert isinstance(strings, list) and len(strings) == 2\n z_hat = self.entropy_bottleneck.decompress(strings[1], shape)\n gaussian_params = self.h_s(z_hat)\n scales_hat, means_hat = gaussian_params.chunk(2, 1)\n indexes = self.gaussian_conditional.build_indexes(scales_hat)\n y_hat = self.gaussian_conditional.decompress(\n strings[0], indexes, means=means_hat\n )\n x_hat = self.g_s(y_hat).clamp_(0, 1)\n return {\"x_hat\": x_hat}\n\n\nclass JointAutoregressiveHierarchicalPriors(MeanScaleHyperprior):\n r\"\"\"Joint Autoregressive Hierarchical Priors model from D.\n Minnen, J. Balle, G.D. Toderici: `\"Joint Autoregressive and Hierarchical\n Priors for Learned Image Compression\" <https://arxiv.org/abs/1809.02736>`_,\n Adv. 
in Neural Information Processing Systems 31 (NeurIPS 2018).\n\n Args:\n N (int): Number of channels\n M (int): Number of channels in the expansion layers (last layer of the\n encoder and last layer of the hyperprior decoder)\n \"\"\"\n\n def __init__(self, N=192, M=192, **kwargs):\n super().__init__(N=N, M=M, **kwargs)\n\n self.g_a = nn.Sequential(\n conv(3, N, kernel_size=5, stride=2),\n GDN(N),\n conv(N, N, kernel_size=5, stride=2),\n GDN(N),\n conv(N, N, kernel_size=5, stride=2),\n GDN(N),\n conv(N, M, kernel_size=5, stride=2),\n )\n\n self.g_s = nn.Sequential(\n deconv(M, N, kernel_size=5, stride=2),\n GDN(N, inverse=True),\n deconv(N, N, kernel_size=5, stride=2),\n GDN(N, inverse=True),\n deconv(N, N, kernel_size=5, stride=2),\n GDN(N, inverse=True),\n deconv(N, 3, kernel_size=5, stride=2),\n )\n\n self.h_a = nn.Sequential(\n conv(M, N, stride=1, kernel_size=3),\n nn.LeakyReLU(inplace=True),\n conv(N, N, stride=2, kernel_size=5),\n nn.LeakyReLU(inplace=True),\n conv(N, N, stride=2, kernel_size=5),\n )\n\n self.h_s = nn.Sequential(\n deconv(N, M, stride=2, kernel_size=5),\n nn.LeakyReLU(inplace=True),\n deconv(M, M * 3 // 2, stride=2, kernel_size=5),\n nn.LeakyReLU(inplace=True),\n conv(M * 3 // 2, M * 2, stride=1, kernel_size=3),\n )\n\n self.entropy_parameters = nn.Sequential(\n nn.Conv2d(M * 12 // 3, M * 10 // 3, 1),\n nn.LeakyReLU(inplace=True),\n nn.Conv2d(M * 10 // 3, M * 8 // 3, 1),\n nn.LeakyReLU(inplace=True),\n nn.Conv2d(M * 8 // 3, M * 6 // 3, 1),\n )\n\n self.context_prediction = MaskedConv2d(\n M, 2 * M, kernel_size=5, padding=2, stride=1\n )\n\n self.gaussian_conditional = GaussianConditional(None)\n self.N = int(N)\n self.M = int(M)\n\n @property\n def downsampling_factor(self) -> int:\n return 2 ** (4 + 2)\n\n def forward(self, x):\n y = self.g_a(x)\n z = self.h_a(y)\n z_hat, z_likelihoods = self.entropy_bottleneck(z)\n params = self.h_s(z_hat)\n\n y_hat = self.gaussian_conditional.quantize(\n y, \"noise\" if self.training else \"dequantize\"\n )\n ctx_params = self.context_prediction(y_hat)\n gaussian_params = self.entropy_parameters(\n torch.cat((params, ctx_params), dim=1)\n )\n scales_hat, means_hat = gaussian_params.chunk(2, 1)\n _, y_likelihoods = self.gaussian_conditional(y, scales_hat, means=means_hat)\n x_hat = self.g_s(y_hat)\n\n return {\n \"x_hat\": x_hat,\n \"likelihoods\": {\"y\": y_likelihoods, \"z\": z_likelihoods},\n }\n\n @classmethod\n def from_state_dict(cls, state_dict):\n \"\"\"Return a new model instance from `state_dict`.\"\"\"\n N = state_dict[\"g_a.0.weight\"].size(0)\n M = state_dict[\"g_a.6.weight\"].size(0)\n net = cls(N, M)\n net.load_state_dict(state_dict)\n return net\n\n def compress(self, x):\n if next(self.parameters()).device != torch.device(\"cpu\"):\n warnings.warn(\n \"Inference on GPU is not recommended for the autoregressive \"\n \"models (the entropy coder is run sequentially on CPU).\"\n )\n\n y = self.g_a(x)\n z = self.h_a(y)\n\n z_strings = self.entropy_bottleneck.compress(z)\n z_hat = self.entropy_bottleneck.decompress(z_strings, z.size()[-2:])\n\n params = self.h_s(z_hat)\n\n s = 4 # scaling factor between z and y\n kernel_size = 5 # context prediction kernel size\n padding = (kernel_size - 1) // 2\n\n y_height = z_hat.size(2) * s\n y_width = z_hat.size(3) * s\n\n y_hat = F.pad(y, (padding, padding, padding, padding))\n\n y_strings = []\n for i in range(y.size(0)):\n string = self._compress_ar(\n y_hat[i : i + 1],\n params[i : i + 1],\n y_height,\n y_width,\n kernel_size,\n padding,\n )\n y_strings.append(string)\n\n return 
{\"strings\": [y_strings, z_strings], \"shape\": z.size()[-2:]}\n\n def _compress_ar(self, y_hat, params, height, width, kernel_size, padding):\n cdf = self.gaussian_conditional.quantized_cdf.tolist()\n cdf_lengths = self.gaussian_conditional.cdf_length.tolist()\n offsets = self.gaussian_conditional.offset.tolist()\n\n encoder = BufferedRansEncoder()\n symbols_list = []\n indexes_list = []\n\n # Warning, this is slow...\n # TODO: profile the calls to the bindings...\n masked_weight = self.context_prediction.weight * self.context_prediction.mask\n for h in range(height):\n for w in range(width):\n y_crop = y_hat[:, :, h : h + kernel_size, w : w + kernel_size]\n ctx_p = F.conv2d(\n y_crop,\n masked_weight,\n bias=self.context_prediction.bias,\n )\n\n # 1x1 conv for the entropy parameters prediction network, so\n # we only keep the elements in the \"center\"\n p = params[:, :, h : h + 1, w : w + 1]\n gaussian_params = self.entropy_parameters(torch.cat((p, ctx_p), dim=1))\n gaussian_params = gaussian_params.squeeze(3).squeeze(2)\n scales_hat, means_hat = gaussian_params.chunk(2, 1)\n\n indexes = self.gaussian_conditional.build_indexes(scales_hat)\n\n y_crop = y_crop[:, :, padding, padding]\n y_q = self.gaussian_conditional.quantize(y_crop, \"symbols\", means_hat)\n y_hat[:, :, h + padding, w + padding] = y_q + means_hat\n\n symbols_list.extend(y_q.squeeze().tolist())\n indexes_list.extend(indexes.squeeze().tolist())\n\n encoder.encode_with_indexes(\n symbols_list, indexes_list, cdf, cdf_lengths, offsets\n )\n\n string = encoder.flush()\n return string\n\n def decompress(self, strings, shape):\n assert isinstance(strings, list) and len(strings) == 2\n\n if next(self.parameters()).device != torch.device(\"cpu\"):\n warnings.warn(\n \"Inference on GPU is not recommended for the autoregressive \"\n \"models (the entropy coder is run sequentially on CPU).\"\n )\n\n # FIXME: we don't respect the default entropy coder and directly call the\n # range ANS decoder\n\n z_hat = self.entropy_bottleneck.decompress(strings[1], shape)\n params = self.h_s(z_hat)\n\n s = 4 # scaling factor between z and y\n kernel_size = 5 # context prediction kernel size\n padding = (kernel_size - 1) // 2\n\n y_height = z_hat.size(2) * s\n y_width = z_hat.size(3) * s\n\n # initialize y_hat to zeros, and pad it so we can directly work with\n # sub-tensors of size (N, C, kernel size, kernel_size)\n y_hat = torch.zeros(\n (z_hat.size(0), self.M, y_height + 2 * padding, y_width + 2 * padding),\n device=z_hat.device,\n )\n\n for i, y_string in enumerate(strings[0]):\n self._decompress_ar(\n y_string,\n y_hat[i : i + 1],\n params[i : i + 1],\n y_height,\n y_width,\n kernel_size,\n padding,\n )\n\n y_hat = F.pad(y_hat, (-padding, -padding, -padding, -padding))\n x_hat = self.g_s(y_hat).clamp_(0, 1)\n return {\"x_hat\": x_hat}\n\n def _decompress_ar(\n self, y_string, y_hat, params, height, width, kernel_size, padding\n ):\n cdf = self.gaussian_conditional.quantized_cdf.tolist()\n cdf_lengths = self.gaussian_conditional.cdf_length.tolist()\n offsets = self.gaussian_conditional.offset.tolist()\n\n decoder = RansDecoder()\n decoder.set_stream(y_string)\n\n # Warning: this is slow due to the auto-regressive nature of the\n # decoding... 
See more recent publication where they use an\n # auto-regressive module on chunks of channels for faster decoding...\n for h in range(height):\n for w in range(width):\n # only perform the 5x5 convolution on a cropped tensor\n # centered in (h, w)\n y_crop = y_hat[:, :, h : h + kernel_size, w : w + kernel_size]\n ctx_p = F.conv2d(\n y_crop,\n self.context_prediction.weight,\n bias=self.context_prediction.bias,\n )\n # 1x1 conv for the entropy parameters prediction network, so\n # we only keep the elements in the \"center\"\n p = params[:, :, h : h + 1, w : w + 1]\n gaussian_params = self.entropy_parameters(torch.cat((p, ctx_p), dim=1))\n scales_hat, means_hat = gaussian_params.chunk(2, 1)\n\n indexes = self.gaussian_conditional.build_indexes(scales_hat)\n rv = decoder.decode_stream(\n indexes.squeeze().tolist(), cdf, cdf_lengths, offsets\n )\n rv = torch.Tensor(rv).reshape(1, -1, 1, 1)\n rv = self.gaussian_conditional.dequantize(rv, means_hat)\n\n hp = h + padding\n wp = w + padding\n y_hat[:, :, hp : hp + 1, wp : wp + 1] = rv\n"
] |
[
[
"torch.abs",
"torch.Tensor",
"torch.cat",
"torch.nn.functional.conv2d",
"torch.nn.Conv2d",
"torch.nn.LeakyReLU",
"torch.nn.init.zeros_",
"torch.device",
"torch.nn.ReLU",
"torch.nn.functional.pad",
"torch.nn.init.kaiming_normal_"
]
] |
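For orientation only (not part of the dataset row above), a minimal, self-contained sketch of the weight-initialization and padding calls indexed for this entry; it assumes a standard PyTorch install, and the helper name init_conv_weights is illustrative:

import torch
import torch.nn as nn
import torch.nn.functional as F

def init_conv_weights(module: nn.Module) -> None:
    # Kaiming-initialize conv weights and zero the biases, the same pattern as
    # CompressionModel._initialize_weights in the code above.
    for m in module.modules():
        if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
            nn.init.kaiming_normal_(m.weight)
            if m.bias is not None:
                nn.init.zeros_(m.bias)

if __name__ == "__main__":
    net = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(inplace=True))
    init_conv_weights(net)
    x = torch.rand(1, 3, 32, 32)
    # Symmetric spatial padding, as done before the autoregressive compress loop.
    x = F.pad(x, (2, 2, 2, 2))
    print(net(x).shape)  # torch.Size([1, 16, 36, 36])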
kiconiaworks/igata
|
[
"1d8a4b82a65eb936d5d8f8ff70747ba82ddef31a"
] |
[
"igata/utils.py"
] |
[
"import csv\nimport datetime\nimport json\nimport logging\nimport os\nimport time\nimport urllib\nfrom collections.abc import Hashable\nfrom decimal import Decimal\nfrom gzip import GzipFile\nfrom hashlib import md5\nfrom io import BytesIO, StringIO\nfrom pathlib import Path\nfrom typing import Generator, List, Optional, Tuple, Union\nfrom urllib.error import HTTPError\nfrom urllib.parse import unquote, urlparse\nfrom uuid import NAMESPACE_URL, uuid5\n\nimport boto3\nimport imageio\nimport numpy as np\nimport pandas\nimport requests\nfrom botocore.errorfactory import ClientError\nfrom igata import settings\nfrom requests.adapters import HTTPAdapter\nfrom retry.api import retry_call\nfrom urllib3 import Retry\n\nlogger = logging.getLogger(\"cliexecutor\")\n\n\n# for generating UUID for request_id\nUUID_NAMESPACE_DNS_NAME = os.getenv(\"UUID_NAMESPACE_DNS_NAME\", \"my-api.com\")\n\nS3 = boto3.client(\"s3\", endpoint_url=settings.S3_ENDPOINT)\n\n\ndef default_json_encoder(obj):\n \"\"\"\n Serialize for objects that cannot be serialized by the default json encoder\n\n Usage:\n\n json_bytes = json.dumps(myobj, default=default_json_encoder)\n\n \"\"\"\n if isinstance(obj, datetime.datetime):\n return obj.isoformat()\n elif isinstance(obj, Decimal):\n return float(obj)\n raise TypeError(f\"Object cannot be serialized: {obj}\")\n\n\ndef flatten(nested_object, keystring=\"\", allow_null_strings=True, separator=\"__\") -> Generator[tuple, None, None]:\n \"\"\"\n Flatten a nested dictionary into a flat/single-level key, value tuple.\n\n Usage:\n nested_object = {\n 'key1': {'other': 'other1'},\n 'key2': 'value2'\n }\n for key_value in flatten(nested_object):\n print(key_value) # ('key1__other': 'other1') ...\n\n .. note::\n\n Results can be converted to dictionary using:\n\n flattened_dict = dict(flatten(nested_object))\n\n \"\"\"\n if isinstance(nested_object, dict):\n keystring = f\"{keystring}{separator}\" if keystring else keystring\n for key in nested_object:\n updated_keystring = f\"{keystring}{key}\"\n yield from flatten(nested_object[key], updated_keystring, allow_null_strings, separator)\n elif isinstance(nested_object, list):\n for list_element in nested_object:\n yield from flatten(list_element, keystring, allow_null_strings, separator)\n else:\n if not allow_null_strings:\n if nested_object != \"\":\n yield keystring, nested_object\n else:\n yield keystring, nested_object\n\n\ndef prepare_images(bucket, key) -> Tuple[Tuple[str, str], np.array, float, Optional[str]]:\n \"\"\"\n Read the given s3 key into a numpy array.from retry.api import retry_call\n \"\"\"\n error_message = None\n key = unquote(key)\n url = S3.generate_presigned_url(ClientMethod=\"get_object\", Params={\"Bucket\": bucket, \"Key\": key}, ExpiresIn=3600, HttpMethod=\"GET\")\n\n start = time.time()\n try:\n image = retry_call(imageio.imread, fargs=[url], tries=10)[:, :, :3]\n except HTTPError as e:\n logger.exception(e)\n error_message = f\"Exception while processing image(s3://{bucket}/{key}): ({e.code}) {e.reason}\"\n logger.error(error_message)\n image = np.array([])\n except ValueError as e:\n logger.exception(e)\n error_message = f\"Exception while processing image(s3://{bucket}/{key}): {e.args}\"\n logger.error(error_message)\n image = np.array([])\n end = time.time()\n download_time = end - start\n\n return (bucket, key), image, download_time, error_message\n\n\ndef _download_s3_file(bucket: str, key: str) -> dict:\n \"\"\"Download file from S3\"\"\"\n url = S3.generate_presigned_url(ClientMethod=\"get_object\", 
Params={\"Bucket\": bucket, \"Key\": key}, ExpiresIn=3600, HttpMethod=\"GET\")\n logger.info(f\"downloading ({url})...\")\n response = requests_retry_session().get(url)\n return response\n\n\ndef prepare_csv_reader(\n bucket: str,\n key: str,\n encoding: str = settings.INPUT_CSV_ENCODING,\n delimiter: str = settings.INPUT_CSV_DELIMITER,\n reader: Union[csv.reader, csv.DictReader] = csv.DictReader,\n dialect: str = settings.INPUT_CSV_READER_DIALECT,\n) -> Tuple[Tuple[str, str], Union[csv.reader, csv.DictReader, None], float, Optional[str]]:\n \"\"\"\n Read the given s3 key into a numpy array.from retry.api import retry_call\n reader = csv.DictReader(StringIO(text))\n \"\"\"\n error_message = None\n csvreader = None\n key = unquote(key)\n if key.lower().endswith((\".csv\", \".gz\")):\n start = time.time()\n try:\n response = _download_s3_file(bucket, key)\n except HTTPError as e:\n logger.exception(e)\n error_message = f\"Exception while processing csv(s3://{bucket}/{key}): ({e.code}) {e.reason}\"\n logger.error(error_message)\n\n except ValueError as e:\n logger.exception(e)\n error_message = f\"Exception while processing csv(s3://{bucket}/{key}): {e.args}\"\n logger.error(error_message)\n\n if 200 <= response.status_code <= 299:\n if key.lower().endswith(\".gz\"):\n data = GzipFile(fileobj=BytesIO(response.content)).read().decode(encoding)\n csvreader = reader(StringIO(data), dialect=dialect, delimiter=delimiter)\n elif key.lower().endswith(\".csv\"):\n data = response.text\n csvreader = reader(StringIO(data), dialect=dialect, delimiter=delimiter)\n\n else:\n error_message = f\"({response.status_code}) error downloading data\"\n else:\n error_message = f\"unsupported CSV file extension: s3://{bucket}/{key}\"\n\n end = time.time()\n download_time = end - start\n\n return (bucket, key), csvreader, download_time, error_message\n\n\ndef prepare_csv_dataframe(\n bucket: str, key: str, read_csv_kwargs: Optional[dict] = None\n) -> Tuple[Tuple[str, str], Optional[pandas.DataFrame], float, Optional[str]]:\n \"\"\"Read CSV from s3 and return a dataframe\"\"\"\n df = None\n error_message = None\n response = None\n start = time.time()\n try:\n response = _download_s3_file(bucket, key)\n except HTTPError as e:\n logger.exception(e)\n error_message = f\"Exception while processing csv(s3://{bucket}/{key}): ({e.code}) {e.reason}\"\n logger.error(error_message)\n\n if response:\n if 200 <= response.status_code <= 299:\n filename = Path(key.split(\"/\")[-1])\n data = BytesIO(response.content)\n data.name = filename.name\n\n if not read_csv_kwargs:\n # set defaults\n read_csv_kwargs = {\n \"sep\": settings.DEFAULT_INPUT_CSV_DELIMITER,\n \"encoding\": settings.DEFAULT_INPUT_CSV_ENCODING,\n \"header\": settings.DEFAULT_INPUT_CSV_HEADER_LINES,\n }\n\n # - determine compression\n ext = filename.suffix.lower()\n compression_ext_mapping = {\".zip\": \"zip\", \".gz\": \"gzip\", \".xz\": \"xz\", \".bz2\": \"bz2\"}\n compression = compression_ext_mapping.get(ext, None)\n if compression and \"compression\" not in read_csv_kwargs:\n read_csv_kwargs[\"compression\"] = compression\n\n logger.debug(f\"read_csv_kwargs={read_csv_kwargs}\")\n try:\n df = pandas.read_csv(data, **read_csv_kwargs)\n except Exception as e:\n logger.exception(e)\n error_message = f\"Exception Occurred while calling pandas.read_csv(): {e.args}\"\n else:\n error_message = f\"Invalid response.status_code while processing csv(s3://{bucket}/{key}): status_code={response.status_code}\"\n logger.error(error_message)\n else:\n error_message = f\"response 
not defined, download failed for: s3://{bucket}/{key}\"\n logger.error(\"response not defined!\")\n\n end = time.time()\n download_time = end - start\n\n return (bucket, key), df, download_time, error_message\n\n\ndef parse_s3_uri(uri: str) -> Tuple[str, str]:\n \"\"\"\n Parse s3 uri (s3://bucket/key) to (bucket, key)\n \"\"\"\n result = urlparse(uri)\n bucket = result.netloc\n key = result.path[1:] # removes leading slash\n return bucket, key\n\n\ndef generate_request_id(*values, uuid_namespace_dns_name=UUID_NAMESPACE_DNS_NAME) -> str:\n \"\"\"\n Generate the UUID string for given values\n\n .. note::\n\n values are sorted to ensure key reproducibility\n\n \"\"\"\n if not all(isinstance(v, Hashable) for v in values):\n raise ValueError(f\"Given value not hashable, values: {values}\")\n unique_key = md5(\".\".join(value for value in sorted(str(v) for v in values)).encode(\"utf8\")).hexdigest()\n hash_url = urllib.parse.quote_plus(f\"http://{uuid_namespace_dns_name}/{unique_key}\")\n value = str(uuid5(namespace=NAMESPACE_URL, name=hash_url))\n return value\n\n\ndef serialize_json_and_chunk_by_bytes(items: List[Union[dict, str]], max_bytes: int = 2048) -> Generator[str, None, None]:\n \"\"\"\n Serialize items into JSON and yield by the resulting\n \"\"\"\n is_initial = True\n last_json_str = None\n chunked_items = []\n logger.debug(f\"chunk_processing items incoming: {len(items)}\")\n for item in items:\n if chunked_items:\n json_str = json.dumps(chunked_items)\n json_bytes = json_str.encode(\"utf8\")\n\n if is_initial and len(json_bytes) > max_bytes:\n raise ValueError(f\"Single item > max_bytes({max_bytes}: {json_bytes}\")\n\n elif len(json_bytes) > max_bytes:\n yield last_json_str\n chunked_items = chunked_items[-1:] # remove items yielded in last_json_str\n\n last_json_str = json_str\n chunked_items.append(item)\n is_initial = False\n\n if chunked_items:\n json_str = json.dumps(chunked_items)\n encoded = json_str.encode(\"utf8\")\n if len(encoded) >= max_bytes:\n json_str = json.dumps(chunked_items[:-1])\n yield json_str # make sure to send last one!\n json_str = json.dumps(chunked_items[-1:])\n yield json_str # make sure to send last one!\n else:\n yield json_str # make sure to send last one!\n\n\ndef requests_retry_session(retries=3, backoff_factor=0.3, status_forcelist=(500, 502, 504), session=None):\n \"\"\"\n request retry sessions\n :param retries:\n :param backoff_factor:\n :param status_forcelist:\n :param session:\n :return:\n \"\"\"\n session = session or requests.Session()\n retry = Retry(total=retries, read=retries, connect=retries, backoff_factor=backoff_factor, status_forcelist=status_forcelist)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount(\"http://\", adapter)\n session.mount(\"https://\", adapter)\n return session\n\n\ndef s3_key_exists(bucket: str, key: str) -> bool:\n \"\"\"Check if given bucket, key exists\"\"\"\n exists = False\n try:\n S3.head_object(Bucket=bucket, Key=key)\n exists = True\n except ClientError as e:\n if e.response[\"ResponseMetadata\"][\"HTTPStatusCode\"] == 404:\n logger.error(f\"s3 key does not exist: s3://{bucket}/{key}\")\n else:\n logger.exception(e)\n logger.error(f\"Unknown ClientError: {e.args}\")\n\n return exists\n"
] |
[
[
"numpy.array",
"pandas.read_csv"
]
] |
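For orientation only, a tiny sketch of the two calls indexed for this entry: pandas.read_csv on an in-memory buffer (as prepare_csv_dataframe does with the downloaded bytes) and numpy.array as the empty-image fallback; the sample CSV text is made up:

from io import StringIO

import numpy as np
import pandas as pd

# Parse a small CSV held in memory, mirroring how the downloaded S3 payload
# is handed to pandas in prepare_csv_dataframe.
df = pd.read_csv(StringIO("a,b\n1,2\n3,4\n"), sep=",", header=0)
print(df.shape)  # (2, 2)

# Empty array returned by prepare_images when the download fails.
image = np.array([])
print(image.size)  # 0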
jesford/clusterlensing
|
[
"2815c1bb07d904ca91a80dae3f52090016768072"
] |
[
"clusterlensing/tests/test_nfw.py"
] |
[
"import numpy as np\nfrom numpy.testing import assert_equal, assert_allclose, assert_raises\nfrom astropy import units\n\nfrom clusterlensing.nfw import SurfaceMassDensity\n\n\n# ----------------------------\n# test units of inputs\n\nncl = 3\nr_s = np.repeat(0.1, ncl) * units.Mpc\ndelta_c = np.repeat(10000., ncl) # dimensionless\nrho_c = np.repeat(0.2, ncl) * units.Msun / units.pc**2 / units.Mpc\nsig_off = np.repeat(0.2, ncl) * units.Mpc\nrbinarray = np.logspace(np.log10(0.1), np.log10(5.), num=10) * units.Mpc\n\n\ndef test_rs_units():\n rs_unitless = r_s.value\n rs_wrongunits = r_s.value * units.Msun\n\n # test that a dimensionless rs is coverted to Mpc\n smd = SurfaceMassDensity(rs_unitless, delta_c, rho_c,\n offsets=sig_off, rbins=rbinarray)\n assert_equal(smd._rs.unit, units.Mpc)\n\n # test that a non-Mpc unit on rs raises an error\n assert_raises(ValueError, SurfaceMassDensity, rs_wrongunits, delta_c,\n rho_c, offsets=sig_off, rbins=rbinarray)\n\n\ndef test_rhoc_units():\n rhoc_unitless = rho_c.value\n rhoc_wrongunits = rho_c.value * units.kg / units.m**3\n\n # test that a dimensionless rho_c is coverted to Msun/Mpc/pc**2\n smd = SurfaceMassDensity(r_s, delta_c, rhoc_unitless,\n offsets=sig_off, rbins=rbinarray)\n assert_equal(smd._rho_crit.unit, units.Msun / units.Mpc / units.pc**2)\n\n # test that an incorrect unit on rho_c raises an error\n assert_raises(ValueError, SurfaceMassDensity, r_s, delta_c,\n rhoc_wrongunits, offsets=sig_off, rbins=rbinarray)\n\n\ndef test_dc_units():\n dc_wrongunits = delta_c * units.Mpc\n assert_raises(ValueError, SurfaceMassDensity, r_s, dc_wrongunits, rho_c,\n offsets=sig_off, rbins=rbinarray)\n\n\n# ----------------------------\n# test lists as input\n\nrs_list = list(r_s.value)\ndc_list = list(delta_c)\nrc_list = list(rho_c.value)\nsoff_list = list(sig_off.value)\nrbin_list = list(rbinarray.value)\n\n\ndef test_list_rs():\n smd = SurfaceMassDensity(rs_list, delta_c, rho_c,\n offsets=sig_off, rbins=rbinarray)\n assert_equal(smd._rs, r_s)\n\n\ndef test_list_dc():\n smd = SurfaceMassDensity(r_s, dc_list, rho_c,\n offsets=sig_off, rbins=rbinarray)\n assert_equal(smd._delta_c, delta_c)\n\n\ndef test_list_rc():\n smd = SurfaceMassDensity(r_s, delta_c, rc_list,\n offsets=sig_off, rbins=rbinarray)\n assert_equal(smd._rho_crit, rho_c)\n\n\ndef test_list_sigoff():\n smd = SurfaceMassDensity(r_s, delta_c, rho_c,\n offsets=soff_list, rbins=rbinarray)\n assert_equal(smd._sigmaoffset, sig_off)\n\n\ndef test_list_rbins():\n smd = SurfaceMassDensity(r_s, delta_c, rho_c,\n offsets=sig_off, rbins=rbin_list)\n assert_equal(smd._rbins, rbinarray)\n\n\ndef test_input_single_values():\n assert_raises(TypeError, SurfaceMassDensity, r_s[0], delta_c, rho_c,\n offsets=sig_off, rbins=rbinarray)\n assert_raises(TypeError, SurfaceMassDensity, r_s, delta_c[0], rho_c,\n offsets=sig_off, rbins=rbinarray)\n assert_raises(TypeError, SurfaceMassDensity, r_s, delta_c, rho_c[0],\n offsets=sig_off, rbins=rbinarray)\n assert_raises(TypeError, SurfaceMassDensity, r_s, delta_c, rho_c,\n offsets=sig_off[0], rbins=rbinarray)\n assert_raises(TypeError, SurfaceMassDensity, r_s, delta_c, rho_c,\n offsets=sig_off, rbins=rbinarray[0])\n\n\ndef test_incompatible_lengths():\n\n assert_raises(ValueError, SurfaceMassDensity, r_s[0:2], delta_c, rho_c,\n offsets=sig_off, rbins=rbinarray)\n\n assert_raises(ValueError, SurfaceMassDensity, r_s, delta_c[0:2], rho_c,\n offsets=sig_off, rbins=rbinarray)\n\n assert_raises(ValueError, SurfaceMassDensity, r_s, delta_c, rho_c[0:2],\n offsets=sig_off[0:2], 
rbins=rbinarray)\n\n assert_raises(ValueError, SurfaceMassDensity, r_s, delta_c, rho_c,\n offsets=sig_off[0:2], rbins=rbinarray)\n\n\n# ----------------------------\n# test NFW centered profiles\n\ntoy_data_rbins = np.array([0.1, 0.26591479, 0.70710678,\n 1.88030155, 5.]) * units.Mpc\ntoy_data_z = np.array([0.05, 1.0])\ntoy_data_rs = np.array([[0.06920826, 0.06765531],\n [0.10535529, 0.100552],\n [0.4255034, 0.37502254],\n [0., 0.]]) * units.Mpc\ntoy_data_dc = np.array([[15993.18343503, 7231.03898592],\n [12760.73852901, 6142.71245062],\n [6138.9566454, 3615.01284489],\n [np.nan, np.nan]])\ntoy_data_rhoc = np.array([[0.13363, 0.4028],\n [0.13363, 0.4028],\n [0.13363, 0.4028],\n [0.13363, 0.4028]]) * (units.Msun / units.Mpc /\n (units.pc**2))\n\ntoy_data_sigma = np.array([[[61.676628, 13.91868, 2.4468037,\n 0.37863308, 0.055465539],\n [79.68557, 17.851227, 3.1250101,\n 0.48266501, 0.070650539]],\n [[127.43513, 33.316155, 6.4106052,\n 1.035399, 0.15438678],\n [166.96108, 42.902785, 8.1626417,\n 1.3110978, 0.19504083]],\n [[878.18174, 390.91924, 120.07696,\n 25.927776, 4.446575],\n [1260.0668, 536.75538, 156.88796,\n 32.628947, 5.4837611]],\n [np.empty(5) * np.nan, np.empty(5) * np.nan]])\n\ntoy_data_deltasigma = np.array([[[65.309133, 26.372068, 7.6175566,\n 1.7577702, 0.35356601],\n [85.578152, 34.252649, 9.8301535,\n 2.2592132, 0.45331823]],\n [[103.74144, 49.563266, 16.319755,\n 4.0942593, 0.8663956],\n [139.95018, 65.609361, 21.269033,\n 5.2818827, 1.1108229]],\n [[314.79661, 245.51579, 138.66486,\n 53.056979, 14.742129],\n [483.0392, 364.3582, 195.7899,\n 71.458238, 19.210324]],\n [np.empty(5) * np.nan, np.empty(5) * np.nan]])\n\n\ndef test_centered_profiles():\n\n def _check_sigma(i):\n assert_allclose(sigma_py.value, toy_data_sigma[i],\n rtol=1e-04)\n\n def _check_deltasigma(i):\n assert_allclose(deltasigma_py.value, toy_data_deltasigma[i],\n rtol=1e-04)\n # note: default tolerance is (rtol=1e-07, atol=0)\n\n zipped_inputs = zip(toy_data_rs, toy_data_dc, toy_data_rhoc)\n\n # check all 4 sets of toy_data:\n for i, (r_s, delta_c, rho_c) in enumerate(zipped_inputs):\n\n smd = SurfaceMassDensity(r_s, delta_c, rho_c, rbins=toy_data_rbins)\n sigma_py = smd.sigma_nfw()\n _check_sigma(i)\n\n deltasigma_py = smd.deltasigma_nfw()\n _check_deltasigma(i)\n\n\n# ----------------------------\n# test NFW offset profiles\n\ntoy_data_offset = np.array([0.1, 0.1]) * units.Mpc\n\ntoy_data_sigma_off = np.array([[[56.922836, 17.461102, 2.5331495,\n 0.3805162, 0.055490059],\n [73.797572, 22.464487, 3.2357614,\n 0.48506959, 0.070681805]],\n [[111.72633, 39.527318, 6.6159646,\n 1.0403518, 0.15445337],\n [147.04471, 51.205603, 8.427435,\n 1.3174017, 0.19512522]],\n [[748.5117, 407.12286, 121.99994,\n 26.017806, 4.4481085],\n [1070.3932, 563.12414, 159.64883,\n 32.747803, 5.4857215]],\n [np.empty(5) * np.nan, np.empty(5) * np.nan]])\n\ntoy_data_deltasigma_off = np.array([[[8.3569013, 16.021837, 7.3340382,\n 1.7247532, 0.34935389],\n [10.902221, 20.846143, 9.4631887,\n 2.2162585, 0.44782718]],\n [[14.476809, 29.198514, 15.730196,\n 4.0331203, 0.85895897],\n [19.335676, 38.775763, 20.500251,\n 5.2009726, 1.1009191]],\n [[57.800595, 136.55704, 131.88898,\n 52.590289, 14.699197],\n [86.975707, 203.06399, 186.61751,\n 70.815764, 19.149708]],\n [np.empty(5) * np.nan,\n np.empty(5) * np.nan]])\n\n\ndef test_miscentered_profiles():\n\n def _check_sigma(i):\n assert_allclose(sigma_py_off.value, toy_data_sigma_off[i],\n rtol=1e-04)\n\n def _check_deltasigma(i):\n assert_allclose(deltasigma_py_off.value, 
toy_data_deltasigma_off[i],\n rtol=1e-04)\n\n zipped_inputs = zip(toy_data_rs, toy_data_dc, toy_data_rhoc)\n\n # check all 4 sets of toy_data:\n for i, (r_s, delta_c, rho_c) in enumerate(zipped_inputs):\n\n smd = SurfaceMassDensity(r_s, delta_c, rho_c,\n offsets=toy_data_offset,\n rbins=toy_data_rbins)\n sigma_py_off = smd.sigma_nfw()\n _check_sigma(i)\n\n deltasigma_py_off = smd.deltasigma_nfw()\n _check_deltasigma(i)\n"
] |
[
[
"numpy.testing.assert_equal",
"numpy.log10",
"numpy.testing.assert_raises",
"numpy.testing.assert_allclose",
"numpy.repeat",
"numpy.array",
"numpy.empty"
]
] |
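For orientation only, a minimal sketch of the numpy and numpy.testing helpers indexed for this test module; the values are arbitrary:

import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_raises

# Repeated per-cluster inputs and logarithmic radial bins, as in the fixtures above.
r_s = np.repeat(0.1, 3)
rbins = np.logspace(np.log10(0.1), np.log10(5.0), num=5)
assert_equal(r_s.shape, (3,))

# Relative-tolerance comparison, the same pattern used against the toy data.
assert_allclose(rbins[-1], 5.0, rtol=1e-4)

# assert_raises verifies that a callable fails with the expected exception type.
assert_raises(ValueError, int, "not-a-number")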
lost-person/AREL
|
[
"c39d5a0ba219b499e595812a31362a8f2535859e"
] |
[
"vist_eval/cider/cider_scorer.py"
] |
[
"#!/usr/bin/env python\n# Tsung-Yi Lin <[email protected]>\n# Ramakrishna Vedantam <[email protected]>\n\nimport copy\nfrom collections import defaultdict\nimport numpy as np\nimport pdb\nimport math\nimport pickle\nimport os\n\n\ndef precook(s, n=4, out=False):\n \"\"\"\n Takes a string as input and returns an object that can be given to\n either cook_refs or cook_test. This is optional: cook_refs and cook_test\n can take string arguments as well.\n :param s: string : sentence to be converted into ngrams\n :param n: int : number of ngrams for which representation is calculated\n :return: term frequency vector for occuring ngrams\n \"\"\"\n words = s.split()\n counts = defaultdict(int)\n for k in range(1, n + 1):\n for i in range(len(words) - k + 1):\n ngram = tuple(words[i:i + k])\n counts[ngram] += 1\n return counts\n\n\ndef cook_refs(refs, n=4): ## lhuang: oracle will call with \"average\"\n '''Takes a list of reference sentences for a single segment\n and returns an object that encapsulates everything that BLEU\n needs to know about them.\n :param refs: list of string : reference sentences for some image\n :param n: int : number of ngrams for which (ngram) representation is calculated\n :return: result (list of dict)\n '''\n return [precook(ref, n) for ref in refs]\n\n\ndef cook_test(test, n=4):\n '''Takes a test sentence and returns an object that\n encapsulates everything that BLEU needs to know about it.\n :param test: list of string : hypothesis sentence for some image\n :param n: int : number of ngrams for which (ngram) representation is calculated\n :return: result (dict)\n '''\n return precook(test, n, True)\n\n\nclass CiderScorer(object):\n \"\"\"CIDEr scorer.\n \"\"\"\n\n def copy(self):\n ''' copy the refs.'''\n new = CiderScorer(n=self.n)\n new.ctest = copy.copy(self.ctest)\n new.crefs = copy.copy(self.crefs)\n return new\n\n def __init__(self, df=None, test=None, refs=None, n=4, sigma=6.0):\n ''' singular instance '''\n self.n = n\n self.sigma = sigma\n self.crefs = []\n self.ctest = []\n if df is not None:\n pkl_file = pickle.load(open(df + '.p', 'r'))\n self.ref_len = pkl_file['ref_len']\n self.document_frequency = pkl_file['document_frequency']\n else:\n self.document_frequency = defaultdict(float)\n self.cook_append(test, refs)\n self.ref_len = None\n\n def cook_append(self, test, refs):\n '''called by constructor and __iadd__ to avoid creating new instances.'''\n\n if refs is not None:\n self.crefs.append(cook_refs(refs))\n if test is not None:\n self.ctest.append(cook_test(test)) ## N.B.: -1\n else:\n self.ctest.append(None) # lens of crefs and ctest have to match\n\n def size(self):\n assert len(self.crefs) == len(self.ctest), \"refs/test mismatch! 
%d<>%d\" % (len(self.crefs), len(self.ctest))\n return len(self.crefs)\n\n def __iadd__(self, other):\n '''add an instance (e.g., from another sentence).'''\n\n if type(other) is tuple:\n ## avoid creating new CiderScorer instances\n self.cook_append(other[0], other[1])\n else:\n self.ctest.extend(other.ctest)\n self.crefs.extend(other.crefs)\n\n return self\n\n def compute_doc_freq(self):\n '''\n Compute term frequency for reference data.\n This will be used to compute idf (inverse document frequency later)\n The term frequency is stored in the object\n :return: None\n '''\n for refs in self.crefs:\n # refs, k ref captions of one image\n for ngram in set([ngram for ref in refs for (ngram, count) in ref.items()]):\n self.document_frequency[ngram] += 1\n # maxcounts[ngram] = max(maxcounts.get(ngram,0), count)\n\n def compute_cider(self):\n def counts2vec(cnts):\n \"\"\"\n Function maps counts of ngram to vector of tfidf weights.\n The function returns vec, an array of dictionary that store mapping of n-gram and tf-idf weights.\n The n-th entry of array denotes length of n-grams.\n :param cnts:\n :return: vec (array of dict), norm (array of float), length (int)\n \"\"\"\n vec = [defaultdict(float) for _ in range(self.n)]\n length = 0\n norm = [0.0 for _ in range(self.n)]\n for (ngram, term_freq) in cnts.items():\n # give word count 1 if it doesn't appear in reference corpus\n df = np.log(max(1.0, self.document_frequency[ngram]))\n # ngram index\n n = len(ngram) - 1\n # tf (term_freq) * idf (precomputed idf) for n-grams\n vec[n][ngram] = float(term_freq) * (self.ref_len - df)\n # compute norm for the vector. the norm will be used for computing similarity\n norm[n] += pow(vec[n][ngram], 2)\n\n if n == 1:\n length += term_freq\n norm = [np.sqrt(n) for n in norm]\n return vec, norm, length\n\n def sim(vec_hyp, vec_ref, norm_hyp, norm_ref, length_hyp, length_ref):\n '''\n Compute the cosine similarity of two vectors.\n :param vec_hyp: array of dictionary for vector corresponding to hypothesis\n :param vec_ref: array of dictionary for vector corresponding to reference\n :param norm_hyp: array of float for vector corresponding to hypothesis\n :param norm_ref: array of float for vector corresponding to reference\n :param length_hyp: int containing length of hypothesis\n :param length_ref: int containing length of reference\n :return: array of score for each n-grams cosine similarity\n '''\n delta = float(length_hyp - length_ref)\n # measure consine similarity\n val = np.array([0.0 for _ in range(self.n)])\n for n in range(self.n):\n # ngram\n for (ngram, count) in vec_hyp[n].items():\n # vrama91 : added clipping\n val[n] += min(vec_hyp[n][ngram], vec_ref[n][ngram]) * vec_ref[n][ngram]\n\n if (norm_hyp[n] != 0) and (norm_ref[n] != 0):\n val[n] /= (norm_hyp[n] * norm_ref[n])\n\n assert (not math.isnan(val[n]))\n # vrama91: added a length based gaussian penalty\n val[n] *= np.e ** (-(delta ** 2) / (2 * self.sigma ** 2))\n return val\n\n # compute log reference length\n self.ref_len = np.log(float(len(self.crefs)))\n\n scores = []\n for test, refs in zip(self.ctest, self.crefs):\n # compute vector for test captions\n vec, norm, length = counts2vec(test)\n # compute vector for ref captions\n score = np.array([0.0 for _ in range(self.n)])\n for ref in refs:\n vec_ref, norm_ref, length_ref = counts2vec(ref)\n score += sim(vec, vec_ref, norm, norm_ref, length, length_ref)\n # change by vrama91 - mean of ngram scores, instead of sum\n score_avg = np.mean(score)\n # divide by number of references\n score_avg 
/= len(refs)\n # multiply score by 10\n score_avg *= 10.0\n # append score of an image to the score list\n scores.append(score_avg)\n return scores\n\n def compute_score(self, option=None, verbose=0):\n # compute idf\n self.compute_doc_freq()\n # assert to check document frequency\n assert (len(self.ctest) >= max(self.document_frequency.values()))\n # compute cider score\n score = self.compute_cider()\n # debug\n # print score\n return np.mean(np.array(score)), np.array(score)\n"
] |
[
[
"numpy.array",
"numpy.mean",
"numpy.sqrt"
]
] |
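For orientation only, a small self-contained sketch of the n-gram counting plus the numpy calls (array, sqrt, mean) that the CIDEr scorer above applies to its tf-idf vectors; the function name and sentence are illustrative:

from collections import defaultdict

import numpy as np

def ngram_counts(sentence, n=2):
    # Term frequencies of 1..n-grams, the same idea as precook() above.
    words = sentence.split()
    counts = defaultdict(int)
    for k in range(1, n + 1):
        for i in range(len(words) - k + 1):
            counts[tuple(words[i:i + k])] += 1
    return counts

counts = ngram_counts("a cat sat on a mat")
vec = np.array(list(counts.values()), dtype=float)
print(np.sqrt(np.sum(vec ** 2)))  # L2 norm used when comparing tf-idf vectors
print(np.mean(vec))               # mean over n-gram counts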
chrisying/nasbench
|
[
"c51c65b90cb5ef571a8d5b5af2aff893b92863d8"
] |
[
"nasbench/lib/evaluate.py"
] |
[
"# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Performs training and evaluation of the proposed model spec on TPU.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\n\nfrom nasbench.lib import cifar\nfrom nasbench.lib import model_builder\nfrom nasbench.lib import training_time\nimport numpy as np\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\n\nVALID_EXCEPTIONS = (\n tf.train.NanLossDuringTrainingError, # NaN loss\n tf.errors.ResourceExhaustedError, # OOM\n tf.errors.InvalidArgumentError, # NaN gradient\n tf.errors.DeadlineExceededError, # Timed out\n)\n\n\nclass AbortError(Exception):\n \"\"\"Signals that evaluation failed for a valid reason.\"\"\"\n pass\n\n\ndef train_and_evaluate(spec, config, model_dir):\n \"\"\"Train and evaluate the proposed model.\n\n This method trains and evaluates the model for the creation of the benchmark\n dataset. The default values from the config.py are exactly the values used.\n\n Args:\n spec: ModelSpec object.\n config: config dict generated from config.py.\n model_dir: directory to store the checkpoint files.\n\n Returns:\n dict containing the evaluation metadata.\n \"\"\"\n return _train_and_evaluate_impl(spec, config, model_dir)\n\n\ndef augment_and_evaluate(spec, config, model_dir, epochs_per_eval=5):\n \"\"\"Trains the model on the full training set and evaluates on test set.\n\n \"Augment\" specifically refers to training the same spec in a larger network on\n the full training set. Typically this involves increasing the epoch count,\n number of modules/stacks, and changing the LR schedule. These changes should\n be made to the config dict before calling this method.\n\n Note: this method was not used for generating the NAS Benchmark dataset. See\n train_and_evaluate instead.\n\n Args:\n spec: ModelSpec object.\n config: config dict generated from config.py.\n model_dir: directory to store the checkpoint files.\n epochs_per_eval: number of epochs per evaluation run. Evaluation is always\n run at the very start and end.\n\n Returns:\n dict containing the evaluation metadata.\n \"\"\"\n return _augment_and_evaluate_impl(spec, config, model_dir, epochs_per_eval)\n\n\ndef _train_and_evaluate_impl(spec, config, model_dir):\n \"\"\"Train and evaluate implementation, see train_and_evaluate docstring.\"\"\"\n evaluator = _TrainAndEvaluator(spec, config, model_dir)\n return evaluator.run()\n\n\nclass _TrainAndEvaluator(object):\n \"\"\"Runs the training and evaluation.\"\"\"\n\n def __init__(self, spec, config, model_dir):\n \"\"\"Initialize evaluator. 
See train_and_evaluate docstring.\"\"\"\n self.input_train = cifar.CIFARInput('train', config)\n self.input_train_eval = cifar.CIFARInput('train_eval', config)\n self.input_valid = cifar.CIFARInput('valid', config)\n self.input_test = cifar.CIFARInput('test', config)\n self.input_sample = cifar.CIFARInput('sample', config)\n self.estimator = _create_estimator(spec, config, model_dir,\n self.input_train.num_images,\n self.input_sample.num_images)\n\n self.spec = spec\n self.config = config\n self.model_dir = model_dir\n\n def run(self):\n \"\"\"Runs training and evaluation.\"\"\"\n attempts = 0\n while True:\n # Delete everything in the model dir at the start of each attempt\n try:\n tf.gfile.DeleteRecursively(self.model_dir)\n except tf.errors.NotFoundError:\n pass\n tf.gfile.MakeDirs(self.model_dir)\n\n try:\n # Train\n if self.config['train_seconds'] > 0.0:\n timing = training_time.limit(self.config['train_seconds'])\n else:\n timing = training_time.limit(None)\n\n evaluations = list(map(float, self.config['intermediate_evaluations']))\n if not evaluations or evaluations[-1] != 1.0:\n evaluations.append(1.0)\n assert evaluations == sorted(evaluations)\n\n evaluation_results = []\n start_time = time.time()\n\n # Train for 1 step with 0 LR to initialize the weights, then evaluate\n # once at the start for completeness, accuracies expected to be around\n # random selection. Note that batch norm moving averages change during\n # the step but the trainable weights do not.\n self.estimator.train(\n input_fn=self.input_train.input_fn,\n max_steps=1,\n hooks=[timing.train_hook],\n saving_listeners=[timing.saving_listener])\n evaluation_results.append(self._evaluate_all(0.0, 0))\n\n for next_evaluation in evaluations:\n epoch = next_evaluation * self.config['train_epochs']\n train_steps = int(epoch * self.input_train.num_images /\n self.config['batch_size'])\n self.estimator.train(\n input_fn=self.input_train.input_fn,\n max_steps=train_steps,\n hooks=[timing.train_hook],\n saving_listeners=[timing.saving_listener])\n\n evaluation_results.append(self._evaluate_all(epoch, train_steps))\n\n all_time = time.time() - start_time\n break # Break from retry loop on success\n except VALID_EXCEPTIONS as e: # pylint: disable=catching-non-exception\n attempts += 1\n tf.logging.warning(str(e))\n if attempts >= self.config['max_attempts']:\n raise AbortError(str(e))\n\n metadata = {\n 'trainable_params': _get_param_count(self.model_dir),\n 'total_time': all_time, # includes eval and other metric time\n 'evaluation_results': evaluation_results,\n }\n\n return metadata\n\n def _evaluate_all(self, epochs, steps):\n \"\"\"Runs all the evaluations.\"\"\"\n train_accuracy = _evaluate(self.estimator, self.input_train_eval,\n self.config, name='train')\n valid_accuracy = _evaluate(self.estimator, self.input_valid,\n self.config, name='valid')\n test_accuracy = _evaluate(self.estimator, self.input_test,\n self.config, name='test')\n train_time = self.estimator.get_variable_value(\n training_time.TOTAL_TIME_NAME)\n\n now = time.time()\n sample_metrics = self._compute_sample_metrics()\n predict_time = time.time() - now\n\n return {\n 'epochs': epochs,\n 'training_time': train_time,\n 'training_steps': steps,\n 'train_accuracy': train_accuracy,\n 'validation_accuracy': valid_accuracy,\n 'test_accuracy': test_accuracy,\n 'sample_metrics': sample_metrics,\n 'predict_time': predict_time,\n }\n\n def _compute_sample_metrics(self):\n \"\"\"Computes the metrics on a fixed batch.\"\"\"\n sample_metrics = 
next(self.estimator.predict(\n input_fn=self.input_sample.input_fn, yield_single_examples=False))\n\n # Fix the extra batch dimension added by PREDICT\n for metric in sample_metrics:\n if metric in ['logits', 'input_grad_norm']:\n # Batch-shaped tensors take first batch\n sample_metrics[metric] = (\n sample_metrics[metric][:self.input_sample.num_images, Ellipsis])\n else:\n # Other tensors remove batch dimension\n sample_metrics[metric] = sample_metrics[metric][0, Ellipsis]\n\n return sample_metrics\n\n\ndef _augment_and_evaluate_impl(spec, config, model_dir, epochs_per_eval=5):\n \"\"\"Augment and evaluate implementation, see augment_and_evaluate docstring.\"\"\"\n input_augment, input_test = [\n cifar.CIFARInput(m, config)\n for m in ['augment', 'test']]\n estimator = _create_estimator(spec, config, model_dir,\n input_augment.num_images)\n\n if config['train_seconds'] > 0.0:\n timing = training_time.limit(config['train_seconds'])\n else:\n timing = training_time.limit(None)\n\n steps_per_epoch = input_augment.num_images / config['batch_size'] # float\n ckpt = tf.train.latest_checkpoint(model_dir)\n if not ckpt:\n current_step = 0\n else:\n current_step = int(ckpt.split('-')[-1])\n max_steps = int(config['train_epochs'] * steps_per_epoch)\n\n while current_step < max_steps:\n next_step = current_step + int(epochs_per_eval * steps_per_epoch)\n next_step = min(next_step, max_steps)\n estimator.train(\n input_fn=input_augment.input_fn,\n max_steps=next_step,\n hooks=[timing.train_hook],\n saving_listeners=[timing.saving_listener])\n current_step = next_step\n\n test_accuracy = _evaluate(estimator, input_test, config)\n\n metadata = {\n 'trainable_params': _get_param_count(model_dir),\n 'test_accuracy': test_accuracy,\n }\n\n return metadata\n\n\ndef _create_estimator(spec, config, model_dir,\n num_train_images, num_sample_images=None):\n \"\"\"Creates the TPUEstimator object.\"\"\"\n # Estimator will save a checkpoint at the end of every train() call. Disable\n # automatic checkpoints by setting the time interval between checkpoints to\n # a very large value.\n run_config = tf.contrib.tpu.RunConfig(\n model_dir=model_dir,\n keep_checkpoint_max=3, # Keeps ckpt at start, halfway, and end\n save_checkpoints_secs=2**30,\n tpu_config=tf.contrib.tpu.TPUConfig(\n iterations_per_loop=config['tpu_iterations_per_loop'],\n num_shards=config['tpu_num_shards']))\n\n # This is a hack to allow PREDICT on a fixed batch on TPU. 
By replicating the\n # batch by the number of shards, this ensures each TPU core operates on the\n # entire fixed batch.\n if num_sample_images and config['use_tpu']:\n num_sample_images *= config['tpu_num_shards']\n\n estimator = tf.contrib.tpu.TPUEstimator(\n use_tpu=config['use_tpu'],\n model_fn=model_builder.build_model_fn(\n spec, config, num_train_images),\n config=run_config,\n train_batch_size=config['batch_size'],\n eval_batch_size=config['batch_size'],\n predict_batch_size=num_sample_images)\n\n return estimator\n\n\ndef _evaluate(estimator, input_data, config, name=None):\n \"\"\"Evaluate the estimator on the input data.\"\"\"\n steps = input_data.num_images // config['batch_size']\n results = estimator.evaluate(\n input_fn=input_data.input_fn,\n steps=steps,\n name=name)\n return results['accuracy']\n\n\ndef _get_param_count(model_dir):\n \"\"\"Get trainable param count from the model directory.\"\"\"\n tf.reset_default_graph()\n checkpoint = tf.train.get_checkpoint_state(model_dir)\n with tf.Session() as sess:\n saver = tf.train.import_meta_graph(\n checkpoint.model_checkpoint_path + '.meta')\n saver.restore(sess, checkpoint.model_checkpoint_path)\n params = np.sum([np.prod(v.get_shape().as_list())\n for v in tf.trainable_variables()])\n\n return params\n\n"
] |
[
[
"tensorflow.compat.v1.train.import_meta_graph",
"tensorflow.compat.v1.train.get_checkpoint_state",
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.compat.v1.contrib.tpu.TPUConfig",
"tensorflow.compat.v1.trainable_variables",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.reset_default_graph",
"tensorflow.compat.v1.gfile.MakeDirs",
"tensorflow.compat.v1.train.latest_checkpoint",
"tensorflow.compat.v1.gfile.DeleteRecursively"
]
] |
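For orientation only, a minimal parameter-count sketch using the tensorflow.compat.v1 calls indexed for this entry (Session, reset_default_graph, trainable_variables); it assumes a TensorFlow release that still ships the v1 compatibility API, and the toy graph is made up:

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()
tf.reset_default_graph()

# A toy graph with 8*4 weights + 4 biases = 36 trainable parameters.
x = tf.placeholder(tf.float32, shape=[None, 8])
w = tf.get_variable("w", shape=[8, 4])
b = tf.get_variable("b", shape=[4])
y = tf.matmul(x, w) + b

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Same reduction as _get_param_count above, without the checkpoint restore.
    params = np.sum([np.prod(v.get_shape().as_list())
                     for v in tf.trainable_variables()])
    print(params)  # 36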
arshee2403/ga-learner-dsmp-repo
|
[
"f79d26ceaf38952e975404984f7d14d585727c15"
] |
[
"Regularization./code.py"
] |
[
"# --------------\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n# path- variable storing file path\r\n\r\n#Code starts here\r\ndf = pd.read_csv(path)\r\nprint(df.head(5))\r\nX = df.drop('Price',axis=1)\r\ny = df['Price']\r\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.3,random_state = 6)\r\ncorr = X_train.corr()\r\nprint(corr)\n\n\n# --------------\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.metrics import r2_score\r\n\r\n# Code starts here\r\nregressor = LinearRegression()\r\nregressor.fit(X_train,y_train)\r\ny_pred = regressor.predict(X_test)\r\nr2 = r2_score(y_test,y_pred)\r\nprint(r2)\n\n\n# --------------\nfrom sklearn.linear_model import Lasso\r\n\r\n# Code starts here\r\nlasso = Lasso()\r\nlasso.fit(X_train,y_train)\r\nlasso_pred = lasso.predict(X_test)\r\nr2_lasso = r2_score(y_test,lasso_pred)\r\nprint(r2_lasso)\n\n\n# --------------\nfrom sklearn.linear_model import Ridge\r\n\r\n# Code starts here\r\nridge = Ridge()\r\nridge.fit(X_train,y_train)\r\nridge_pred = ridge.predict(X_test)\r\nr2_ridge = r2_score(y_test,ridge_pred)\r\nprint(r2_ridge)\r\n\r\n\r\n\r\n# Code ends here\n\n\n# --------------\nfrom sklearn.model_selection import cross_val_score\r\n\r\n#Code starts here\r\nregressor = LinearRegression()\r\nscore = cross_val_score(regressor,X_train,y_train,cv=10)\r\nmean_score = np.mean(score)\r\nprint(mean_score)\n\n\n# --------------\nfrom sklearn.preprocessing import PolynomialFeatures\r\nfrom sklearn.pipeline import make_pipeline\r\n\r\n#Code starts \r\nmodel = make_pipeline(PolynomialFeatures(2), LinearRegression())\r\nmodel.fit(X_train,y_train)\r\ny_pred =model.predict(X_test)\r\nr2_poly= r2_score(y_test,y_pred)\r\nprint(r2_poly)\n\n\n"
] |
[
[
"pandas.read_csv",
"sklearn.metrics.r2_score",
"sklearn.model_selection.cross_val_score",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.PolynomialFeatures",
"sklearn.linear_model.Lasso",
"sklearn.linear_model.Ridge",
"numpy.mean",
"sklearn.linear_model.LinearRegression"
]
] |
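For orientation only, a runnable variant of the regularization exercise above with a synthetic dataset standing in for the course CSV at `path` (the real file is not available here); the scikit-learn calls match those indexed for this entry:

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Lasso, LinearRegression, Ridge
from sklearn.metrics import r2_score
from sklearn.model_selection import cross_val_score, train_test_split

# Synthetic regression data in place of the price-target CSV.
X, y = make_regression(n_samples=200, n_features=10, noise=10.0, random_state=6)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=6)

# Plain OLS versus L1 (Lasso) and L2 (Ridge) regularized fits.
for name, model in [("ols", LinearRegression()), ("lasso", Lasso()), ("ridge", Ridge())]:
    model.fit(X_train, y_train)
    print(name, r2_score(y_test, model.predict(X_test)))

# 10-fold cross-validated R^2 for the unregularized model.
print(np.mean(cross_val_score(LinearRegression(), X_train, y_train, cv=10)))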
tuanho27/OpenPCDet
|
[
"facde3cdb9590f238cfbfac88633e2746dcad2a4"
] |
[
"tools/train.py"
] |
[
"import os\nimport torch\nimport torch.nn as nn\nfrom tensorboardX import SummaryWriter\nfrom pcdet.config import cfg, log_config_to_file, cfg_from_list, cfg_from_yaml_file\nfrom pcdet.utils import common_utils\nfrom pcdet.datasets import build_dataloader\nfrom pcdet.models import build_network, model_fn_decorator\nfrom train_utils.optimization import build_optimizer, build_scheduler\nfrom train_utils.train_utils import train_model\nimport torch.distributed as dist\nfrom test import repeat_eval_ckpt\nfrom pathlib import Path\nimport argparse\nimport datetime\nimport glob\n\n\ndef parse_config():\n parser = argparse.ArgumentParser(description='arg parser')\n parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')\n\n parser.add_argument('--batch_size', type=int, default=16, required=False, help='batch size for training')\n parser.add_argument('--epochs', type=int, default=100, required=False, help='number of epochs to train for')\n parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')\n parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')\n parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')\n parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')\n parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')\n parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')\n parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')\n parser.add_argument('--fix_random_seed', action='store_true', default=False, help='')\n parser.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')\n parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')\n parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')\n parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')\n parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,\n help='set extra config keys if needed')\n\n parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')\n parser.add_argument('--start_epoch', type=int, default=0, help='')\n parser.add_argument('--save_to_file', action='store_true', default=False, help='')\n\n args = parser.parse_args()\n\n cfg_from_yaml_file(args.cfg_file, cfg)\n cfg.TAG = Path(args.cfg_file).stem\n cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1]) # remove 'cfgs' and 'xxxx.yaml'\n\n if args.set_cfgs is not None:\n cfg_from_list(args.set_cfgs, cfg)\n\n return args, cfg\n\n\ndef main():\n args, cfg = parse_config()\n if args.launcher == 'none':\n dist_train = False\n else:\n args.batch_size, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(\n args.batch_size, args.tcp_port, args.local_rank, backend='nccl'\n )\n dist_train = True\n if args.fix_random_seed:\n common_utils.set_random_seed(666)\n\n output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag\n ckpt_dir = output_dir / 'ckpt'\n output_dir.mkdir(parents=True, exist_ok=True)\n ckpt_dir.mkdir(parents=True, exist_ok=True)\n\n log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))\n logger = common_utils.create_logger(log_file, 
rank=cfg.LOCAL_RANK)\n\n # log to file\n logger.info('**********************Start logging**********************')\n gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'\n logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)\n\n if dist_train:\n total_gpus = dist.get_world_size()\n logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))\n for key, val in vars(args).items():\n logger.info('{:16} {}'.format(key, val))\n log_config_to_file(cfg, logger=logger)\n if cfg.LOCAL_RANK == 0:\n os.system('cp %s %s' % (args.cfg_file, output_dir))\n\n tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None\n\n # -----------------------create dataloader & network & optimizer---------------------------\n train_set, train_loader, train_sampler = build_dataloader(\n dataset_cfg=cfg.DATA_CONFIG,\n class_names=cfg.CLASS_NAMES,\n batch_size=args.batch_size,\n dist=dist_train, workers=args.workers,\n logger=logger,\n training=True,\n merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,\n total_epochs=args.epochs\n )\n\n model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=train_set)\n if args.sync_bn:\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n model.cuda()\n\n optimizer = build_optimizer(model, cfg.OPTIMIZATION)\n\n # load checkpoint if it is possible\n start_epoch = it = 0\n last_epoch = -1\n if args.pretrained_model is not None:\n model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist, logger=logger)\n\n if args.ckpt is not None:\n it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist, optimizer=optimizer, logger=logger)\n last_epoch = start_epoch + 1\n else:\n ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))\n if len(ckpt_list) > 0:\n ckpt_list.sort(key=os.path.getmtime)\n it, start_epoch = model.load_params_with_optimizer(\n ckpt_list[-1], to_cpu=dist, optimizer=optimizer, logger=logger\n )\n last_epoch = start_epoch + 1\n\n model.train() # before wrap to DistributedDataParallel to support fixed some parameters\n if dist_train:\n model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()])\n logger.info(model)\n\n lr_scheduler, lr_warmup_scheduler = build_scheduler(\n optimizer, total_iters_each_epoch=len(train_loader), total_epochs=args.epochs,\n last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION\n )\n\n # -----------------------start training---------------------------\n logger.info('**********************Start training %s/%s(%s)**********************'\n % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))\n train_model(\n model,\n optimizer,\n train_loader,\n model_func=model_fn_decorator(),\n lr_scheduler=lr_scheduler,\n optim_cfg=cfg.OPTIMIZATION,\n start_epoch=start_epoch,\n total_epochs=args.epochs,\n start_iter=it,\n rank=cfg.LOCAL_RANK,\n tb_log=tb_log,\n ckpt_save_dir=ckpt_dir,\n train_sampler=train_sampler,\n lr_warmup_scheduler=lr_warmup_scheduler,\n ckpt_save_interval=args.ckpt_save_interval,\n max_ckpt_save_num=args.max_ckpt_save_num,\n merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch\n )\n\n logger.info('**********************End training %s/%s(%s)**********************\\n\\n\\n'\n % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))\n\n logger.info('**********************Start evaluation %s/%s(%s)**********************' %\n (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))\n test_set, test_loader, sampler = build_dataloader(\n 
dataset_cfg=cfg.DATA_CONFIG,\n class_names=cfg.CLASS_NAMES,\n batch_size=args.batch_size,\n dist=dist_train, workers=args.workers, logger=logger, training=False\n )\n eval_output_dir = output_dir / 'eval' / 'eval_with_train'\n eval_output_dir.mkdir(parents=True, exist_ok=True)\n args.start_epoch = max(args.epochs - 10, 0) # Only evaluate the last 10 epochs\n\n repeat_eval_ckpt(\n model.module if dist_train else model,\n test_loader, args, eval_output_dir, logger, ckpt_dir,\n dist_test=dist_train\n )\n logger.info('**********************End evaluation %s/%s(%s)**********************' %\n (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag))\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.cuda.device_count",
"torch.distributed.get_world_size",
"torch.nn.SyncBatchNorm.convert_sync_batchnorm"
]
] |
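
Editor's aside (not part of the dataset rows): the record above pairs OpenPCDet's training entry point with three torch calls. Below is a minimal sketch of how those calls behave outside the full training loop; the toy model and the CPU/world-size fallback are assumptions, not code from the repository.

```python
import torch
import torch.nn as nn
import torch.distributed as dist

# Toy model standing in for the detector built by build_network().
model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())

# Swap every BatchNorm layer for SyncBatchNorm, as the script does when --sync_bn is set.
model = nn.SyncBatchNorm.convert_sync_batchnorm(model)

n_gpus = torch.cuda.device_count()                       # 0 on a CPU-only machine
world = dist.get_world_size() if dist.is_initialized() else 1
print(f"GPUs visible: {n_gpus}, world size: {world}")
```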
mingyingyu/transformers
|
[
"1045f7b45d31c62af595e4d53b4a37cbaf5687b9"
] |
[
"examples/seq2seq/utils.py"
] |
[
"import itertools\nimport json\nimport linecache\nimport os\nimport pickle\nimport warnings\nfrom logging import getLogger\nfrom pathlib import Path\nfrom typing import Callable, Dict, Iterable, List\n\nimport git\nimport numpy as np\nimport torch\nfrom rouge_score import rouge_scorer, scoring\nfrom sacrebleu import corpus_bleu\nfrom torch import nn\nfrom torch.utils.data import Dataset, Sampler\n\nfrom transformers import BartTokenizer\n\n\ndef label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):\n \"\"\"From fairseq\"\"\"\n if target.dim() == lprobs.dim() - 1:\n target = target.unsqueeze(-1)\n nll_loss = -lprobs.gather(dim=-1, index=target)\n smooth_loss = -lprobs.sum(dim=-1, keepdim=True)\n if ignore_index is not None:\n pad_mask = target.eq(ignore_index)\n nll_loss.masked_fill_(pad_mask, 0.0)\n smooth_loss.masked_fill_(pad_mask, 0.0)\n bs = pad_mask.long().sum()\n else:\n nll_loss = nll_loss.squeeze(-1)\n smooth_loss = smooth_loss.squeeze(-1)\n bs = lprobs.shape[0]\n\n nll_loss = nll_loss.sum() # mean()? Scared to break other math.\n smooth_loss = smooth_loss.sum()\n eps_i = epsilon / lprobs.size(-1)\n loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss\n return loss / bs, nll_loss / bs\n\n\ndef encode_line(tokenizer, line, max_length, pad_to_max_length=True, return_tensors=\"pt\"):\n extra_kw = {\"add_prefix_space\": True} if isinstance(tokenizer, BartTokenizer) else {}\n return tokenizer(\n [line],\n max_length=max_length,\n padding=\"max_length\" if pad_to_max_length else None,\n truncation=True,\n return_tensors=return_tensors,\n **extra_kw,\n )\n\n\ndef lmap(f: Callable, x: Iterable) -> List:\n \"\"\"list(map(f, x))\"\"\"\n return list(map(f, x))\n\n\ndef calculate_bleu_score(output_lns, refs_lns, **kwargs) -> dict:\n \"\"\"Uses sacrebleu's corpus_bleu implementation.\"\"\"\n return {\"bleu\": corpus_bleu(output_lns, [refs_lns], **kwargs).score}\n\n\ndef trim_batch(\n input_ids, pad_token_id, attention_mask=None,\n):\n \"\"\"Remove columns that are populated exclusively by pad_token_id\"\"\"\n keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)\n if attention_mask is None:\n return input_ids[:, keep_column_mask]\n else:\n return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])\n\n\nclass Seq2SeqDataset(Dataset):\n def __init__(\n self,\n tokenizer,\n data_dir,\n max_source_length,\n max_target_length,\n type_path=\"train\",\n n_obs=None,\n src_lang=None,\n tgt_lang=None,\n prefix=\"\",\n ):\n super().__init__()\n self.src_file = Path(data_dir).joinpath(type_path + \".source\")\n self.tgt_file = Path(data_dir).joinpath(type_path + \".target\")\n self.src_lens = self.get_char_lens(self.src_file)\n self.max_source_length = max_source_length\n self.max_target_length = max_target_length\n assert min(self.src_lens) > 0, f\"found empty line in {self.src_file}\"\n self.tokenizer = tokenizer\n self.prefix = prefix\n if n_obs is not None:\n self.src_lens = self.src_lens[:n_obs]\n self.pad_token_id = self.tokenizer.pad_token_id\n self.src_lang = src_lang\n self.tgt_lang = tgt_lang\n\n def __len__(self):\n return len(self.src_lens)\n\n def __getitem__(self, index) -> Dict[str, torch.Tensor]:\n index = index + 1 # linecache starts at 1\n source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip(\"\\n\")\n tgt_line = linecache.getline(str(self.tgt_file), index).rstrip(\"\\n\")\n assert source_line, f\"empty source line for index {index}\"\n assert tgt_line, f\"empty tgt line for index {index}\"\n source_inputs = 
encode_line(self.tokenizer, source_line, self.max_source_length)\n target_inputs = encode_line(self.tokenizer, tgt_line, self.max_target_length)\n\n source_ids = source_inputs[\"input_ids\"].squeeze()\n target_ids = target_inputs[\"input_ids\"].squeeze()\n src_mask = source_inputs[\"attention_mask\"].squeeze()\n return {\n \"input_ids\": source_ids,\n \"attention_mask\": src_mask,\n \"decoder_input_ids\": target_ids,\n }\n\n @staticmethod\n def get_char_lens(data_file):\n return [len(x) for x in Path(data_file).open().readlines()]\n\n def collate_fn(self, batch) -> Dict[str, torch.Tensor]:\n input_ids = torch.stack([x[\"input_ids\"] for x in batch])\n masks = torch.stack([x[\"attention_mask\"] for x in batch])\n target_ids = torch.stack([x[\"decoder_input_ids\"] for x in batch])\n pad_token_id = self.pad_token_id\n y = trim_batch(target_ids, pad_token_id)\n source_ids, source_mask = trim_batch(input_ids, pad_token_id, attention_mask=masks)\n batch = {\n \"input_ids\": source_ids,\n \"attention_mask\": source_mask,\n \"decoder_input_ids\": y,\n }\n return batch\n\n def make_sortish_sampler(self, batch_size):\n return SortishSampler(self.src_lens, batch_size)\n\n\nclass TranslationDataset(Seq2SeqDataset):\n \"\"\"A dataset that calls prepare_translation_batch.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self.max_source_length != self.max_target_length:\n warnings.warn(\n f\"Mbart is using sequence lengths {self.max_source_length}, {self.max_target_length}. \"\n f\"Imbalanced sequence lengths may be undesired for translation tasks\"\n )\n\n def __getitem__(self, index) -> Dict[str, str]:\n index = index + 1 # linecache starts at 1\n source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip(\"\\n\")\n tgt_line = linecache.getline(str(self.tgt_file), index).rstrip(\"\\n\")\n assert source_line, f\"empty source line for index {index}\"\n assert tgt_line, f\"empty tgt line for index {index}\"\n return {\n \"tgt_texts\": tgt_line,\n \"src_texts\": source_line,\n }\n\n def collate_fn(self, batch) -> Dict[str, torch.Tensor]:\n batch_encoding = self.tokenizer.prepare_translation_batch(\n [x[\"src_texts\"] for x in batch],\n src_lang=self.src_lang,\n tgt_texts=[x[\"tgt_texts\"] for x in batch],\n tgt_lang=self.tgt_lang,\n max_length=self.max_source_length,\n max_target_length=self.max_target_length,\n )\n return batch_encoding.data\n\n\nclass SortishSampler(Sampler):\n \"Go through the text data by order of src length with a bit of randomness. 
From fastai repo.\"\n\n def __init__(self, data, batch_size):\n self.data, self.bs = data, batch_size\n\n def key(self, i):\n return self.data[i]\n\n def __len__(self) -> int:\n return len(self.data)\n\n def __iter__(self):\n idxs = np.random.permutation(len(self.data))\n sz = self.bs * 50\n ck_idx = [idxs[i : i + sz] for i in range(0, len(idxs), sz)]\n sort_idx = np.concatenate([sorted(s, key=self.key, reverse=True) for s in ck_idx])\n sz = self.bs\n ck_idx = [sort_idx[i : i + sz] for i in range(0, len(sort_idx), sz)]\n max_ck = np.argmax([self.key(ck[0]) for ck in ck_idx]) # find the chunk with the largest key,\n ck_idx[0], ck_idx[max_ck] = ck_idx[max_ck], ck_idx[0] # then make sure it goes first.\n sort_idx = np.concatenate(np.random.permutation(ck_idx[1:])) if len(ck_idx) > 1 else np.array([], dtype=np.int)\n sort_idx = np.concatenate((ck_idx[0], sort_idx))\n return iter(sort_idx)\n\n\nlogger = getLogger(__name__)\n\n\ndef use_task_specific_params(model, task):\n \"\"\"Update config with summarization specific params.\"\"\"\n task_specific_params = model.config.task_specific_params\n\n if task_specific_params is not None:\n pars = task_specific_params.get(task, {})\n logger.info(f\"using task specific params for {task}: {pars}\")\n model.config.update(pars)\n\n\ndef pickle_load(path):\n \"\"\"pickle.load(path)\"\"\"\n with open(path, \"rb\") as f:\n return pickle.load(f)\n\n\ndef pickle_save(obj, path):\n \"\"\"pickle.dump(obj, path)\"\"\"\n with open(path, \"wb\") as f:\n return pickle.dump(obj, f)\n\n\ndef flatten_list(summary_ids: List[List]):\n return [x for x in itertools.chain.from_iterable(summary_ids)]\n\n\ndef save_git_info(folder_path: str) -> None:\n \"\"\"Save git information to output_dir/git_log.json\"\"\"\n repo_infos = get_git_info()\n save_json(repo_infos, os.path.join(folder_path, \"git_log.json\"))\n\n\ndef save_json(content, path):\n with open(path, \"w\") as f:\n json.dump(content, f, indent=4)\n\n\ndef load_json(path):\n with open(path) as f:\n return json.load(f)\n\n\ndef get_git_info():\n repo = git.Repo(search_parent_directories=True)\n repo_infos = {\n \"repo_id\": str(repo),\n \"repo_sha\": str(repo.head.object.hexsha),\n \"repo_branch\": str(repo.active_branch),\n }\n return repo_infos\n\n\nROUGE_KEYS = [\"rouge1\", \"rouge2\", \"rougeL\"]\n\n\ndef calculate_rouge(output_lns: List[str], reference_lns: List[str], use_stemmer=True) -> Dict:\n scorer = rouge_scorer.RougeScorer(ROUGE_KEYS, use_stemmer=use_stemmer)\n aggregator = scoring.BootstrapAggregator()\n\n for reference_ln, output_ln in zip(reference_lns, output_lns):\n scores = scorer.score(reference_ln, output_ln)\n aggregator.add_scores(scores)\n\n result = aggregator.aggregate()\n return {k: v.mid.fmeasure for k, v in result.items()}\n\n\ndef freeze_params(model: nn.Module):\n for par in model.parameters():\n par.requires_grad = False\n\n\ndef grad_status(model: nn.Module) -> Iterable:\n return (par.requires_grad for par in model.parameters())\n\n\ndef any_requires_grad(model: nn.Module) -> bool:\n return any(grad_status(model))\n\n\ndef assert_all_frozen(model):\n model_grads: List[bool] = list(grad_status(model))\n n_require_grad = sum(lmap(int, model_grads))\n npars = len(model_grads)\n assert not any(model_grads), f\"{n_require_grad/npars:.1%} of {npars} weights require grad\"\n\n\ndef assert_not_all_frozen(model):\n model_grads: List[bool] = list(grad_status(model))\n npars = len(model_grads)\n assert any(model_grads), f\"none of {npars} weights require grad\"\n"
] |
[
[
"torch.stack",
"numpy.array",
"numpy.random.permutation",
"numpy.concatenate"
]
] |
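
Editor's aside: the seq2seq utils record lists torch.stack plus three numpy calls. The snippet below replays the collate and sortish-sampling pattern on invented toy lengths; the chunk size of 2 and the sample tensors are illustrative only.

```python
import numpy as np
import torch

# torch.stack: batch a list of equal-length tensors, as in Seq2SeqDataset.collate_fn.
batch = [torch.tensor([1, 2, 3]), torch.tensor([4, 5, 6])]
input_ids = torch.stack(batch)                           # shape (2, 3)

# numpy.array / random.permutation / concatenate: the shuffling idea behind SortishSampler.
lengths = np.array([5, 12, 3, 9, 7])                     # pretend source-text lengths
idxs = np.random.permutation(len(lengths))
chunks = [idxs[i:i + 2] for i in range(0, len(idxs), 2)]
order = np.concatenate([sorted(c, key=lambda i: lengths[i], reverse=True) for c in chunks])
print(input_ids.shape, order)
```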
fhvilshoj/EvaluatingCounterfactuals
|
[
"136fd5f7fca5c886082c0597d2fc581705113870"
] |
[
"evaluate/metrics/lvs.py"
] |
[
"import os\nimport numpy as np\nfrom tensorflow.keras.models import load_model\n\n# Local imports\nfrom .utils import fix_shape, get_model_dir\nfrom .computer import Computer\n\nMODEL_NAMES = {\n 'fakemnist': ['independent.h5', 'independent_mnist.h5'],\n 'celeba': ['independent_makeup.h5', 'independent_cheekbones.h5', 'independent_attractive.h5', 'independent_lipstick.h5', 'independent_smile.h5'],\n}\n\nLABEL_NAMES = { \n 'fakemnist': ['FakeMNIST', 'MNIST'],\n 'celeba': [ 'Heavy_Makeup', 'High_Cheekbones', 'Attractive', 'Wearing_Lipstick', 'Smiling' ],\n}\n\n\ndef KL(P, Q, e=1e-3):\n return P * (np.log(P + (e * P==0)) - np.log(Q+(e*Q==0)))\n\ndef JS(P, Q):\n if len(P.shape) == 1: \n P_ = np.concatenate([1-P.reshape(-1, 1), P.reshape(-1, 1)], -1)\n Q_ = np.concatenate([1-Q.reshape(-1, 1), Q.reshape(-1, 1)], -1)\n else:\n P_ = P\n Q_ = Q \n \n M = 0.5 * ( P_ + Q_ )\n js = 0.5 *( KL(P_, M) + KL(Q_, M) ) \n js = js.sum(-1) \n return js, np.mean(js), np.std(js), P.shape[0]\n\n\nclass LabelVariationScore(Computer):\n def __init__(self, *args, model=None, **kwargs):\n super(LabelVariationScore, self).__init__(*args, **kwargs)\n\n self.load_models()\n\n self.name = type(self).__name__ \n self.desc = \"\"\"LVS:\n \n E_{x~Px} [D_JS( o(x) || o( cf(x) ) )]\n \n \"\"\"\n\n def load_models(self):\n dataset = self.cfg.get('data', 'dataset').lower().replace(\"_\", \"\")\n try: \n model_names = MODEL_NAMES[dataset]\n self.label_names = LABEL_NAMES[dataset]\n except KeyError:\n raise ValueError(\"LVS can only be computed for FakeMNIST and Celeba-HQ\")\n self.class_label_models = [load_model(get_model_dir(self.cfg, mn)) for mn in model_names]\n\n def score_fn(self, model): \n def score(i, x, xcf, y, yhat, ycf, yhatcf):\n x = fix_shape(x)\n xcf = fix_shape(xcf) \n\n p = model.predict(x).squeeze()\n q = model.predict(xcf).squeeze()\n return p, q \n\n return lambda i, args: score(i, *args)\n\n def compute_divergences(self, s): \n P = np.array(list( map( lambda x: x[0], s) ) )\n Q = np.array(list( map( lambda x: x[1], s) ) )\n\n _, mu, std, n = JS(P, Q)\n ci = 1.96*std/np.sqrt(n)\n\n return mu.astype(float), float(ci), len(s)\n\n def compute(self, *args): \n aggr_fn = self.compute_divergences\n res = {}\n\n for (label_name, model) in zip(self.label_names, self.class_label_models): \n elt_fn = self.score_fn(model) \n res[label_name] = self._iterate(elt_fn, aggr_fn, *args)\n\n return res\n\n"
] |
[
[
"numpy.std",
"numpy.mean",
"numpy.log",
"numpy.sqrt"
]
] |
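
Editor's aside: the LVS metric above reduces to a Jensen-Shannon divergence built from numpy.log, numpy.mean, numpy.std and numpy.sqrt. Here is a toy reproduction on two hand-written 2x2 probability tables, not the CelebA/FakeMNIST classifier outputs the repository uses.

```python
import numpy as np

P = np.array([[0.9, 0.1], [0.2, 0.8]])                   # "original" class probabilities
Q = np.array([[0.8, 0.2], [0.3, 0.7]])                   # "counterfactual" probabilities
M = 0.5 * (P + Q)

def kl(a, b):
    # Row-wise KL divergence; inputs here are strictly positive, so no epsilon is needed.
    return (a * (np.log(a) - np.log(b))).sum(-1)

js = 0.5 * (kl(P, M) + kl(Q, M))                         # one JS value per sample
mu, std = np.mean(js), np.std(js)
ci = 1.96 * std / np.sqrt(len(js))                       # 95% confidence half-width
print(mu, std, ci)
```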
BrancoLab/Kino
|
[
"0e914e3d65fdf76e4efa95b9848cb30da3653f3d"
] |
[
"scripts/animation_complete.py"
] |
[
"import sys\n\nsys.path.append(\"./\")\n\nimport pandas as pd\n\n\nfrom kino.animal import mouse\nfrom kino.locomotion import Locomotion\nfrom kino.animate import CompleteAnimation\n\n\"\"\"\n Creates a slowmo animation of the locomotion bout\n in the allocentric and egocentric views\n\"\"\"\n\ntracking = pd.read_hdf(\"scripts/example_tracking.h5\")\n\n\nlocomotion = Locomotion(mouse, tracking, fps=60)\n\nfor bp in locomotion.bodyparts.values():\n bp.thetadot[0] = 0\n\negocentric = locomotion.to_egocentric()\n\nanim = CompleteAnimation(\n locomotion,\n egocentric,\n fps=60,\n bodyparts=[\"right_fl\", \"right_hl\", \"left_fl\", \"left_hl\", \"com\"],\n)\nanim.animate(\"cache/complete_locomotion.mp4\")\n"
] |
[
[
"pandas.read_hdf"
]
] |
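
Editor's aside: the Kino animation script depends on a single pandas call, pandas.read_hdf. The round trip below is self-contained under an assumed file name and key (the layout of the real scripts/example_tracking.h5 is not documented in the record) and needs the optional PyTables backend installed.

```python
import pandas as pd

# Write a tiny frame to HDF5, then read it back the way the script loads its tracking data.
df = pd.DataFrame({"x": [0.0, 1.0, 2.0], "y": [3.0, 2.0, 1.0]})
df.to_hdf("example_tracking.h5", key="tracking", mode="w")   # requires the 'tables' package
tracking = pd.read_hdf("example_tracking.h5", key="tracking")
print(tracking.head())
```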
guanghuixu/kg2text
|
[
"c70fd9a60b30c5d2136f7a413452e8b550c7b8da",
"c70fd9a60b30c5d2136f7a413452e8b550c7b8da"
] |
[
"graph2text/onmt/decoders/transformer.py",
"graph2text/onmt/utils/bleu_eval.py"
] |
[
"\"\"\"\nImplementation of \"Attention is All You Need\"\n\"\"\"\n\nimport torch\nimport torch.nn as nn\n\nfrom onmt.decoders.decoder import DecoderBase\nfrom onmt.modules import MultiHeadedAttention, AverageAttention\nfrom onmt.modules.position_ffn import PositionwiseFeedForward\nfrom onmt.utils.misc import sequence_mask\nfrom pytorch_transformers.modeling_bert import BertConfig\nfrom onmt.decoders.rewriter import Rewriter\n\nclass TransformerDecoderLayer(nn.Module):\n \"\"\"\n Args:\n d_model (int): the dimension of keys/values/queries in\n :class:`MultiHeadedAttention`, also the input size of\n the first-layer of the :class:`PositionwiseFeedForward`.\n heads (int): the number of heads for MultiHeadedAttention.\n d_ff (int): the second-layer of the :class:`PositionwiseFeedForward`.\n dropout (float): dropout probability.\n self_attn_type (string): type of self-attention scaled-dot, average\n \"\"\"\n\n def __init__(self, d_model, heads, d_ff, dropout, attention_dropout,\n self_attn_type=\"scaled-dot\", max_relative_positions=0,\n aan_useffn=False, full_context_alignment=False,\n alignment_heads=None):\n super(TransformerDecoderLayer, self).__init__()\n\n if self_attn_type == \"scaled-dot\":\n self.self_attn = MultiHeadedAttention(\n heads, d_model, dropout=dropout,\n max_relative_positions=max_relative_positions)\n elif self_attn_type == \"average\":\n self.self_attn = AverageAttention(d_model,\n dropout=attention_dropout,\n aan_useffn=aan_useffn)\n\n self.context_attn = MultiHeadedAttention(\n heads, d_model, dropout=attention_dropout)\n self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)\n self.layer_norm_1 = nn.LayerNorm(d_model, eps=1e-6)\n self.layer_norm_2 = nn.LayerNorm(d_model, eps=1e-6)\n self.drop = nn.Dropout(dropout)\n self.full_context_alignment = full_context_alignment\n self.alignment_heads = alignment_heads\n\n def forward(self, *args, **kwargs):\n \"\"\" Extend _forward for (possibly) multiple decoder pass:\n 1. Always a default (future masked) decoder forward pass,\n 2. 
Possibly a second future aware decoder pass for joint learn\n full context alignement.\n\n Args:\n * All arguments of _forward.\n with_align (bool): whether return alignment attention.\n\n Returns:\n (FloatTensor, FloatTensor, FloatTensor or None):\n\n * output ``(batch_size, 1, model_dim)``\n * top_attn ``(batch_size, 1, src_len)``\n * attn_align ``(batch_size, 1, src_len)`` or None\n \"\"\"\n with_align = kwargs.pop('with_align', False)\n output, attns = self._forward(*args, **kwargs)\n top_attn = attns[:, 0, :, :].contiguous()\n attn_align = None\n if with_align:\n if self.full_context_alignment:\n # return _, (B, Q_len, K_len)\n _, attns = self._forward(*args, **kwargs, future=True)\n\n if self.alignment_heads is not None:\n attns = attns[:, :self.alignment_heads, :, :].contiguous()\n # layer average attention across heads, get ``(B, Q, K)``\n # Case 1: no full_context, no align heads -> layer avg baseline\n # Case 2: no full_context, 1 align heads -> guided align\n # Case 3: full_context, 1 align heads -> full cte guided align\n attn_align = attns.mean(dim=1)\n return output, top_attn, attn_align\n\n def _forward(self, inputs, memory_bank, src_pad_mask, tgt_pad_mask,\n layer_cache=None, step=None, future=False):\n \"\"\" A naive forward pass for transformer decoder.\n # TODO: change 1 to T as T could be 1 or tgt_len\n Args:\n inputs (FloatTensor): ``(batch_size, 1, model_dim)``\n memory_bank (FloatTensor): ``(batch_size, src_len, model_dim)``\n src_pad_mask (LongTensor): ``(batch_size, 1, src_len)``\n tgt_pad_mask (LongTensor): ``(batch_size, 1, 1)``\n\n Returns:\n (FloatTensor, FloatTensor):\n\n * output ``(batch_size, 1, model_dim)``\n * attns ``(batch_size, head, 1, src_len)``\n\n \"\"\"\n dec_mask = None\n\n if step is None:\n tgt_len = tgt_pad_mask.size(-1)\n if not future: # apply future_mask, result mask in (B, T, T)\n future_mask = torch.ones(\n [tgt_len, tgt_len],\n device=tgt_pad_mask.device,\n dtype=torch.uint8)\n future_mask = future_mask.triu_(1).view(1, tgt_len, tgt_len)\n # BoolTensor was introduced in pytorch 1.2\n try:\n future_mask = future_mask.bool()\n except AttributeError:\n pass\n dec_mask = torch.gt(tgt_pad_mask + future_mask, 0)\n else: # only mask padding, result mask in (B, 1, T)\n dec_mask = tgt_pad_mask\n\n input_norm = self.layer_norm_1(inputs)\n\n if isinstance(self.self_attn, MultiHeadedAttention):\n query, _ = self.self_attn(input_norm, input_norm, input_norm,\n mask=dec_mask,\n layer_cache=layer_cache,\n attn_type=\"self\")\n elif isinstance(self.self_attn, AverageAttention):\n query, _ = self.self_attn(input_norm, mask=dec_mask,\n layer_cache=layer_cache, step=step)\n\n query = self.drop(query) + inputs\n\n query_norm = self.layer_norm_2(query)\n mid, attns = self.context_attn(memory_bank, memory_bank, query_norm,\n mask=src_pad_mask,\n layer_cache=layer_cache,\n attn_type=\"context\")\n output = self.feed_forward(self.drop(mid) + query)\n\n return output, attns\n\n def update_dropout(self, dropout, attention_dropout):\n self.self_attn.update_dropout(attention_dropout)\n self.context_attn.update_dropout(attention_dropout)\n self.feed_forward.update_dropout(dropout)\n self.drop.p = dropout\n\n\nclass TransformerDecoder(DecoderBase):\n \"\"\"The Transformer decoder from \"Attention is All You Need\".\n :cite:`DBLP:journals/corr/VaswaniSPUJGKP17`\n\n .. 
mermaid::\n\n graph BT\n A[input]\n B[multi-head self-attn]\n BB[multi-head src-attn]\n C[feed forward]\n O[output]\n A --> B\n B --> BB\n BB --> C\n C --> O\n\n\n Args:\n num_layers (int): number of encoder layers.\n d_model (int): size of the model\n heads (int): number of heads\n d_ff (int): size of the inner FF layer\n copy_attn (bool): if using a separate copy attention\n self_attn_type (str): type of self-attention scaled-dot, average\n dropout (float): dropout parameters\n embeddings (onmt.modules.Embeddings):\n embeddings to use, should have positional encodings\n \"\"\"\n\n def __init__(self, num_layers, d_model, heads, d_ff,\n copy_attn, self_attn_type, dropout, attention_dropout,\n embeddings, max_relative_positions, aan_useffn,\n full_context_alignment, alignment_layer,\n alignment_heads=None):\n super(TransformerDecoder, self).__init__()\n\n self.embeddings = embeddings\n\n # Decoder State\n self.state = {}\n\n self.transformer_layers = nn.ModuleList(\n [TransformerDecoderLayer(d_model, heads, d_ff, dropout,\n attention_dropout, self_attn_type=self_attn_type,\n max_relative_positions=max_relative_positions,\n aan_useffn=aan_useffn,\n full_context_alignment=full_context_alignment,\n alignment_heads=alignment_heads)\n for i in range(num_layers)])\n\n # previously, there was a GlobalAttention module here for copy\n # attention. But it was never actually used -- the \"copy\" attention\n # just reuses the context attention.\n self._copy = copy_attn\n self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)\n\n self.alignment_layer = alignment_layer\n\n self.rewriter = Rewriter(BertConfig(hidden_size=d_model, \\\n num_attention_heads=8, num_hidden_layers=4))\n\n @classmethod\n def from_opt(cls, opt, embeddings):\n \"\"\"Alternate constructor.\"\"\"\n return cls(\n opt.dec_layers,\n opt.dec_rnn_size,\n opt.heads,\n opt.transformer_ff,\n opt.copy_attn,\n opt.self_attn_type,\n opt.dropout[0] if type(opt.dropout) is list else opt.dropout,\n opt.attention_dropout[0] if type(opt.attention_dropout)\n is list else opt.dropout,\n embeddings,\n opt.max_relative_positions,\n opt.aan_useffn,\n opt.full_context_alignment,\n opt.alignment_layer,\n alignment_heads=opt.alignment_heads)\n\n def init_state(self, src, memory_bank, enc_hidden):\n \"\"\"Initialize decoder state.\"\"\"\n self.state[\"src\"] = src\n self.state[\"cache\"] = None\n\n def map_state(self, fn):\n def _recursive_map(struct, batch_dim=0):\n for k, v in struct.items():\n if v is not None:\n if isinstance(v, dict):\n _recursive_map(v)\n else:\n struct[k] = fn(v, batch_dim)\n\n self.state[\"src\"] = fn(self.state[\"src\"], 1)\n if self.state[\"cache\"] is not None:\n _recursive_map(self.state[\"cache\"])\n\n def detach_state(self):\n self.state[\"src\"] = self.state[\"src\"].detach()\n\n def forward(self, tgt, memory_bank, step=None, **kwargs):\n \"\"\"Decode, possibly stepwise.\"\"\"\n if step == 0:\n self._init_cache(memory_bank)\n\n tgt_words = tgt[:, :, 0].transpose(0, 1)\n\n emb = self.embeddings(tgt, step=step)\n assert emb.dim() == 3 # len x batch x embedding_dim\n\n output = emb.transpose(0, 1).contiguous()\n src_memory_bank = memory_bank.transpose(0, 1).contiguous()\n\n pad_idx = self.embeddings.word_padding_idx\n src_lens = kwargs[\"memory_lengths\"]\n src_max_len = self.state[\"src\"].shape[0]\n src_pad_mask = ~sequence_mask(src_lens, src_max_len).unsqueeze(1)\n tgt_pad_mask = tgt_words.data.eq(pad_idx).unsqueeze(1) # [B, 1, T_tgt]\n\n with_align = kwargs.pop('with_align', False)\n attn_aligns = []\n\n for i, layer in 
enumerate(self.transformer_layers):\n layer_cache = self.state[\"cache\"][\"layer_{}\".format(i)] \\\n if step is not None else None\n output, attn, attn_align = layer(\n output,\n src_memory_bank,\n src_pad_mask,\n tgt_pad_mask,\n layer_cache=layer_cache,\n step=step,\n with_align=with_align)\n if attn_align is not None:\n attn_aligns.append(attn_align)\n \n re_output = self.rewriter(src_memory_bank, src_pad_mask.squeeze(1).float(), \\\n output, tgt_pad_mask.squeeze(1).float())\n\n output = self.layer_norm(output)\n dec_outs = output.transpose(0, 1).contiguous()\n attn = attn.transpose(0, 1).contiguous()\n\n attns = {\"std\": attn}\n if self._copy:\n attns[\"copy\"] = attn\n if with_align:\n attns[\"align\"] = attn_aligns[self.alignment_layer] # `(B, Q, K)`\n # attns[\"align\"] = torch.stack(attn_aligns, 0).mean(0) # All avg\n\n # TODO change the way attns is returned dict => list or tuple (onnx)\n # return dec_outs, attns\n return (dec_outs,self.layer_norm(output).transpose(0, 1).contiguous()), attns\n\n def _init_cache(self, memory_bank):\n self.state[\"cache\"] = {}\n batch_size = memory_bank.size(1)\n depth = memory_bank.size(-1)\n\n for i, layer in enumerate(self.transformer_layers):\n layer_cache = {\"memory_keys\": None, \"memory_values\": None}\n if isinstance(layer.self_attn, AverageAttention):\n layer_cache[\"prev_g\"] = torch.zeros((batch_size, 1, depth),\n device=memory_bank.device)\n else:\n layer_cache[\"self_keys\"] = None\n layer_cache[\"self_values\"] = None\n self.state[\"cache\"][\"layer_{}\".format(i)] = layer_cache\n\n def update_dropout(self, dropout, attention_dropout):\n self.embeddings.update_dropout(dropout)\n for layer in self.transformer_layers:\n layer.update_dropout(dropout, attention_dropout)\n",
"import pickle\nimport os\nimport collections\nimport sys\n\n# sys.path.append('./pycocoevalcap')\nfrom pycocoevalcap.bleu.bleu import Bleu\nfrom pycocoevalcap.rouge.rouge import Rouge\nfrom pycocoevalcap.meteor.meteor import Meteor\n#from pycocoevalcap.cider.cider import Cider\n\nimport torch\nimport torch.nn as nn\n\nimport onmt\nimport pdb\ncount_n = 0\n\nclass Evaluate(nn.Module):\n def __init__(self, vocab_file='outputs/vocabs.txt'):\n super(Evaluate, self).__init__()\n self.scorers = [\n (Bleu(4), [\"Bleu_1\", \"Bleu_2\", \"Bleu_3\", \"Bleu_4\"]),\n # (Rouge(), \"ROUGE_L\")\n ]\n with open(vocab_file, encoding='utf-8') as f:\n vocab_list = f.readlines()\n self.vocab = [_.strip('\\n') for _ in vocab_list]\n self.padding_idx = self.vocab.index('<blank>')\n\n def forward(self, output1, output2, target, generator_func=None):\n # [length, batch, class_n]\n if generator_func:\n output1 = generator_func(output1)\n output2 = generator_func(output2)\n\n output1 = torch.log_softmax(output1, dim=-1)\n output1_idx = output1.argmax(dim=-1)\n\n # output2 = torch.log_softmax(output2, dim=-1)\n # output2_p, output2_idx = output2.max(dim=-1)\n\n target = target.squeeze(-1)\n batch_stats = onmt.utils.Statistics()\n bleu_loss = 0\n for i in range(target.size(1)):\n action, select_log_p, entropy = self.impl(torch.softmax(output2[:, i], dim=-1))\n target_sentence = [self.vocab[x.item()] for x in target[:, i]]\n output1_sentence = [self.vocab[x.item()] for x in output1_idx[:, i]]\n output2_sentence = [self.vocab[x.item()] for x in action]\n \n target_sentence = {'sentence': [' '.join(target_sentence)]}\n output1_sentence = {'sentence': ' '.join(output1_sentence)}\n output2_sentence = {'sentence': ' '.join(output2_sentence)}\n\n bleu_1 = self.evaluate(live=True, cand=output1_sentence, ref=target_sentence)\n bleu_2 = self.evaluate(live=True, cand=output2_sentence, ref=target_sentence)\n\n bleu_diff = bleu_2 - bleu_1\n mask = action != output1_idx[:, i]\n loss = - bleu_diff * select_log_p[mask] # - 0.003 * entropy[mask]\n loss = loss.sum() / mask.sum()\n bleu_loss += loss / target.size(1)\n\n global count_n\n count_n += 1\n if count_n % 1000 == 0:\n print('BLEU_1: {} | BLEU_2: {} | loss: {}_{} | entropy: {} | mask: {}'.format(\\\n bleu_1, bleu_2, loss.item(), bleu_loss.item(), entropy[mask].sum().item(), \\\n mask.sum().item() / target.size(0)))\n\n batch_stats.update(self.stats(loss, action, target[:, i]))\n return bleu_loss, batch_stats\n\n def stats(self, loss, pred, target):\n \"\"\"\n Args:\n loss (:obj:`FloatTensor`): the loss computed by the loss criterion.\n scores (:obj:`FloatTensor`): a score for each possible output\n target (:obj:`FloatTensor`): true targets\n\n Returns:\n :obj:`onmt.utils.Statistics` : statistics for this batch.\n \"\"\"\n # pred = scores.max(1)[1]\n non_padding = target.ne(self.padding_idx)\n num_correct = pred.eq(target).masked_select(non_padding).sum().item()\n num_non_padding = non_padding.sum().item()\n return onmt.utils.Statistics(loss.item(), num_non_padding, num_correct)\n\n def impl(self, probs):\n m = torch.distributions.Categorical(probs=probs)\n action = m.sample().view(-1)\n select_log_p = m.log_prob(action)\n entropy = m.entropy()\n return action, select_log_p, entropy\n\n def convert(self, data):\n if isinstance(data, basestring):\n return data.encode('utf-8')\n elif isinstance(data, collections.Mapping):\n return dict(map(convert, data.items()))\n elif isinstance(data, collections.Iterable):\n return type(data)(map(convert, data))\n else:\n return data\n\n def 
score(self, ref, hypo):\n final_scores = {}\n for scorer, method in self.scorers:\n score, scores = scorer.compute_score(ref, hypo)\n if type(score) == list:\n for m, s in zip(method, score):\n final_scores[m] = s\n else:\n final_scores[method] = score\n\n return final_scores\n\n def evaluate(self, live=False, **kwargs):\n if live:\n temp_ref = kwargs.pop('ref', {})\n cand = kwargs.pop('cand', {})\n else:\n reference_path = kwargs.pop('ref', '')\n candidate_path = kwargs.pop('cand', '')\n\n # load caption data\n with open(reference_path, 'rb') as f:\n temp_ref = pickle.load(f)\n with open(candidate_path, 'rb') as f:\n cand = pickle.load(f)\n\n # make dictionary\n hypo = {}\n ref = {}\n i = 0\n for vid, caption in cand.items():\n hypo[i] = [caption]\n ref[i] = temp_ref[vid]\n i += 1\n\n # compute scores\n final_scores = self.score(ref, hypo)\n final_scores = final_scores['Bleu_4'] # 0.1 * final_scores['Bleu_1'] + 0.2 * final_scores['Bleu_2'] + \\\n # 0.3 * final_scores['Bleu_3'] + 0.4 * final_scores['Bleu_4']\n return final_scores\n\n\nif __name__ == '__main__':\n '''\n cands = {'generated_description1': 'how are you', 'generated_description2': 'Hello how are you'}\n refs = {'generated_description1': ['what are you', 'where are you'],\n 'generated_description2': ['Hello how are you', 'Hello how is your day']}\n '''\n cands = ['how are you', 'Hello how are you']\n refs = ['how are you', 'Hello how are you']\n cands = {'generated_description'+str(i):x.strip() for i,x in enumerate(cands)}\n refs = {'generated_description'+str(i):[x.strip()] for i,x in enumerate(refs)}\n x = Evaluate()\n final_scores = x.evaluate(live=True, cand=cands, ref=refs)\n print(final_scores)\n"
] |
[
[
"torch.nn.Dropout",
"torch.ones",
"torch.zeros",
"torch.nn.LayerNorm",
"torch.gt"
],
[
"torch.log_softmax",
"torch.softmax",
"torch.distributions.Categorical"
]
] |
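
Editor's aside: the kg2text record covers two files. The sketch touches the decoder-side primitives from the first (a future mask via torch.ones/torch.zeros/torch.gt, LayerNorm, Dropout) and the sampling path from the second (softmax, log_softmax, Categorical). All shapes and the dummy logits are invented.

```python
import torch
import torch.nn as nn

tgt_len, d_model = 4, 8

# Future (causal) mask as in TransformerDecoderLayer._forward.
pad_mask = torch.zeros(1, 1, tgt_len, dtype=torch.uint8)
future = torch.ones(tgt_len, tgt_len, dtype=torch.uint8).triu_(1).view(1, tgt_len, tgt_len)
dec_mask = torch.gt(pad_mask + future, 0)                # (1, tgt_len, tgt_len) boolean mask

# LayerNorm + Dropout over a dummy activation.
x = torch.randn(1, tgt_len, d_model)
x = nn.Dropout(p=0.1)(nn.LayerNorm(d_model, eps=1e-6)(x))

# Sampling path from bleu_eval.py: softmax -> Categorical -> log-prob / entropy.
logits = torch.randn(tgt_len, 10)
m = torch.distributions.Categorical(probs=torch.softmax(logits, dim=-1))
action = m.sample()
print(dec_mask.shape, x.shape,
      torch.log_softmax(logits, dim=-1).shape, m.log_prob(action).shape, m.entropy().shape)
```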
jseparovic/ib_insync
|
[
"fc85fb0181b358115fdae86ab16dceb9dc3590bd"
] |
[
"ib_insync/util.py"
] |
[
"import datetime\nimport logging\nimport sys\nimport math\nimport signal\nimport asyncio\nimport time\nfrom typing import List, Iterator\nfrom collections.abc import Awaitable\n\nfrom ib_insync.objects import Object, DynamicObject\n\n\ndef df(objs, labels=None):\n \"\"\"\n Create pandas DataFrame from the sequence of same-type objects.\n When a list of labels is given then only retain those labels and\n drop the rest.\n \"\"\"\n import pandas as pd\n if objs:\n objs = list(objs)\n obj = objs[0]\n if isinstance(obj, Object):\n df = pd.DataFrame.from_records(o.tuple() for o in objs)\n df.columns = obj.__class__.defaults\n elif isinstance(obj, DynamicObject):\n df = pd.DataFrame.from_records(o.__dict__ for o in objs)\n else:\n df = pd.DataFrame.from_records(objs)\n if isinstance(obj, tuple) and hasattr(obj, '_fields'):\n # assume it's a namedtuple\n df.columns = obj.__class__._fields\n else:\n df = None\n if labels:\n exclude = [label for label in df if label not in labels]\n df = df.drop(exclude, axis=1)\n return df\n\n\ndef tree(obj):\n \"\"\"\n Convert object to a tree of lists, dicts and simple values.\n The result can be serialized to JSON.\n \"\"\"\n if isinstance(obj, (bool, int, float, str, bytes)):\n return obj\n elif isinstance(obj, (datetime.date, datetime.time)):\n return obj.isoformat()\n elif isinstance(obj, dict):\n return {k: tree(v) for k, v in obj.items()}\n elif isinstance(obj, (list, tuple, set)):\n return [tree(i) for i in obj]\n elif isinstance(obj, Object):\n return {obj.__class__.__name__: tree(obj.nonDefaults())}\n else:\n return str(obj)\n\n\ndef barplot(bars, title='', upColor='blue', downColor='red'):\n \"\"\"\n Create candlestick plot for the given bars. The bars can be given as\n a DataFrame or as a list of bar objects.\n \"\"\"\n import pandas as pd\n import matplotlib.pyplot as plt\n from matplotlib.lines import Line2D\n from matplotlib.patches import Rectangle\n\n if isinstance(bars, pd.DataFrame):\n ohlcTups = [tuple(v) for v in\n bars[['open', 'high', 'low', 'close']].values]\n else:\n ohlcTups = [(b.open, b.high, b.low, b.close) for b in bars]\n\n fig, ax = plt.subplots()\n ax.set_title(title)\n ax.grid(True)\n fig.set_size_inches(10, 6)\n for n, (open_, high, low, close) in enumerate(ohlcTups):\n if close >= open_:\n color = upColor\n bodyHi, bodyLo = close, open_\n else:\n color = downColor\n bodyHi, bodyLo = open_, close\n line = Line2D(\n xdata=(n, n),\n ydata=(low, bodyLo),\n color=color,\n linewidth=1)\n ax.add_line(line)\n line = Line2D(\n xdata=(n, n),\n ydata=(high, bodyHi),\n color=color,\n linewidth=1)\n ax.add_line(line)\n rect = Rectangle(\n xy=(n - 0.3, bodyLo),\n width=0.6,\n height=bodyHi - bodyLo,\n edgecolor=color,\n facecolor=color,\n alpha=0.4,\n antialiased=True\n )\n ax.add_patch(rect)\n\n ax.autoscale_view()\n return fig\n\n\ndef allowCtrlC():\n \"\"\"\n Allow Control-C to end program.\n \"\"\"\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n\ndef logToFile(path, level=logging.INFO, ibapiLevel=logging.ERROR):\n \"\"\"\n Create a log handler that logs to the given file.\n \"\"\"\n logger = logging.getLogger()\n f = RootLogFilter(ibapiLevel)\n logger.addFilter(f)\n logger.setLevel(level)\n formatter = logging.Formatter(\n '%(asctime)s %(name)s %(levelname)s %(message)s')\n handler = logging.FileHandler(path)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n\ndef logToConsole(level=logging.INFO, ibapiLevel=logging.ERROR):\n \"\"\"\n Create a log handler that logs to the console.\n \"\"\"\n logger = 
logging.getLogger()\n f = RootLogFilter(ibapiLevel)\n logger.addFilter(f)\n logger.setLevel(level)\n formatter = logging.Formatter(\n '%(asctime)s %(name)s %(levelname)s %(message)s')\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger.handlers = [h for h in logger.handlers\n if type(h) is not logging.StreamHandler]\n logger.addHandler(handler)\n\n\nclass RootLogFilter:\n\n def __init__(self, ibapiLevel=logging.ERROR):\n self.ibapiLevel = ibapiLevel\n\n def filter(self, record):\n # if it's logged on the root logger assume it's from ibapi\n if record.name == 'root' and record.levelno < self.ibapiLevel:\n return False\n else:\n return True\n\n\ndef isNan(x: float):\n \"\"\"\n Not a number test.\n \"\"\"\n return x != x\n\n\ndef formatSI(n):\n \"\"\"\n Format the integer or float n to 3 significant digits + SI prefix.\n \"\"\"\n s = ''\n if n < 0:\n n = -n\n s += '-'\n if type(n) is int and n < 1000:\n s = str(n) + ' '\n elif n < 1e-22:\n s = '0.00 '\n else:\n assert n < 9.99e26\n log = int(math.floor(math.log10(n)))\n i, j = divmod(log, 3)\n for _try in range(2):\n templ = '%.{}f'.format(2 - j)\n val = templ % (n * 10 ** (-3 * i))\n if val != '1000':\n break\n i += 1\n j = 0\n s += val + ' '\n if i != 0:\n s += 'yzafpnm kMGTPEZY'[i + 7]\n return s\n\n\nclass timeit:\n \"\"\"\n Context manager for timing.\n \"\"\"\n\n def __init__(self, title='Run'):\n self.title = title\n\n def __enter__(self):\n self.t0 = time.time()\n\n def __exit__(self, *_args):\n print(self.title + ' took ' + formatSI(time.time() - self.t0) + 's')\n\n\ndef run(*awaitables: List[Awaitable], timeout=None):\n \"\"\"\n By default run the event loop forever.\n\n When awaitables (like Tasks, Futures or coroutines) are given then\n run the event loop until each has completed and return their results.\n \n An optional timeout (in seconds) can be given that will raise\n asyncio.TimeoutError if the awaitables are not ready within the\n timeout period.\n \"\"\"\n loop = asyncio.get_event_loop()\n if not awaitables:\n if loop.is_running():\n return\n result = loop.run_forever()\n f = asyncio.gather(*asyncio.Task.all_tasks())\n f.cancel()\n try:\n loop.run_until_complete(f)\n except asyncio.CancelledError:\n pass\n else:\n if len(awaitables) == 1:\n future = awaitables[0]\n else:\n future = asyncio.gather(*awaitables)\n if timeout:\n future = asyncio.wait_for(future, timeout)\n result = syncAwait(future)\n return result\n\n\ndef schedule(time, callback, *args):\n \"\"\"\n Schedule the callback to be run at the given time with\n the given arguments.\n \"\"\"\n loop = asyncio.get_event_loop()\n if isinstance(time, datetime.time):\n dt = datetime.datetime.combine(datetime.date.today(), time)\n else:\n dt = time\n now = datetime.datetime.now(dt.tzinfo)\n delay = (dt - now).total_seconds()\n loop.call_later(delay, callback, *args)\n\n\ndef sleep(secs: float=0.02) -> True:\n \"\"\"\n Wait for the given amount of seconds while everything still keeps\n processing in the background. 
Never use time.sleep().\n \"\"\"\n run(asyncio.sleep(secs))\n return True\n\n\ndef timeRange(start: datetime.time, end: datetime.time,\n step: float) -> Iterator[datetime.datetime]:\n \"\"\"\n Iterator that waits periodically until certain time points are\n reached while yielding those time points.\n \n The startTime and dateTime parameters can be specified as\n datetime.datetime, or as datetime.time in which case today\n is used as the date.\n \n The step parameter is the number of seconds of each period.\n \"\"\"\n assert step > 0\n if isinstance(start, datetime.time):\n start = datetime.datetime.combine(datetime.date.today(), start)\n if isinstance(end, datetime.time):\n end = datetime.datetime.combine(datetime.date.today(), end)\n delta = datetime.timedelta(seconds=step)\n t = start\n while t < datetime.datetime.now():\n t += delta\n while t <= end:\n waitUntil(t)\n yield t\n t += delta\n\n\ndef waitUntil(t: datetime.time) -> True:\n \"\"\"\n Wait until the given time t is reached.\n \n The time can be specified as datetime.datetime,\n or as datetime.time in which case today is used as the date.\n \"\"\"\n if isinstance(t, datetime.time):\n t = datetime.datetime.combine(datetime.date.today(), t)\n now = datetime.datetime.now(t.tzinfo)\n secs = (t - now).total_seconds()\n run(asyncio.sleep(secs))\n return True\n\n\ndef patchAsyncio():\n \"\"\"\n Patch asyncio to use pure Python implementation of Future and Task,\n to deal with nested event loops in syncAwait.\n \"\"\"\n asyncio.Task = asyncio.tasks._CTask = asyncio.tasks.Task = \\\n asyncio.tasks._PyTask\n asyncio.Future = asyncio.futures._CFuture = asyncio.futures.Future = \\\n asyncio.futures._PyFuture\n\n\ndef syncAwait(future):\n \"\"\"\n Synchronously wait until future is done, accounting for the possibility\n that the event loop is already running.\n \"\"\"\n loop = asyncio.get_event_loop()\n\n try:\n import quamash\n isQuamash = isinstance(loop, quamash.QEventLoop)\n except ImportError:\n isQuamash = False\n\n if not loop.is_running():\n result = loop.run_until_complete(future)\n elif isQuamash:\n result = _syncAwaitQt(future)\n else:\n result = _syncAwaitAsyncio(future)\n return result\n\n\ndef _syncAwaitAsyncio(future):\n assert asyncio.Task is asyncio.tasks._PyTask, \\\n 'To allow nested event loops, use util.patchAsyncio()'\n loop = asyncio.get_event_loop()\n preserved_ready = list(loop._ready)\n loop._ready.clear()\n future = asyncio.ensure_future(future)\n current_tasks = asyncio.Task._current_tasks\n preserved_task = current_tasks.get(loop)\n while not future.done():\n loop._run_once()\n if loop._stopping:\n break\n loop._ready.extendleft(preserved_ready)\n if preserved_task is not None:\n current_tasks[loop] = preserved_task\n else:\n current_tasks.pop(loop, None)\n return future.result()\n\n\ndef _syncAwaitQt(future):\n import PyQt5.Qt as qt\n loop = asyncio.get_event_loop()\n future = asyncio.ensure_future(future, loop=loop)\n qLoop = qt.QEventLoop()\n future.add_done_callback(lambda f: qLoop.quit())\n qLoop.exec_()\n return future.result() if future.done() else None\n\n\ndef startLoop():\n \"\"\"\n Use asyncio event loop for Jupyter notebooks.\n \"\"\"\n patchAsyncio()\n loop = asyncio.get_event_loop()\n if not loop.is_running():\n from ipykernel.eventloops import register_integration, enable_gui\n register_integration('asyncio')(_ipython_loop_asyncio)\n enable_gui('asyncio')\n\n\ndef _ipython_loop_asyncio(kernel):\n '''\n Use asyncio event loop for the given IPython kernel.\n '''\n loop = asyncio.get_event_loop()\n\n 
def kernel_handler():\n kernel.do_one_iteration()\n loop.call_later(kernel._poll_interval, kernel_handler)\n\n loop.call_soon(kernel_handler)\n try:\n if not loop.is_running():\n loop.run_forever()\n finally:\n if not loop.is_running():\n loop.run_until_complete(loop.shutdown_asyncgens())\n loop.close()\n\n\ndef useQt():\n \"\"\"\n Integrate asyncio and Qt loops:\n Let the Qt event loop spin the asyncio event loop\n (does not work with nested event loops in Windows)\n \"\"\"\n import PyQt5.Qt as qt\n import quamash\n if isinstance(asyncio.get_event_loop(), quamash.QEventLoop):\n return\n if not qt.QApplication.instance():\n _ = qt.QApplication(sys.argv)\n loop = quamash.QEventLoop()\n asyncio.set_event_loop(loop)\n\n\ndef formatIBDatetime(dt):\n \"\"\"\n Format date or datetime to string that IB uses.\n \"\"\"\n if not dt:\n s = ''\n elif isinstance(dt, datetime.datetime):\n if dt.tzinfo:\n # convert to local system timezone\n dt = dt.astimezone()\n s = dt.strftime('%Y%m%d %H:%M:%S')\n elif isinstance(dt, datetime.date):\n s = dt.strftime('%Y%m%d 23:59:59')\n else:\n s = dt\n return s\n\n\ndef parseIBDatetime(s):\n \"\"\"\n Parse string in IB date or datetime format to datetime.\n \"\"\"\n if len(s) == 8:\n # YYYYmmdd\n y = int(s[0:4])\n m = int(s[4:6])\n d = int(s[6:8])\n dt = datetime.date(y, m, d)\n elif s.isdigit():\n dt = datetime.datetime.fromtimestamp(\n int(s), datetime.timezone.utc)\n else:\n dt = datetime.datetime.strptime(s, '%Y%m%d %H:%M:%S')\n return dt\n"
] |
[
[
"pandas.DataFrame.from_records",
"matplotlib.patches.Rectangle",
"matplotlib.lines.Line2D",
"matplotlib.pyplot.subplots"
]
] |
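
Editor's aside: ib_insync's util.barplot draws candlesticks from Line2D and Rectangle patches on an axis from plt.subplots, after assembling a frame with pandas.DataFrame.from_records. Below is a two-bar miniature with made-up OHLC values.

```python
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.patches import Rectangle

# Two fake OHLC bars; the column order matches what barplot() expects.
bars = pd.DataFrame.from_records(
    [(1.0, 1.5, 0.9, 1.4), (1.4, 1.6, 1.1, 1.2)],
    columns=["open", "high", "low", "close"])

fig, ax = plt.subplots()
for n, (o, h, l, c) in enumerate(bars.itertuples(index=False)):
    color = "blue" if c >= o else "red"
    ax.add_line(Line2D(xdata=(n, n), ydata=(l, h), color=color, linewidth=1))   # wick
    ax.add_patch(Rectangle(xy=(n - 0.3, min(o, c)), width=0.6,
                           height=abs(c - o), color=color, alpha=0.4))          # body
ax.autoscale_view()
```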
lzamparo/SdA_reduce
|
[
"1dc11f23b395f316df5f41448542d2d5c2e619ff"
] |
[
"utils/kernel_pca_pipeline.py"
] |
[
"\"\"\"\n==========\nKernel PCA gamma parameter CV pipeline\n==========\n\nUse a pipeline to find the best value for gamma parameter in the RBF kernel for kernel PCA.\n\nAdapted from: \n http://scikit-learn.org/stable/auto_examples/decomposition/plot_kernel_pca.html#example-decomposition-plot-kernel-pca-py\n http://scikit-learn.org/stable/auto_examples/grid_search_digits.html#example-grid-search-digits-py\n\n\"\"\"\nimport numpy as np\nimport pickle\n\nfrom optparse import OptionParser\nfrom tables import *\n\nfrom sklearn.decomposition import KernelPCA\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import v_measure_score, make_scorer, homogeneity_score\nfrom extract_datasets import extract_labeled_chunkrange\nfrom sklearn.preprocessing import scale\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.grid_search import GridSearchCV\n\n\nnp.random.seed(0)\n\n# parse commandline arguments\nop = OptionParser()\nop.add_option(\"--h5file\",\n dest=\"inputfile\", help=\"Read data input from this hdf5 file.\")\nop.add_option(\"--size\",\n dest=\"size\", type=\"int\", help=\"Extract the first size chunks of the data set and labels.\")\nop.add_option(\"--sample-size\",\n dest=\"samplesize\", type=\"int\", help=\"The max size of the samples\")\nop.add_option(\"--output\",\n dest=\"outfile\", help=\"Write the estimator model to this file.\")\nop.add_option(\"--num-jobs\",\n dest=\"jobs\", type=\"int\", help=\"Use these number of jobs in parallel for GridSearchCV\")\n\n(opts, args) = op.parse_args()\n\n###############################################################################\n# Load a training set from the given .h5 file\ndatafile = openFile(opts.inputfile, mode = \"r\", title = \"Data is stored here\")\n\n# Extract some of the dataset from the datafile\nX, labels = extract_labeled_chunkrange(datafile, opts.size)\n\n# Sample from the dataset\nwt_points = np.nonzero(labels[:,0] == 0)[0]\nfoci_points = np.nonzero(labels[:,0] == 1)[0]\nab_nuclei_points = np.nonzero(labels[:,0] == 2)[0]\n\nwt_data = X[wt_points,5:]\nfoci_data = X[foci_points,5:]\nab_nuclei_data = X[ab_nuclei_points,5:]\n\nwt_labels = labels[wt_points,0]\nfoci_labels = labels[foci_points,0]\nab_nuclei_labels = labels[ab_nuclei_points,0]\n\n# Figure out the sample sizes based on the shape of the *_labels arrays and the \n# sample size argument\n\nwt_samplesize = min(opts.samplesize,wt_data.shape[0])\nfoci_samplesize = min(opts.samplesize,foci_data.shape[0])\nab_nuclei_samplesize = min(opts.samplesize, ab_nuclei_data.shape[0]) \n\n# Use np.random.permutation(array)[0:size,:] to sample u at random\n# from the strata.\nwt_data_sample = np.random.permutation(wt_data)[0:wt_samplesize,:]\nfoci_data_sample = np.random.permutation(foci_data)[0:foci_samplesize,:]\nab_nuclei_sample = np.random.permutation(ab_nuclei_data)[0:ab_nuclei_samplesize,:]\n\nD = np.vstack((wt_data_sample,foci_data_sample,ab_nuclei_sample))\nD_labels = np.hstack((wt_labels[0:wt_samplesize],foci_labels[0:foci_samplesize],ab_nuclei_labels[0:ab_nuclei_samplesize]))\nD_scaled = scale(D)\n\ndatafile.close()\n\n##################\n\n# Set up the kPCA -> kmeans -> v-measure pipeline\nkpca = KernelPCA(n_components=30, kernel=\"rbf\")\nkmeans = KMeans(n_clusters=3)\npipe = Pipeline(steps=[('kpca', kpca), ('kmeans', kmeans)])\n\n# Range of parameters to consider for gamma in the RBF kernel for kPCA\ngammas = np.logspace(-3,3,num=40)\n\n# Make a scoring function for the pipeline\nv_measure_scorer = make_scorer(v_measure_score)\nhomogeneity_scorer = 
make_scorer(homogeneity_score)\n\n# Set the kpca model parameters to cycle over using '__' a prefix\nestimator = GridSearchCV(pipe, dict(kpca__gamma=gammas), scoring=homogeneity_scorer, n_jobs=opts.jobs)\nestimator.fit(D_scaled,D_labels)\n\n# Dump the estimator to a file\nf = file(opts.outfile, 'wb')\npickle.dump(estimator, f)\nf.close()\n\n# Report the best parameter values\nprint(\"Best estimator found on test data set:\")\nprint()\nprint(estimator.best_estimator_)\nprint()\nprint(\"Best parameters fond on test data set:\")\nprint()\nprint(estimator.best_params_)\nprint()\nprint(\"Grid scores on development set:\")\nprint()\nfor params, mean_score, scores in estimator.grid_scores_:\n print(\"%0.3f (+/-%0.03f) for %r\"\n % (mean_score, scores.std() / 2, params))\nprint()\n\n\n\n\n\n"
] |
[
[
"numpy.hstack",
"numpy.random.seed",
"sklearn.cluster.KMeans",
"numpy.logspace",
"numpy.nonzero",
"sklearn.pipeline.Pipeline",
"numpy.random.permutation",
"sklearn.metrics.make_scorer",
"sklearn.decomposition.KernelPCA",
"sklearn.preprocessing.scale",
"numpy.vstack"
]
] |
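
Editor's aside: the SdA_reduce script wires KernelPCA and KMeans into a Pipeline and scores clusterings with make_scorer (note it still imports the long-removed sklearn.grid_search module). The sketch reruns the same numpy/sklearn calls on random blobs instead of the HDF5 screening data and skips the gamma grid search itself.

```python
import numpy as np
from sklearn.cluster import KMeans
from sklearn.decomposition import KernelPCA
from sklearn.metrics import make_scorer, v_measure_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import scale

np.random.seed(0)
a = np.random.randn(30, 5) + 2.0                         # two synthetic strata
b = np.random.randn(30, 5) - 2.0
X = scale(np.vstack((a, b)))
y = np.hstack((np.zeros(30), np.ones(30)))
order = np.random.permutation(len(X))                    # shuffle, keeping labels aligned
X, y = X[order], y[order]

pipe = Pipeline(steps=[("kpca", KernelPCA(n_components=2, kernel="rbf")),
                       ("kmeans", KMeans(n_clusters=2, n_init=10))])
pipe.fit(X)
scorer = make_scorer(v_measure_score)
gammas = np.logspace(-3, 3, num=5)                       # the kind of grid the script sweeps
print(scorer(pipe, X, y), np.nonzero(y == 1)[0][:3], gammas)
```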
deanna-abrams/PyEIS
|
[
"ad8db40fe74dccba3ef2005064a7ad5219364814"
] |
[
"PyEIS/PyEIS.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 5 12:13:33 2018\n\n@author: Kristian B. Knudsen ([email protected] / [email protected])\n\"\"\"\nimport pandas as pd\nfrom bokeh.layouts import layout, gridplot\nfrom bokeh.models import Range1d, LinearAxis, Legend, Div\nfrom bokeh.palettes import small_palettes\nfrom bokeh.plotting import figure\nfrom lmfit import minimize, report_fit, fit_report\nimport seaborn as sns\nimport numpy as np\n\nfrom .circuits import CIRCUIT_DICT, leastsq_errorfunc\nfrom .PyEIS_Data_extraction import *\nfrom .PyEIS_Lin_KK import *\nfrom .PyEIS_Advanced_tools import *\n\npd.options.mode.chained_assignment = None\n\n# mpl.rc('mathtext', fontset='stixsans', default='regular')\n# mpl.rcParams.update({'axes.labelsize': 10})\n# mpl.rc('xtick', labelsize=10)\n# mpl.rc('ytick', labelsize=10)\n# mpl.rc('legend', fontsize=10)\n\n\n# Frequency generator\ndef freq_gen(f_start, f_stop, pts_decade=7):\n \"\"\"\n Frequency Generator with logspaced freqencies\n\n Inputs\n ----------\n f_start = frequency start [Hz]\n f_stop = frequency stop [Hz]\n pts_decade = Points/decade, default 7 [-]\n\n Output\n ----------\n [0] = frequency range [Hz]\n [1] = Angular frequency range [1/s]\n \"\"\"\n f_decades = np.log10(f_start) - np.log10(f_stop)\n f_range = np.logspace(np.log10(f_start), np.log10(f_stop),\n num=int(np.around(pts_decade*f_decades)), endpoint=True)\n w_range = 2 * np.pi * f_range\n return f_range, w_range\n\n\n# Fitting Class\nclass EIS_exp:\n \"\"\"\n This class is used to plot and/or analyze experimental impedance data. The class has three\n major functions:\n - EIS_plot()\n - Lin_KK()\n - EIS_fit()\n\n - EIS_plot() is used to plot experimental data with or without fit\n - Lin_KK() performs a linear Kramers-Kronig analysis of the experimental data set.\n - EIS_fit() performs complex non-linear least-squares fitting of the experimental data to an\n equivalent circuit\n\n Kristian B. Knudsen ([email protected] || [email protected])\n\n Inputs\n -----------\n - path: path of datafile(s) as a string\n - data: datafile(s) including extension, e.g. ['EIS_data1', 'EIS_data2']\n - cycle: Specific cycle numbers can be extracted using the cycle function. Default is\n 'none', which includes all cycle numbers.\n Specific cycles can be extracted using this parameter, insert cycle numbers in brackets,\n e.g. cycle number 1,4, and 6 are wanted. cycle=[1,4,6]\n - mask: ['high frequency' , 'low frequency'], if only a high- or low-frequency is desired\n use 'none' for the other, e.g. 
maks=[10**4,'none']\n \"\"\"\n def __init__(self, path, data, cycle='off', mask=['none','none']):\n self.df_raw0 = []\n self.cycleno = []\n for j in range(len(data)):\n if data[j].find(\".mpt\") != -1: #file is a .mpt file\n self.df_raw0.append(extract_mpt(path=path, EIS_name=data[j])) #reads all datafiles\n elif data[j].find(\".DTA\") != -1: #file is a .dta file\n self.df_raw0.append(extract_dta(path=path, EIS_name=data[j])) #reads all datafiles\n elif data[j].find(\".z\") != -1: #file is a .z file\n self.df_raw0.append(extract_solar(path=path, EIS_name=data[j])) #reads all datafiles\n else:\n print('Data file(s) could not be identified')\n\n self.cycleno.append(self.df_raw0[j].cycle_number)\n if np.min(self.cycleno[j]) <= np.max(self.cycleno[j-1]):\n if j > 0: #corrects cycle_number except for the first data file\n self.df_raw0[j].update({'cycle_number': self.cycleno[j]+np.max(self.cycleno[j-1])}) #corrects cycle number\n# else:\n# print('__init__ Error (#1)')\n\n #currently need to append a cycle_number coloumn to gamry files\n\n # adds individual dataframes into one\n self.df_raw = pd.concat([df for df in self.df_raw0], axis=0)\n # creates a new coloumn with the angular frequency\n self.df_raw = self.df_raw.assign(w=2*np.pi*self.df_raw.f)\n\n #Masking data to each cycle\n self.df_pre = []\n self.df_limited = []\n self.df_limited2 = []\n self.df = []\n if mask == ['none','none'] and cycle == 'off':\n for i in range(len(self.df_raw.cycle_number.unique())): #includes all data\n self.df.append(self.df_raw[self.df_raw.cycle_number == self.df_raw.cycle_number.unique()[i]])\n elif mask == ['none','none'] and cycle != 'off':\n for i in range(len(cycle)):\n self.df.append(self.df_raw[self.df_raw.cycle_number == cycle[i]]) #extracting dataframe for each cycle\n elif mask[0] != 'none' and mask[1] == 'none' and cycle == 'off':\n self.df_pre = self.df_raw.mask(self.df_raw.f > mask[0])\n self.df_pre.dropna(how='all', inplace=True)\n for i in range(len(self.df_pre.cycle_number.unique())): #Appending data based on cycle number\n self.df.append(self.df_pre[self.df_pre.cycle_number == self.df_pre.cycle_number.unique()[i]])\n elif mask[0] != 'none' and mask[1] == 'none' and cycle != 'off': # or [i for i, e in enumerate(mask) if e == 'none'] == [0]\n self.df_limited = self.df_raw.mask(self.df_raw.f > mask[0])\n for i in range(len(cycle)):\n self.df.append(self.df_limited[self.df_limited.cycle_number == cycle[i]])\n elif mask[0] == 'none' and mask[1] != 'none' and cycle == 'off':\n self.df_pre = self.df_raw.mask(self.df_raw.f < mask[1])\n self.df_pre.dropna(how='all', inplace=True)\n for i in range(len(self.df_raw.cycle_number.unique())): #includes all data\n self.df.append(self.df_pre[self.df_pre.cycle_number == self.df_pre.cycle_number.unique()[i]])\n elif mask[0] == 'none' and mask[1] != 'none' and cycle != 'off':\n self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])\n for i in range(len(cycle)):\n self.df.append(self.df_limited[self.df_limited.cycle_number == cycle[i]])\n elif mask[0] != 'none' and mask[1] != 'none' and cycle != 'off':\n self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])\n self.df_limited2 = self.df_limited.mask(self.df_raw.f > mask[0])\n for i in range(len(cycle)):\n self.df.append(self.df_limited[self.df_limited2.cycle_number == cycle[i]])\n elif mask[0] != 'none' and mask[1] != 'none' and cycle == 'off':\n self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])\n self.df_limited2 = self.df_limited.mask(self.df_raw.f > mask[0])\n for i in 
range(len(self.df_raw.cycle_number.unique())):\n self.df.append(self.df_limited[self.df_limited2.cycle_number == self.df_raw.cycle_number.unique()[i]])\n else:\n print('__init__ error (#2)')\n\n # other attrs\n self.fit = []\n self.circuit_fit = []\n self.init_fit = []\n self.fit_reports = []\n\n def Lin_KK(self,\n num_RC='auto',\n legend='on',\n plot='residuals',\n bode='off',\n nyq_xlim='none',\n nyq_ylim='none',\n weight_func='Boukamp',\n savefig='none'):\n \"\"\"\n Plots the Linear Kramers-Kronig (KK) Validity Test\n The script is based on Boukamp and Schōnleber et al.'s papers for fitting the resistances of multiple -(RC)- circuits\n to the data. A data quality analysis can hereby be made on the basis of the relative residuals\n\n Ref.:\n - Schōnleber, M. et al. Electrochimica Acta 131 (2014) 20-27\n - Boukamp, B.A. J. Electrochem. Soc., 142, 6, 1885-1894\n\n The function performs the KK analysis and as default the relative residuals in each subplot\n\n Note, that weigh_func should be equal to 'Boukamp'.\n\n Kristian B. Knudsen ([email protected] || [email protected])\n\n Optional Inputs\n -----------------\n - num_RC:\n - 'auto' applies an automatic algorithm developed by Schōnleber, M. et al. Electrochimica Acta 131 (2014) 20-27\n that ensures no under- or over-fitting occurs\n - can be hardwired by inserting any number (RC-elements/decade)\n\n - plot:\n - 'residuals' = plots the relative residuals in subplots correspoding to the cycle numbers picked\n - 'w_data' = plots the relative residuals with the experimental data, in Nyquist and bode plot if desired, see 'bode =' in description\n\n - nyq_xlim/nyq_xlim: Change the x/y-axis limits on nyquist plot, if not equal to 'none' state [min,max] value\n\n - legend:\n - 'on' = displays cycle number\n - 'potential' = displays average potential which the spectra was measured at\n - 'off' = off\n\n bode = Plots Bode Plot - options:\n 'on' = re, im vs. log(freq)\n 'log' = log(re, im) vs. log(freq)\n\n 're' = re vs. log(freq)\n 'log_re' = log(re) vs. log(freq)\n\n 'im' = im vs. log(freq)\n 'log_im' = log(im) vs. log(freq)\n \"\"\"\n if num_RC == 'auto':\n print('cycle || No. 
RC-elements || u')\n self.decade = []\n self.Rparam = []\n self.t_const = []\n self.Lin_KK_Fit = []\n self.R_names = []\n self.KK_R0 = []\n self.KK_R = []\n self.number_RC = []\n self.number_RC_sort = []\n\n self.KK_u = []\n self.KK_Rgreater = []\n self.KK_Rminor = []\n M = 2\n for i in range(len(self.df)):\n #determine the number of RC circuits based on the number of decades measured and num_RC\n self.decade.append(np.log10(np.max(self.df[i].f))-np.log10(np.min(self.df[i].f)))\n self.number_RC.append(M)\n self.number_RC_sort.append(M) # needed for self.KK_R\n #Creates intial guesses for R's\n self.Rparam.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[0])\n #Creates time constants values for self.number_RC -(RC)- circuits\n self.t_const.append(KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC[i])))\n\n self.Lin_KK_Fit.append(minimize(KK_errorfunc, self.Rparam[i], method='leastsq',\n args=(self.df[i].w.values,\n self.df[i].re.values,\n self.df[i].im.values,\n self.number_RC[i],\n weight_func,\n self.t_const[i]))) # maxfev=99\n self.R_names.append(KK_Rnam_val(re=self.df[i].re,\n re_start=self.df[i].re.idxmin(),\n num_RC=int(self.number_RC[i]))[1]) # creates R names\n for j in range(len(self.R_names[i])):\n self.KK_R0.append(self.Lin_KK_Fit[i].params.get(self.R_names[i][j]).value)\n self.number_RC_sort.insert(0,0) #needed for self.KK_R\n for i in range(len(self.df)):\n self.KK_R.append(self.KK_R0[int(np.cumsum(self.number_RC_sort)[i]):int(np.cumsum(self.number_RC_sort)[i+1])]) #assigns resistances from each spectra to their respective df\n self.KK_Rgreater.append(np.where(np.array(self.KK_R)[i] >= 0, np.array(self.KK_R)[i], 0) )\n self.KK_Rminor.append(np.where(np.array(self.KK_R)[i] < 0, np.array(self.KK_R)[i], 0) )\n self.KK_u.append(1-(np.abs(np.sum(self.KK_Rminor[i]))/np.abs(np.sum(self.KK_Rgreater[i]))))\n\n for i in range(len(self.df)):\n while self.KK_u[i] <= 0.75 or self.KK_u[i] >= 0.88:\n self.number_RC_sort0 = []\n self.KK_R_lim = []\n self.number_RC[i] = self.number_RC[i] + 1\n self.number_RC_sort0.append(self.number_RC)\n self.number_RC_sort = np.insert(self.number_RC_sort0, 0,0)\n #Creates intial guesses for R's\n self.Rparam[i] = KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[0]\n #Creates time constants values for self.number_RC -(RC)- circuits\n self.t_const[i] = KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC[i]))\n self.Lin_KK_Fit[i] = minimize(KK_errorfunc, self.Rparam[i], method='leastsq',\n args=(self.df[i].w.values,\n self.df[i].re.values,\n self.df[i].im.values,\n int(self.number_RC[i]),\n weight_func,\n self.t_const[i]) ) #maxfev=99\n self.R_names[i] = KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[1] #creates R names\n self.KK_R0 = np.delete(np.array(self.KK_R0), np.s_[0:len(self.KK_R0)])\n self.KK_R0 = []\n for q in range(len(self.df)):\n for j in range(len(self.R_names[q])):\n self.KK_R0.append(self.Lin_KK_Fit[q].params.get(self.R_names[q][j]).value)\n self.KK_R_lim = np.cumsum(self.number_RC_sort) #used for KK_R[i]\n\n #assigns resistances from each spectra to their respective df\n self.KK_R[i] = self.KK_R0[self.KK_R_lim[i]:self.KK_R_lim[i+1]]\n self.KK_Rgreater[i] = np.where(np.array(self.KK_R[i]) >= 0, np.array(self.KK_R[i]), 0)\n self.KK_Rminor[i] = np.where(np.array(self.KK_R[i]) < 0, np.array(self.KK_R[i]), 0)\n self.KK_u[i] = 1-(np.abs(np.sum(self.KK_Rminor[i]))/np.abs(np.sum(self.KK_Rgreater[i])))\n 
else:\n print('['+str(i+1)+']'+' '+str(self.number_RC[i]),\n ' '+str(np.round(self.KK_u[i],2)))\n\n elif num_RC != 'auto': #hardwired number of RC-elements/decade\n print('cycle || u')\n self.decade = []\n self.number_RC0 = []\n self.number_RC = []\n self.Rparam = []\n self.t_const = []\n self.Lin_KK_Fit = []\n self.R_names = []\n self.KK_R0 = []\n self.KK_R = []\n for i in range(len(self.df)):\n #determine the number of RC circuits based on the number of decades measured and num_RC\n self.decade.append(np.log10(np.max(self.df[i].f))-np.log10(np.min(self.df[i].f)))\n self.number_RC0.append(np.round(num_RC * self.decade[i]))\n self.number_RC.append(np.round(num_RC * self.decade[i])) #Creats the the number of -(RC)- circuits\n #Creates intial guesses for R's\n self.Rparam.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(),\n num_RC=int(self.number_RC0[i]))[0])\n #Creates time constants values for self.number_RC -(RC)- circuits\n self.t_const.append(KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC0[i])))\n self.Lin_KK_Fit.append(minimize(KK_errorfunc, self.Rparam[i], method='leastsq',\n args=(self.df[i].w.values,\n self.df[i].re.values,\n self.df[i].im.values,\n int(self.number_RC0[i]),\n weight_func,\n self.t_const[i]))) #maxfev=99\n #creates R names\n self.R_names.append(KK_Rnam_val(re=self.df[i].re,\n re_start=self.df[i].re.idxmin(),\n num_RC=int(self.number_RC0[i]))[1])\n for j in range(len(self.R_names[i])):\n self.KK_R0.append(self.Lin_KK_Fit[i].params.get(self.R_names[i][j]).value)\n self.number_RC0.insert(0,0)\n\n # print(report_fit(self.Lin_KK_Fit[i])) # prints fitting report\n\n self.KK_circuit_fit = []\n self.KK_rr_re = []\n self.KK_rr_im = []\n self.KK_Rgreater = []\n self.KK_Rminor = []\n self.KK_u = []\n for i in range(len(self.df)):\n #assigns resistances from each spectra to their respective df\n self.KK_R.append(self.KK_R0[int(np.cumsum(self.number_RC0)[i]):int(np.cumsum(self.number_RC0)[i+1])])\n self.KK_Rx = np.array(self.KK_R)\n self.KK_Rgreater.append(np.where(self.KK_Rx[i] >= 0, self.KK_Rx[i], 0) )\n self.KK_Rminor.append(np.where(self.KK_Rx[i] < 0, self.KK_Rx[i], 0) )\n self.KK_u.append(1-(np.abs(np.sum(self.KK_Rminor[i]))/np.abs(np.sum(self.KK_Rgreater[i])))) #currently gives incorrect values\n print('['+str(i+1)+']'+' '+str(np.round(self.KK_u[i],2)))\n else:\n print('num_RC incorrectly defined')\n\n self.KK_circuit_fit = []\n self.KK_rr_re = []\n self.KK_rr_im = []\n for i in range(len(self.df)):\n self.KK_circuit_fit.append(KK_RC(w=self.df[i].w,\n Rs=self.Lin_KK_Fit[i].params.get('Rs').value,\n R_values=self.KK_R[i],\n t_values=self.t_const[i],\n num_RC=int(self.number_RC[i])))\n\n # relative residuals for the real part\n self.KK_rr_re.append(residual_real(re=self.df[i].re,\n fit_re=self.KK_circuit_fit[i].real,\n fit_im=-self.KK_circuit_fit[i].imag))\n # relative residuals for the imag part\n self.KK_rr_im.append(residual_imag(im=self.df[i].im,\n fit_re=self.KK_circuit_fit[i].real,\n fit_im=-self.KK_circuit_fit[i].imag))\n\n ### Plotting Linear_kk results\n ##\n #\n ### Label functions\n self.label_re_1 = []\n self.label_im_1 = []\n self.label_cycleno = []\n if legend == 'on':\n for i in range(len(self.df)):\n self.label_re_1.append(\"Z' (#\"+str(i+1)+\")\")\n self.label_im_1.append(\"Z'' (#\"+str(i+1)+\")\")\n self.label_cycleno.append('#'+str(i+1))\n elif legend == 'potential':\n for i in range(len(self.df)):\n self.label_re_1.append(\"Z' (\"+str(np.round(np.average(self.df[i].E_avg), 2))+' V)')\n self.label_im_1.append(\"Z'' 
(\"+str(np.round(np.average(self.df[i].E_avg), 2))+' V)')\n self.label_cycleno.append(str(np.round(np.average(self.df[i].E_avg), 2))+' V')\n\n if plot == 'w_data':\n fig = figure(figsize=(6, 8), dpi=120, facecolor='w', edgecolor='k')\n fig.subplots_adjust(left=0.1, right=0.95, hspace=0.5, bottom=0.1, top=0.95)\n ax = fig.add_subplot(311, aspect='equal')\n ax1 = fig.add_subplot(312)\n ax2 = fig.add_subplot(313)\n\n colors = sns.color_palette(\"colorblind\", n_colors=len(self.df))\n colors_real = sns.color_palette(\"Blues\", n_colors=len(self.df)+2)\n colors_imag = sns.color_palette(\"Oranges\", n_colors=len(self.df)+2)\n\n ### Nyquist Plot\n for i in range(len(self.df)):\n ax.plot(self.df[i].re, self.df[i].im, marker='o', ms=4, lw=2, color=colors[i],\n ls='-', alpha=.7, label=self.label_cycleno[i])\n\n ### Bode Plot\n if bode == 'on':\n for i in range(len(self.df)):\n ax1.plot(np.log10(self.df[i].f), self.df[i].re, color=colors_real[i+1],\n marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_re_1[i])\n ax1.plot(np.log10(self.df[i].f), self.df[i].im, color=colors_imag[i+1],\n marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_im_1[i])\n ax1.set_xlabel(\"log(f) [Hz]\")\n ax1.set_ylabel(\"Z', -Z'' [$\\Omega$]\")\n if legend == 'on' or legend == 'potential':\n ax1.legend(loc='best', frameon=False)\n\n elif bode == 're':\n for i in range(len(self.df)):\n ax1.plot(np.log10(self.df[i].f), self.df[i].re, color=colors_real[i+1],\n marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])\n ax1.set_xlabel(\"log(f) [Hz]\")\n ax1.set_ylabel(\"Z' [$\\Omega$]\")\n if legend == 'on' or legend == 'potential':\n ax1.legend(loc='best', frameon=False)\n\n elif bode == 'log_re':\n for i in range(len(self.df)):\n ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].re), color=colors_real[i+1],\n marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])\n ax1.set_xlabel(\"log(f) [Hz]\")\n ax1.set_ylabel(\"log(Z') [$\\Omega$]\")\n if legend == 'on' or legend == 'potential':\n ax1.legend(loc='best', frameon=False)\n\n elif bode == 'im':\n for i in range(len(self.df)):\n ax1.plot(np.log10(self.df[i].f), self.df[i].im, color=colors_imag[i+1],\n marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])\n ax1.set_xlabel(\"log(f) [Hz]\")\n ax1.set_ylabel(\"-Z'' [$\\Omega$]\")\n if legend == 'on' or legend == 'potential':\n ax1.legend(loc='best', frameon=False)\n\n elif bode == 'log_im':\n for i in range(len(self.df)):\n ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].im), color=colors_imag[i+1],\n marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])\n ax1.set_xlabel(\"log(f) [Hz]\")\n ax1.set_ylabel(\"log(-Z'') [$\\Omega$]\")\n if legend == 'on' or legend == 'potential':\n ax1.legend(loc='best', frameon=False)\n\n elif bode == 'log':\n for i in range(len(self.df)):\n ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].re), color=colors_real[i+1],\n marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_re_1[i])\n ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].im), color=colors_imag[i+1],\n marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_im_1[i])\n ax1.set_xlabel(\"log(f) [Hz]\")\n ax1.set_ylabel(\"log(Z', -Z'') [$\\Omega$]\")\n if legend == 'on' or legend == 'potential':\n ax1.legend(loc='best', frameon=False)\n\n # Kramers-Kronig Relative Residuals\n for i in range(len(self.df)):\n ax2.plot(np.log10(self.df[i].f), self.KK_rr_re[i]*100, color=colors_real[i+1],\n marker='D', ls='--', ms=6, alpha=.7, 
label=self.label_re_1[i])\n ax2.plot(np.log10(self.df[i].f), self.KK_rr_im[i]*100, color=colors_imag[i+1],\n marker='s', ls='--', ms=6, alpha=.7, label=self.label_im_1[i])\n ax2.set_xlabel(\"log(f) [Hz]\")\n ax2.set_ylabel(\"$\\Delta$Z', $\\Delta$-Z'' [%]\")\n if legend == 'on' or legend == 'potential':\n ax2.legend(loc='best', frameon=False)\n ax2.axhline(0, ls='--', c='k', alpha=.5)\n\n # Setting ylims and write 'KK-Test' on RR subplot\n self.KK_rr_im_min = []\n self.KK_rr_im_max = []\n self.KK_rr_re_min = []\n self.KK_rr_re_max = []\n for i in range(len(self.df)):\n self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))\n self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))\n self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))\n self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))\n if np.min(self.KK_rr_im_min) > np.min(self.KK_rr_re_min):\n ax2.set_ylim(np.min(self.KK_rr_re_min)*100*1.5, np.max(np.abs(self.KK_rr_re_min))*100*1.5)\n ax2.annotate('Lin-KK',\n xy=[np.min(np.log10(self.df[0].f)), np.max(self.KK_rr_re_max)*100*.9],\n color='k', fontweight='bold')\n elif np.min(self.KK_rr_im_min) < np.min(self.KK_rr_re_min):\n ax2.set_ylim(np.min(self.KK_rr_im_min)*100*1.5, np.max(self.KK_rr_im_max)*100*1.5)\n ax2.annotate('Lin-KK',\n xy=[np.min(np.log10(self.df[0].f)), np.max(self.KK_rr_im_max)*100*.9],\n color='k', fontweight='bold')\n\n ### Figure specifics\n if legend == 'on' or legend == 'potential':\n ax.legend(loc='best', frameon=False)\n ax.set_xlabel(\"Z' [$\\Omega$]\")\n ax.set_ylabel(\"-Z'' [$\\Omega$]\")\n if nyq_xlim != 'none':\n ax.set_xlim(nyq_xlim[0], nyq_xlim[1])\n if nyq_ylim != 'none':\n ax.set_ylim(nyq_ylim[0], nyq_ylim[1])\n #Save Figure\n if savefig != 'none':\n fig.savefig(savefig)\n\n ### Illustrating residuals only\n\n elif plot == 'residuals':\n colors_real = sns.color_palette(\"Blues\", n_colors=9)\n colors_imag = sns.color_palette(\"Oranges\", n_colors=9)\n\n ### 1 Cycle\n if len(self.df) == 1:\n fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')\n fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)\n ax = fig.add_subplot(231)\n ax.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3],\n marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3],\n marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax.set_xlabel(\"log(f) [Hz]\")\n ax.set_ylabel(\"$\\Delta$Z', $\\Delta$-Z'' [%]\")\n if legend == 'on' or legend == 'potential':\n ax.legend(loc='best', frameon=False)\n ax.axhline(0, ls='--', c='k', alpha=.5)\n\n ### Setting ylims and write 'KK-Test' on RR subplot\n self.KK_rr_im_min = np.min(self.KK_rr_im)\n self.KK_rr_im_max = np.max(self.KK_rr_im)\n self.KK_rr_re_min = np.min(self.KK_rr_re)\n self.KK_rr_re_max = np.max(self.KK_rr_re)\n if self.KK_rr_re_max > self.KK_rr_im_max:\n self.KK_ymax = self.KK_rr_re_max\n else:\n self.KK_ymax = self.KK_rr_im_max\n if self.KK_rr_re_min < self.KK_rr_im_min:\n self.KK_ymin = self.KK_rr_re_min\n else:\n self.KK_ymin = self.KK_rr_im_min\n if np.abs(self.KK_ymin) > self.KK_ymax:\n ax.set_ylim(self.KK_ymin*100*1.5, np.abs(self.KK_ymin)*100*1.5)\n if legend == 'on':\n ax.annotate('Lin-KK, #1',\n xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin)*100*1.3],\n color='k', fontweight='bold')\n elif legend == 'potential':\n ax.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)',\n xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin)*100*1.3],\n 
color='k', fontweight='bold')\n elif np.abs(self.KK_ymin) < self.KK_ymax:\n ax.set_ylim(np.negative(self.KK_ymax)*100*1.5, np.abs(self.KK_ymax)*100*1.5)\n if legend == 'on':\n ax.annotate('Lin-KK, #1',\n xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax*100*1.3],\n color='k', fontweight='bold')\n elif legend == 'potential':\n ax.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)',\n xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax*100*1.3],\n color='k', fontweight='bold')\n\n #Save Figure\n if savefig != 'none':\n fig.savefig(savefig)\n\n ### 2 Cycles\n elif len(self.df) == 2:\n fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')\n fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)\n ax1 = fig.add_subplot(231)\n ax2 = fig.add_subplot(232)\n\n #cycle 1\n ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax1.set_xlabel(\"log(f) [Hz]\")\n ax1.set_ylabel(\"$\\Delta$Z', $\\Delta$-Z'' [%]\", )\n if legend == 'on' or legend == 'potential':\n ax1.legend(loc='best', frameon=False)\n ax1.axhline(0, ls='--', c='k', alpha=.5)\n\n #cycle 2\n ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax2.set_xlabel(\"log(f) [Hz]\")\n if legend == 'on' or legend == 'potential':\n ax2.legend(loc='best', frameon=False)\n ax2.axhline(0, ls='--', c='k', alpha=.5)\n\n ### Setting ylims and labeling plot with 'KK-Test' in RR subplot\n self.KK_rr_im_min = []\n self.KK_rr_im_max = []\n self.KK_rr_re_min = []\n self.KK_rr_re_max = []\n self.KK_ymin = []\n self.KK_ymax = []\n for i in range(len(self.df)):\n self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))\n self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))\n self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))\n self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))\n if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:\n self.KK_ymax.append(self.KK_rr_re_max[i])\n else:\n self.KK_ymax.append(self.KK_rr_im_max[i])\n if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:\n self.KK_ymin.append(self.KK_rr_re_min[i])\n else:\n self.KK_ymin.append(self.KK_rr_im_min[i])\n\n if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:\n ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)\n if legend == 'on':\n ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')\n elif legend == 'potential':\n ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:\n ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)\n if legend == 'on':\n ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.3], color='k', fontweight='bold')\n elif legend == 'potential':\n ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.3], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[1]) > 
self.KK_ymax[1]:\n ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)\n if legend == 'on':\n ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')\n elif legend == 'potential':\n ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:\n ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)\n if legend == 'on':\n ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')\n elif legend == 'potential':\n ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')\n #Save Figure\n if savefig != 'none':\n fig.savefig(savefig)\n\n ### 3 Cycles\n elif len(self.df) == 3:\n fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')\n fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)\n ax1 = fig.add_subplot(231)\n ax2 = fig.add_subplot(232)\n ax3 = fig.add_subplot(233)\n\n #cycle 1\n ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax1.set_xlabel(\"log(f) [Hz]\")\n ax1.set_ylabel(\"$\\Delta$Z', $\\Delta$-Z'' [%]\", )\n if legend == 'on' or legend == 'potential':\n ax1.legend(loc='best', frameon=False)\n ax1.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 2\n ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax2.set_xlabel(\"log(f) [Hz]\")\n if legend == 'on' or legend == 'potential':\n ax2.legend(loc='best', frameon=False)\n ax2.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 3\n ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax3.set_xlabel(\"log(f) [Hz]\")\n if legend == 'on' or legend == 'potential':\n ax3.legend(loc='best', frameon=False)\n ax3.axhline(0, ls='--', c='k', alpha=.5)\n\n ### Setting ylims and labeling plot with 'KK-Test' in RR subplot\n self.KK_rr_im_min = []\n self.KK_rr_im_max = []\n self.KK_rr_re_min = []\n self.KK_rr_re_max = []\n self.KK_ymin = []\n self.KK_ymax = []\n for i in range(len(self.df)):\n self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))\n self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))\n self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))\n self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))\n if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:\n self.KK_ymax.append(self.KK_rr_re_max[i])\n else:\n self.KK_ymax.append(self.KK_rr_im_max[i])\n if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:\n self.KK_ymin.append(self.KK_rr_re_min[i])\n else:\n self.KK_ymin.append(self.KK_rr_im_min[i])\n if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:\n 
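# ---------------------------------------------------------------------------
# Illustrative sketch (not taken from the PyEIS source): KK_rr_re / KK_rr_im
# plotted in these subplots are the relative residuals returned by PyEIS'
# residual_real() / residual_imag(), shown in percent after the *100 scaling.
# The sketch below assumes the usual Lin-KK normalization, i.e. the difference
# between measured and fitted parts divided by the modulus of the fitted
# impedance; the helper name rel_residuals and that normalization are
# assumptions for illustration, not a restatement of the PyEIS helpers.
import numpy as np

def rel_residuals(z_exp, z_fit):
    """Relative residuals of the real and imaginary parts, normalized by |Z_fit|."""
    z_exp = np.asarray(z_exp, dtype=complex)
    z_fit = np.asarray(z_fit, dtype=complex)
    mod = np.abs(z_fit)
    return (z_exp.real - z_fit.real) / mod, (z_exp.imag - z_fit.imag) / mod
# ---------------------------------------------------------------------------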
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)\n if legend == 'on':\n ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')\n elif legend == 'potential':\n ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:\n ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)\n if legend == 'on':\n ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.3], color='k', fontweight='bold')\n elif legend == 'potential':\n ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.3], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:\n ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)\n if legend == 'on':\n ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')\n elif legend == 'potential':\n ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:\n ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)\n if legend == 'on':\n ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')\n elif legend == 'potential':\n ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:\n ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)\n if legend == 'on':\n ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.3], color='k', fontweight='bold')\n elif legend == 'potential':\n ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.3], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:\n ax3.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)\n if legend == 'on':\n ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.3], color='k', fontweight='bold')\n elif legend == 'potential':\n ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.3], color='k', fontweight='bold')\n #Save Figure\n if savefig != 'none':\n fig.savefig(savefig)\n\n ### 4 Cycles\n elif len(self.df) == 4:\n fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')\n fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223)\n ax4 = fig.add_subplot(224)\n\n #cycle 1\n ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n 
ax1.set_ylabel(\"$\\Delta$Z', $\\Delta$-Z'' [%]\", )\n if legend == 'on' or legend == 'potential':\n ax1.legend(loc='best', frameon=False)\n ax1.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 2\n ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax2.set_xlabel(\"log(f) [Hz]\")\n if legend == 'on' or legend == 'potential':\n ax2.legend(loc='best', frameon=False)\n ax2.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 3\n ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax3.set_xlabel(\"log(f) [Hz]\")\n ax3.set_ylabel(\"$\\Delta$Z', $\\Delta$-Z'' [%]\", )\n if legend == 'on' or legend == 'potential':\n ax3.legend(loc='best', frameon=False)\n ax3.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 4\n ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax4.set_xlabel(\"log(f) [Hz]\")\n if legend == 'on' or legend == 'potential':\n ax4.legend(loc='best', frameon=False)\n ax4.axhline(0, ls='--', c='k', alpha=.5)\n\n ### Setting ylims and labeling plot with 'KK-Test' in RR subplot\n self.KK_rr_im_min = []\n self.KK_rr_im_max = []\n self.KK_rr_re_min = []\n self.KK_rr_re_max = []\n self.KK_ymin = []\n self.KK_ymax = []\n for i in range(len(self.df)):\n self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))\n self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))\n self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))\n self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))\n if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:\n self.KK_ymax.append(self.KK_rr_re_max[i])\n else:\n self.KK_ymax.append(self.KK_rr_im_max[i])\n if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:\n self.KK_ymin.append(self.KK_rr_re_min[i])\n else:\n self.KK_ymin.append(self.KK_rr_im_min[i])\n if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:\n ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)\n if legend == 'on':\n ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:\n ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)\n if legend == 'on':\n ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:\n ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)\n if legend == 'on':\n ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), 
np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:\n ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)\n if legend == 'on':\n ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:\n ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)\n if legend == 'on':\n ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:\n ax3.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)\n if legend == 'on':\n ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:\n ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)\n if legend == 'on':\n ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:\n ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)\n if legend == 'on':\n ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')\n\n #Save Figure\n if savefig != 'none':\n fig.savefig(savefig)\n\n ### 5 Cycles\n elif len(self.df) == 5:\n fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')\n fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)\n ax1 = fig.add_subplot(231)\n ax2 = fig.add_subplot(232)\n ax3 = fig.add_subplot(233)\n ax4 = fig.add_subplot(234)\n ax5 = fig.add_subplot(235)\n\n #cycle 1\n ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax1.set_ylabel(\"$\\Delta$Z', $\\Delta$-Z'' [%]\", )\n if legend == 'on' or legend == 'potential':\n ax1.legend(loc='best', 
frameon=False)\n ax1.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 2\n ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n if legend == 'on' or legend == 'potential':\n ax2.legend(loc='best', frameon=False)\n ax2.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 3\n ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax3.set_xlabel(\"log(f) [Hz]\")\n if legend == 'on' or legend == 'potential':\n ax3.legend(loc='best', frameon=False)\n ax3.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 4\n ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax4.set_ylabel(\"$\\Delta$Z', $\\Delta$-Z'' [%]\", )\n ax4.set_xlabel(\"log(f) [Hz]\")\n if legend == 'on' or legend == 'potential':\n ax4.legend(loc='best', frameon=False)\n ax4.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 5\n ax5.plot(np.log10(self.df[4].f), self.KK_rr_re[4]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax5.plot(np.log10(self.df[4].f), self.KK_rr_im[4]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax5.set_xlabel(\"log(f) [Hz]\")\n if legend == 'on' or legend == 'potential':\n ax5.legend(loc='best', frameon=False)\n ax5.axhline(0, ls='--', c='k', alpha=.5)\n\n ### Setting ylims and labeling plot with 'KK-Test' in RR subplot\n self.KK_rr_im_min = []\n self.KK_rr_im_max = []\n self.KK_rr_re_min = []\n self.KK_rr_re_max = []\n self.KK_ymin = []\n self.KK_ymax = []\n for i in range(len(self.df)):\n self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))\n self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))\n self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))\n self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))\n if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:\n self.KK_ymax.append(self.KK_rr_re_max[i])\n else:\n self.KK_ymax.append(self.KK_rr_im_max[i])\n if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:\n self.KK_ymin.append(self.KK_rr_re_min[i])\n else:\n self.KK_ymin.append(self.KK_rr_im_min[i])\n if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:\n ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)\n if legend == 'on':\n ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:\n ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)\n if legend == 'on':\n ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', 
xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:\n ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)\n if legend == 'on':\n ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:\n ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)\n if legend == 'on':\n ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:\n ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)\n if legend == 'on':\n ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:\n ax3.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)\n if legend == 'on':\n ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:\n ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)\n if legend == 'on':\n ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:\n ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)\n if legend == 'on':\n ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[4]) > self.KK_ymax[4]:\n ax5.set_ylim(self.KK_ymin[4]*100*1.5, np.abs(self.KK_ymin[4])*100*1.5)\n if legend == 'on':\n ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax5.annotate('Lin-KK ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[4]) < self.KK_ymax[4]:\n 
ax5.set_ylim(np.negative(self.KK_ymax[4])*100*1.5, np.abs(self.KK_ymax[4])*100*1.5)\n if legend == 'on':\n ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymax[4])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax5.annotate('Lin-KK, ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), self.KK_ymax[4]*100*1.2], color='k', fontweight='bold')\n #Save Figure\n if savefig != 'none':\n fig.savefig(savefig)\n\n ### 6 Cycles\n elif len(self.df) == 6:\n fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')\n fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)\n ax1 = fig.add_subplot(231)\n ax2 = fig.add_subplot(232)\n ax3 = fig.add_subplot(233)\n ax4 = fig.add_subplot(234)\n ax5 = fig.add_subplot(235)\n ax6 = fig.add_subplot(236)\n\n #cycle 1\n ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax1.set_ylabel(\"$\\Delta$Z', $\\Delta$-Z'' [%]\", fontsize=15)\n if legend == 'on' or legend == 'potential':\n ax1.legend(loc='best', frameon=False)\n ax1.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 2\n ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n if legend == 'on' or legend == 'potential':\n ax2.legend(loc='best', frameon=False)\n ax2.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 3\n ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n if legend == 'on' or legend == 'potential':\n ax3.legend(loc='best', frameon=False)\n ax3.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 4\n ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax4.set_xlabel(\"log(f) [Hz]\")\n ax4.set_ylabel(\"$\\Delta$Z', $\\Delta$-Z'' [%]\", fontsize=15)\n if legend == 'on' or legend == 'potential':\n ax4.legend(loc='best', frameon=False)\n ax4.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 5\n ax5.plot(np.log10(self.df[4].f), self.KK_rr_re[4]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax5.plot(np.log10(self.df[4].f), self.KK_rr_im[4]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax5.set_xlabel(\"log(f) [Hz]\")\n if legend == 'on' or legend == 'potential':\n ax5.legend(loc='best', frameon=False)\n ax5.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 6\n ax6.plot(np.log10(self.df[5].f), self.KK_rr_re[5]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax6.plot(np.log10(self.df[5].f), self.KK_rr_im[5]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax6.set_xlabel(\"log(f) [Hz]\")\n if legend == 'on' or legend == 
'potential':\n ax6.legend(loc='best', frameon=False)\n ax6.axhline(0, ls='--', c='k', alpha=.5)\n\n ### Setting ylims and labeling plot with 'KK-Test' in RR subplot\n self.KK_rr_im_min = []\n self.KK_rr_im_max = []\n self.KK_rr_re_min = []\n self.KK_rr_re_max = []\n self.KK_ymin = []\n self.KK_ymax = []\n for i in range(len(self.df)):\n self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))\n self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))\n self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))\n self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))\n if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:\n self.KK_ymax.append(self.KK_rr_re_max[i])\n else:\n self.KK_ymax.append(self.KK_rr_im_max[i])\n if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:\n self.KK_ymin.append(self.KK_rr_re_min[i])\n else:\n self.KK_ymin.append(self.KK_rr_im_min[i])\n if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:\n ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)\n if legend == 'on':\n ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:\n ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)\n if legend == 'on':\n ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:\n ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)\n if legend == 'on':\n ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:\n ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)\n if legend == 'on':\n ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:\n ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)\n if legend == 'on':\n ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:\n ax3.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)\n if legend == 'on':\n ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')\n 
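# ---------------------------------------------------------------------------
# Illustrative refactoring sketch (not taken from the PyEIS source): every
# per-cycle branch of this plot=='residuals' section repeats the same
# plot / symmetric-ylim / 'Lin-KK, #n' annotation pattern once per subplot.
# A loop over the axes expresses the pattern once.  The function name
# plot_kk_residuals and its assumed inputs (equal-length lists of frequency
# and residual arrays) are illustrative; this is a sketch of the pattern, not
# a drop-in replacement for the surrounding branches.
import numpy as np
import matplotlib.pyplot as plt

def plot_kk_residuals(freqs, rr_re, rr_im, ncols=3, pad=1.5):
    """One relative-residual subplot per cycle, with symmetric y-limits."""
    n = len(freqs)
    nrows = int(np.ceil(n / ncols))
    fig, axes = plt.subplots(nrows, ncols, figsize=(12, 2.5 * nrows), squeeze=False)
    for i, ax in enumerate(axes.ravel()):
        if i >= n:
            ax.set_visible(False)
            continue
        logf = np.log10(np.asarray(freqs[i], dtype=float))
        ax.plot(logf, np.asarray(rr_re[i]) * 100, 'D--', ms=6, alpha=.7, label=r"$\Delta$Z'")
        ax.plot(logf, np.asarray(rr_im[i]) * 100, 's--', ms=6, alpha=.7, label=r"$\Delta$-Z''")
        ax.axhline(0, ls='--', c='k', alpha=.5)
        lim = 100 * pad * max(np.max(np.abs(rr_re[i])), np.max(np.abs(rr_im[i])))
        ax.set_ylim(-lim, lim)
        ax.annotate('Lin-KK, #%d' % (i + 1), xy=(logf.min(), 0.8 * lim),
                    color='k', fontweight='bold')
        ax.set_xlabel("log(f) [Hz]")
        ax.set_ylabel(r"$\Delta$Z', $\Delta$-Z'' [%]")
        ax.legend(loc='best', frameon=False)
    return fig
# ---------------------------------------------------------------------------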
elif legend == 'potential':\n ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:\n ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)\n if legend == 'on':\n ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:\n ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)\n if legend == 'on':\n ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[4]) > self.KK_ymax[4]:\n ax5.set_ylim(self.KK_ymin[4]*100*1.5, np.abs(self.KK_ymin[4])*100*1.5)\n if legend == 'on':\n ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax5.annotate('Lin-KK ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[4]) < self.KK_ymax[4]:\n ax5.set_ylim(np.negative(self.KK_ymax[4])*100*1.5, np.abs(self.KK_ymax[4])*100*1.5)\n if legend == 'on':\n ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymax[4])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax5.annotate('Lin-KK, ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), self.KK_ymax[4]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[5]) > self.KK_ymax[5]:\n ax6.set_ylim(self.KK_ymin[5]*100*1.5, np.abs(self.KK_ymin[5])*100*1.5)\n if legend == 'on':\n ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax6.annotate('Lin-KK ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[5]) < self.KK_ymax[5]:\n ax6.set_ylim(np.negative(self.KK_ymax[5])*100*1.5, np.abs(self.KK_ymax[5])*100*1.5)\n if legend == 'on':\n ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymax[5])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax6.annotate('Lin-KK, ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), self.KK_ymax[5]*100*1.2], color='k', fontweight='bold')\n #Save Figure\n if savefig != 'none':\n fig.savefig(savefig)\n\n ### 7 Cycles\n elif len(self.df) == 7:\n fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')\n fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)\n ax1 = fig.add_subplot(331)\n ax2 = fig.add_subplot(332)\n ax3 = fig.add_subplot(333)\n ax4 = fig.add_subplot(334)\n ax5 = fig.add_subplot(335)\n ax6 = fig.add_subplot(336)\n ax7 = 
fig.add_subplot(337)\n\n #cycle 1\n ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax1.set_ylabel(\"$\\Delta$Z', $\\Delta$-Z'' [%]\", fontsize=15)\n if legend == 'on' or legend == 'potential':\n ax1.legend(loc='best', frameon=False)\n ax1.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 2\n ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n if legend == 'on' or legend == 'potential':\n ax2.legend(loc='best', frameon=False)\n ax2.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 3\n ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax3.set_xlabel(\"log(f) [Hz]\")\n if legend == 'on' or legend == 'potential':\n ax3.legend(loc='best', frameon=False)\n ax3.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 4\n ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax4.set_ylabel(\"$\\Delta$Z', $\\Delta$-Z'' [%]\", fontsize=15)\n if legend == 'on' or legend == 'potential':\n ax4.legend(loc='best', frameon=False)\n ax4.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 5\n ax5.plot(np.log10(self.df[4].f), self.KK_rr_re[4]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax5.plot(np.log10(self.df[4].f), self.KK_rr_im[4]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax5.set_xlabel(\"log(f) [Hz]\")\n if legend == 'on' or legend == 'potential':\n ax5.legend(loc='best', frameon=False)\n ax5.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 6\n ax6.plot(np.log10(self.df[5].f), self.KK_rr_re[5]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax6.plot(np.log10(self.df[5].f), self.KK_rr_im[5]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax6.set_xlabel(\"log(f) [Hz]\")\n if legend == 'on' or legend == 'potential':\n ax6.legend(loc='best', frameon=False)\n ax6.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 7\n ax7.plot(np.log10(self.df[6].f), self.KK_rr_re[6]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax7.plot(np.log10(self.df[6].f), self.KK_rr_im[6]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax7.set_xlabel(\"log(f) [Hz]\")\n ax7.set_ylabel(\"$\\Delta$Z', $\\Delta$-Z'' [%]\", fontsize=15)\n if legend == 'on' or legend == 'potential':\n ax7.legend(loc='best', frameon=False)\n ax7.axhline(0, ls='--', c='k', alpha=.5)\n\n ### Setting ylims and labeling plot with 'KK-Test' in RR subplot\n self.KK_rr_im_min = []\n self.KK_rr_im_max = []\n self.KK_rr_re_min = []\n self.KK_rr_re_max = []\n self.KK_ymin = []\n self.KK_ymax = []\n for i in 
range(len(self.df)):\n self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))\n self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))\n self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))\n self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))\n if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:\n self.KK_ymax.append(self.KK_rr_re_max[i])\n else:\n self.KK_ymax.append(self.KK_rr_im_max[i])\n if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:\n self.KK_ymin.append(self.KK_rr_re_min[i])\n else:\n self.KK_ymin.append(self.KK_rr_im_min[i])\n if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:\n ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)\n if legend == 'on':\n ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:\n ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)\n if legend == 'on':\n ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:\n ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)\n if legend == 'on':\n ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')\n elif legend == 'potential':\n ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:\n ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)\n if legend == 'on':\n ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:\n ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)\n if legend == 'on':\n ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:\n ax3.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)\n if legend == 'on':\n ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:\n ax4.set_ylim(self.KK_ymin[3]*100*1.5, 
np.abs(self.KK_ymin[3])*100*1.5)\n if legend == 'on':\n ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:\n ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)\n if legend == 'on':\n ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[4]) > self.KK_ymax[4]:\n ax5.set_ylim(self.KK_ymin[4]*100*1.5, np.abs(self.KK_ymin[4])*100*1.5)\n if legend == 'on':\n ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax5.annotate('Lin-KK ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[4]) < self.KK_ymax[4]:\n ax5.set_ylim(np.negative(self.KK_ymax[4])*100*1.5, np.abs(self.KK_ymax[4])*100*1.5)\n if legend == 'on':\n ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymax[4])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax5.annotate('Lin-KK, ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), self.KK_ymax[4]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[5]) > self.KK_ymax[5]:\n ax6.set_ylim(self.KK_ymin[5]*100*1.5, np.abs(self.KK_ymin[5])*100*1.5)\n if legend == 'on':\n ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax6.annotate('Lin-KK ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[5]) < self.KK_ymax[5]:\n ax6.set_ylim(np.negative(self.KK_ymax[5])*100*1.5, np.abs(self.KK_ymax[5])*100*1.5)\n if legend == 'on':\n ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymax[5])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax6.annotate('Lin-KK, ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), self.KK_ymax[5]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[6]) > self.KK_ymax[6]:\n ax7.set_ylim(self.KK_ymin[6]*100*1.5, np.abs(self.KK_ymin[6])*100*1.5)\n if legend == 'on':\n ax7.annotate('Lin-KK, #7', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymin[6])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax7.annotate('Lin-KK ('+str(np.round(np.average(self.df[6].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymin[6])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[6]) < self.KK_ymax[6]:\n ax7.set_ylim(np.negative(self.KK_ymax[6])*100*1.5, np.abs(self.KK_ymax[6])*100*1.5)\n if legend == 'on':\n ax7.annotate('Lin-KK, #7', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymax[6])*100*1.2], 
color='k', fontweight='bold')\n elif legend == 'potential':\n ax7.annotate('Lin-KK, ('+str(np.round(np.average(self.df[6].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[6].f)), self.KK_ymax[6]*100*1.2], color='k', fontweight='bold')\n #Save Figure\n if savefig != 'none':\n fig.savefig(savefig)\n\n ### 8 Cycles\n elif len(self.df) == 8:\n fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')\n fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)\n ax1 = fig.add_subplot(331)\n ax2 = fig.add_subplot(332)\n ax3 = fig.add_subplot(333)\n ax4 = fig.add_subplot(334)\n ax5 = fig.add_subplot(335)\n ax6 = fig.add_subplot(336)\n ax7 = fig.add_subplot(337)\n ax8 = fig.add_subplot(338)\n\n #cycle 1\n ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax1.set_ylabel(\"$\\Delta$Z', $\\Delta$-Z'' [%]\", fontsize=14)\n if legend == 'on' or legend == 'potential':\n ax1.legend(loc='best', frameon=False)\n ax1.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 2\n ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n if legend == 'on' or legend == 'potential':\n ax2.legend(loc='best', frameon=False)\n ax2.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 3\n ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n if legend == 'on' or legend == 'potential':\n ax3.legend(loc='best', frameon=False)\n ax3.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 4\n ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax4.set_ylabel(\"$\\Delta$Z', $\\Delta$-Z'' [%]\", fontsize=14)\n if legend == 'on' or legend == 'potential':\n ax4.legend(loc='best', frameon=False)\n ax4.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 5\n ax5.plot(np.log10(self.df[4].f), self.KK_rr_re[4]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax5.plot(np.log10(self.df[4].f), self.KK_rr_im[4]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n if legend == 'on' or legend == 'potential':\n ax5.legend(loc='best', frameon=False)\n ax5.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 6\n ax6.plot(np.log10(self.df[5].f), self.KK_rr_re[5]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax6.plot(np.log10(self.df[5].f), self.KK_rr_im[5]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax6.set_xlabel(\"log(f) [Hz]\")\n if legend == 'on' or legend == 'potential':\n ax6.legend(loc='best', frameon=False)\n ax6.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 7\n ax7.plot(np.log10(self.df[6].f), self.KK_rr_re[6]*100, color=colors_real[3], marker='D', ls='--', ms=6, 
alpha=.7, label=\"$\\Delta$Z'\")\n ax7.plot(np.log10(self.df[6].f), self.KK_rr_im[6]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax7.set_xlabel(\"log(f) [Hz]\")\n ax7.set_ylabel(\"$\\Delta$Z', $\\Delta$-Z'' [%]\", fontsize=14)\n if legend == 'on' or legend == 'potential':\n ax7.legend(loc='best', frameon=False)\n ax7.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 8\n ax8.plot(np.log10(self.df[7].f), self.KK_rr_re[7]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax8.plot(np.log10(self.df[7].f), self.KK_rr_im[7]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax8.set_xlabel(\"log(f) [Hz]\")\n if legend == 'on' or legend == 'potential':\n ax8.legend(loc='best', frameon=False)\n ax8.axhline(0, ls='--', c='k', alpha=.5)\n\n ### Setting ylims and labeling plot with 'KK-Test' in RR subplot\n self.KK_rr_im_min = []\n self.KK_rr_im_max = []\n self.KK_rr_re_min = []\n self.KK_rr_re_max = []\n self.KK_ymin = []\n self.KK_ymax = []\n for i in range(len(self.df)):\n self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))\n self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))\n self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))\n self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))\n if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:\n self.KK_ymax.append(self.KK_rr_re_max[i])\n else:\n self.KK_ymax.append(self.KK_rr_im_max[i])\n if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:\n self.KK_ymin.append(self.KK_rr_re_min[i])\n else:\n self.KK_ymin.append(self.KK_rr_im_min[i])\n if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:\n ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)\n if legend == 'on':\n ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:\n ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)\n if legend == 'on':\n ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:\n ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)\n if legend == 'on':\n ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')\n elif legend == 'potential':\n ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:\n ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)\n if legend == 'on':\n ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')\n if 
np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:\n ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)\n if legend == 'on':\n ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:\n ax3.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)\n if legend == 'on':\n ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:\n ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)\n if legend == 'on':\n ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:\n ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)\n if legend == 'on':\n ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[4]) > self.KK_ymax[4]:\n ax5.set_ylim(self.KK_ymin[4]*100*1.5, np.abs(self.KK_ymin[4])*100*1.5)\n if legend == 'on':\n ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax5.annotate('Lin-KK ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[4]) < self.KK_ymax[4]:\n ax5.set_ylim(np.negative(self.KK_ymax[4])*100*1.5, np.abs(self.KK_ymax[4])*100*1.5)\n if legend == 'on':\n ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymax[4])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax5.annotate('Lin-KK, ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), self.KK_ymax[4]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[5]) > self.KK_ymax[5]:\n ax6.set_ylim(self.KK_ymin[5]*100*1.5, np.abs(self.KK_ymin[5])*100*1.5)\n if legend == 'on':\n ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax6.annotate('Lin-KK ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[5]) < self.KK_ymax[5]:\n ax6.set_ylim(np.negative(self.KK_ymax[5])*100*1.5, np.abs(self.KK_ymax[5])*100*1.5)\n if legend == 'on':\n 
ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymax[5])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax6.annotate('Lin-KK, ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), self.KK_ymax[5]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[6]) > self.KK_ymax[6]:\n ax7.set_ylim(self.KK_ymin[6]*100*1.5, np.abs(self.KK_ymin[6])*100*1.5)\n if legend == 'on':\n ax7.annotate('Lin-KK, #7', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymin[6])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax7.annotate('Lin-KK ('+str(np.round(np.average(self.df[6].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymin[6])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[6]) < self.KK_ymax[6]:\n ax7.set_ylim(np.negative(self.KK_ymax[6])*100*1.5, np.abs(self.KK_ymax[6])*100*1.5)\n if legend == 'on':\n ax7.annotate('Lin-KK, #7', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymax[6])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax7.annotate('Lin-KK, ('+str(np.round(np.average(self.df[6].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[6].f)), self.KK_ymax[6]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[7]) > self.KK_ymax[7]:\n ax8.set_ylim(self.KK_ymin[7]*100*1.5, np.abs(self.KK_ymin[7])*100*1.5)\n if legend == 'on':\n ax8.annotate('Lin-KK, #8', xy=[np.min(np.log10(self.df[7].f)), np.abs(self.KK_ymin[7])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax8.annotate('Lin-KK ('+str(np.round(np.average(self.df[7].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[7].f)), np.abs(self.KK_ymin[7])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[7]) < self.KK_ymax[7]:\n ax8.set_ylim(np.negative(self.KK_ymax[7])*100*1.5, np.abs(self.KK_ymax[7])*100*1.5)\n if legend == 'on':\n ax8.annotate('Lin-KK, #8', xy=[np.min(np.log10(self.df[7].f)), np.abs(self.KK_ymax[7])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax8.annotate('Lin-KK, ('+str(np.round(np.average(self.df[7].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[7].f)), self.KK_ymax[7]*100*1.2], color='k', fontweight='bold')\n #Save Figure\n if savefig != 'none':\n fig.savefig(savefig)\n\n ### 9 Cycles\n elif len(self.df) == 9:\n fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')\n fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)\n ax1 = fig.add_subplot(331)\n ax2 = fig.add_subplot(332)\n ax3 = fig.add_subplot(333)\n ax4 = fig.add_subplot(334)\n ax5 = fig.add_subplot(335)\n ax6 = fig.add_subplot(336)\n ax7 = fig.add_subplot(337)\n ax8 = fig.add_subplot(338)\n ax9 = fig.add_subplot(339)\n\n #cycle 1\n ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax1.set_ylabel(\"$\\Delta$Z', $\\Delta$-Z'' [%]\", fontsize=15)\n if legend == 'on':\n ax1.legend(loc='best', frameon=False)\n ax1.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 2\n ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n 
if legend == 'on':\n ax2.legend(loc='best', frameon=False)\n ax2.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 3\n ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n if legend == 'on':\n ax3.legend(loc='best', frameon=False)\n ax3.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 4\n ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax4.set_ylabel(\"$\\Delta$Z', $\\Delta$-Z'' [%]\", fontsize=15)\n if legend == 'on':\n ax4.legend(loc='best', frameon=False)\n ax4.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 5\n ax5.plot(np.log10(self.df[4].f), self.KK_rr_re[4]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax5.plot(np.log10(self.df[4].f), self.KK_rr_im[4]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n if legend == 'on':\n ax5.legend(loc='best', frameon=False)\n ax5.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 6\n ax6.plot(np.log10(self.df[5].f), self.KK_rr_re[5]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax6.plot(np.log10(self.df[5].f), self.KK_rr_im[5]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n if legend == 'on':\n ax6.legend(loc='best', frameon=False)\n ax6.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 7\n ax7.plot(np.log10(self.df[6].f), self.KK_rr_re[6]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax7.plot(np.log10(self.df[6].f), self.KK_rr_im[6]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax7.set_ylabel(\"$\\Delta$Z', $\\Delta$-Z'' [%]\", fontsize=15)\n ax7.set_xlabel(\"log(f) [Hz]\")\n if legend == 'on':\n ax7.legend(loc='best', frameon=False)\n ax7.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 8\n ax8.plot(np.log10(self.df[7].f), self.KK_rr_re[7]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax8.plot(np.log10(self.df[7].f), self.KK_rr_im[7]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax8.set_xlabel(\"log(f) [Hz]\")\n if legend == 'on':\n ax8.legend(loc='best', frameon=False)\n ax8.axhline(0, ls='--', c='k', alpha=.5)\n\n # Cycle 9\n ax9.plot(np.log10(self.df[8].f), self.KK_rr_re[8]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label=\"$\\Delta$Z'\")\n ax9.plot(np.log10(self.df[8].f), self.KK_rr_im[8]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label=\"$\\Delta$-Z''\")\n ax9.set_xlabel(\"log(f) [Hz]\")\n if legend == 'on':\n ax9.legend(loc='best', frameon=False)\n ax9.axhline(0, ls='--', c='k', alpha=.5)\n\n ### Setting ylims and labeling plot with 'KK-Test' in RR subplot\n self.KK_rr_im_min = []\n self.KK_rr_im_max = []\n self.KK_rr_re_min = []\n self.KK_rr_re_max = []\n self.KK_ymin = []\n self.KK_ymax = []\n for i in range(len(self.df)):\n self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))\n self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))\n self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))\n 
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))\n if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:\n self.KK_ymax.append(self.KK_rr_re_max[i])\n else:\n self.KK_ymax.append(self.KK_rr_im_max[i])\n if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:\n self.KK_ymin.append(self.KK_rr_re_min[i])\n else:\n self.KK_ymin.append(self.KK_rr_im_min[i])\n if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:\n ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)\n if legend == 'on':\n ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:\n ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)\n if legend == 'on':\n ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:\n ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)\n if legend == 'on':\n ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')\n elif legend == 'potential':\n ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:\n ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)\n if legend == 'on':\n ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:\n ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)\n if legend == 'on':\n ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:\n ax3.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)\n if legend == 'on':\n ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:\n ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)\n if legend == 'on':\n ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')\n elif legend == 
'potential':\n ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:\n ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)\n if legend == 'on':\n ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[4]) > self.KK_ymax[4]:\n ax5.set_ylim(self.KK_ymin[4]*100*1.5, np.abs(self.KK_ymin[4])*100*1.5)\n if legend == 'on':\n ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax5.annotate('Lin-KK ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[4]) < self.KK_ymax[4]:\n ax5.set_ylim(np.negative(self.KK_ymax[4])*100*1.5, np.abs(self.KK_ymax[4])*100*1.5)\n if legend == 'on':\n ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymax[4])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax5.annotate('Lin-KK, ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), self.KK_ymax[4]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[5]) > self.KK_ymax[5]:\n ax6.set_ylim(self.KK_ymin[5]*100*1.5, np.abs(self.KK_ymin[5])*100*1.5)\n if legend == 'on':\n ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax6.annotate('Lin-KK ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[5]) < self.KK_ymax[5]:\n ax6.set_ylim(np.negative(self.KK_ymax[5])*100*1.5, np.abs(self.KK_ymax[5])*100*1.5)\n if legend == 'on':\n ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymax[5])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax6.annotate('Lin-KK, ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), self.KK_ymax[5]*100*1.2], color='k', fontweight='bold')\n if np.abs(self.KK_ymin[6]) > self.KK_ymax[6]:\n ax7.set_ylim(self.KK_ymin[6]*100*1.5, np.abs(self.KK_ymin[6])*100*1.5)\n if legend == 'on':\n ax7.annotate('Lin-KK, #7', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymin[6])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax7.annotate('Lin-KK ('+str(np.round(np.average(self.df[6].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymin[6])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[6]) < self.KK_ymax[6]:\n ax7.set_ylim(np.negative(self.KK_ymax[6])*100*1.5, np.abs(self.KK_ymax[6])*100*1.5)\n if legend == 'on':\n ax7.annotate('Lin-KK, #7', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymax[6])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax7.annotate('Lin-KK, ('+str(np.round(np.average(self.df[6].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[6].f)), self.KK_ymax[6]*100*1.2], 
color='k', fontweight='bold')\n if np.abs(self.KK_ymin[7]) > self.KK_ymax[7]:\n ax8.set_ylim(self.KK_ymin[7]*100*1.5, np.abs(self.KK_ymin[7])*100*1.5)\n if legend == 'on':\n ax8.annotate('Lin-KK, #8', xy=[np.min(np.log10(self.df[7].f)), np.abs(self.KK_ymin[7])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax8.annotate('Lin-KK ('+str(np.round(np.average(self.df[7].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[7].f)), np.abs(self.KK_ymin[7])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[7]) < self.KK_ymax[7]:\n ax8.set_ylim(np.negative(self.KK_ymax[7])*100*1.5, np.abs(self.KK_ymax[7])*100*1.5)\n if legend == 'on':\n ax8.annotate('Lin-KK, #8', xy=[np.min(np.log10(self.df[7].f)), np.abs(self.KK_ymax[7])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax8.annotate('Lin-KK, ('+str(np.round(np.average(self.df[7].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[7].f)), self.KK_ymax[7]*100*1.2], color='k', fontweight='bold')\n\n if np.abs(self.KK_ymin[8]) > self.KK_ymax[8]:\n ax9.set_ylim(self.KK_ymin[8]*100*1.5, np.abs(self.KK_ymin[8])*100*1.5)\n if legend == 'on':\n ax9.annotate('Lin-KK, #9', xy=[np.min(np.log10(self.df[8].f)), np.abs(self.KK_ymin[8])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax9.annotate('Lin-KK ('+str(np.round(np.average(self.df[8].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[8].f)), np.abs(self.KK_ymin[8])*100*1.2], color='k', fontweight='bold')\n elif np.abs(self.KK_ymin[8]) < self.KK_ymax[8]:\n ax9.set_ylim(np.negative(self.KK_ymax[8])*100*1.5, np.abs(self.KK_ymax[8])*100*1.5)\n if legend == 'on':\n ax9.annotate('Lin-KK, #9', xy=[np.min(np.log10(self.df[8].f)), np.abs(self.KK_ymax[8])*100*1.2], color='k', fontweight='bold')\n elif legend == 'potential':\n ax9.annotate('Lin-KK, ('+str(np.round(np.average(self.df[8].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[8].f)), self.KK_ymax[8]*100*1.2], color='k', fontweight='bold')\n\n #Save Figure\n if savefig != 'none':\n fig.savefig(savefig)\n else:\n print('Too many spectras, cannot plot all. Maximum spectras allowed = 9')\n\n def EIS_fit(self, params, circuit, init_only=False, weight_func='modulus', nan_policy='raise'):\n \"\"\"\n EIS_fit() fits experimental data to an equivalent circuit model using complex non-linear\n least-squares (CNLS) fitting procedure and allows for batch fitting.\n\n Kristian B. Knudsen ([email protected] / [email protected])\n\n Inputs\n ------------\n - circuit:\n Choose an equivalent circuits and defined circuit as a string. The available circuits\n are the keys of CIRCUIT_DICT in the circuits file.\n\n - weight_func\n The weight function to which the CNLS fitting is performed\n - modulus (default)\n - unity\n - proportional\n\n - nan_policy\n How to handle Nan or missing values in dataset\n - ‘raise’ = raise a value error (default)\n - ‘propagate’ = do nothing\n - ‘omit’ = drops missing data\n\n Returns\n ------------\n Returns the fitted impedance spectra(s) but also the fitted parameters that were used in\n the initial guesses. To call these use e.g. 
self.fit_Rs\n \"\"\"\n self.fit = []\n self.circuit_fit = []\n self.init_fit = []\n self.fit_reports = []\n if init_only:\n for param_name in params:\n params[param_name].vary = False\n for i in range(len(self.df)):\n self.fit.append(minimize(leastsq_errorfunc, params, method='leastsq',\n args=(self.df[i].w.values,\n self.df[i].re.values,\n self.df[i].im.values,\n circuit,\n weight_func), nan_policy=nan_policy, max_nfev=99999))\n self.fit_reports.append(fit_report(self.fit[i]))\n if not init_only:\n final_params = {p: self.fit[i].params[p].value for p in self.fit[i].params}\n self.circuit_fit.append(CIRCUIT_DICT[circuit](params=final_params, w=self.df[i].w))\n init_params = {p: self.fit[i].params[p].init_value for p in self.fit[i].params}\n self.init_fit.append(CIRCUIT_DICT[circuit](params=init_params, w=self.df[i].w))\n\n def EIS_plot(self, fitting=False, legend=False):\n \"\"\"\n Plots Experimental and fitted impedance data in three subplots:\n a) Nyquist, b) Bode, c) relative residuals between experimental and fit\n\n Kristian B. Knudsen ([email protected] / [email protected])\n\n Optional Inputs\n -----------------\n - legend:\n whether or not to show the legend\n\n - fitting:\n If EIS_fit() has been called. To plot experimental- and fitted data turn fitting on.\n Assumes only one data set!\n \"\"\"\n init_only = len(self.circuit_fit) == 0\n if fitting and not init_only:\n plot_width = 450\n plot_height = 360\n else:\n plot_width = 620\n plot_height = 500\n\n # Colors\n if len(self.df) == 1:\n colors = small_palettes['Category20'][10]\n light_colors = (colors[6],)\n dark_colors = (colors[0],)\n fit_colors = [colors[6], colors[1], colors[7]]\n bode_color_left = dark_colors[0]\n bode_color_right = light_colors[0]\n else:\n colors = small_palettes['Category20'][20]\n light_colors = colors[1::2]\n dark_colors = colors[::2]\n fit_colors = []\n bode_color_left = 'black'\n bode_color_right = colors[14]\n\n # nyquist + bode figure setup\n plot_n = figure(plot_width=plot_width, plot_height=plot_height,\n tooltips=[(\"x\", \"$x\"), (\"y\", \"$y\")],\n tools=\"pan,box_zoom,crosshair,hover,reset,save\",\n x_axis_label=\"Re(Z) [Ω]\", y_axis_label=\"-Im(Z) [Ω]\")\n plot_n.title.text = 'Nyquist'\n plot_n.title.align = 'center'\n\n plot_b = figure(plot_width=plot_width, plot_height=plot_height,\n tooltips=[(\"x\", \"$x\"), (\"y\", \"$y\")],\n tools=\"pan,box_zoom,crosshair,hover,reset,save\",\n x_axis_label=\"log(f) [Hz]\", y_axis_label=\"log(|Z|) [Ω]\")\n plot_b.title.text = 'Bode'\n plot_b.title.align = 'center'\n\n max_phase = np.max([np.max(df.Z_phase) for df in self.df])\n min_phase = np.min([np.min(df.Z_phase) for df in self.df])\n max_mag = np.max([np.max(np.log10(df.Z_mag)) for df in self.df])\n min_mag = np.min([np.min(np.log10(df.Z_mag)) for df in self.df])\n\n plot_b.y_range = Range1d(start=min_mag - 0.01, end=max_mag + 0.01)\n plot_b.extra_y_ranges = {'y2': Range1d(start=min_phase - 1, end=max_phase + 1)}\n plot_b.add_layout(LinearAxis(y_range_name=\"y2\", axis_label=\"Z phase [deg]\"), 'right')\n\n plot_b.yaxis[0].axis_line_color = bode_color_left\n plot_b.yaxis[0].major_label_text_color = bode_color_left\n plot_b.yaxis[0].major_tick_line_color = bode_color_left\n plot_b.yaxis[0].minor_tick_line_color = bode_color_left\n plot_b.yaxis[0].axis_label_text_color = bode_color_left\n\n plot_b.yaxis[1].axis_line_color = bode_color_right\n plot_b.yaxis[1].major_label_text_color = bode_color_right\n plot_b.yaxis[1].major_tick_line_color = bode_color_right\n plot_b.yaxis[1].minor_tick_line_color = 
bode_color_right\n plot_b.yaxis[1].axis_label_text_color = bode_color_right\n\n if fitting and not init_only:\n # nyquist\n plot_n_resid = figure(plot_width=plot_width, plot_height=100,\n x_range=plot_n.x_range, x_axis_label=\"Re(Z) [Ω]\")\n plot_n.xaxis.visible = False\n plot_n.min_border_bottom = 0\n plot_n_resid.ray(x=[np.mean(self.df[0].re)], y=[0], length=0, angle=0, color='black')\n plot_n_resid.ray(x=[np.mean(self.df[0].re)], y=[0], length=0, angle=np.pi, color='black')\n\n # bode\n plot_b_resid = figure(plot_width=plot_width, plot_height=100,\n x_range=plot_b.x_range, x_axis_label=\"log(f) [Hz]\")\n plot_b.xaxis.visible = False\n plot_b.min_border_bottom = 0\n plot_b_resid.ray(x=[np.mean(np.log10(self.df[0].f))], y=[0], length=0, angle=0, color='black')\n plot_b_resid.ray(x=[np.mean(np.log10(self.df[0].f))], y=[0], length=0, angle=np.pi, color='black')\n\n magnitude_0 = np.abs(self.circuit_fit[0].values)\n phase_0 = np.angle(self.circuit_fit[0].values, deg=True)\n resid_mag_0 = np.log10(magnitude_0) - np.log10(self.df[0].Z_mag)\n resid_phase_0 = phase_0 - self.df[0].Z_phase\n\n plot_b_resid.y_range = Range1d(start=np.min(resid_mag_0) - 0.002,\n end=np.max(resid_mag_0) + 0.002)\n plot_b_resid.extra_y_ranges = {'y2': Range1d(start=np.min(resid_phase_0) - .15,\n end=np.max(resid_phase_0) + .15)}\n plot_b_resid.add_layout(LinearAxis(y_range_name=\"y2\"), 'right')\n\n plot_b_resid.yaxis[0].axis_line_color = bode_color_left\n plot_b_resid.yaxis[0].major_label_text_color = bode_color_left\n plot_b_resid.yaxis[0].major_tick_line_color = bode_color_left\n plot_b_resid.yaxis[0].minor_tick_line_color = bode_color_left\n plot_b_resid.yaxis[0].axis_label_text_color = bode_color_left\n\n plot_b_resid.yaxis[1].axis_line_color = bode_color_right\n plot_b_resid.yaxis[1].major_label_text_color = bode_color_right\n plot_b_resid.yaxis[1].major_tick_line_color = bode_color_right\n plot_b_resid.yaxis[1].minor_tick_line_color = bode_color_right\n plot_b_resid.yaxis[1].axis_label_text_color = bode_color_right\n\n for i in range(len(self.df)):\n plot_n.scatter(self.df[i].re, self.df[i].im, color=dark_colors[i], legend_label=\"Data\")\n\n plot_b.scatter(np.log10(self.df[i].f), np.log10(self.df[i].Z_mag),\n color=dark_colors[i], legend_label=\"Data \")\n plot_b.scatter(np.log10(self.df[i].f), self.df[i].Z_phase,\n y_range_name='y2', color=light_colors[i], legend_label=\"Data\")\n\n if fitting:\n if not init_only:\n # fit\n plot_n.line(self.circuit_fit[i].values.real, -self.circuit_fit[i].values.imag,\n color=fit_colors[0], legend_label='Fit')\n magnitude = np.abs(self.circuit_fit[i].values)\n phase = np.angle(self.circuit_fit[i].values, deg=True)\n plot_b.line(np.log10(self.df[i].f), np.log10(magnitude),\n color=fit_colors[1], legend_label=\"Fit \")\n plot_b.line(np.log10(self.df[i].f), phase, color=fit_colors[2],\n y_range_name=\"y2\", legend_label='Fit')\n # residuals\n plot_n_resid.scatter(self.circuit_fit[i].values.real,\n -self.circuit_fit[i].values.imag - self.df[i].im,\n color=dark_colors[i])\n\n plot_b_resid.scatter(np.log10(self.df[i].f),\n np.log10(magnitude) - np.log10(self.df[i].Z_mag),\n color=dark_colors[i])\n plot_b_resid.scatter(np.log10(self.df[i].f), phase - self.df[i].Z_phase,\n color=light_colors[i], y_range_name='y2')\n\n # initial guess\n plot_n.line(self.init_fit[i].values.real, -self.init_fit[i].values.imag,\n color='black', line_dash='dashed', legend_label=\"Init\")\n magnitude_init = np.abs(self.init_fit[i].values)\n phase_init = np.angle(self.init_fit[i].values, deg=True)\n 
plot_b.line(np.log10(self.df[i].f), np.log10(magnitude_init),\n color=fit_colors[1], line_dash='dashed', legend_label=\"Init \")\n plot_b.line(np.log10(self.df[i].f), phase_init,\n color=fit_colors[2], y_range_name=\"y2\",\n line_dash='dashed', legend_label=\"Init\")\n\n if legend:\n plot_n.legend.location = 'bottom_right'\n plot_n.legend.click_policy = 'hide'\n plot_b.legend.location = 'center_left'\n plot_b.legend.click_policy = 'hide'\n else:\n plot_n.legend.visible = False\n plot_b.legend.visible = False\n\n if fitting:\n if not init_only:\n plot_n_resid.yaxis[0].ticker.desired_num_ticks = 3\n nyquist = gridplot([[plot_n], [plot_n_resid]], toolbar_location='right')\n\n plot_b_resid.yaxis[0].ticker.desired_num_ticks = 3\n plot_b_resid.yaxis[1].ticker.desired_num_ticks = 3\n bode = gridplot([[plot_b], [plot_b_resid]], toolbar_location='right')\n else:\n nyquist = plot_n\n bode = plot_b\n report = self.fit_reports[0].split(\"[[Correlations]]\")[0]\n fr = Div(text=f'<pre>{report}</pre>'.replace('\\n', \"<br />\"), width=400)\n l = layout([[nyquist, bode], [fr]], sizing_mode='scale_width')\n else:\n l = layout(plot_n, plot_b)\n return l\n\n def plot_Cdl_E(self, interface, BET_Area, m_electrode):\n \"\"\"\n Normalizing Q to C_eff or Cdl using either norm_nonFara_Q_C() or norm_Fara_Q_C()\n\n Refs:\n - G. J.Brug, A.L.G. vandenEeden, M.Sluyters-Rehbach, and J.H.Sluyters, J.Elec-\n troanal. Chem. Interfacial Electrochem., 176, 275 (1984)\n - B. Hirschorn, ElectrochimicaActa, 55, 6218 (2010)\n\n Kristian B. Knudsen ([email protected] || [email protected])\n\n Inputs\n ---------\n interface = faradaic / nonfaradaic\n BET_Area = BET surface area of electrode material [cm]\n m_electrode = mass of electrode [cm2/mg]\n\n Inputs\n ---------\n C_eff/C_dl = Normalized Double-layer capacitance measured from impedance [uF/cm2]\n (normalized by norm_nonFara_Q_C() or norm_Fara_Q_C())\n \"\"\"\n fig = figure(dpi=120, facecolor='w', edgecolor='w')\n fig.subplots_adjust(left=0.1, right=0.95, hspace=0.5, bottom=0.1, top=0.95)\n ax = fig.add_subplot(111)\n\n self.Q_norm = []\n self.E = []\n if interface == 'nonfaradaic':\n self.Q_norm = []\n for i in range(len(self.df)):\n self.Q_norm.append(norm_nonFara_Q_C(Rs=self.fit[i].params.get('Rs').value,\n Q=self.fit[i].params.get('Q').value,\n n=self.fit[i].params.get('n').value))\n self.E.append(np.average(self.df[i].E_avg))\n\n elif interface == 'faradaic':\n self.Q_norm = []\n for j in range(len(self.df)):\n self.Q_norm.append(norm_Fara_Q_C(Rs=self.fit[j].params.get('Rs').value,\n Rct=self.fit[j].params.get('R').value,\n n=self.fit[j].params.get('n').value,\n fs=self.fit[j].params.get('fs').value,\n L=self.fit[j].params.get('L').value))\n self.E.append(np.average(self.df[j].E_avg))\n\n self.C_norm = (np.array(self.Q_norm)/(m_electrode*BET_Area))*10**6 #'uF/cm2'\n ax.plot(self.E, self.C_norm, 'o--', label='C$_{dl}$')\n ax.set_xlabel('Voltage [V]')\n ax.set_ylabel('C$_{dl}$ [$\\mu$F/cm$^2$]')\n\n\nclass EIS_sim:\n \"\"\"\n Simulates and plots Electrochemical Impedance Spectroscopy based-on build-in equivalent cirucit models\n\n Kristian B. Knudsen ([email protected] || [email protected])\n\n Implemented circuits can be found in CIRCUIT_DICT in the circuits file\n\n Inputs\n --------\n - nyq_xlim/nyq_xlim:\n x/y-axis on nyquist plot, if not equal to 'none' state [min,max] value\n\n - bode: Plots following Bode plots\n - 'off'\n - 'on' = re, im vs. log(freq)\n - 'log' = log(re, im) vs. log(freq)\n\n - 're' = re vs. log(freq)\n - 'log_re' = log(re) vs. 
log(freq)\n\n - 'im' = im vs. log(freq)\n - 'log_im' = log(im) vs. log(freq)\n \"\"\"\n def __init__(self, circuit, frange, bode='off', nyq_xlim='none', nyq_ylim='none', legend='on', savefig='none'):\n self.f = frange\n self.w = 2*np.pi*frange\n self.re = circuit.real\n self.im = -circuit.imag\n\n if bode == 'off':\n fig = figure(dpi=120, facecolor='w', edgecolor='w')\n fig.subplots_adjust(left=0.1, right=0.95, hspace=0.5, bottom=0.1, top=0.95)\n ax = fig.add_subplot(111, aspect='equal')\n\n elif bode in ['on', 'log', 're', 'log_re', 'im', 'log_im', 'log']:\n fig = figure(figsize=(6, 4.5), dpi=120, facecolor='w', edgecolor='w')\n fig.subplots_adjust(left=0.1, right=0.95, hspace=0.5, bottom=0.1, top=0.95)\n ax = fig.add_subplot(211, aspect='equal')\n ax1 = fig.add_subplot(212)\n\n colors = sns.color_palette(\"colorblind\", n_colors=1)\n colors_real = sns.color_palette(\"Blues\", n_colors=1)\n colors_imag = sns.color_palette(\"Oranges\", n_colors=1)\n\n ### Nyquist Plot\n ax.plot(self.re, self.im, color=colors[0], marker='o', ms=4, lw=2, ls='-', label='Sim')\n\n ### Bode Plot\n if bode == 'on':\n ax1.plot(np.log10(self.f), self.re, color=colors_real[0], marker='D', ms=3, lw=2.25, ls='-', label=\"Z'\")\n ax1.plot(np.log10(self.f), self.im, color=colors_imag[0], marker='s', ms=3, lw=2.25, ls='-', label=\"-Z''\")\n ax1.set_xlabel(\"log(f) [Hz]\")\n ax1.set_ylabel(\"Z', -Z'' [$\\Omega$]\")\n if legend == 'on':\n ax1.legend(loc='best', frameon=False)\n\n elif bode == 're':\n ax1.plot(np.log10(self.f), self.re, color=colors_real[0], marker='D', ms=3, lw=2.25, ls='-', label=\"Z'\")\n ax1.set_xlabel(\"log(f) [Hz]\")\n ax1.set_ylabel(\"Z' [$\\Omega$]\")\n if legend == 'on':\n ax1.legend(loc='best', frameon=False)\n\n elif bode == 'log_re':\n ax1.plot(np.log10(self.f), np.log10(self.re), color=colors_real[0], marker='D', ms=3, lw=2.25, ls='-', label=\"Z''\")\n ax1.set_xlabel(\"log(f) [Hz]\")\n ax1.set_ylabel(\"log(Z') [$\\Omega$]\")\n if legend == 'on':\n ax1.legend(loc='best', frameon=False)\n\n elif bode=='im':\n ax1.plot(np.log10(self.f), self.im, color=colors_imag[0], marker='s', ms=3, lw=2.25, ls='-', label=\"-Z''\")\n ax1.set_xlabel(\"log(f) [Hz]\")\n ax1.set_ylabel(\"-Z'' [$\\Omega$]\")\n if legend == 'on':\n ax1.legend(loc='best', frameon=False)\n\n elif bode=='log_im':\n ax1.plot(np.log10(self.f), np.log10(self.im), color=colors_imag[0], marker='s', ms=3, lw=2.25, ls='-', label=\"-Z''\")\n ax1.set_xlabel(\"log(f) [Hz]\")\n ax1.set_ylabel(\"log(-Z'') [$\\Omega$]\")\n if legend == 'on':\n ax1.legend(loc='best', frameon=False)\n\n elif bode == 'log':\n ax1.plot(np.log10(self.f), np.log10(self.re), color=colors_real[0], marker='D', ms=3, lw=2.25, ls='-', label=\"Z''\")\n ax1.plot(np.log10(self.f), np.log10(self.im), color=colors_imag[0], marker='s', ms=3, lw=2.25, ls='-', label=\"-Z''\")\n ax1.set_xlabel(\"log(f) [Hz]\")\n ax1.set_ylabel(\"log(Z', -Z'') [$\\Omega$]\")\n if legend == 'on':\n ax1.legend(loc='best', frameon=False)\n\n ### Figure specifics\n if legend == 'on':\n ax.legend(loc='best', frameon=False)\n ax.set_xlabel(\"Z' [$\\Omega$]\")\n ax.set_ylabel(\"-Z'' [$\\Omega$]\")\n if nyq_xlim != 'none':\n ax.set_xlim(nyq_xlim[0], nyq_xlim[1])\n if nyq_ylim != 'none':\n ax.set_ylim(nyq_ylim[0], nyq_ylim[1])\n\n #Save Figure\n if savefig != 'none':\n fig.savefig(savefig) #saves figure if fix text is given\n\n\n def EIS_sim_fit(self,\n params,\n circuit,\n weight_func='modulus',\n nan_policy='raise',\n bode='on',\n nyq_xlim='none',\n nyq_ylim='none',\n legend='on',\n savefig='none'):\n \"\"\"\n 
This function fits simulations with a selected circuit. This function is mainly used to\n test fitting functions prior to being used on experimental data\n\n Kristian B. Knudsen ([email protected] / [email protected])\n\n Inputs\n ------------\n - Circuit: Equivlaent circuit models are defined in the CIRCUIT_DICT of the circuits file\n\n - weight_func = Weight function, Three options:\n - modulus (default)\n - unity\n - proportional\n\n - nyq_xlim/nyq_xlim: x/y-axis on nyquist plot, if not equal to 'none' state [min,max] value\n\n - legend: Display legend\n Turn 'on', 'off'\n\n - bode = Plots Bode Plot - options:\n 'on' = re, im vs. log(freq)\n 'log' = log(re, im) vs. log(freq)\n\n 're' = re vs. log(freq)\n 'log_re' = log(re) vs. log(freq)\n\n 'im' = im vs. log(freq)\n 'log_im' = log(im) vs. log(freq)\n\n Returns\n ------------\n The fitted impedance spectra(s) but also the fitted parameters that were used in the initial\n guesses. To call these use e.g. self.fit_Rs\n \"\"\"\n self.Fit = minimize(leastsq_errorfunc, params, method='leastsq',\n args=(self.w, self.re, self.im, circuit, weight_func),\n max_nfev=99999, nan_policy=nan_policy)\n print(report_fit(self.Fit))\n\n if circuit in list(CIRCUIT_DICT.keys()):\n self.circuit_fit = CIRCUIT_DICT[circuit](params=self.Fit.params, w=self.w)\n else:\n raise ValueError(f'circuit {circuit} is not a valid option')\n\n fig = figure(figsize=(6, 4.5), dpi=120, facecolor='w', edgecolor='k')\n fig.subplots_adjust(left=0.1, right=0.95, hspace=0.5, bottom=0.1, top=0.95)\n ax = fig.add_subplot(211, aspect='equal')\n ax1 = fig.add_subplot(212)\n\n colors = sns.color_palette(\"colorblind\", n_colors=1)\n colors_real = sns.color_palette(\"Blues\", n_colors=1)\n colors_imag = sns.color_palette(\"Oranges\", n_colors=1)\n\n ### Nyquist Plot\n ax.plot(self.re, self.im, color=colors[0], marker='o', ms=4, lw=2, ls='-', label='Sim')\n ax.plot(self.circuit_fit.real, -self.circuit_fit.imag, lw=0, marker='o', ms=8, mec='r', mew=1, mfc='none', label='Fit')\n\n ### Bode Plot\n if bode=='on':\n ax1.plot(np.log10(self.f), self.re, color=colors_real[0], marker='D', ms=3, lw=2.25, ls='-', label=\"Z'\")\n ax1.plot(np.log10(self.f), self.im, color=colors_imag[0], marker='s', ms=3, lw=2.25, ls='-', label=\"-Z''\")\n ax1.plot(np.log10(self.f), self.circuit_fit.real, lw=0, marker='D', ms=8, mec='r', mew=1, mfc='none', label='Fit')\n ax1.plot(np.log10(self.f), -self.circuit_fit.imag, lw=0, marker='s', ms=8, mec='r', mew=1, mfc='none')\n ax1.set_xlabel(\"log(f) [Hz]\")\n ax1.set_ylabel(\"Z', -Z'' [$\\Omega$]\")\n if legend == 'on':\n ax1.legend(loc='best', frameon=False)\n\n elif bode == 're':\n ax1.plot(np.log10(self.f), self.re, color=colors_real[0], marker='D', ms=3, lw=2.25, ls='-', label=\"Z'\")\n ax1.plot(np.log10(self.f), self.circuit_fit.real, lw=0, marker='D', ms=8, mec='r', mew=1, mfc='none', label='Fit')\n ax1.set_xlabel(\"log(f) [Hz]\")\n ax1.set_ylabel(\"Z' [$\\Omega$]\")\n if legend == 'on':\n ax1.legend(loc='best', frameon=False)\n\n elif bode == 'log_re':\n ax1.plot(np.log10(self.f), np.log10(self.re), color=colors_real[0], marker='D', ms=3, lw=2.25, ls='-', label=\"Z''\")\n ax1.plot(np.log10(self.f), np.log10(self.circuit_fit.real), lw=0, marker='D', ms=8, mec='r', mew=1, mfc='none', label='Fit')\n ax1.set_xlabel(\"log(f) [Hz]\")\n ax1.set_ylabel(\"log(Z') [$\\Omega$]\")\n if legend == 'on':\n ax1.legend(loc='best', frameon=False)\n\n elif bode=='im':\n ax1.plot(np.log10(self.f), self.im, color=colors_imag[0], marker='s', ms=3, lw=2.25, ls='-', label=\"-Z''\")\n 
ax1.plot(np.log10(self.f), -self.circuit_fit.imag, lw=0, marker='s', ms=8, mec='r', mew=1, mfc='none', label='Fit')\n ax1.set_xlabel(\"log(f) [Hz]\")\n ax1.set_ylabel(\"-Z'' [$\\Omega$]\")\n if legend == 'on':\n ax1.legend(loc='best', frameon=False)\n\n elif bode=='log_im':\n ax1.plot(np.log10(self.f), np.log10(self.im), color=colors_imag[0], marker='s', ms=3, lw=2.25, ls='-', label=\"-Z''\")\n ax1.plot(np.log10(self.f), np.log10(-self.circuit_fit.imag), lw=0, marker='s', ms=8, mec='r', mew=1, mfc='none', label='Fit')\n ax1.set_xlabel(\"log(f) [Hz]\")\n ax1.set_ylabel(\"log(-Z'') [$\\Omega$]\")\n if legend == 'on':\n ax1.legend(loc='best', frameon=False)\n\n elif bode == 'log':\n ax1.plot(np.log10(self.f), np.log10(self.re), color=colors_real[0], marker='D', ms=3, lw=2.25, ls='-', label=\"Z''\")\n ax1.plot(np.log10(self.f), np.log10(self.im), color=colors_imag[0], marker='s', ms=3, lw=2.25, ls='-', label=\"-Z''\")\n ax1.plot(np.log10(self.f), np.log10(self.circuit_fit.real), lw=0, marker='D', ms=8, mec='r', mew=1, mfc='none', label='Fit')\n ax1.plot(np.log10(self.f), np.log10(-self.circuit_fit.imag), lw=0, marker='s', ms=8, mec='r', mew=1, mfc='none')\n ax1.set_xlabel(\"log(f) [Hz]\")\n ax1.set_ylabel(\"log(Z', -Z'') [$\\Omega$]\")\n if legend == 'on':\n ax1.legend(loc='best', frameon=False)\n\n ### Figure specifics\n if legend == 'on':\n ax.legend(loc='best', frameon=False)\n ax.set_xlabel(\"Z' [$\\Omega$]\")\n ax.set_ylabel(\"-Z'' [$\\Omega$]\")\n\n if nyq_xlim != 'none':\n ax.set_xlim(nyq_xlim[0], nyq_xlim[1])\n if nyq_ylim != 'none':\n ax.set_ylim(nyq_ylim[0], nyq_ylim[1])\n\n #Save Figure\n if savefig != 'none':\n fig.savefig(savefig) #saves figure if fix text is given\n"
] |
[
[
"pandas.concat",
"numpy.abs",
"numpy.min",
"numpy.around",
"numpy.cumsum",
"numpy.round",
"numpy.max",
"numpy.log10",
"numpy.mean",
"numpy.insert",
"numpy.average",
"numpy.negative",
"numpy.angle",
"numpy.array",
"numpy.where",
"numpy.sum"
]
] |
kellerberrin/OSM-QSAR
|
[
"3cdc411ee3f9a3cea178898171e0a57fb5282e40"
] |
[
"OSMRegression.py"
] |
[
"# MIT License\n#\n# Copyright (c) 2017 \n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n#\n#\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nimport time\nimport math\n\nimport scipy.stats as st\nfrom sklearn.metrics import r2_score\n\nfrom OSMBase import OSMBaseModel\n\n\n# ============================================================================\n# The Regression Results Presentation Object.\n# ============================================================================\n\n\nclass OSMRegression(OSMBaseModel):\n def __init__(self, args, log):\n super(OSMRegression, self).__init__(args, log)\n\n # Shallow copies of the runtime environment.\n self.log = log\n self.args = args\n\n #####################################################################################\n #\n # Virtual member functions called from OSMBase\n #\n #####################################################################################\n\n def model_is_regression(self):\n return True\n\n def model_classification_results(self):\n self.train_predictions = self.model_prediction(self.data.training()) # Returns a dict. 
with \"prediction\" and \"actual\"\n self.train_stats = self.model_accuracy(self.train_predictions) # Returns a dictionary of accuracy tests.\n\n self.test_predictions = self.model_prediction(self.data.testing())\n self.test_stats = self.model_accuracy(self.test_predictions)\n # Send statistics to the console and log file.\n self.model_log_statistics()\n # Generate graphics (only if the virtual function defined at model level).\n self.model_graphics()\n # Append statistics to the stats files.\n self.model_write_statistics()\n\n def model_log_statistics(self):\n self.log_train_statistics(self.data.training(), self.train_stats, self.train_predictions)\n self.log_test_statistics(self.data.testing(), self.test_stats, self.test_predictions)\n\n def model_write_statistics(self):\n self.write_statistics(self.data.training(), self.train_stats, self.train_predictions, self.args.trainDirectory)\n self.write_statistics(self.data.testing(), self.test_stats, self.test_predictions, self.args.testDirectory)\n\n def model_accuracy(self, predictions):\n predict = predictions[\"prediction\"]\n actual = predictions[\"actual\"]\n\n MUE = 0\n RMSE = 0\n for i in range(len(predict)):\n diff = abs(predict[i] - actual[i])\n MUE += diff\n RMSE = diff * diff\n\n MUE = MUE / len(predict)\n RMSE = RMSE / len(predict)\n RMSE = math.sqrt(RMSE)\n\n # R^2\n\n R2 = r2_score(actual, predict)\n\n # Sort rankings.\n\n predict_ranks = st.rankdata(predict, method='average')\n actual_ranks = st.rankdata(actual, method='average')\n\n # generate Kendel rank correlation coefficient\n\n kendall = {}\n tau, p_value = st.kendalltau(actual_ranks, predict_ranks)\n kendall[\"tau\"] = tau\n kendall[\"p-value\"] = p_value\n spearman = {}\n rho, p_value = st.spearmanr(actual_ranks, predict_ranks)\n spearman[\"rho\"] = rho\n spearman[\"p-value\"] = p_value\n\n # Return the model analysis statistics in a dictionary.\n return {\"MUE\": MUE, \"RMSE\": RMSE, \"predict_ranks\": predict_ranks,\n \"actual_ranks\": actual_ranks, \"kendall\": kendall, \"spearman\": spearman, \"R2\" : R2}\n\n\n def model_prediction_records(self, data, predictions, statistics):\n\n prediction_list = []\n for idx in range(len(data.get_field(\"ID\"))):\n prediction_record = []\n prediction_record.append(data.get_field(\"ID\")[idx])\n prediction_record.append(statistics[\"actual_ranks\"][idx])\n prediction_record.append(statistics[\"predict_ranks\"][idx])\n prediction_record.append(predictions[\"actual\"][idx])\n prediction_record.append(predictions[\"prediction\"][idx])\n prediction_record.append(data.get_field(\"SMILE\")[idx])\n\n prediction_list.append(prediction_record)\n\n # Sort by actual ranking.\n sorted_predict_list= sorted(prediction_list, key=lambda predict_record: predict_record[1])\n\n return sorted_predict_list\n\n\n #####################################################################################\n #\n # Local member functions\n #\n #####################################################################################\n\n def log_train_statistics(self, train, statistics, predictions):\n\n self.log.info(\"Training Compounds R-squared (R^2): %f\", statistics[\"R2\"])\n self.log.info(\"Training Compounds Mean Unsigned Error (MUE): %f\", self.train_stats[\"MUE\"])\n self.log.info(\"Training Compounds RMS Error: %f\", self.train_stats[\"RMSE\"])\n\n # Display the classification results and write to the log file.\n def log_test_statistics(self, data, statistics, predictions):\n \"\"\"Display all the calculated statistics for each model; run\"\"\"\n\n 
independent_list = []\n for var in self.model_arguments()[\"INDEPENDENT\"]:\n independent_list.append(var[\"VARIABLE\"])\n dependent_var = self.model_arguments()[\"DEPENDENT\"][\"VARIABLE\"]\n\n self.log.info(\"Dependent (Target) Variable: %s\", dependent_var)\n for var in independent_list:\n self.log.info(\"Independent (Input) Variable(s): %s\", var)\n\n self.log.info(\"Training Epochs: %d\", self.model_epochs())\n\n self.log.info(\"Test Compounds %s R-squared (R^2): %f\", dependent_var, statistics[\"R2\"])\n self.log.info(\"Test Compounds %s Mean Unsigned Error (MUE): %f\", dependent_var, statistics[\"MUE\"])\n self.log.info(\"Test Compounds %s RMS Error: %f\", dependent_var, statistics[\"RMSE\"])\n\n self.log.info(\"Test Compounds Kendall's Rank Coefficient (tau): %f, p-value: %f\",\n statistics[\"kendall\"][\"tau\"], statistics[\"kendall\"][\"p-value\"])\n self.log.info(\"Test Compounds Spearman Coefficient (rho): %f, p-value: %f\",\n statistics[\"spearman\"][\"rho\"], statistics[\"spearman\"][\"p-value\"])\n self.log.info(\"Test Compounds %s Mean Unsigned Error (MUE): %f\", dependent_var, statistics[\"MUE\"])\n self.log.info(\"Test Compounds Results\")\n self.log.info(\"ID, Tested Rank, Pred. Rank, Tested %s, Pred. %s\", dependent_var, dependent_var)\n self.log.info(\"======================================================\")\n\n predict_list = self.model_prediction_records(data, predictions, statistics)\n\n for idx in range(len(predict_list)):\n line = \"{:10s} {:4.1f} {:4.1f} {:10.4f} {:10.4f}\".format(predict_list[idx][0],\n predict_list[idx][1],\n predict_list[idx][2],\n predict_list[idx][3],\n predict_list[idx][4])\n self.log.info(line)\n\n def write_statistics(self, data, statistics, predictions, directory):\n\n # Open the statistics file and append the model results statistics.\n\n stats_filename = os.path.join(directory, self.args.statsFilename)\n independent_list = []\n for var in self.model_arguments()[\"INDEPENDENT\"]:\n independent_list.append(var[\"VARIABLE\"])\n dependent_var = self.model_arguments()[\"DEPENDENT\"][\"VARIABLE\"]\n try:\n\n with open(stats_filename, 'a') as stats_file:\n\n line = \"****************,Classification,******************\\n\"\n stats_file.write(line)\n line = \"Model, {}\\n\".format(self.model_name())\n stats_file.write(line)\n line = \"DependentVar(Target), {}\\n\".format(dependent_var)\n stats_file.write(line)\n for var in independent_list:\n line = \"IndependentVar(Input), {}\\n\".format(var)\n stats_file.write(line)\n line = \"TrainingEpochs, {}\\n\".format(self.model_epochs())\n stats_file.write(line)\n line = \"Runtime, {}\\n\".format(time.asctime(time.localtime(time.time())))\n stats_file.write(line)\n line = \"CPUtime, {}\\n\".format(time.clock())\n stats_file.write(line)\n line = \"++++++++++++++++,Test_Statistics,++++++++++++++++\\n\"\n stats_file.write(line)\n line = \"R^2, {}\\n\".format(statistics[\"R2\"])\n stats_file.write(line)\n line = \"MUE, {}\\n\".format(statistics[\"MUE\"])\n stats_file.write(line)\n line = \"RMSE, {}\\n\".format(statistics[\"RMSE\"])\n stats_file.write(line)\n line = \"Kendall, {}, {}\\n\".format(statistics[\"kendall\"][\"tau\"],\n statistics[\"kendall\"][\"p-value\"])\n stats_file.write(line)\n line = \"Spearman, {}, {}\\n\".format(statistics[\"spearman\"][\"rho\"],\n statistics[\"spearman\"][\"p-value\"])\n stats_file.write(line)\n line = \"++++++++++++++++,Compound_Statistics,++++++++++++++++\\n\"\n stats_file.write(line)\n line = \"ID, Rank, Pred_Rank, Tested_{}, Pred_{}, 
SMILE\\n\".format(dependent_var, dependent_var)\n stats_file.write(line)\n\n predict_list = self.model_prediction_records(data, predictions, statistics)\n\n for idx in range(len(data.get_field(\"ID\"))):\n line = \"{}, {}, {}, {}, {}, {}\\n\".format(predict_list[idx][0],\n predict_list[idx][1],\n predict_list[idx][2],\n predict_list[idx][3],\n predict_list[idx][4],\n predict_list[idx][5])\n stats_file.write(line)\n\n except IOError:\n self.log.error(\"Problem writing to statistics file %s, check path and permissions\", stats_filename)\n"
] |
[
[
"scipy.stats.spearmanr",
"scipy.stats.kendalltau",
"sklearn.metrics.r2_score",
"scipy.stats.rankdata"
]
] |
kjanjua26/Are_Neural_Nets_Dirty
|
[
"4fdb710e80fe25ff80c23c8ef9419ba71baf7244"
] |
[
"utils.py"
] |
[
"import numpy as np\nfrom glob import glob\nimport cv2\nimport os\nimport random \nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split \n\nIMG_LIST = []\nLABEL_LIST = []\n\ndef normalize(x):\n min_val = np.min(x)\n max_val = np.max(x)\n x = (x-min_val) / (max_val-min_val)\n return x\n\ndef next_batch(num, data, labels):\n idx = np.arange(0, len(data))\n np.random.shuffle(idx)\n idx = idx[:num]\n data_shuffle = [data[ i] for i in idx]\n labels_shuffle = [labels[ i] for i in idx]\n return np.asarray(data_shuffle), np.asarray(labels_shuffle)\n\ndef one_hot_encode(x, NROFCLASSES):\n encoded = np.zeros((len(x), NROFCLASSES))\n for idx, val in enumerate(x):\n encoded[idx][val] = 1\n return encoded\n\ndef read_train_valid_data(directory):\n for img_file in glob(directory + \"/NSFW/*.png\"):\n nsfw_label = img_file.split('/')[-2]\n img = cv2.imread(img_file)\n try:\n img = cv2.resize(img, (224, 224))\n IMG_LIST.append(img)\n LABEL_LIST.append(nsfw_label)\n except:\n print('Error at {}'.format(img_file))\n for img_file in glob(directory + \"/SFW/*.png\"):\n img = cv2.imread(img_file)\n sfw_label = img_file.split('/')[-2]\n try:\n img = cv2.resize(img, (224, 224))\n IMG_LIST.append(img)\n LABEL_LIST.append(sfw_label)\n except:\n print('Error at {}'.format(img_file))\n np.save('images_224.npy', IMG_LIST)\n np.save('labels_224.npy', LABEL_LIST)\n assert(len(IMG_LIST) == len(LABEL_LIST))\n\ndef train_test_split_data(NROFCLASSES):\n mapping_file = open(\"mapping_file_labels.txt\", \"w+\")\n if os.path.isfile('images.npy') and os.path.isfile('labels.npy'):\n print('Numpy Arrays Exists!')\n IMAGES = np.load('images_224.npy')\n LABELS = np.load('labels_224.npy')\n unique_labels = list(set(LABELS))\n labelEncoder = preprocessing.LabelEncoder()\n labelEncoder.fit(unique_labels)\n mapping_file.write(str(unique_labels))\n mapping_file.write(\"\\n\")\n mapping_file.write(str(list(labelEncoder.transform(unique_labels))))\n mapping_file.close()\n labelEncoder.fit(LABELS)\n encoded_labels = labelEncoder.transform(LABELS)\n one_hot_labels = one_hot_encode(encoded_labels, NROFCLASSES)\n x_train, x_val, y_train, y_val = train_test_split(IMAGES, one_hot_labels, test_size=0.2, random_state=42)\n x_train = np.asarray(x_train)\n x_val = np.asarray(x_val)\n y_val = np.asarray(y_val)\n y_train = np.asarray(y_train)\n print(\"Done Data Formation.\")\n print('X_TRAIN, Y_TRAIN Shape', x_train.shape, y_train.shape)\n print('X_VAL, Y_VAL Shape', x_val.shape, y_val.shape)\n return x_train, x_val, y_train, y_val\n else:\n print(\"Doesn't exist\")\n read_train_valid_data(\"/Dataset/Train\")\n print('Recall TRAIN_TEST_SPLIT_DATA() to get the data.')"
] |
[
[
"numpy.min",
"numpy.asarray",
"sklearn.model_selection.train_test_split",
"numpy.save",
"numpy.random.shuffle",
"numpy.max",
"numpy.load",
"sklearn.preprocessing.LabelEncoder"
]
] |
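Note: train_test_split_data() above checks for images.npy / labels.npy but loads images_224.npy / labels_224.npy, so the cached branch is only taken when the un-suffixed files also exist. Below is a self-contained sketch of the label-encode, one-hot, train/validation-split pattern that function uses; the toy arrays and class names are illustrative, and np.eye stands in for the file's one_hot_encode helper.

import numpy as np
from sklearn import preprocessing
from sklearn.model_selection import train_test_split

labels = np.array(["SFW", "NSFW", "SFW", "NSFW", "SFW"])
images = np.random.rand(5, 224, 224, 3).astype(np.float32)  # stand-in for the resized frames

encoder = preprocessing.LabelEncoder()
encoded = encoder.fit_transform(labels)           # e.g. [1, 0, 1, 0, 1]
one_hot = np.eye(len(encoder.classes_))[encoded]  # shape (5, n_classes)

x_train, x_val, y_train, y_val = train_test_split(
    images, one_hot, test_size=0.2, random_state=42)
print(x_train.shape, y_train.shape, x_val.shape, y_val.shape)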
jfthuong/pydpf-core
|
[
"bf2895ebc546e0004f759289bfc9a23196559ac3"
] |
[
"tests/test_propertyfield.py"
] |
[
"import numpy as np\nimport pytest\n\nfrom ansys import dpf\nfrom ansys.dpf import core\nfrom ansys.dpf.core.common import locations, natures\n\n\[email protected]()\ndef property_field(simple_bar):\n \"\"\"Return a property field from the simple bar model\"\"\"\n model = dpf.core.Model(simple_bar)\n mesh = model.metadata.meshed_region\n op = dpf.core.Operator(\"meshed_skin_sector\")\n op.inputs.mesh.connect(mesh)\n property_field = op.outputs.property_field_new_elements_to_old()\n return property_field\n\n\ndef test_scopingdata_property_field():\n pfield = dpf.core.PropertyField()\n list_ids = [1, 2, 4, 6, 7]\n scop = core.Scoping(ids=list_ids, location=locations.nodal)\n pfield.scoping = scop\n list_data = [20, 30, 50, 70, 80]\n pfield.data = list_data\n pfield.data\n assert np.allclose(pfield.data, list_data)\n assert np.allclose(pfield.scoping.ids, list_ids)\n\n\ndef test_set_get_data_property_field():\n field = dpf.core.PropertyField(nentities=20, nature=natures.scalar)\n data = []\n for i in range(0, 20):\n data.append(i)\n field.data = data\n assert np.allclose(field.data, data)\n\n\ndef test_create_property_field_push_back():\n f_vec = core.PropertyField(1, core.natures.vector, core.locations.nodal)\n f_vec.append([1, 2, 4], 1)\n assert len(f_vec.data) == 3\n assert f_vec.data[0] == 1\n assert f_vec.data[1] == 2\n assert f_vec.data[2] == 4\n assert f_vec.scoping.ids == [1]\n assert len(f_vec.scoping.ids) == 1\n\n f_scal = core.PropertyField(1, core.natures.scalar, core.locations.nodal)\n f_scal.append([2], 1)\n f_scal.append([5], 2)\n assert len(f_scal.data) == 2\n assert f_scal.data[0] == 2\n assert f_scal.data[1] == 5\n assert len(f_scal.scoping.ids) == 2\n assert f_scal.scoping.ids[0] == 1\n assert f_scal.scoping.ids[1] == 2\n\n\ndef check_on_property_field_from_simplebar(prop_field):\n assert prop_field is not None\n assert len(prop_field.data) != 0\n assert isinstance(prop_field, core.field_base._FieldBase)\n assert isinstance(prop_field, core.PropertyField)\n assert prop_field.component_count == 1\n assert prop_field.data is not None\n assert len(prop_field.data) != 0\n assert len(prop_field.data) == 1400\n assert prop_field.data[15] == 29\n assert np.allclose(prop_field.data[12], 10)\n assert prop_field.elementary_data_count == 1400\n assert prop_field.data[1201] == 2500\n assert prop_field.elementary_data_shape == 1\n assert prop_field.get_entity_data(8) == [7]\n assert prop_field.get_entity_data_by_id(23) == [60]\n assert prop_field.location == locations.elemental\n assert prop_field.location == prop_field.scoping.location\n assert prop_field.size == 1400\n assert np.allclose(prop_field.scoping.ids[201], 202)\n\n\ndef test_getoutput_property_field_operator(property_field):\n check_on_property_field_from_simplebar(property_field)\n\n\ndef test_set_location(property_field):\n assert property_field.location == locations.elemental\n property_field.location = locations.nodal\n assert property_field.location == locations.nodal\n\n\ndef test_set_prop_field_from_message(property_field):\n prop_field_message = property_field._message\n new_prop_field = dpf.core.PropertyField(property_field=prop_field_message)\n assert isinstance(new_prop_field, dpf.core.PropertyField)\n check_on_property_field_from_simplebar(new_prop_field)\n\n\ndef test_set_prop_field_from_prop_field(property_field):\n new_prop_field = dpf.core.PropertyField(property_field=property_field)\n assert isinstance(new_prop_field, dpf.core.PropertyField)\n check_on_property_field_from_simplebar(new_prop_field)\n\n\ndef 
test_connect_property_field_operator():\n f_vec = dpf.core.PropertyField(1, natures.vector, locations.nodal)\n f_vec.append([1, 2, 4], 1)\n op = dpf.core.operators.utility.forward()\n op.inputs.connect(f_vec)\n out = op.get_output(0, core.types.property_field)\n assert out is not None\n assert np.allclose(out.data, [1, 2, 4])\n assert np.allclose(out.scoping.ids, [1])\n\n\ndef test_getoutput_property_field_workflow(simple_bar):\n model = dpf.core.Model(simple_bar)\n mesh = model.metadata.meshed_region\n op = dpf.core.Operator(\"meshed_skin_sector\")\n op.inputs.mesh.connect(mesh)\n\n wf = dpf.core.Workflow()\n wf.add_operator(op)\n wf.set_output_name(\"field_out\", op, 3)\n\n property_field = wf.get_output(\"field_out\", dpf.core.types.property_field)\n check_on_property_field_from_simplebar(property_field)\n\n\ndef test_connect_property_field_workflow():\n f_vec = dpf.core.PropertyField(1, natures.vector, locations.nodal)\n f_vec.append([1, 2, 4], 1)\n op = dpf.core.operators.utility.forward()\n\n wf = dpf.core.Workflow()\n wf.add_operator(op)\n wf.set_input_name(\"field_in\", op, 0)\n wf.connect(\"field_in\", f_vec)\n wf.set_output_name(\"field_out\", op, 0)\n\n out = wf.get_output(\"field_out\", core.types.property_field)\n assert out is not None\n assert np.allclose(out.data, [1, 2, 4])\n assert np.allclose(out.scoping.ids, [1])\n\n\ndef test_local_property_field():\n num_entities = 100\n field_to_local = dpf.core.PropertyField(\n num_entities, dpf.core.natures.scalar, locations.nodal\n )\n data = []\n data_pointer = []\n scoping_ids = []\n with field_to_local.as_local_field() as f:\n for i in range(1, num_entities + 1):\n current_data = range(i, i + 3)\n current_data = list(current_data)\n data_pointer.append(3 * (i - 1))\n scoping_ids.append(i)\n data.extend(current_data)\n f.append(np.array(current_data), i)\n assert np.allclose(f.get_entity_data(i - 1), current_data)\n assert np.allclose(f.get_entity_data_by_id(i), current_data)\n\n with field_to_local.as_local_field() as f:\n for i in range(1, num_entities + 1):\n assert np.allclose(f.get_entity_data(i - 1), range(i, i + 3))\n assert np.allclose(f.get_entity_data_by_id(i), list(range(i, i + 3)))\n\n assert np.allclose(field_to_local.data, data)\n assert np.allclose(field_to_local.scoping.ids, scoping_ids)\n assert np.allclose(\n field_to_local._data_pointer, data_pointer[0: len(data_pointer)]\n )\n\n with field_to_local.as_local_field() as f:\n assert np.allclose(f.data, data)\n assert np.allclose(f._data_pointer, data_pointer[0: len(data_pointer)])\n\n\nif __name__ == \"__main__\":\n test_local_property_field()\n"
] |
[
[
"numpy.array",
"numpy.allclose"
]
] |
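Note: test_local_property_field() above builds flat data, data_pointer and scoping_ids lists and compares them to the field contents with numpy.allclose. The snippet below reproduces just that bookkeeping with plain numpy (no DPF server required); the entity count is shrunk to 3 and the expected lists are written out by hand, so it illustrates the flat-data layout rather than reproducing the test.

import numpy as np

num_entities = 3
data, data_pointer, scoping_ids = [], [], []
for i in range(1, num_entities + 1):
    entity_data = list(range(i, i + 3))   # 3 values per entity, as in the test
    data_pointer.append(3 * (i - 1))      # flat start offset of each entity
    scoping_ids.append(i)
    data.extend(entity_data)

assert np.allclose(data, [1, 2, 3, 2, 3, 4, 3, 4, 5])
assert np.allclose(data_pointer, [0, 3, 6])
assert np.allclose(scoping_ids, [1, 2, 3])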
adbarwell/DottyGenerator
|
[
"380281f3d7333db4f5fde52f25a4c32e471536b3"
] |
[
"benchmark/apigeneration/generate.py"
] |
[
"from dottygen.cli import generate\nfrom .counter import Counter\nimport random\nimport os\nimport shutil\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\n\nparent_output_dir = os.path.abspath(os.path.join('benchmark', 'apigeneration', 'scr-sandbox'))\noutput_file = os.path.join(parent_output_dir, 'test.scr')\ntwo_participant_template_path = os.path.abspath(os.path.join('benchmark', 'apigeneration', 'scr-templates', 'BasicTwoParticipants.scr'))\ntwo_participants = [\"Client\", \"Svr\"]\npayloads = [\"number\", \"string\"]\n\ndef _init_sandbox_():\n if os.path.exists(parent_output_dir) and os.path.isdir(parent_output_dir):\n shutil.rmtree(parent_output_dir)\n os.mkdir(parent_output_dir)\n\ndef multiple_loops_two_participant_protocol(label,payload_string1, payload_string2, participant, j, protocol):\n\n return f\"rec Loop{j} {{ \" \\\n f\" choice at {two_participants[participant]} {{ \\n HELLO{label[0]}({payload_string1}) from {two_participants[participant]} to {two_participants[1- participant]}; \\n \\n continue Loop{j}; \\n }} or {{ \\n HELLO{label[1]}({payload_string2}) from {two_participants[participant]} to {two_participants[1- participant]}; \\n {protocol} \\n }} }}\"\n\ndef nested_choices_two_participant_protocol(label, payload_string1, payload_string2, participant, protocol):\n\n return f\"choice at {two_participants[participant]} {{ \\n HELLO{label[0]}({payload_string1}) from {two_participants[participant]} to {two_participants[1 - participant]}; \\n {protocol} \\n }}\\n\" \\\n f\"or {{ \\n HELLO{label[1]}({payload_string2}) from {two_participants[participant]} to {two_participants[1 - participant]}; }} \\n\"\n\n\n\n\n\ndef multiple_send_receive_two_participant_protocols(label, payload_string, participant):\n\n return f\"HELLO{label}({payload_string}) from {two_participants[participant]} to {two_participants[1-participant]}; \\n\"\n\n\n\n\ndef generate_api_test(test_name):\n x = []\n function_time = []\n nuscr_time = []\n efsm_time = []\n class_time = []\n type_time = []\n merge_time = []\n\n max_range = 100\n range_val = 100\n\n for i in range(1, max_range):\n # counter = Counter()\n # for l in range(range_val):\n # protocol = \"\"\n # for j in range(1, i):\n #\n # random.seed(datetime.now())\n # participant = random.randint(0, 1)\n # label = random.sample(range(1, 100), 2)\n # # payload = random.sample(range(1, 10), 2)\n # payload = [2,2]\n # payload_string1 = \"string, number\"\n # payload_string2 = \"number, string\"\n #\n # if test_name == \"send_receive\":\n # protocol += multiple_send_receive_two_participant_protocols(label[0], payload_string1, participant)\n # elif test_name == \"choices\":\n # protocol = nested_choices_two_participant_protocol(label, payload_string1, payload_string2, participant, protocol)\n # elif test_name == \"loop\":\n # protocol = multiple_loops_two_participant_protocol(label,payload_string1, payload_string2, participant, j, protocol)\n #\n #\n # _init_sandbox_()\n # shutil.copyfile(two_participant_template_path, output_file)\n # with open(output_file) as f:\n # newText = f.read().replace('PROTOCOLS', protocol)\n #\n # with open(output_file, \"w\") as f:\n # f.write(newText)\n #\n # generate(True, '/home/dev/effpi_sandbox/src/main/scala', \"Test\", output_file, [], False, False, False, counter)\n x.append(i)\n # function_time.append(counter.get_function_time()/range_val)\n # nuscr_time.append(counter.get_nuscr_time()/range_val)\n # type_time.append(counter.get_type_time()/range_val)\n # 
efsm_time.append(counter.get_efsm_time()/range_val)\n # class_time.append(counter.get_class_time()/range_val)\n # merge_time.append(counter.get_merge_time()/range_val)\n\n plot_graph(x, function_time, type_time, nuscr_time, efsm_time, class_time, merge_time, test_name)\n\n\ndef plot_graph(x, function_time, type_time, nuscr_time, efsm_time, class_time, merge_time, test_name):\n\n # for i in range(len(function_time)):\n # function_time[i] += (type_time[i] + class_time[i] +merge_time[i])\n #\n # for i in range(len(efsm_time)):\n # efsm_time[i] += nuscr_time[i]\n\n # plt.plot(x, efsm_time, label=\"EFSM + nuscr Generation\")\n # plt.plot(x, function_time, label=\"Other Generations\")\n\n if test_name == \"loop\":\n test_type = \"Loops\"\n elif test_name ==\"choices\":\n test_type = \" Choices\"\n else:\n test_type = \"Send/Receives\"\n\n function_time = [9.46044921875e-05, 0.00023527622222900392, 0.0004251384735107422, 0.0005990386009216309, 0.0007482933998107911, 0.0009140276908874512, 0.0011518311500549317, 0.001336381435394287, 0.0015484809875488282, 0.001637434959411621, 0.0019532251358032227, 0.0021854496002197267, 0.002546083927154541, 0.002699434757232666, 0.0030072665214538573, 0.003286550045013428, 0.003641471862792969, 0.004016246795654297, 0.004350357055664063, 0.004914844036102295, 0.00507228136062622, 0.005479629039764404, 0.0058901596069335935, 0.006390979290008545, 0.006894409656524658, 0.0073245978355407715, 0.007879862785339356, 0.008374435901641846, 0.008742797374725341, 0.00943345308303833, 0.010042648315429687, 0.010610241889953614, 0.011415693759918213, 0.01200960874557495, 0.012804124355316162, 0.013380460739135742, 0.014278500080108643, 0.015047721862792969, 0.015755362510681152, 0.01677237033843994, 0.01962787628173828, 0.020416438579559326, 0.021074609756469728, 0.021988306045532226, 0.022018308639526366, 0.023090219497680663, 0.025253133773803713, 0.02548638582229614, 0.02786893606185913, 0.028194987773895265, 0.029886202812194826, 0.030819389820098877, 0.03251984119415283, 0.03339240074157715, 0.03490227460861206, 0.03736378669738769, 0.0380333924293518, 0.039761250019073484, 0.04157984256744385, 0.042718420028686525, 0.04461002111434936, 0.04704120874404907, 0.04835848569869995, 0.0501546049118042, 0.050995101928710936, 0.05379164218902588, 0.05524696826934814, 0.05759610891342163, 0.06075080633163451, 0.07199412584304808, 0.07017539024353027, 0.079891249179840, 0.07600955009460449, 0.07889044046401977, 0.08027092218399048, 0.08369166374206544, 0.08352739095687867, 0.08826331377029419, 0.09773673295974732, 0.09504824638366699, 0.09945924043655395, 0.10000756740570069, 0.09742478132247925, 0.10032025337219239, 0.10350032329559326, 0.10760244846343994, 0.11068723440170287, 0.11356919527053833, 0.1184844160079956, 0.12168210506439209, 0.12423785448074341, 0.12819464445114137, 0.15981560707092285, 0.13625078916549682, 0.1407883071899414, 0.15209259271621703, 0.14924859762191772, 0.15600711107254028, 0.1586965274810791]\n\n type_time = [3.0944347381591795e-05, 6.0510635375976565e-05, 9.326457977294922e-05, 0.00012586355209350585, 0.00016436338424682617, 0.00019985198974609375, 0.00025680065155029295, 0.0003191876411437988, 0.0003481292724609375, 0.000375974178314209, 0.0004423403739929199, 0.0005057072639465332, 0.0005841875076293946, 0.0006192135810852051, 0.0006903982162475585, 0.0007286190986633301, 0.0008138751983642578, 0.0008888697624206543, 0.0009854555130004883, 0.0010971546173095704, 0.0011340093612670899, 0.0012485432624816896, 0.0013087630271911622, 
0.001438889503479004, 0.0015498733520507813, 0.0016551089286804199, 0.0017692828178405761, 0.0018593120574951173, 0.0019420003890991211, 0.002084801197052002, 0.0022014904022216795, 0.002328324317932129, 0.0024547195434570313, 0.0025975704193115234, 0.002787296772003174, 0.002845714092254639, 0.003071267604827881, 0.00316605806350708, 0.0032878923416137695, 0.00353346586227417, 0.004055113792419434, 0.004187514781951904, 0.004262142181396485, 0.004394538402557373, 0.004423272609710693, 0.004614183902740479, 0.005023813247680664, 0.004985766410827637, 0.005416650772094727, 0.005395228862762451, 0.005663647651672364, 0.005842976570129395, 0.006104388236999512, 0.006235430240631104, 0.006417384147644043, 0.006805768013000488, 0.006897411346435547, 0.0070580863952636715, 0.007430541515350342, 0.007470974922180176, 0.007738499641418457, 0.00822068452835083, 0.008136334419250489, 0.008472952842712402, 0.00860086441040039, 0.008967366218566895, 0.009071226119995118, 0.009390640258789062, 0.011969820022583008, 0.01123591184616089, 0.01277489900588989, 0.0124513792991637, 0.011295623779296875, 0.012027385234832764, 0.012118360996246337, 0.012543962001800538, 0.012294845581054687, 0.01279038667678833, 0.01395296335220337, 0.013636536598205566, 0.014118549823760986, 0.013911070823669434, 0.013329939842224121, 0.013554124832153321, 0.013863911628723144, 0.014013156890869141, 0.01460460662841797, 0.014516563415527343, 0.014902482032775879, 0.015293331146240234, 0.01539363145828247, 0.015777003765106202, 0.020545358657836913, 0.01617483615875244, 0.01656710147857666, 0.018242475986480714, 0.01731841802597046, 0.017971222400665284, 0.018004372119903564]\n\n class_time = [6.802082061767578e-05, 0.00012280464172363282, 0.00023105621337890625, 0.0002621889114379883, 0.0003191971778869629, 0.0003472399711608887, 0.0004610657691955566, 0.0005087471008300782, 0.0005612516403198242, 0.0005915951728820801, 0.0006412029266357422, 0.0007145595550537109, 0.0008933138847351074, 0.0009584856033325195, 0.0010208845138549804, 0.0011844134330749512, 0.0014114141464233398, 0.0014801859855651856, 0.0015305376052856445, 0.0017577862739562987, 0.0018500089645385742, 0.0020192694664001466, 0.0021392989158630373, 0.002423651218414307, 0.002630617618560791, 0.0027683019638061524, 0.0037270212173461915, 0.003452885150909424, 0.003346524238586426, 0.003750913143157959, 0.003912172317504883, 0.004360768795013428, 0.004500167369842529, 0.004841821193695068, 0.0053162527084350585, 0.005447967052459717, 0.005971155166625977, 0.006345865726470948, 0.006548550128936768, 0.007782673835754395, 0.009244956970214845, 0.008821077346801758, 0.00928377389907837, 0.00963484525680542, 0.009837582111358642, 0.010315911769866943, 0.01149502992630005, 0.011729440689086913, 0.0129351806640625, 0.012921802997589112, 0.013684325218200684, 0.014207839965820312, 0.015212130546569825, 0.01677511930465698, 0.01679512023925781, 0.01810025691986084, 0.01799179792404175, 0.018917381763458252, 0.02015599489212036, 0.020858354568481445, 0.021688435077667236, 0.02353755235671997, 0.023527798652648927, 0.02476895570755005, 0.025907669067382812, 0.026738431453704834, 0.02739769220352173, 0.028557255268096923, 0.028221056461334226, 0.02866306066513061, 0.02946342086791992, 0.025460696220398, 0.036709277629852294, 0.039040937423706054, 0.04475679874420166, 0.04379750967025757, 0.042466824054718015, 0.05135743141174316, 0.04842639923095703, 0.04916533946990967, 0.05564119577407837, 0.05113544225692749, 0.050153985023498535, 0.0521485447883606, 0.05956193447113037, 
0.055900447368621826, 0.05694215774536133, 0.05801438570022583, 0.06001677513122559, 0.062030775547027586, 0.06377395868301391, 0.07528239011764526, 0.0852637219429016, 0.06808904886245727, 0.07042655229568481, 0.0872386622428894, 0.07600813627243042, 0.0796918272972107, 0.08032221794128418]\n\n merge_time = [4.9810409545898435e-05, 0.00016489505767822266, 0.0002645134925842285, 0.00033865928649902345, 0.00042276620864868165, 0.000516364574432373, 0.0006284928321838379, 0.0008266663551330566, 0.0007907700538635254, 0.0008352160453796387, 0.0009140944480895996, 0.0011214303970336914, 0.001109488010406494, 0.0011641097068786621, 0.001195380687713623, 0.001509530544281006, 0.001382756233215332, 0.0015133142471313477, 0.0016225337982177734, 0.0017544770240783692, 0.0018738818168640137, 0.001894357204437256, 0.0021629738807678223, 0.002042653560638428, 0.00211045503616333, 0.0021875691413879394, 0.002789666652679443, 0.0023116374015808103, 0.0026281929016113282, 0.0025330591201782227, 0.00263810396194458, 0.0029016709327697753, 0.0028259420394897462, 0.0029659438133239745, 0.003235633373260498, 0.003131074905395508, 0.0033851385116577147, 0.0036361122131347654, 0.003765387535095215, 0.0035639119148254393, 0.003374009609222412, 0.00432051420211792, 0.004731330871582031, 0.004496560096740722, 0.004751310348510742, 0.005254724884033203, 0.005148921012878418, 0.0051249170303344724, 0.005162191390991211, 0.005336785316467285, 0.005259411334991455, 0.0063111972808837895, 0.005883369445800781, 0.005691583156585693, 0.005794599056243897, 0.006635212898254394, 0.006467981338500977, 0.006259605884552002, 0.006426656246185302, 0.006438558101654053, 0.006782424449920654, 0.007133135795593262, 0.0072249722480773925, 0.007219276428222656, 0.00738053560256958, 0.0075936555862426754, 0.00805335283279419, 0.007959895133972168, 0.01235990047454834, 0.012923400402069093, 0.01370938777923584, 0.01475120735168457, 0.014097838401794434, 0.011896419525146484, 0.0094211483001709, 0.009533915519714355, 0.009840672016143798, 0.01023622989654541, 0.03268199682235718, 0.01053819179534912, 0.010532965660095215, 0.010797972679138184, 0.010428667068481445, 0.010867466926574707, 0.010808031558990478, 0.010738580226898194, 0.011543276309967042, 0.011110405921936035, 0.012406401634216309, 0.011994776725769042, 0.01220714807510376, 0.011874401569366455, 0.014919404983520507, 0.011626191139221191, 0.012894935607910156, 0.012607793807983398, 0.013215892314910889, 0.015318527221679687, 0.013969733715057372]\n\n nuscr_time = [0.2746428608894348, 0.28131083250045774, 0.28276429414749144, 0.2827449703216553, 0.2852255582809448, 0.28054195642471313, 0.287175612449646, 0.29051335096359254, 0.2789213418960571, 0.2822346019744873, 0.2811269664764404, 0.2830137085914612, 0.29324174642562867, 0.28296435832977296, 0.2780307841300964, 0.28503659009933474, 0.28493049621582034, 0.28665425062179567, 0.2813376832008362, 0.29756635904312134, 0.27863076210021975, 0.2731794404983521, 0.2706669616699219, 0.26631707906723023, 0.27038227081298827, 0.27137664556503294, 0.2726215577125549, 0.2728453326225281, 0.27415313720703127, 0.27459217309951783, 0.27636420249938964, 0.27594996213912965, 0.27557106971740725, 0.27717833042144774, 0.2785419726371765, 0.2747219634056091, 0.27790616035461424, 0.27763847827911375, 0.2772816491127014, 0.27886994123458864, 0.3272753643989563, 0.312198748588562, 0.3223417735099792, 0.3065609908103943, 0.2911711406707764, 0.28940715789794924, 0.29502684593200684, 0.2883837008476257, 0.30802777767181394, 0.2920421266555786, 
0.2976131057739258, 0.2966902542114258, 0.30037245988845823, 0.2947498059272766, 0.29464221000671387, 0.31872074842453, 0.2989064884185791, 0.30054455518722534, 0.30030815839767455, 0.30057382106781005, 0.3032486867904663, 0.30633177518844606, 0.3034829807281494, 0.30709386348724366, 0.3033760905265808, 0.30810377120971677, 0.3082999157905579, 0.3097019863128662, 0.35410852432251, 0.365166156291962, 0.39568554401397, 0.3408375906944274, 0.349596791267395, 0.36586608171463014, 0.3684359502792358, 0.3578595423698425, 0.35222063064575193, 0.36387698650360106, 0.3836271333694458, 0.37145964622497557, 0.3806936454772949, 0.3676475977897644, 0.3354528498649597, 0.3350493860244751, 0.33391714334487915, 0.34063244104385376, 0.3374485206604004, 0.3439794421195984, 0.3488873553276062, 0.3475524520874023, 0.34797205686569216, 0.3462175750732422, 0.48442505359649657, 0.35162564754486086, 0.35421777963638307, 0.3934969186782837, 0.3586452078819275, 0.3697826862335205, 0.3657154774665832]\n\n efsm_time = [0.008676965236663819, 0.026883776187896728, 0.04579216957092285, 0.0636829662322998, 0.08295372724533082, 0.10010012626647949, 0.12035857439041138, 0.1379173231124878, 0.15192272663116455, 0.16960755825042725, 0.18632059574127197, 0.2055148220062256, 0.23433029651641846, 0.24171532154083253, 0.25463310003280637, 0.28330260038375854, 0.2982481002807617, 0.31737821102142333, 0.3282706642150879, 0.365275456905365, 0.36163743019104005, 0.3748601484298706, 0.38803958892822266, 0.4055929589271545, 0.4217769503593445, 0.44040488243103026, 0.4550558352470398, 0.47325342178344726, 0.49227487564086914, 0.5077622556686401, 0.5260715436935425, 0.5428478908538819, 0.5598997211456299, 0.5803431844711304, 0.598787293434143, 0.6111454916000366, 0.6328365683555603, 0.6485451602935791, 0.6654319882392883, 0.6824657535552978, 0.8095584082603454, 0.7989681053161621, 0.8312009835243225, 0.8127891540527343, 0.7924024891853333, 0.8029484772682189, 0.8299696660041809, 0.8455252647399902, 0.8988074231147766, 0.8816045832633972, 0.9075847005844117, 0.9251994037628174, 0.9498557019233703, 0.9474294996261596, 0.9656365990638733, 1.0288971757888794, 1.005663468837738, 1.0418953490257263, 1.0430011701583863, 1.0597689080238342, 1.0778371334075927, 1.1087351608276368, 1.1154913806915283, 1.1450537300109864, 1.1564229106903077, 1.1826330208778382, 1.1923355221748353, 1.2176303243637085, 1.120722332000732, 1.17114844083786, 1.257730293273926, 1.3923047542572, 1.4163194584846497, 1.50210599899292, 1.5102623081207276, 1.5287169194221497, 1.5018903708457947, 1.5401203799247742, 1.657681496143341, 1.6221377897262572, 1.6546122646331787, 1.6404151797294617, 1.5125936579704284, 1.5401837062835693, 1.5473741602897644, 1.5717141103744507, 1.5875270342826844, 1.6033613967895508, 1.6381663036346437, 1.6560366177558898, 1.6704990839958191, 1.677191779613495, 1.670842266082763, 1.736377546787262, 1.7477165508270263, 1.8849027967453003, 1.7865635275840759, 1.8282225155830383, 1.8325116753578186]\n\n plt.plot(x, function_time, label=\"Function Generation\")\n plt.plot(x, type_time, label=\"Type Generation\")\n plt.plot(x, nuscr_time, label=\"Nuscr Generation\")\n plt.plot(x, efsm_time, label=\"EFSM Generation\")\n plt.plot(x, class_time, label=\"Class Generation\")\n plt.plot(x, merge_time, label=\"Merging\")\n plt.xlabel(f'Number of {test_type}')\n plt.ylabel('Time taken (s)')\n plt.title(f'Multiple {test_type} with Two Participants')\n plt.legend()\n plt.savefig(f\"benchmark/apigeneration/graphs/test_{test_name}.png\")\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] |
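Note: plot_graph() above overlays six hard-coded timing series with matplotlib and writes the figure to disk. The following is a condensed sketch of that plotting pattern with two synthetic series; the series values, labels and output filename are placeholders, and only the pyplot calls (plot, xlabel, ylabel, title, legend, savefig) mirror the row's API list.

import matplotlib.pyplot as plt

x = list(range(1, 11))
series = {
    "Function Generation": [0.001 * i for i in x],  # synthetic timings
    "EFSM Generation": [0.02 * i for i in x],
}

for label, times in series.items():
    plt.plot(x, times, label=label)
plt.xlabel("Number of Send/Receives")
plt.ylabel("Time taken (s)")
plt.title("Multiple Send/Receives with Two Participants")
plt.legend()
plt.savefig("test_send_receive.png")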
denjiry/chainer
|
[
"758c74122f4463f3b0de61c6609b8688576f59a6"
] |
[
"tests/chainer_tests/functions_tests/pooling_tests/test_max_pooling_2d.py"
] |
[
"import unittest\n\nimport numpy\nimport six\n\nimport chainer\nfrom chainer.backends import cuda\nfrom chainer import functions\nfrom chainer import gradient_check\nfrom chainer import testing\nfrom chainer.testing import attr\nfrom chainer.testing import backend\n\n\ndef _to_fcontiguous(arrays):\n xp = chainer.backend.get_array_module(*arrays)\n return [xp.asfortranarray(a) for a in arrays]\n\n\[email protected](*testing.product({\n 'cover_all': [True, False],\n 'dtype': [numpy.float16, numpy.float32, numpy.float64],\n 'c_contiguous': [True, False],\n}))\[email protected]_backend_tests(\n ['test_forward',\n 'test_forward_output_size_zero',\n 'test_backward',\n 'test_double_backward'],\n # CPU tests\n testing.product({\n 'use_cuda': [False],\n 'use_ideep': ['never', 'always'],\n })\n # GPU tests\n + testing.product({\n 'use_cuda': [True],\n 'use_cudnn': ['never', 'always'],\n })\n # ChainerX tests\n + testing.product({\n 'use_chainerx': [True],\n 'chainerx_device': ['native:0', 'cuda:0'],\n }))\nclass TestMaxPooling2D(unittest.TestCase):\n\n def setUp(self):\n dtype = self.dtype\n\n # Avoid unstability of numerical gradient\n x = numpy.arange(2 * 3 * 4 * 3, dtype=dtype).reshape(2, 3, 4, 3)\n numpy.random.shuffle(x)\n x = 2 * x / x.size - 1\n if self.cover_all:\n gy = numpy.random.uniform(-1, 1, (2, 3, 3, 2)).astype(dtype)\n else:\n gy = numpy.random.uniform(-1, 1, (2, 3, 2, 2)).astype(dtype)\n ggx = numpy.random.uniform(-1, 1, (2, 3, 4, 3)).astype(dtype)\n\n self.output_shape = gy.shape\n\n self.inputs = [x]\n self.grad_outputs = [gy]\n self.grad_grad_inputs = [ggx]\n\n if self.dtype == numpy.float16:\n self.check_backward_options = {\n 'atol': 1e-3, 'rtol': 1e-2}\n self.check_double_backward_options = {\n 'atol': 1e-3, 'rtol': 1e-2}\n else:\n self.check_backward_options = {\n 'atol': 1e-4, 'rtol': 1e-3}\n self.check_double_backward_options = {\n 'atol': 1e-4, 'rtol': 1e-3}\n\n def forward_cpu(self, inputs):\n x, = inputs\n expect = numpy.empty(self.output_shape, dtype=self.dtype)\n for i in six.moves.range(2):\n for c in six.moves.range(3):\n xx = x[i, c]\n if self.cover_all:\n expect[i, c] = numpy.array([\n [xx[0:2, 0:2].max(), xx[0:2, 1:3].max()],\n [xx[1:4, 0:2].max(), xx[1:4, 1:3].max()],\n [xx[3:4, 0:2].max(), xx[3:4, 1:3].max()]])\n else:\n expect[i, c] = numpy.array([\n [xx[0:2, 0:2].max(), xx[0:2, 1:3].max()],\n [xx[1:4, 0:2].max(), xx[1:4, 1:3].max()]])\n return expect,\n\n def check_forward(self, inputs, backend_config):\n y_expect, = self.forward_cpu(inputs)\n\n # TODO(sonots): Cleanup to use testing.backend.get_array after\n # chainerx.asfortranarray is implemented.\n if (backend_config.use_cuda\n or (backend_config.use_chainerx\n and backend_config.chainerx_device.startswith('cuda:'))):\n inputs = cuda.to_gpu(inputs)\n if not self.c_contiguous:\n inputs = _to_fcontiguous(inputs)\n if backend_config.use_chainerx:\n inputs = chainer.backend.to_chainerx(inputs)\n\n with backend_config:\n x, = inputs\n y = functions.max_pooling_2d(x, 3, stride=2, pad=1,\n cover_all=self.cover_all)\n assert self.dtype == y.data.dtype\n assert self.output_shape == y.data.shape\n testing.assert_allclose(y_expect, y.data)\n\n def test_forward(self, backend_config):\n self.check_forward(self.inputs, backend_config)\n\n def test_forward_cpu_wide(self): # see #120\n x_data = numpy.random.rand(2, 3, 15, 15).astype(self.dtype)\n x = chainer.Variable(x_data)\n functions.max_pooling_2d(x, 6, stride=6, pad=0)\n\n def test_forward_output_size_zero(self, backend_config):\n with self.assertRaises(Exception):\n 
x = numpy.random.rand(4, 4, 1, 4).astype(self.dtype)\n # TODO(sonots): Cleanup to use testing.backend.get_array after\n # chainerx.asfortranarray is implemented.\n if (backend_config.use_cuda\n or (backend_config.use_chainerx\n and backend_config.chainerx_device.startswith('cuda:'))):\n x = cuda.to_gpu(x)\n if backend_config.use_chainerx:\n x = chainer.backend.to_chainerx(x)\n x = chainer.Variable(x)\n with backend_config:\n functions.max_pooling_2d(x, 3, stride=2)\n\n with self.assertRaises(Exception):\n x = numpy.random.rand(4, 4, 4, 1).astype(self.dtype)\n # TODO(sonots): Cleanup to use testing.backend.get_array after\n # chainerx.asfortranarray is implemented.\n if (backend_config.use_cuda\n or (backend_config.use_chainerx\n and backend_config.chainerx_device.startswith('cuda:'))):\n x = cuda.to_gpu(x)\n if backend_config.use_chainerx:\n x = chainer.backend.to_chainerx(x)\n x = chainer.Variable(x)\n with backend_config:\n functions.max_pooling_2d(x, 3, stride=2)\n\n def check_backward(self, inputs, grad_outputs, backend_config):\n # TODO(sonots): Cleanup to use testing.backend.get_array after\n # chainerx.asfortranarray is implemented.\n if (backend_config.use_cuda\n or (backend_config.use_chainerx\n and backend_config.chainerx_device.startswith('cuda:'))):\n inputs = cuda.to_gpu(inputs)\n grad_outputs = cuda.to_gpu(grad_outputs)\n if not self.c_contiguous:\n inputs = _to_fcontiguous(inputs)\n grad_outputs = _to_fcontiguous(grad_outputs)\n if backend_config.use_chainerx:\n inputs = chainer.backend.to_chainerx(inputs)\n grad_outputs = chainer.backend.to_chainerx(grad_outputs)\n\n def f(x):\n return functions.max_pooling_2d(\n x, 3, stride=2, pad=1, cover_all=self.cover_all)\n\n with backend_config:\n gradient_check.check_backward(\n f, inputs, grad_outputs, dtype='d',\n **self.check_backward_options)\n\n def test_backward(self, backend_config):\n self.check_backward(self.inputs, self.grad_outputs, backend_config)\n\n def test_backward_cpu_more_than_once(self):\n func = functions.pooling.max_pooling_2d.MaxPooling2D(\n 3, stride=2, pad=1, cover_all=self.cover_all)\n func.apply(self.inputs)\n func.backward((0,), self.grad_outputs)\n func.backward((0,), self.grad_outputs)\n\n def check_double_backward(\n self, inputs, grad_outputs, grad_grad_inputs, backend_config):\n # TODO(sonots): Cleanup to use testing.backend.get_array after\n # chainerx.asfortranarray is implemented.\n if (backend_config.use_cuda\n or (backend_config.use_chainerx\n and backend_config.chainerx_device.startswith('cuda:'))):\n inputs = cuda.to_gpu(inputs)\n grad_outputs = cuda.to_gpu(grad_outputs)\n grad_grad_inputs = cuda.to_gpu(grad_grad_inputs)\n if not self.c_contiguous:\n inputs = _to_fcontiguous(inputs)\n grad_outputs = _to_fcontiguous(grad_outputs)\n grad_grad_inputs = _to_fcontiguous(grad_grad_inputs)\n if backend_config.use_chainerx:\n inputs = chainer.backend.to_chainerx(inputs)\n grad_outputs = chainer.backend.to_chainerx(grad_outputs)\n grad_grad_inputs = chainer.backend.to_chainerx(grad_grad_inputs)\n\n def f(x):\n return functions.max_pooling_2d(\n x, 3, stride=2, pad=1, cover_all=self.cover_all)\n\n with backend_config:\n gradient_check.check_double_backward(\n f, inputs, grad_outputs, grad_grad_inputs,\n dtype='d',\n **self.check_double_backward_options)\n\n def test_double_backward(self, backend_config):\n self.check_double_backward(\n self.inputs, self.grad_outputs, self.grad_grad_inputs,\n backend_config)\n\n\[email protected](*testing.product({\n 'use_cudnn': ['always', 'auto', 'never'],\n 'dtype': 
[numpy.float16, numpy.float32, numpy.float64],\n}))\[email protected]\nclass TestMaxPooling2DCudnnCall(unittest.TestCase):\n\n def setUp(self):\n self.x = cuda.cupy.arange(\n 2 * 3 * 4 * 3, dtype=self.dtype).reshape(2, 3, 4, 3)\n self.gy = cuda.cupy.random.uniform(-1, 1,\n (2, 3, 2, 2)).astype(self.dtype)\n\n def forward(self):\n x = chainer.Variable(self.x)\n return functions.max_pooling_2d(\n x, 3, stride=2, pad=1, cover_all=False)\n\n def test_call_cudnn_forward(self):\n with chainer.using_config('use_cudnn', self.use_cudnn):\n with testing.patch('cupy.cudnn.pooling_forward') as func:\n self.forward()\n self.assertEqual(func.called,\n chainer.should_use_cudnn('>=auto'))\n\n def test_call_cudnn_backward(self):\n with chainer.using_config('use_cudnn', self.use_cudnn):\n expect = chainer.should_use_cudnn('>=auto')\n y = self.forward()\n # should be consistent to forward regardless of use_cudnn config\n y.grad = self.gy\n with testing.patch('cupy.cudnn.pooling_backward') as func:\n y.backward()\n self.assertEqual(func.called, expect)\n\n\nclass TestMaxPooling2DIndices(unittest.TestCase):\n def setUp(self):\n self.x = numpy.arange(\n 2 * 3 * 4 * 4, dtype=numpy.float32).reshape(2, 3, 4, 4)\n numpy.random.shuffle(self.x)\n\n def _check(self, x):\n out, indices = functions.max_pooling_2d(\n x, 2, cover_all=False, return_indices=True)\n assert isinstance(out, chainer.Variable)\n assert isinstance(out.array, type(x))\n assert isinstance(indices, type(x))\n assert indices.shape == out.array.shape\n\n # Calculate expected indices.\n expect = numpy.zeros(indices.shape, dtype=indices.dtype)\n for i in six.moves.range(2):\n for c in six.moves.range(3):\n xx = x[i, c]\n expect[i, c] = numpy.array([\n [xx[0:2, 0:2].ravel().argmax(),\n xx[0:2, 2:4].ravel().argmax()],\n [xx[2:4, 0:2].ravel().argmax(),\n xx[2:4, 2:4].ravel().argmax()],\n ])\n if out.xp is cuda.cupy:\n expect = cuda.to_gpu(expect)\n assert (expect == indices).all()\n\n def test_cpu(self):\n self._check(self.x)\n\n @attr.gpu\n @attr.cudnn\n def test_gpu(self):\n x = cuda.to_gpu(self.x)\n with chainer.using_config('use_cudnn', 'never'):\n self._check(x)\n with chainer.using_config('use_cudnn', 'always'):\n self._check(x)\n\n\ntesting.run_module(__name__, __file__)\n"
] |
[
[
"numpy.arange",
"numpy.random.shuffle",
"numpy.random.rand",
"numpy.random.uniform",
"numpy.zeros",
"numpy.empty"
]
] |
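Note: forward_cpu() above computes the expected max-pooling output by taking maxima over explicit slices of the input map (3x3 window, stride 2, pad 1, giving row windows 0:2 and 1:4, plus 3:4 when cover_all is set). The snippet below repeats that reference computation for a single 4x3 channel with plain numpy; the input values are arbitrary and chainer is not required.

import numpy as np

x = np.arange(4 * 3, dtype=np.float32).reshape(4, 3)
expect = np.array([
    [x[0:2, 0:2].max(), x[0:2, 1:3].max()],  # windows of the first output row
    [x[1:4, 0:2].max(), x[1:4, 1:3].max()],  # windows of the second output row
])
print(expect)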
som-shahlab/net_benefit_ascvd
|
[
"1259692025d90618211206105d8d23bd5a90a66c"
] |
[
"net_benefit_ascvd/prediction_utils/pytorch_utils/group_fairness.py"
] |
[
"import torch\nimport torch.nn.functional as F\nimport itertools\nimport warnings\nimport logging\n\nfrom net_benefit_ascvd.prediction_utils.pytorch_utils.models import (\n TorchModel,\n FixedWidthModel,\n BilevelModel,\n)\nfrom net_benefit_ascvd.prediction_utils.pytorch_utils.layers import FeedforwardNet\nfrom net_benefit_ascvd.prediction_utils.pytorch_utils.pytorch_metrics import (\n weighted_mean,\n roc_auc_score_surrogate,\n precision_surrogate,\n tpr_surrogate,\n fpr_surrogate,\n positive_rate_surrogate,\n weighted_cross_entropy_loss,\n MetricUndefinedError,\n baselined_loss,\n)\n\nfrom net_benefit_ascvd.prediction_utils.pytorch_utils.pytorch_metrics import get_surrogate\n\n# Structure:\n# group_regularized_model -> function returning a model class based on a provided key\n# GroupRegularizedModel -> upper level model class. Defines a regularized loss that can be computed arbitrarily\n# MMDModel -> penalizes MMD between model predictions for conditional prediction parity\n# EqualMeanPredictionModel -> penalizes mean difference in predictions with same interface as MMDModel\n# GroupIRMModel -> Applies the IRM penalty to each group\n# EqualThresholdRateModel -> penalizes thresholded prediction rates across groups with same interface as MMDModel\n# GroupMetricRegularizedModel -> penalizes the differences in a differentiable metric across groups\n# EqualAUCModel\n# EqualPrecisionModel\n# EqualLossModel\n# EqualBrierScoreModel\n# EqualTPRModel\n# EqualFPRModel\n# EqualPositiveRateModel\n# GroupAdversarialModel -> penalizes difference in distribution of predictions across groups w/ discriminator\n\n\ndef group_regularized_model(model_type=\"loss\"):\n \"\"\"\n A function that returns an instance of GroupRegularizedModel\n \"\"\"\n class_dict = {\n \"loss\": EqualLossModel,\n \"baselined_loss\": EqualBaselinedLossModel,\n \"auc\": EqualAUCModel,\n \"brier\": EqualBrierScoreModel,\n \"mmd\": MMDModel,\n \"mean_prediction\": EqualMeanPredictionModel,\n \"threshold_rate\": EqualThresholdRateModel,\n \"precision\": EqualPrecisionModel,\n \"adversarial\": GroupAdversarialModel,\n \"group_irm\": GroupIRMModel,\n }\n the_class = class_dict.get(model_type, None)\n if the_class is None:\n raise ValueError(\"model_type not defined in group_regularized_model\")\n return the_class\n\n\nclass GroupRegularizedModel(TorchModel):\n \"\"\"\n A model that penalizes differences in a quantity across groups\n \"\"\"\n\n def compute_group_regularization_loss(\n self, outputs, labels, group, sample_weight=None\n ):\n \"\"\"\n Computes a regularization term defined in terms of model outputs, labels, and group.\n This class should be overriden and this regularization term defined.\n \"\"\"\n raise NotImplementedError\n\n def get_default_config(self):\n \"\"\"\n Default parameters\n \"\"\"\n config_dict = super().get_default_config()\n update_dict = {\n \"num_hidden\": 1,\n \"hidden_dim\": 128,\n \"drop_prob\": 0.0,\n \"normalize\": False,\n \"sparse\": True,\n \"sparse_mode\": \"csr\", # alternatively, \"convert\"\n \"resnet\": False,\n }\n return {**config_dict, **update_dict}\n\n def init_model(self):\n model = FeedforwardNet(\n in_features=self.config_dict[\"input_dim\"],\n hidden_dim_list=self.config_dict[\"num_hidden\"]\n * [self.config_dict[\"hidden_dim\"]],\n output_dim=self.config_dict[\"output_dim\"],\n drop_prob=self.config_dict[\"drop_prob\"],\n normalize=self.config_dict[\"normalize\"],\n sparse=self.config_dict[\"sparse\"],\n sparse_mode=self.config_dict[\"sparse_mode\"],\n 
resnet=self.config_dict[\"resnet\"],\n )\n return model\n\n def get_transform_batch_keys(self):\n \"\"\"\n Returns the names of the list of tensors that sent to device\n \"\"\"\n result = super().get_transform_batch_keys()\n result = result + [\"group\"]\n\n def get_loss_names(self):\n return [\"loss\", \"supervised\", \"group_regularization\"]\n\n def forward_on_batch(self, the_data):\n \"\"\"\n Run the forward pass, returning a batch_loss_dict and outputs\n \"\"\"\n loss_dict_batch = {}\n inputs, labels, group = (\n the_data[\"features\"],\n the_data[\"labels\"],\n the_data[\"group\"],\n )\n outputs = self.model(inputs)\n # Compute the loss\n if self.config_dict.get(\"weighted_loss\"):\n loss_dict_batch[\"supervised\"] = self.criterion(\n outputs, labels, sample_weight=the_data.get(\"weights\")\n )\n loss_dict_batch[\n \"group_regularization\"\n ] = self.compute_group_regularization_loss(\n outputs, labels, group, sample_weight=the_data.get(\"weights\")\n )\n else:\n loss_dict_batch[\"supervised\"] = self.criterion(outputs, labels)\n loss_dict_batch[\n \"group_regularization\"\n ] = self.compute_group_regularization_loss(outputs, labels, group)\n\n loss_dict_batch[\"loss\"] = loss_dict_batch[\"supervised\"] + (\n self.config_dict[\"lambda_group_regularization\"]\n * loss_dict_batch[\"group_regularization\"]\n )\n return loss_dict_batch, outputs\n\n\nclass MMDModel(GroupRegularizedModel):\n \"\"\"\n Model that minimizes distributional discrepancy between predictions belonging to different groups.\n In the default case, corresponds to threshold-free demographic parity.\n If made conditional on the outcome, corresponds to equalized odds.\n \"\"\"\n\n def get_default_config(self):\n config_dict = super().get_default_config()\n update_dict = {\n \"mmd_mode\": \"conditional\"\n # \"conditional\" -> eq_odds,\n # \"conditional_pos\" -> eq_opportunity_pos,\n # \"conditional_neg\" -> eq_opportunity_neg,\n # \"unconditional\" -> demographic_parity\n }\n return {**config_dict, **update_dict}\n\n def compute_mmd(self, x, y, x_weights=None, y_weights=None):\n \"\"\"\n Compute an MMD\n \"\"\"\n x_kernel = self.compute_kernel(x, x)\n y_kernel = self.compute_kernel(y, y)\n xy_kernel = self.compute_kernel(x, y)\n\n if (x_weights is None) and (y_weights is None):\n return x_kernel.mean() + y_kernel.mean() - 2 * xy_kernel.mean()\n else:\n if x_weights is None:\n x_weights = torch.ones(x.shape[0])\n if y_weights is None:\n y_weights = torch.ones(y.shape[0])\n\n x_weights_tile = x_weights.unsqueeze(1) * x_weights.unsqueeze(0)\n y_weights_tile = y_weights.unsqueeze(1) * y_weights.unsqueeze(0)\n xy_weights_tile = x_weights.unsqueeze(1) * y_weights.unsqueeze(0)\n\n return (\n ((x_kernel * x_weights_tile).sum() / x_weights_tile.sum())\n + ((y_kernel * y_weights_tile).sum() / y_weights_tile.sum())\n - (2 * (xy_kernel * xy_weights_tile).sum() / xy_weights_tile.sum())\n )\n\n @staticmethod\n def compute_kernel(x, y, gamma=None):\n \"\"\"\n Gaussian RBF kernel for use in an MMD\n \"\"\"\n dim = x.size(1)\n assert dim == y.size(1)\n if gamma is None:\n gamma = dim\n\n kernel_input = (\n (x.unsqueeze(1) - y.unsqueeze(0)).pow(2).sum(2)\n ) # sum over features\n return torch.exp(-gamma * kernel_input) # (x_size, y_size)\n\n def compute_mmd_group(self, x, group, sample_weight=None):\n \"\"\"\n Compute the MMD between data for each group\n \"\"\"\n unique_groups = group.unique()\n mmd = torch.tensor([0.0], dtype=torch.float).to(self.device)\n if len(unique_groups) == 1:\n return mmd\n i = 0\n if 
self.config_dict[\"group_regularization_mode\"] == \"overall\":\n for the_group in unique_groups:\n mmd = mmd + self.compute_mmd(\n x[group == the_group],\n x,\n x_weights=sample_weight[group == the_group]\n if sample_weight is not None\n else None,\n y_weights=sample_weight if sample_weight is not None else None,\n )\n i = i + 1\n elif self.config_dict[\"group_regularization_mode\"] == \"group\":\n for comb in itertools.combinations(unique_groups, 2):\n mmd = mmd + self.compute_mmd(\n x[group == comb[0]],\n x[group == comb[1]],\n x_weights=sample_weight[group == comb[0]]\n if sample_weight is not None\n else None,\n y_weights=sample_weight[group == comb[1]]\n if sample_weight is not None\n else None,\n )\n i = i + 1\n return mmd / i\n\n def compute_group_regularization_loss(\n self, outputs, labels, group, sample_weight=None\n ):\n \"\"\"\n Partition the data on the labels and compute the group MMD\n \"\"\"\n mmd = torch.FloatTensor([0.0]).to(self.device)\n outputs = F.log_softmax(outputs, dim=1)[:, -1].unsqueeze(1)\n if self.config_dict[\"mmd_mode\"] == \"unconditional\":\n mmd = mmd + self.compute_mmd_group(\n outputs,\n group,\n sample_weight=sample_weight if sample_weight is not None else None,\n )\n else:\n if self.config_dict[\"mmd_mode\"] == \"conditional\":\n unique_labels = labels.unique()\n elif self.config_dict[\"mmd_mode\"] == \"conditional_pos\":\n unique_labels = [1]\n elif self.config_dict[\"mmd_mode\"] == \"conditional_neg\":\n unique_labels = [0]\n else:\n raise ValueError(\"Invalid option provided to mmd_mode\")\n for the_label in unique_labels:\n if (labels == the_label).sum() > 0:\n mmd = mmd + self.compute_mmd_group(\n outputs[labels == the_label],\n group[labels == the_label],\n sample_weight=sample_weight[labels == the_label]\n if sample_weight is not None\n else None,\n )\n else:\n logging.debug(\"Skipping regularization due to no samples\")\n return mmd\n\n\nclass EqualMeanPredictionModel(GroupRegularizedModel):\n def get_default_config(self):\n config_dict = super().get_default_config()\n update_dict = {\n \"mean_prediction_mode\": \"conditional\"\n # \"conditional\" -> eq_odds,\n # \"conditional_pos\" -> eq_opportunity_pos,\n # \"conditional_neg\" -> eq_opportunity_neg,\n # \"unconditional\" -> demographic_parity\n }\n return {**config_dict, **update_dict}\n\n def compute_group_regularization_loss(\n self, outputs, labels, group, sample_weight=None\n ):\n \"\"\"\n Partition the data on the labels and compute the difference in means\n \"\"\"\n result = torch.FloatTensor([0.0]).to(self.device)\n outputs = F.log_softmax(outputs, dim=1)[:, -1]\n\n if self.config_dict[\"mean_prediction_mode\"] == \"unconditional\":\n result = result + self.compute_group_regularization_loss_helper(\n outputs,\n labels,\n group,\n sample_weight=sample_weight if sample_weight is not None else None,\n )\n else:\n if self.config_dict[\"mean_prediction_mode\"] == \"conditional\":\n unique_labels = labels.unique()\n elif self.config_dict[\"mean_prediction_mode\"] == \"conditional_pos\":\n unique_labels = [1]\n elif self.config_dict[\"mean_prediction_mode\"] == \"conditional_neg\":\n unique_labels = [0]\n else:\n raise ValueError(\"Invalid option provided to mean_prediction_mode\")\n for the_label in unique_labels:\n if (labels == the_label).sum() > 0:\n result = result + self.compute_group_regularization_loss_helper(\n outputs[labels == the_label],\n labels[labels == the_label],\n group[labels == the_label],\n sample_weight=sample_weight[labels == the_label]\n if sample_weight is not None\n 
else None,\n )\n else:\n logging.debug(\"Skipping regularization due to no samples\")\n\n return result\n\n def compute_group_regularization_loss_helper(\n self, outputs, labels, group, sample_weight=None\n ):\n result = torch.FloatTensor([0.0]).to(self.device)\n unique_groups = group.unique()\n i=1\n for i, the_group in enumerate(unique_groups):\n result = result + (\n (\n weighted_mean(\n outputs[group == the_group],\n sample_weight[group == the_group]\n if sample_weight is not None\n else None,\n )\n - weighted_mean(\n outputs, sample_weight if sample_weight is not None else None\n )\n )\n ** 2\n )\n result = result / i\n return result\n\n\nclass GroupIRMModel(GroupRegularizedModel):\n \"\"\"\n Applies IRMv1 over groups\n \"\"\"\n\n def compute_penalty(self, outputs, labels):\n # https://github.com/facebookresearch/InvariantRiskMinimization/blob/master/code/colored_mnist/main.py#L107\n\n scale = torch.FloatTensor([1.0]).requires_grad_().to(self.device)\n loss = self.criterion(outputs * scale, labels)\n grad = torch.autograd.grad(loss, [scale], create_graph=True)[0]\n return torch.sum(grad ** 2)\n\n def compute_group_regularization_loss(\n self, outputs, labels, group, sample_weight=None\n ):\n # (TODO) implemented sample_weight\n result = torch.FloatTensor([0.0]).to(self.device)\n unique_groups = group.unique()\n for i, the_group in enumerate(unique_groups):\n the_penalty = self.compute_penalty(\n outputs[group == the_group], labels[group == the_group]\n )\n result = result + the_penalty\n logging.debug(f\"Group: {the_group}, Penalty: {the_penalty}\")\n return result\n\n\nclass EqualThresholdRateModel(GroupRegularizedModel):\n \"\"\"\n A model that penalizes conditional prediction parity based on a threshold(s)\n (TODO) Harmonize this interface with EqualThresholdRateModel\n \"\"\"\n\n def get_default_config(self):\n config_dict = super().get_default_config()\n update_dict = {\n \"threshold_mode\": \"conditional\",\n # \"conditional\" -> eq_odds,\n # \"conditional_pos\" -> eq_opportunity_pos, (parity in fpr/specificity)\n # \"conditional_neg\" -> eq_opportunity_neg, (parity in fnr/recall)\n # \"unconditional\" -> demographic_parity,\n \"thresholds\": [0.5],\n }\n return {**config_dict, **update_dict}\n\n def compute_group_regularization_loss(\n self, outputs, labels, group, sample_weight=None\n ):\n result = torch.FloatTensor([0.0]).to(self.device)\n outputs = F.log_softmax(outputs, dim=1)[:, -1]\n\n for threshold in self.config_dict.get(\"thresholds\", [0.5]):\n threshold = torch.FloatTensor([threshold]).to(self.device)\n threshold = torch.log(threshold)\n\n if self.config_dict[\"threshold_mode\"] == \"unconditional\":\n result = result + self.compute_group_regularization_loss_helper(\n outputs,\n labels,\n group,\n sample_weight=sample_weight if sample_weight is not None else None,\n threshold=threshold,\n )\n else:\n if self.config_dict[\"threshold_mode\"] == \"conditional\":\n unique_labels = labels.unique()\n elif self.config_dict[\"threshold_mode\"] == \"conditional_pos\":\n unique_labels = [1]\n elif self.config_dict[\"threshold_mode\"] == \"conditional_neg\":\n unique_labels = [0]\n else:\n raise ValueError(\"Invalid option provided to threshold_mode\")\n for the_label in unique_labels:\n if (labels == the_label).sum() > 0:\n result = result + self.compute_group_regularization_loss_helper(\n outputs[labels == the_label],\n labels[labels == the_label],\n group[labels == the_label],\n sample_weight=sample_weight[labels == the_label]\n if sample_weight is not None\n else None,\n 
threshold=threshold,\n )\n else:\n logging.debug(\"Skipping regularization due to no samples\")\n\n return result\n\n def compute_group_regularization_loss_helper(\n self,\n outputs,\n labels,\n group,\n sample_weight=None,\n threshold=None,\n surrogate=\"logistic\",\n ):\n if threshold is None:\n raise ValueError(\"Threshold can not be None\")\n\n result = torch.FloatTensor([0.0]).to(self.device)\n unique_groups = group.unique()\n surrogate_fn = get_surrogate(surrogate)\n i=1\n for i, the_group in enumerate(unique_groups):\n result = result + (\n (\n weighted_mean(\n surrogate_fn(outputs[group == the_group] - threshold),\n sample_weight[group == the_group]\n if sample_weight is not None\n else None,\n )\n - weighted_mean(surrogate_fn(outputs - threshold), sample_weight,)\n )\n ** 2\n )\n result = result / i\n return result\n\n\nclass GroupMetricRegularizedModel(GroupRegularizedModel):\n \"\"\"\n A model that minimizes the difference in a metric across groups.\n \"\"\"\n\n def get_default_config(self):\n config_dict = super().get_default_config()\n update_dict = {\n \"group_regularization_mode\": \"overall\",\n \"threshold\": 0.5,\n \"surrogate_scale\": 1.0,\n }\n return {**config_dict, **update_dict}\n\n def compute_group_regularization_loss(\n self, outputs, labels, group, sample_weight=None\n ):\n \"\"\"\n Computes the group regularization\n \"\"\"\n unique_groups = group.unique()\n result = torch.FloatTensor([0.0]).to(self.device)\n num_groups = len(unique_groups)\n if (num_groups == 1) or (labels.sum() == 0):\n warnings.warn(\n \"Skipping group regularization because either one group or no outcomes in batch\"\n )\n return result\n\n if self.config_dict[\"group_regularization_mode\"] == \"overall\":\n # Regularize discrepancy in the value of the metric on groups vs. 
whole population\n try:\n overall_metric = self.compute_metric(\n outputs, labels, sample_weight=sample_weight\n )\n except MetricUndefinedError:\n logging.debug(\"Warning: metric undefined\")\n return result\n\n for i, the_group in enumerate(unique_groups):\n # if labels[group == the_group].sum() > 0:\n try:\n result_group = self.compute_metric(\n outputs[group == the_group],\n labels[group == the_group],\n sample_weight=sample_weight[group == the_group]\n if sample_weight is not None\n else None,\n )\n result = result + ((result_group - overall_metric) ** 2)\n except MetricUndefinedError:\n logging.debug(\"Warning: metric undefined\")\n\n elif self.config_dict[\"group_regularization_mode\"] == \"group\":\n # Regularize discrepancy in the value of the metric between groups pairwise\n # (TODO) Add error handling from compute_metric\n for group_0, group_1 in itertools.combinations(unique_groups, 2):\n if (labels[group == group_0].sum() > 0) and (\n labels[group == group_1].sum() > 0\n ):\n result = (\n result\n + (\n (\n self.compute_metric(\n outputs[group == group_0],\n labels[group == group_0],\n sample_weight=sample_weight[group == group_0]\n if sample_weight is not None\n else None,\n )\n - self.compute_metric(\n outputs[group == group_1],\n labels[group == group_1],\n sample_weight=sample_weight[group == group_1]\n if sample_weight is not None\n else None,\n )\n )\n )\n ** 2\n )\n result = result / num_groups\n return result\n\n def compute_metric(self, outputs, labels, sample_weight=None):\n \"\"\"\n Computes the value of the metric on a batch\n \"\"\"\n raise NotImplementedError\n\n # def surrogate_fn(self, x):\n # (TODO) delete this\n # return self.sigmoid(x, surrogate_scale=self.config_dict[\"surrogate_scale\"])\n\n\nclass EqualAUCModel(GroupMetricRegularizedModel):\n \"\"\"\n A model that optimizes for equal AUC across groups\n \"\"\"\n\n def compute_metric(self, outputs, labels, sample_weight=None):\n return roc_auc_score_surrogate(\n outputs=outputs, labels=labels, sample_weight=sample_weight,\n )\n\n\nclass EqualPrecisionModel(GroupMetricRegularizedModel):\n \"\"\"\n A model that optimizes for equal precision across groups\n \"\"\"\n\n def compute_metric(self, outputs, labels, sample_weight=None):\n return precision_surrogate(\n outputs=outputs,\n labels=labels,\n sample_weight=sample_weight,\n threshold=self.config_dict.get(\"threshold\"),\n )\n\n\nclass EqualTPRModel(GroupMetricRegularizedModel):\n \"\"\"\n A model that optimizes for equal TPR across groups\n \"\"\"\n\n def compute_metric(self, outputs, labels, sample_weight=None):\n return tpr_surrogate(\n outputs=outputs,\n labels=labels,\n sample_weight=sample_weight,\n threshold=self.config_dict.get(\"threshold\"),\n )\n\n\nclass EqualFPRModel(GroupMetricRegularizedModel):\n \"\"\"\n A model that optimizes for equal TPR across groups\n \"\"\"\n\n def compute_metric(self, outputs, labels, sample_weight=None):\n return fpr_surrogate(\n outputs=outputs,\n labels=labels,\n sample_weight=sample_weight,\n threshold=self.config_dict.get(\"threshold\"),\n )\n\n\nclass EqualPositiveRateModel(GroupMetricRegularizedModel):\n \"\"\"\n A model that optimizes for equal TPR across groups\n \"\"\"\n\n def compute_metric(self, outputs, labels, sample_weight=None):\n return positive_rate_surrogate(\n outputs=outputs,\n labels=labels,\n sample_weight=sample_weight,\n threshold=self.config_dict.get(\"threshold\"),\n )\n\nclass EqualLossModel(GroupMetricRegularizedModel):\n \"\"\"\n Model regularized to have equal mean loss across groups.\n 
\"\"\"\n\n def compute_metric(self, outputs, labels, sample_weight=None):\n return weighted_cross_entropy_loss(\n outputs=outputs, labels=labels, sample_weight=sample_weight\n )\n\n\nclass EqualBaselinedLossModel(GroupMetricRegularizedModel):\n \"\"\"\n Model regularized to have equal mean loss across groups.\n \"\"\"\n\n def compute_metric(self, outputs, labels, sample_weight=None):\n return baselined_loss(\n outputs=outputs, labels=labels, sample_weight=sample_weight\n )\n\n\nclass EqualBrierScoreModel(GroupMetricRegularizedModel):\n \"\"\"\n Model regularized to have equal Brier score across groups.\n \"\"\"\n\n def compute_metric(self, outputs, labels, sample_weight=None):\n outputs = F.softmax(outputs, dim=1)[:, -1]\n return self.brier_score_loss(outputs, labels, sample_weight=sample_weight)\n\n def brier_score_loss(self, outputs, labels, sample_weight=None):\n return weighted_mean((outputs - labels) ** 2, sample_weight=sample_weight)\n\n\n## Bilevel optimization models ##\nclass GroupAdversarialModel(BilevelModel, FixedWidthModel):\n def __init__(self, *args, **kwargs):\n if kwargs.get(\"output_dim_discriminator\") is None:\n kwargs[\"output_dim_discriminator\"] = kwargs.get(\"num_groups\")\n print(kwargs[\"output_dim_discriminator\"])\n super().__init__(*args, **kwargs)\n\n def get_default_config(self):\n \"\"\"\n Defines default hyperparameters that may be overwritten.\n \"\"\"\n config_dict = super().get_default_config()\n update_dict = {\n \"adversarial_mode\": \"unconditional\",\n \"lr_discriminator\": 1e-3,\n \"lambda_group_regularization\": 1e-1,\n \"num_hidden_discriminator\": 1,\n \"hidden_dim_discriminator\": 32,\n \"output_dim_discriminator\": None, # specify at initialization\n \"drop_prob_discriminator\": 0.0,\n \"normalize_discriminator\": False,\n \"spectral_norm\": True,\n \"sparse\": True,\n \"sparse_mode\": \"csr\",\n \"print_grads\": False,\n }\n\n return {**config_dict, **update_dict}\n\n def print_grads(self):\n for name, param in self.models_aux[\"discriminator\"].named_parameters():\n if param.requires_grad and param.grad is not None:\n logging.info(\"{}: {}\".format(name, param.grad.mean()))\n\n def get_loss_names(self):\n return [\"loss\", \"supervised\", \"discriminator\", \"discriminator_alt\"]\n\n def init_optimizers_aux(self):\n return {\n \"discriminator\": torch.optim.Adam(\n [{\"params\": self.models_aux[\"discriminator\"].parameters()}],\n lr=self.config_dict[\"lr_discriminator\"],\n )\n }\n\n def init_models_aux(self):\n models_aux = {\n \"discriminator\": FeedforwardNet(\n in_features=self.config_dict[\"output_dim\"]\n + (1 * (self.config_dict[\"adversarial_mode\"] == \"conditional\")),\n hidden_dim_list=self.config_dict[\"num_hidden_discriminator\"]\n * [self.config_dict[\"hidden_dim_discriminator\"]],\n output_dim=self.config_dict[\"output_dim_discriminator\"],\n drop_prob=self.config_dict[\"drop_prob_discriminator\"],\n normalize=self.config_dict[\"normalize_discriminator\"],\n sparse=False,\n sparse_mode=None,\n resnet=self.config_dict.get(\"resnet\", False),\n spectral_norm=self.config_dict.get(\"spectral_norm\", False),\n )\n }\n for model in models_aux.values():\n model.apply(self.weights_init)\n model.to(self.device)\n return models_aux\n\n def get_transform_batch_keys(self):\n \"\"\"\n Returns the names of the list of tensors that sent to device\n \"\"\"\n result = super().get_transform_batch_keys()\n result = result + [\"group\"]\n\n def forward_on_batch_helper(self, the_data):\n # Run data through the model\n outputs = 
self.model(the_data[\"features\"])\n\n # Run data through the discriminator\n if self.config_dict[\"adversarial_mode\"] == \"conditional\":\n inputs_discriminator = torch.cat(\n (\n F.log_softmax(outputs, dim=1),\n torch.unsqueeze(the_data[\"labels\"].to(torch.float), dim=1),\n ),\n dim=1,\n )\n else:\n inputs_discriminator = F.log_softmax(outputs, dim=1)\n\n outputs_discriminator = self.models_aux[\"discriminator\"](inputs_discriminator)\n\n if self.config_dict.get(\"weighted_loss\"):\n loss_dict_batch = {\n \"supervised\": self.criterion(\n outputs, the_data[\"labels\"], sample_weight=the_data[\"weights\"]\n ),\n \"discriminator\": self.criterion(\n outputs_discriminator,\n the_data[\"group\"],\n sample_weight=the_data[\"weights\"],\n ),\n }\n else:\n loss_dict_batch = {\n \"supervised\": self.criterion(outputs, the_data[\"labels\"]),\n \"discriminator\": self.criterion(\n outputs_discriminator, the_data[\"group\"]\n ),\n }\n loss_dict_batch[\"discriminator_alt\"] = torch.exp(\n -loss_dict_batch[\"discriminator\"]\n )\n return loss_dict_batch, outputs\n\n def forward_on_batch(self, the_data):\n loss_dict_batch, outputs = self.forward_on_batch_helper(the_data)\n\n loss_dict_batch[\"loss\"] = (\n loss_dict_batch[\"supervised\"]\n - self.config_dict[\"lambda_group_regularization\"]\n * loss_dict_batch[\"discriminator\"]\n )\n\n return loss_dict_batch, outputs\n\n def update_models_aux(self, the_data):\n loss_dict_batch, _ = self.forward_on_batch_helper(the_data)\n loss_dict_batch[\"discriminator\"].backward()\n self.optimizers_aux[\"discriminator\"].step()\n\n"
] |
[
[
"torch.nn.functional.softmax",
"torch.ones",
"torch.nn.functional.log_softmax",
"torch.sum",
"torch.tensor",
"torch.exp",
"torch.log",
"torch.FloatTensor",
"torch.autograd.grad"
]
] |
prz3m37/RandomBenchmarking
|
[
"1e7cf754e016ea248377ee88741e6999359737ae"
] |
[
"main.py"
] |
[
"from BlochSolver.SolversManager import solvers_manager\nfrom BlochSolver.Plotter import bloch_plotter as bs\nfrom BlochSolver.Perturbations.filters import Filters\nfrom BlochSolver.QuantumSolvers.rotations import rotation_handler\nfrom BlochSolver.QuantumSolvers.numerics import numerical_methods\nfrom BlochSolver.Utils.utils import Utils\nimport numpy as np\n\n\n# INFO: Perturbation algorithms are using filter-refilter stuff to avoid problems with raising edge\n\n# Here you can see example of code, solver options to try :\n# 1. default or None - basic GRAPE algorithm\n# 2. unitary - GRAPE for pulses without raising time, unitary evolution\n# 3. perturbation grape - GRAPE for pulses with raising time, non-unitary evolution\n# 4. perturbation unitary - GRAPE for pulses with no raising time, unitary evolution\n\ndef main():\n bloch_plotter = bs.BlochPlotter()\n quantum_solvers = solvers_manager.SolversManager()\n\n angles = [np.pi / 2]\n axes = [\"x\"]\n initial_state =np.array([1,0])\n granulation = 8\n cut_off_time = 0.4e-9\n\n initial_pulses = np.ones(32)* 0.002 #np.random.uniform(0.001 ,0.006, 32)#\n ideal_state, pulses = quantum_solvers.get_solver(solver_type=\"GRAPE\",\n algorithm_type=\"perturbation unitary\",\n penalty=True,\n results_path=\"./\",\n initial_pulses=initial_pulses,\n angles=angles,\n axes=axes,\n initial_state=initial_state,\n cut_off_time=cut_off_time,\n granulation=granulation)\n\n\n bloch_plotter.plot(plot_type=\"pulses\", pulses_final=pulses, pulses_init=initial_pulses)\n bloch_plotter.plot(plot_type=\"evolution\", pulses_final=pulses, init_state=initial_state, target_state=ideal_state)\n\n # Here before plotting you have to put filtered signals because plotting function with granulation option will calculate the effective pulses\n # bloch_plotter.plot(plot_type=\"evolution\", pulses_final=pulses, init_state=ideal_state, granulation=granulation, target_state=ideal_state)\n\n del quantum_solvers\n\n return\n\nif __name__ == '__main__':\n main()"
] |
[
[
"numpy.array",
"numpy.ones"
]
] |
habuimanh/HealthcareLogging
|
[
"156df5eaab71e0e067b418e3ba3f65ebd92fc5a3"
] |
[
"dataset/re_label.py"
] |
[
"import pandas as pd\nfrom datetime import datetime\nimport numpy as np\ndef read_elan_data_sets(file_path):\n data = pd.read_csv(file_path,delimiter=\"\\t\")\n return data\n\ndef read_data_sets(file_path):\n column_names = ['timestamp','x-axis', 'y-axis', 'z-axis','x1-axis', 'y1-axis', 'z1-axis']\n data = pd.read_csv(file_path,header = None, names = column_names,delimiter=\"\\t\")\n return data\n\ndef read_txt_data_sets(file_path):\n column_names = ['timestamp','x-axis', 'y-axis', 'z-axis','x1-axis', 'y1-axis', 'z1-axis']\n data = pd.read_csv(file_path,header = None, names = column_names,delimiter=r\"\\s+\")\n return data\n\nfolder_root=\"Pat_15_23_9/Recorder_2019_10_22_15_41/\"\nelan_data=read_elan_data_sets(folder_root+\"/label.txt\")\ndata=read_data_sets(folder_root+\"/data_device_2.csv\")\ntxt_data=read_txt_data_sets(folder_root+\"/data_device_2.txt\")\ne_idx=0\nactivities=[]\npat_id=[]\nadd_info=[]\nfor idx in range(len(data[\"timestamp\"])):\n if (e_idx<len(elan_data[\"End Time - ss.msec\"]) and data[\"timestamp\"][idx]>=elan_data[\"End Time - ss.msec\"][e_idx]):\n e_idx+=1\n if(e_idx>=len(elan_data[\"End Time - ss.msec\"])):\n label=\"0\"\n add_if=np.nan\n else:\n label=elan_data[\"default\"][e_idx]\n add_if=elan_data[\"NaN\"][e_idx]\n\n activities.append(label)\n add_info.append(add_if)\n pat_id.append(elan_data[\"id\"][0]) \n\ntxt_data['id']=pat_id\ntxt_data['activity']=activities\ntxt_data['NaN']=add_info\ncolumns_titles=['id','timestamp','x-axis', 'y-axis', 'z-axis','x1-axis', 'y1-axis', 'z1-axis','activity','NaN']\ntxt_data=txt_data.reindex(columns=columns_titles)\ntxt_data.to_csv(folder_root+\"/data_device_2.txt\",header=None, sep='\\t', encoding='utf-8', index=False, float_format='%.6f')"
] |
[
[
"pandas.read_csv"
]
] |
SITDIO/auto_attribute_tree
|
[
"d93bfc597b119c156322c34076bac4a5cee10a86"
] |
[
"model/spectral.py"
] |
[
"# -*- coding: utf-8 -*-\n# Based on implementations by:\n# Gael Varoquaux [email protected]\n# Brian Cheung\n# Wei LI <[email protected]>\"\"\"\nimport warnings\n\nimport numpy as np\n\nfrom sklearn.base import BaseEstimator, ClusterMixin\nfrom sklearn.utils import check_random_state, as_float_array\nfrom sklearn.utils.validation import _deprecate_positional_args\nfrom sklearn.utils.deprecation import deprecated\nfrom sklearn.metrics.pairwise import pairwise_kernels\nfrom sklearn.neighbors import kneighbors_graph, NearestNeighbors\nfrom sklearn.manifold import spectral_embedding\nfrom sklearn.cluster._kmeans import k_means\nfrom sklearn_extra.cluster import KMedoids\n\n\n@_deprecate_positional_args\ndef discretize(vectors, *, copy=True, max_svd_restarts=30, n_iter_max=20,\n random_state=None):\n \"\"\"Search for a partition matrix (clustering) which is closest to the\n eigenvector embedding.\n Parameters\n ----------\n vectors : array-like of shape (n_samples, n_clusters)\n The embedding space of the samples.\n copy : bool, default=True\n Whether to copy vectors, or perform in-place normalization.\n max_svd_restarts : int, default=30\n Maximum number of attempts to restart SVD if convergence fails\n n_iter_max : int, default=30\n Maximum number of iterations to attempt in rotation and partition\n matrix search if machine precision convergence is not reached\n random_state : int, RandomState instance, default=None\n Determines random number generation for rotation matrix initialization.\n Use an int to make the randomness deterministic.\n See :term:`Glossary <random_state>`.\n Returns\n -------\n labels : array of integers, shape: n_samples\n The labels of the clusters.\n References\n ----------\n - Multiclass spectral clustering, 2003\n Stella X. Yu, Jianbo Shi\n https://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf\n Notes\n -----\n The eigenvector embedding is used to iteratively search for the\n closest discrete partition. First, the eigenvector embedding is\n normalized to the space of partition matrices. An optimal discrete\n partition matrix closest to this normalized embedding multiplied by\n an initial rotation is calculated. Fixing this discrete partition\n matrix, an optimal rotation matrix is calculated. These two\n calculations are performed until convergence. The discrete partition\n matrix is returned as the clustering solution. Used in spectral\n clustering, this method tends to be faster and more robust to random\n initialization than k-means.\n \"\"\"\n\n from scipy.sparse import csc_matrix\n from scipy.linalg import LinAlgError\n\n random_state = check_random_state(random_state)\n\n vectors = as_float_array(vectors, copy=copy)\n\n eps = np.finfo(float).eps\n n_samples, n_components = vectors.shape\n\n # Normalize the eigenvectors to an equal length of a vector of ones.\n # Reorient the eigenvectors to point in the negative direction with respect\n # to the first element. This may have to do with constraining the\n # eigenvectors to lie in a specific quadrant to make the discretization\n # search easier.\n norm_ones = np.sqrt(n_samples)\n for i in range(vectors.shape[1]):\n vectors[:, i] = (vectors[:, i] / np.linalg.norm(vectors[:, i])) \\\n * norm_ones\n if vectors[0, i] != 0:\n vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])\n\n # Normalize the rows of the eigenvectors. Samples should lie on the unit\n # hypersphere centered at the origin. 
This transforms the samples in the\n # embedding space to the space of partition matrices.\n vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]\n\n svd_restarts = 0\n has_converged = False\n\n # If there is an exception we try to randomize and rerun SVD again\n # do this max_svd_restarts times.\n while (svd_restarts < max_svd_restarts) and not has_converged:\n\n # Initialize first column of rotation matrix with a row of the\n # eigenvectors\n rotation = np.zeros((n_components, n_components))\n rotation[:, 0] = vectors[random_state.randint(n_samples), :].T\n\n # To initialize the rest of the rotation matrix, find the rows\n # of the eigenvectors that are as orthogonal to each other as\n # possible\n c = np.zeros(n_samples)\n for j in range(1, n_components):\n # Accumulate c to ensure row is as orthogonal as possible to\n # previous picks as well as current one\n c += np.abs(np.dot(vectors, rotation[:, j - 1]))\n rotation[:, j] = vectors[c.argmin(), :].T\n\n last_objective_value = 0.0\n n_iter = 0\n\n while not has_converged:\n n_iter += 1\n\n t_discrete = np.dot(vectors, rotation)\n\n labels = t_discrete.argmax(axis=1)\n vectors_discrete = csc_matrix(\n (np.ones(len(labels)), (np.arange(0, n_samples), labels)),\n shape=(n_samples, n_components))\n\n t_svd = vectors_discrete.T * vectors\n\n try:\n U, S, Vh = np.linalg.svd(t_svd)\n svd_restarts += 1\n except LinAlgError:\n print(\"SVD did not converge, randomizing and trying again\")\n break\n\n ncut_value = 2.0 * (n_samples - S.sum())\n if ((abs(ncut_value - last_objective_value) < eps) or\n (n_iter > n_iter_max)):\n has_converged = True\n else:\n # otherwise calculate rotation and continue\n last_objective_value = ncut_value\n rotation = np.dot(Vh.T, U.T)\n\n if not has_converged:\n raise LinAlgError('SVD did not converge')\n return labels\n\n\n@_deprecate_positional_args\ndef spectral_clustering_mod(affinity, *, n_clusters=8, n_components=None,\n eigen_solver=None, random_state=None, n_init=10,\n eigen_tol=0.0, assign_labels='kmeans',\n verbose=False):\n \"\"\"Modified implementation of sklearn's spectral clustering\n Apply clustering to a projection of the normalized Laplacian.\n In practice Spectral Clustering is very useful when the structure of\n the individual clusters is highly non-convex or more generally when\n a measure of the center and spread of the cluster is not a suitable\n description of the complete cluster. For instance, when clusters are\n nested circles on the 2D plane.\n If affinity is the adjacency matrix of a graph, this method can be\n used to find normalized graph cuts.\n Read more in the :ref:`User Guide <spectral_clustering>`.\n Parameters\n ----------\n affinity : {array-like, sparse matrix} of shape (n_samples, n_samples)\n The affinity matrix describing the relationship of the samples to\n embed. **Must be symmetric**.\n Possible examples:\n - adjacency matrix of a graph,\n - heat kernel of the pairwise distance matrix of the samples,\n - symmetric k-nearest neighbours connectivity matrix of the samples.\n n_clusters : int, default=None\n Number of clusters to extract.\n n_components : int, default=n_clusters\n Number of eigenvectors to use for the spectral embedding\n eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}\n The eigenvalue decomposition strategy to use. AMG requires pyamg\n to be installed. It can be faster on very large, sparse problems,\n but may also lead to instabilities. 
If None, then ``'arpack'`` is\n used.\n random_state : int, RandomState instance, default=None\n A pseudo random number generator used for the initialization of the\n lobpcg eigenvectors decomposition when eigen_solver == 'amg' and by\n the K-Means initialization. Use an int to make the randomness\n deterministic.\n See :term:`Glossary <random_state>`.\n n_init : int, default=10\n Number of time the k-means algorithm will be run with different\n centroid seeds. The final results will be the best output of n_init\n consecutive runs in terms of inertia. Only used if\n ``assign_labels='kmeans'``.\n eigen_tol : float, default=0.0\n Stopping criterion for eigendecomposition of the Laplacian matrix\n when using arpack eigen_solver.\n assign_labels : {'kmedoids', 'kmeans', 'discretize'}, default='kmedoids'\n The strategy to use to assign labels in the embedding\n space. There are two ways to assign labels after the Laplacian\n embedding. k-means can be applied and is a popular choice. But it can\n also be sensitive to initialization. Discretization is another\n approach which is less sensitive to random initialization. See\n the 'Multiclass spectral clustering' paper referenced below for\n more details on the discretization approach.\n verbose : bool, default=False\n Verbosity mode.\n .. versionadded:: 0.24\n Returns\n -------\n labels : array of integers, shape: n_samples\n The labels of the clusters.\n References\n ----------\n - Normalized cuts and image segmentation, 2000\n Jianbo Shi, Jitendra Malik\n http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324\n - A Tutorial on Spectral Clustering, 2007\n Ulrike von Luxburg\n http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323\n - Multiclass spectral clustering, 2003\n Stella X. Yu, Jianbo Shi\n https://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf\n Notes\n -----\n The graph should contain only one connect component, elsewhere\n the results make little sense.\n This algorithm solves the normalized cut for k=2: it is a\n normalized spectral clustering.\n \"\"\"\n if assign_labels not in ('kmedoids', 'kmeans', 'discretize'):\n raise ValueError(\"The 'assign_labels' parameter should be \"\n \"'kmedoids', 'kmeans' or 'discretize', but '%s' was given\"\n % assign_labels)\n\n random_state = check_random_state(random_state)\n n_components = n_clusters if n_components is None else n_components\n\n # The first eigenvector is constant only for fully connected graphs\n # and should be kept for spectral clustering (drop_first = False)\n # See spectral_embedding documentation.\n maps = spectral_embedding(affinity, n_components=n_components,\n eigen_solver=eigen_solver,\n random_state=random_state,\n eigen_tol=eigen_tol, drop_first=False)\n if verbose:\n print(f'Computing label assignment using {assign_labels}')\n\n if assign_labels == 'kmedoids':\n cluster_centers, labels, _ = k_medoids(maps, n_clusters, random_state=random_state,\n n_init=n_init, verbose=verbose)\n elif assign_labels == 'kmeans':\n cluster_centers, labels, _ = k_means(maps, n_clusters, random_state=random_state,\n n_init=n_init, verbose=verbose)\n else:\n cluster_centers, labels = None, discretize(\n maps, random_state=random_state)\n\n return labels, maps, cluster_centers\n\n\nclass SpectralClustering(ClusterMixin, BaseEstimator):\n \"\"\"Apply clustering to a projection of the normalized Laplacian.\n In practice Spectral Clustering is very useful when the structure of\n the individual clusters is highly non-convex, or more generally when\n a 
measure of the center and spread of the cluster is not a suitable\n description of the complete cluster, such as when clusters are\n nested circles on the 2D plane.\n If the affinity matrix is the adjacency matrix of a graph, this method\n can be used to find normalized graph cuts.\n When calling ``fit``, an affinity matrix is constructed using either\n a kernel function such the Gaussian (aka RBF) kernel with Euclidean\n distance ``d(X, X)``::\n np.exp(-gamma * d(X,X) ** 2)\n or a k-nearest neighbors connectivity matrix.\n Alternatively, a user-provided affinity matrix can be specified by\n setting ``affinity='precomputed'``.\n Read more in the :ref:`User Guide <spectral_clustering>`.\n Parameters\n ----------\n n_clusters : int, default=8\n The dimension of the projection subspace.\n eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None\n The eigenvalue decomposition strategy to use. AMG requires pyamg\n to be installed. It can be faster on very large, sparse problems,\n but may also lead to instabilities. If None, then ``'arpack'`` is\n used.\n n_components : int, default=n_clusters\n Number of eigenvectors to use for the spectral embedding\n random_state : int, RandomState instance, default=None\n A pseudo random number generator used for the initialization of the\n lobpcg eigenvectors decomposition when ``eigen_solver='amg'`` and by\n the K-Means initialization. Use an int to make the randomness\n deterministic.\n See :term:`Glossary <random_state>`.\n n_init : int, default=10\n Number of time the k-means algorithm will be run with different\n centroid seeds. The final results will be the best output of n_init\n consecutive runs in terms of inertia. Only used if\n ``assign_labels='kmeans'``.\n gamma : float, default=1.0\n Kernel coefficient for rbf, poly, sigmoid, laplacian and chi2 kernels.\n Ignored for ``affinity='nearest_neighbors'``.\n affinity : str or callable, default='rbf'\n How to construct the affinity matrix.\n - 'nearest_neighbors': construct the affinity matrix by computing a\n graph of nearest neighbors.\n - 'rbf': construct the affinity matrix using a radial basis function\n (RBF) kernel.\n - 'precomputed': interpret ``X`` as a precomputed affinity matrix,\n where larger values indicate greater similarity between instances.\n - 'precomputed_nearest_neighbors': interpret ``X`` as a sparse graph\n of precomputed distances, and construct a binary affinity matrix\n from the ``n_neighbors`` nearest neighbors of each instance.\n - one of the kernels supported by\n :func:`~sklearn.metrics.pairwise_kernels`.\n Only kernels that produce similarity scores (non-negative values that\n increase with similarity) should be used. This property is not checked\n by the clustering algorithm.\n n_neighbors : int, default=10\n Number of neighbors to use when constructing the affinity matrix using\n the nearest neighbors method. Ignored for ``affinity='rbf'``.\n eigen_tol : float, default=0.0\n Stopping criterion for eigendecomposition of the Laplacian matrix\n when ``eigen_solver='arpack'``.\n assign_labels : {'kmeans', 'discretize'}, default='kmeans'\n The strategy for assigning labels in the embedding space. There are two\n ways to assign labels after the Laplacian embedding. k-means is a\n popular choice, but it can be sensitive to initialization.\n Discretization is another approach which is less sensitive to random\n initialization.\n degree : float, default=3\n Degree of the polynomial kernel. 
Ignored by other kernels.\n coef0 : float, default=1\n Zero coefficient for polynomial and sigmoid kernels.\n Ignored by other kernels.\n kernel_params : dict of str to any, default=None\n Parameters (keyword arguments) and values for kernel passed as\n callable object. Ignored by other kernels.\n n_jobs : int, default=None\n The number of parallel jobs to run when `affinity='nearest_neighbors'`\n or `affinity='precomputed_nearest_neighbors'`. The neighbors search\n will be done in parallel.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n verbose : bool, default=False\n Verbosity mode.\n .. versionadded:: 0.24\n Attributes\n ----------\n affinity_matrix_ : array-like of shape (n_samples, n_samples)\n Affinity matrix used for clustering. Available only after calling\n ``fit``.\n labels_ : ndarray of shape (n_samples,)\n Labels of each point\n Examples\n --------\n >>> from sklearn.cluster import SpectralClustering\n >>> import numpy as np\n >>> X = np.array([[1, 1], [2, 1], [1, 0],\n ... [4, 7], [3, 5], [3, 6]])\n >>> clustering = SpectralClustering(n_clusters=2,\n ... assign_labels='discretize',\n ... random_state=0).fit(X)\n >>> clustering.labels_\n array([1, 1, 1, 0, 0, 0])\n >>> clustering\n SpectralClustering(assign_labels='discretize', n_clusters=2,\n random_state=0)\n Notes\n -----\n A distance matrix for which 0 indicates identical elements and high values\n indicate very dissimilar elements can be transformed into an affinity /\n similarity matrix that is well-suited for the algorithm by\n applying the Gaussian (aka RBF, heat) kernel::\n np.exp(- dist_matrix ** 2 / (2. * delta ** 2))\n where ``delta`` is a free parameter representing the width of the Gaussian\n kernel.\n An alternative is to take a symmetric version of the k-nearest neighbors\n connectivity matrix of the points.\n If the pyamg package is installed, it is used: this greatly\n speeds up computation.\n References\n ----------\n - Normalized cuts and image segmentation, 2000\n Jianbo Shi, Jitendra Malik\n http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324\n - A Tutorial on Spectral Clustering, 2007\n Ulrike von Luxburg\n http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323\n - Multiclass spectral clustering, 2003\n Stella X. 
Yu, Jianbo Shi\n https://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf\n \"\"\"\n @_deprecate_positional_args\n def __init__(self, n_clusters=8, *, eigen_solver=None, n_components=None,\n random_state=None, n_init=10, gamma=1., affinity='rbf',\n n_neighbors=10, eigen_tol=0.0, assign_labels='kmeans',\n degree=3, coef0=1, kernel_params=None, n_jobs=None,\n verbose=False):\n self.n_clusters = n_clusters\n self.eigen_solver = eigen_solver\n self.n_components = n_components\n self.random_state = random_state\n self.n_init = n_init\n self.gamma = gamma\n self.affinity = affinity\n self.n_neighbors = n_neighbors\n self.eigen_tol = eigen_tol\n self.assign_labels = assign_labels\n self.degree = degree\n self.coef0 = coef0\n self.kernel_params = kernel_params\n self.n_jobs = n_jobs\n self.verbose = verbose\n\n def fit(self, X, y=None):\n \"\"\"Perform spectral clustering from features, or affinity matrix.\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features) or \\\n (n_samples, n_samples)\n Training instances to cluster, similarities / affinities between\n instances if ``affinity='precomputed'``, or distances between\n instances if ``affinity='precomputed_nearest_neighbors``. If a\n sparse matrix is provided in a format other than ``csr_matrix``,\n ``csc_matrix``, or ``coo_matrix``, it will be converted into a\n sparse ``csr_matrix``.\n y : Ignored\n Not used, present here for API consistency by convention.\n Returns\n -------\n self\n \"\"\"\n X = self._validate_data(X, accept_sparse=['csr', 'csc', 'coo'],\n dtype=np.float64, ensure_min_samples=2)\n allow_squared = self.affinity in [\"precomputed\",\n \"precomputed_nearest_neighbors\"]\n if X.shape[0] == X.shape[1] and not allow_squared:\n warnings.warn(\"The spectral clustering API has changed. ``fit``\"\n \"now constructs an affinity matrix from data. 
To use\"\n \" a custom affinity matrix, \"\n \"set ``affinity=precomputed``.\")\n\n if self.affinity == 'nearest_neighbors':\n connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors,\n include_self=True,\n n_jobs=self.n_jobs)\n self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)\n elif self.affinity == 'precomputed_nearest_neighbors':\n estimator = NearestNeighbors(n_neighbors=self.n_neighbors,\n n_jobs=self.n_jobs,\n metric=\"precomputed\").fit(X)\n connectivity = estimator.kneighbors_graph(X=X, mode='connectivity')\n self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)\n elif self.affinity == 'precomputed':\n self.affinity_matrix_ = X\n else:\n params = self.kernel_params\n if params is None:\n params = {}\n if not callable(self.affinity):\n params['gamma'] = self.gamma\n params['degree'] = self.degree\n params['coef0'] = self.coef0\n self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,\n filter_params=True,\n **params)\n\n random_state = check_random_state(self.random_state)\n self.labels_, \\\n self.maps_, \\\n self.medoid_indices_ = spectral_clustering_mod(self.affinity_matrix_,\n n_clusters=self.n_clusters,\n n_components=self.n_components,\n eigen_solver=self.eigen_solver,\n random_state=random_state,\n n_init=self.n_init,\n eigen_tol=self.eigen_tol,\n assign_labels=self.assign_labels,\n verbose=self.verbose)\n return self\n\n def fit_predict(self, X, y=None):\n \"\"\"Perform spectral clustering from features, or affinity matrix,\n and return cluster labels.\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features) or \\\n (n_samples, n_samples)\n Training instances to cluster, similarities / affinities between\n instances if ``affinity='precomputed'``, or distances between\n instances if ``affinity='precomputed_nearest_neighbors``. If a\n sparse matrix is provided in a format other than ``csr_matrix``,\n ``csc_matrix``, or ``coo_matrix``, it will be converted into a\n sparse ``csr_matrix``.\n y : Ignored\n Not used, present here for API consistency by convention.\n Returns\n -------\n labels : ndarray of shape (n_samples,)\n Cluster labels.\n \"\"\"\n return super().fit_predict(X, y)\n\n def _more_tags(self):\n return {'pairwise': self.affinity in [\"precomputed\",\n \"precomputed_nearest_neighbors\"]}\n\n # TODO: Remove in 1.1\n # mypy error: Decorated property not supported\n @deprecated(\"Attribute _pairwise was deprecated in \" # type: ignore\n \"version 0.24 and will be removed in 1.1 (renaming of 0.26).\")\n @property\n def _pairwise(self):\n return self.affinity in [\"precomputed\",\n \"precomputed_nearest_neighbors\"]\n\n\n@_deprecate_positional_args\ndef k_medoids(X, n_clusters, *, init='k-medoids++', max_iter=300,\n random_state=None, algorithm=\"pam\", return_n_iter=False, **kwargs):\n \"\"\"TODO K-means clustering algorithm.\n Read more in the :ref:`User Guide <k_means>`.\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The observations to cluster. It must be noted that the data\n will be converted to C ordering, which will cause a memory copy\n if the given data is not C-contiguous.\n n_clusters : int\n The number of clusters to form as well as the number of\n centroids to generate.\n TODO init : {'k-medoids++', 'random'}, callable or array-like of shape \\\n (n_clusters, n_features), default='k-means++'\n Method for initialization:\n 'k-means++' : selects initial cluster centers for k-mean\n clustering in a smart way to speed up convergence. 
See section\n Notes in k_init for more details.\n 'random': choose `n_clusters` observations (rows) at random from data\n for the initial centroids.\n If an array is passed, it should be of shape (n_clusters, n_features)\n and gives the initial centers.\n If a callable is passed, it should take arguments X, n_clusters and a\n random state and return an initialization.\n max_iter : int, default=300\n Maximum number of iterations of the k-means algorithm to run.\n random_state : int, RandomState instance or None, default=None\n Determines random number generation for centroid initialization. Use\n an int to make the randomness deterministic.\n See :term:`Glossary <random_state>`.\n TODO algorithm : {\"auto\", \"full\", \"elkan\"}, default=\"auto\"\n K-means algorithm to use. The classical EM-style algorithm is \"full\".\n The \"elkan\" variation is more efficient on data with well-defined\n clusters, by using the triangle inequality. However it's more memory\n intensive due to the allocation of an extra array of shape\n (n_samples, n_clusters).\n For now \"auto\" (kept for backward compatibility) chooses \"elkan\" but it\n might change in the future for a better heuristic.\n return_n_iter : bool, default=False\n Whether or not to return the number of iterations.\n Returns\n -------\n TODO\n centroid : ndarray of shape (n_clusters, n_features)\n Centroids found at the last iteration of k-means.\n label : ndarray of shape (n_samples,)\n label[i] is the code or index of the centroid the\n i'th observation is closest to.\n inertia : float\n The final value of the inertia criterion (sum of squared distances to\n the closest centroid for all observations in the training set).\n best_n_iter : int\n Number of iterations corresponding to the best results.\n Returned only if `return_n_iter` is set to True.\n \"\"\"\n est = KMedoids(\n n_clusters=n_clusters, init=init, max_iter=max_iter,\n random_state=random_state, method=algorithm\n ).fit(X)\n\n if return_n_iter:\n return est.medoid_indices_, est.labels_, est.inertia_, est.n_iter_\n else:\n return est.medoid_indices_, est.labels_, est.inertia_\n"
] |
[
[
"scipy.linalg.LinAlgError",
"numpy.dot",
"numpy.linalg.svd",
"numpy.sqrt",
"sklearn.manifold.spectral_embedding",
"sklearn.neighbors.kneighbors_graph",
"numpy.arange",
"sklearn.cluster._kmeans.k_means",
"numpy.linalg.norm",
"numpy.finfo",
"numpy.sign",
"sklearn.metrics.pairwise.pairwise_kernels",
"sklearn.utils.deprecation.deprecated",
"sklearn.neighbors.NearestNeighbors",
"numpy.zeros",
"sklearn.utils.check_random_state",
"sklearn.utils.as_float_array"
]
] |
Ruchip16/jina
|
[
"24c38a5c330453fb3ebd95f4f4f977b501b21240"
] |
[
"tests/unit/orchestrate/flow/flow-construct/test_flow_yaml_parser.py"
] |
[
"import os\nfrom pathlib import Path\n\nimport numpy as np\nimport pytest\n\nfrom jina import Executor\nfrom jina.excepts import BadYAMLVersion\nfrom jina import Flow\nfrom jina.jaml import JAML\nfrom jina.enums import GatewayProtocolType\nfrom jina.jaml.parsers import get_supported_versions\nfrom jina.parsers.flow import set_flow_parser\nfrom docarray.document.generators import from_ndarray\n\ncur_dir = Path(__file__).parent\n\n\ndef test_load_flow_from_empty_yaml():\n with open(cur_dir / 'yaml' / 'dummy-flow.yml') as fp:\n JAML.load(fp)\n\n with open(cur_dir / 'yaml' / 'dummy-flow.yml') as fp:\n Flow.load_config(fp)\n\n\ndef test_support_versions():\n assert get_supported_versions(Flow) == ['1']\n\n\ndef test_load_legacy_and_v1():\n Flow.load_config('yaml/flow-legacy-syntax.yml')\n Flow.load_config('yaml/flow-v1-syntax.yml')\n\n # this should fallback to v1\n Flow.load_config('yaml/flow-v1.0-syntax.yml')\n\n with pytest.raises(BadYAMLVersion):\n Flow.load_config('yaml/flow-v99-syntax.yml')\n\n\[email protected]\ndef test_add_needs_inspect(tmpdir):\n f1 = (\n Flow()\n .add(name='executor0', needs='gateway')\n .add(name='executor1', needs='gateway')\n .inspect()\n .needs(['executor0', 'executor1'])\n )\n with f1:\n _ = f1.index(from_ndarray(np.random.random([5, 5])), return_results=True)\n f2 = Flow.load_config('yaml/flow-v1.0-syntax.yml')\n\n with f2:\n _ = f2.index(from_ndarray(np.random.random([5, 5])), return_results=True)\n\n assert f1 == f2\n\n\ndef test_load_dump_load(tmpdir):\n \"\"\"TODO: Dumping valid yaml is out of scope of PR#1442, to do in separate PR\"\"\"\n f1 = Flow.load_config('yaml/flow-legacy-syntax.yml')\n f1.save_config(str(Path(tmpdir) / 'a0.yml'))\n f2 = Flow.load_config('yaml/flow-v1.0-syntax.yml')\n f2.save_config(str(Path(tmpdir) / 'a1.yml'))\n\n\ndef test_load_modify_dump_load(tmpdir):\n f: Flow = Flow.load_config('yaml/flow-gateway.yml')\n # assert vars inside `with`\n assert f._kwargs['name'] == 'abc'\n assert f.port_expose == 12345\n assert f.protocol == GatewayProtocolType.HTTP\n # assert executor args\n assert f._deployment_nodes['custom1'].args.uses == 'jinahub://CustomExecutor1'\n assert f._deployment_nodes['custom2'].args.uses == 'CustomExecutor2'\n assert f._deployment_nodes['custom2'].args.port_in == 23456\n\n # change args inside `with`\n f.port_expose = 12346\n f.protocol = GatewayProtocolType.WEBSOCKET\n # change executor args\n f._deployment_nodes['custom2'].args.port_in = 23457\n\n f.save_config(str(Path(tmpdir) / 'a0.yml'))\n f1: Flow = Flow.load_config(str(Path(tmpdir) / 'a0.yml'))\n\n # assert args from original yaml\n assert f1._kwargs['name'] == 'abc'\n assert 'custom1' in f1._deployment_nodes\n assert 'custom2' in f1._deployment_nodes\n assert f1._deployment_nodes['custom1'].args.uses == 'jinahub://CustomExecutor1'\n assert f1._deployment_nodes['custom2'].args.uses == 'CustomExecutor2'\n # assert args modified in code\n assert f1.port_expose == 12346\n assert f1.protocol == GatewayProtocolType.WEBSOCKET\n assert f1._deployment_nodes['custom2'].args.port_in == 23457\n\n\ndef test_dump_load_build(monkeypatch):\n f: Flow = Flow.load_config(\n '''\n jtype: Flow\n with:\n name: abc\n port_expose: 12345\n protocol: http\n executors:\n - name: executor1\n port_in: 45678\n shards: 2\n - name: executor2\n uses: docker://exec\n host: 1.2.3.4\n - name: executor3\n uses: docker://exec\n shards: 2\n '''\n ).build()\n\n f1: Flow = Flow.load_config(JAML.dump(f)).build()\n # these were passed by the user\n assert f.port_expose == f1.port_expose\n assert 
f.protocol == f1.protocol\n assert f['executor1'].args.port_in == f1['executor1'].args.port_in\n assert f['executor2'].args.host == f1['executor2'].args.host\n # this was set during `load_config`\n assert f['executor2'].args.port_in == f1['executor2'].args.port_in\n # gateway args are not set, if `JINA_FULL_CLI` is not set\n assert f['gateway'].args.port_in != f1['gateway'].args.port_in\n\n monkeypatch.setenv('JINA_FULL_CLI', 'true')\n f2: Flow = Flow.load_config(JAML.dump(f)).build()\n # these were passed by the user\n assert f.port_expose == f2.port_expose\n # validate gateway args (set during build)\n assert f['gateway'].args.port_in == f2['gateway'].args.port_in\n\n\ndef test_load_flow_with_port():\n f = Flow.load_config('yaml/test-flow-port.yml')\n with f:\n assert f.port_expose == 12345\n\n\ndef test_load_flow_from_cli():\n a = set_flow_parser().parse_args(['--uses', 'yaml/test-flow-port.yml'])\n f = Flow.load_config(a.uses)\n with f:\n assert f.port_expose == 12345\n\n\ndef test_load_flow_from_yaml():\n with open(cur_dir.parent.parent.parent / 'yaml' / 'test-flow.yml') as fp:\n _ = Flow.load_config(fp)\n\n\ndef test_flow_yaml_dump(tmpdir):\n f = Flow()\n f.save_config(os.path.join(str(tmpdir), 'test1.yml'))\n fl = Flow.load_config(os.path.join(str(tmpdir), 'test1.yml'))\n assert f.args.inspect == fl.args.inspect\n\n\ndef test_flow_yaml_from_string():\n f1 = Flow.load_config('yaml/flow-v1.0-syntax.yml')\n with open(str(cur_dir / 'yaml' / 'flow-v1.0-syntax.yml')) as fp:\n str_yaml = fp.read()\n assert isinstance(str_yaml, str)\n f2 = Flow.load_config(str_yaml)\n assert f1 == f2\n\n f3 = Flow.load_config(\n '!Flow\\nversion: 1.0\\ndeployments: [{name: ppp0, uses: _merge}, name: aaa1]'\n )\n assert 'ppp0' in f3._deployment_nodes.keys()\n assert 'aaa1' in f3._deployment_nodes.keys()\n assert f3.num_deployments == 2\n\n\nclass DummyEncoder(Executor):\n pass\n\n\ndef test_flow_uses_from_dict():\n d1 = {'jtype': 'DummyEncoder', 'metas': {'name': 'dummy1'}}\n with Flow().add(uses=d1):\n pass\n\n\ndef test_flow_yaml_override_with_protocol():\n from jina.enums import GatewayProtocolType\n\n path = os.path.join(\n cur_dir.parent.parent.parent, 'yaml/examples/faiss/flow-index.yml'\n )\n f1 = Flow.load_config(path)\n assert f1.protocol == GatewayProtocolType.GRPC\n f2 = Flow.load_config(path, uses_with={'protocol': 'http'})\n assert f2.protocol == GatewayProtocolType.HTTP\n f3 = Flow.load_config(path, uses_with={'protocol': 'websocket'})\n assert f3.protocol == GatewayProtocolType.WEBSOCKET\n"
] |
[
[
"numpy.random.random"
]
] |
abhinandansharma/number-plate-recognition
|
[
"e31a1bfb5b7d92199829cb30b281d37f2b4552bb"
] |
[
"DetectChars.py"
] |
[
"# DetectChars.py\nimport os\n\nimport cv2\nimport numpy as np\nimport math\nimport random\n\nimport Main\nimport Preprocess\nimport PossibleChar\n\n# module level variables ##########################################################################\n\nkNearest = cv2.ml.KNearest_create()\n\n # constants for checkIfPossibleChar, this checks one possible char only (does not compare to another char)\nMIN_PIXEL_WIDTH = 2\nMIN_PIXEL_HEIGHT = 8\n\nMIN_ASPECT_RATIO = 0.25\nMAX_ASPECT_RATIO = 1.0\n\nMIN_PIXEL_AREA = 80\n\n # constants for comparing two chars\nMIN_DIAG_SIZE_MULTIPLE_AWAY = 0.3\nMAX_DIAG_SIZE_MULTIPLE_AWAY = 5.0\n\nMAX_CHANGE_IN_AREA = 0.5\n\nMAX_CHANGE_IN_WIDTH = 0.8\nMAX_CHANGE_IN_HEIGHT = 0.2\n\nMAX_ANGLE_BETWEEN_CHARS = 12.0\n\n # other constants\nMIN_NUMBER_OF_MATCHING_CHARS = 3\n\nRESIZED_CHAR_IMAGE_WIDTH = 20\nRESIZED_CHAR_IMAGE_HEIGHT = 30\n\nMIN_CONTOUR_AREA = 100\n\n###################################################################################################\ndef loadKNNDataAndTrainKNN():\n allContoursWithData = [] # declare empty lists,\n validContoursWithData = [] # we will fill these shortly\n\n try:\n npaClassifications = np.loadtxt(\"classifications.txt\", np.float32) # read in training classifications\n except: # if file could not be opened\n print(\"error, unable to open classifications.txt, exiting program\\n\") # show error message\n os.system(\"pause\")\n return False # and return False\n # end try\n\n try:\n npaFlattenedImages = np.loadtxt(\"flattened_images.txt\", np.float32) # read in training images\n except: # if file could not be opened\n print(\"error, unable to open flattened_images.txt, exiting program\\n\") # show error message\n os.system(\"pause\")\n return False # and return False\n # end try\n\n npaClassifications = npaClassifications.reshape((npaClassifications.size, 1)) # reshape numpy array to 1d, necessary to pass to call to train\n\n kNearest.setDefaultK(1) # set default K to 1\n\n kNearest.train(npaFlattenedImages, cv2.ml.ROW_SAMPLE, npaClassifications) # train KNN object\n\n return True # if we got here training was successful so return true\n# end function\n\n###################################################################################################\ndef detectCharsInPlates(listOfPossiblePlates):\n intPlateCounter = 0\n imgContours = None\n contours = []\n\n if len(listOfPossiblePlates) == 0: # if list of possible plates is empty\n return listOfPossiblePlates # return\n # end if\n\n # at this point we can be sure the list of possible plates has at least one plate\n\n for possiblePlate in listOfPossiblePlates: # for each possible plate, this is a big for loop that takes up most of the function\n\n possiblePlate.imgGrayscale, possiblePlate.imgThresh = Preprocess.preprocess(possiblePlate.imgPlate) # preprocess to get grayscale and threshold images\n\n if Main.showSteps == True: # show steps ###################################################\n cv2.imshow(\"5a\", possiblePlate.imgPlate)\n cv2.imshow(\"5b\", possiblePlate.imgGrayscale)\n cv2.imshow(\"5c\", possiblePlate.imgThresh)\n # end if # show steps #####################################################################\n\n # increase size of plate image for easier viewing and char detection\n possiblePlate.imgThresh = cv2.resize(possiblePlate.imgThresh, (0, 0), fx = 1.6, fy = 1.6)\n\n # threshold again to eliminate any gray areas\n thresholdValue, possiblePlate.imgThresh = cv2.threshold(possiblePlate.imgThresh, 0.0, 255.0, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n\n if 
Main.showSteps == True: # show steps ###################################################\n cv2.imshow(\"5d\", possiblePlate.imgThresh)\n # end if # show steps #####################################################################\n\n # find all possible chars in the plate,\n # this function first finds all contours, then only includes contours that could be chars (without comparison to other chars yet)\n listOfPossibleCharsInPlate = findPossibleCharsInPlate(possiblePlate.imgGrayscale, possiblePlate.imgThresh)\n\n if Main.showSteps == True: # show steps ###################################################\n height, width, numChannels = possiblePlate.imgPlate.shape\n imgContours = np.zeros((height, width, 3), np.uint8)\n del contours[:] # clear the contours list\n\n for possibleChar in listOfPossibleCharsInPlate:\n contours.append(possibleChar.contour)\n # end for\n\n cv2.drawContours(imgContours, contours, -1, Main.SCALAR_WHITE)\n\n cv2.imshow(\"6\", imgContours)\n # end if # show steps #####################################################################\n\n # given a list of all possible chars, find groups of matching chars within the plate\n listOfListsOfMatchingCharsInPlate = findListOfListsOfMatchingChars(listOfPossibleCharsInPlate)\n\n if Main.showSteps == True: # show steps ###################################################\n imgContours = np.zeros((height, width, 3), np.uint8)\n del contours[:]\n\n for listOfMatchingChars in listOfListsOfMatchingCharsInPlate:\n intRandomBlue = random.randint(0, 255)\n intRandomGreen = random.randint(0, 255)\n intRandomRed = random.randint(0, 255)\n\n for matchingChar in listOfMatchingChars:\n contours.append(matchingChar.contour)\n # end for\n cv2.drawContours(imgContours, contours, -1, (intRandomBlue, intRandomGreen, intRandomRed))\n # end for\n cv2.imshow(\"7\", imgContours)\n # end if # show steps #####################################################################\n\n if (len(listOfListsOfMatchingCharsInPlate) == 0):\t\t\t# if no groups of matching chars were found in the plate\n\n if Main.showSteps == True: # show steps ###############################################\n print(\"chars found in plate number \" + str(\n intPlateCounter) + \" = (none), click on any image and press a key to continue . . 
.\")\n intPlateCounter = intPlateCounter + 1\n cv2.destroyWindow(\"8\")\n cv2.destroyWindow(\"9\")\n cv2.destroyWindow(\"10\")\n cv2.waitKey(0)\n # end if # show steps #################################################################\n\n possiblePlate.strChars = \"\"\n continue\t\t\t\t\t\t# go back to top of for loop\n # end if\n\n for i in range(0, len(listOfListsOfMatchingCharsInPlate)): # within each list of matching chars\n listOfListsOfMatchingCharsInPlate[i].sort(key = lambda matchingChar: matchingChar.intCenterX) # sort chars from left to right\n listOfListsOfMatchingCharsInPlate[i] = removeInnerOverlappingChars(listOfListsOfMatchingCharsInPlate[i]) # and remove inner overlapping chars\n # end for\n\n if Main.showSteps == True: # show steps ###################################################\n imgContours = np.zeros((height, width, 3), np.uint8)\n\n for listOfMatchingChars in listOfListsOfMatchingCharsInPlate:\n intRandomBlue = random.randint(0, 255)\n intRandomGreen = random.randint(0, 255)\n intRandomRed = random.randint(0, 255)\n\n del contours[:]\n\n for matchingChar in listOfMatchingChars:\n contours.append(matchingChar.contour)\n # end for\n\n cv2.drawContours(imgContours, contours, -1, (intRandomBlue, intRandomGreen, intRandomRed))\n # end for\n cv2.imshow(\"8\", imgContours)\n # end if # show steps #####################################################################\n\n # within each possible plate, suppose the longest list of potential matching chars is the actual list of chars\n intLenOfLongestListOfChars = 0\n intIndexOfLongestListOfChars = 0\n\n # loop through all the vectors of matching chars, get the index of the one with the most chars\n for i in range(0, len(listOfListsOfMatchingCharsInPlate)):\n if len(listOfListsOfMatchingCharsInPlate[i]) > intLenOfLongestListOfChars:\n intLenOfLongestListOfChars = len(listOfListsOfMatchingCharsInPlate[i])\n intIndexOfLongestListOfChars = i\n # end if\n # end for\n\n # suppose that the longest list of matching chars within the plate is the actual list of chars\n longestListOfMatchingCharsInPlate = listOfListsOfMatchingCharsInPlate[intIndexOfLongestListOfChars]\n\n if Main.showSteps == True: # show steps ###################################################\n imgContours = np.zeros((height, width, 3), np.uint8)\n del contours[:]\n\n for matchingChar in longestListOfMatchingCharsInPlate:\n contours.append(matchingChar.contour)\n # end for\n\n cv2.drawContours(imgContours, contours, -1, Main.SCALAR_WHITE)\n\n cv2.imshow(\"9\", imgContours)\n # end if # show steps #####################################################################\n\n possiblePlate.strChars = recognizeCharsInPlate(possiblePlate.imgThresh, longestListOfMatchingCharsInPlate)\n\n if Main.showSteps == True: # show steps ###################################################\n print(\"chars found in plate number \" + str(\n intPlateCounter) + \" = \" + possiblePlate.strChars + \", click on any image and press a key to continue . . .\")\n intPlateCounter = intPlateCounter + 1\n cv2.waitKey(0)\n # end if # show steps #####################################################################\n\n # end of big for loop that takes up most of the function\n\n if Main.showSteps == True:\n print(\"\\nchar detection complete, click on any image and press a key to continue . . 
.\\n\")\n cv2.waitKey(0)\n # end if\n\n return listOfPossiblePlates\n# end function\n\n###################################################################################################\ndef findPossibleCharsInPlate(imgGrayscale, imgThresh):\n listOfPossibleChars = [] # this will be the return value\n contours = []\n imgThreshCopy = imgThresh.copy()\n\n # find all contours in plate\n contours, npaHierarchy = cv2.findContours(imgThreshCopy, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n\n for contour in contours: # for each contour\n possibleChar = PossibleChar.PossibleChar(contour)\n\n if checkIfPossibleChar(possibleChar): # if contour is a possible char, note this does not compare to other chars (yet) . . .\n listOfPossibleChars.append(possibleChar) # add to list of possible chars\n # end if\n # end if\n\n return listOfPossibleChars\n# end function\n\n###################################################################################################\ndef checkIfPossibleChar(possibleChar):\n # this function is a 'first pass' that does a rough check on a contour to see if it could be a char,\n # note that we are not (yet) comparing the char to other chars to look for a group\n if (possibleChar.intBoundingRectArea > MIN_PIXEL_AREA and\n possibleChar.intBoundingRectWidth > MIN_PIXEL_WIDTH and possibleChar.intBoundingRectHeight > MIN_PIXEL_HEIGHT and\n MIN_ASPECT_RATIO < possibleChar.fltAspectRatio and possibleChar.fltAspectRatio < MAX_ASPECT_RATIO):\n return True\n else:\n return False\n # end if\n# end function\n\n###################################################################################################\ndef findListOfListsOfMatchingChars(listOfPossibleChars):\n # with this function, we start off with all the possible chars in one big list\n # the purpose of this function is to re-arrange the one big list of chars into a list of lists of matching chars,\n # note that chars that are not found to be in a group of matches do not need to be considered further\n listOfListsOfMatchingChars = [] # this will be the return value\n\n for possibleChar in listOfPossibleChars: # for each possible char in the one big list of chars\n listOfMatchingChars = findListOfMatchingChars(possibleChar, listOfPossibleChars) # find all chars in the big list that match the current char\n\n listOfMatchingChars.append(possibleChar) # also add the current char to current possible list of matching chars\n\n if len(listOfMatchingChars) < MIN_NUMBER_OF_MATCHING_CHARS: # if current possible list of matching chars is not long enough to constitute a possible plate\n continue # jump back to the top of the for loop and try again with next char, note that it's not necessary\n # to save the list in any way since it did not have enough chars to be a possible plate\n # end if\n\n # if we get here, the current list passed test as a \"group\" or \"cluster\" of matching chars\n listOfListsOfMatchingChars.append(listOfMatchingChars) # so add to our list of lists of matching chars\n\n listOfPossibleCharsWithCurrentMatchesRemoved = []\n\n # remove the current list of matching chars from the big list so we don't use those same chars twice,\n # make sure to make a new big list for this since we don't want to change the original big list\n listOfPossibleCharsWithCurrentMatchesRemoved = list(set(listOfPossibleChars) - set(listOfMatchingChars))\n\n recursiveListOfListsOfMatchingChars = findListOfListsOfMatchingChars(listOfPossibleCharsWithCurrentMatchesRemoved) # recursive call\n\n for recursiveListOfMatchingChars in 
recursiveListOfListsOfMatchingChars: # for each list of matching chars found by recursive call\n listOfListsOfMatchingChars.append(recursiveListOfMatchingChars) # add to our original list of lists of matching chars\n # end for\n\n break # exit for\n\n # end for\n\n return listOfListsOfMatchingChars\n# end function\n\n###################################################################################################\ndef findListOfMatchingChars(possibleChar, listOfChars):\n # the purpose of this function is, given a possible char and a big list of possible chars,\n # find all chars in the big list that are a match for the single possible char, and return those matching chars as a list\n listOfMatchingChars = [] # this will be the return value\n\n for possibleMatchingChar in listOfChars: # for each char in big list\n if possibleMatchingChar == possibleChar: # if the char we attempting to find matches for is the exact same char as the char in the big list we are currently checking\n # then we should not include it in the list of matches b/c that would end up double including the current char\n continue # so do not add to list of matches and jump back to top of for loop\n # end if\n # compute stuff to see if chars are a match\n fltDistanceBetweenChars = distanceBetweenChars(possibleChar, possibleMatchingChar)\n\n fltAngleBetweenChars = angleBetweenChars(possibleChar, possibleMatchingChar)\n\n fltChangeInArea = float(abs(possibleMatchingChar.intBoundingRectArea - possibleChar.intBoundingRectArea)) / float(possibleChar.intBoundingRectArea)\n\n fltChangeInWidth = float(abs(possibleMatchingChar.intBoundingRectWidth - possibleChar.intBoundingRectWidth)) / float(possibleChar.intBoundingRectWidth)\n fltChangeInHeight = float(abs(possibleMatchingChar.intBoundingRectHeight - possibleChar.intBoundingRectHeight)) / float(possibleChar.intBoundingRectHeight)\n\n # check if chars match\n if (fltDistanceBetweenChars < (possibleChar.fltDiagonalSize * MAX_DIAG_SIZE_MULTIPLE_AWAY) and\n fltAngleBetweenChars < MAX_ANGLE_BETWEEN_CHARS and\n fltChangeInArea < MAX_CHANGE_IN_AREA and\n fltChangeInWidth < MAX_CHANGE_IN_WIDTH and\n fltChangeInHeight < MAX_CHANGE_IN_HEIGHT):\n\n listOfMatchingChars.append(possibleMatchingChar) # if the chars are a match, add the current char to list of matching chars\n # end if\n # end for\n\n return listOfMatchingChars # return result\n# end function\n\n###################################################################################################\n# use Pythagorean theorem to calculate distance between two chars\ndef distanceBetweenChars(firstChar, secondChar):\n intX = abs(firstChar.intCenterX - secondChar.intCenterX)\n intY = abs(firstChar.intCenterY - secondChar.intCenterY)\n\n return math.sqrt((intX ** 2) + (intY ** 2))\n# end function\n\n###################################################################################################\n# use basic trigonometry (SOH CAH TOA) to calculate angle between chars\ndef angleBetweenChars(firstChar, secondChar):\n fltAdj = float(abs(firstChar.intCenterX - secondChar.intCenterX))\n fltOpp = float(abs(firstChar.intCenterY - secondChar.intCenterY))\n\n if fltAdj != 0.0: # check to make sure we do not divide by zero if the center X positions are equal, float division by zero will cause a crash in Python\n fltAngleInRad = math.atan(fltOpp / fltAdj) # if adjacent is not zero, calculate angle\n else:\n fltAngleInRad = 1.5708 # if adjacent is zero, use this as the angle, this is to be consistent with the C++ version of this program\n # end 
if\n\n fltAngleInDeg = fltAngleInRad * (180.0 / math.pi) # calculate angle in degrees\n\n return fltAngleInDeg\n# end function\n\n###################################################################################################\n# if we have two chars overlapping or to close to each other to possibly be separate chars, remove the inner (smaller) char,\n# this is to prevent including the same char twice if two contours are found for the same char,\n# for example for the letter 'O' both the inner ring and the outer ring may be found as contours, but we should only include the char once\ndef removeInnerOverlappingChars(listOfMatchingChars):\n listOfMatchingCharsWithInnerCharRemoved = list(listOfMatchingChars) # this will be the return value\n\n for currentChar in listOfMatchingChars:\n for otherChar in listOfMatchingChars:\n if currentChar != otherChar: # if current char and other char are not the same char . . .\n # if current char and other char have center points at almost the same location . . .\n if distanceBetweenChars(currentChar, otherChar) < (currentChar.fltDiagonalSize * MIN_DIAG_SIZE_MULTIPLE_AWAY):\n # if we get in here we have found overlapping chars\n # next we identify which char is smaller, then if that char was not already removed on a previous pass, remove it\n if currentChar.intBoundingRectArea < otherChar.intBoundingRectArea: # if current char is smaller than other char\n if currentChar in listOfMatchingCharsWithInnerCharRemoved: # if current char was not already removed on a previous pass . . .\n listOfMatchingCharsWithInnerCharRemoved.remove(currentChar) # then remove current char\n # end if\n else: # else if other char is smaller than current char\n if otherChar in listOfMatchingCharsWithInnerCharRemoved: # if other char was not already removed on a previous pass . . 
.\n listOfMatchingCharsWithInnerCharRemoved.remove(otherChar) # then remove other char\n # end if\n # end if\n # end if\n # end if\n # end for\n # end for\n\n return listOfMatchingCharsWithInnerCharRemoved\n# end function\n\n###################################################################################################\n# this is where we apply the actual char recognition\ndef recognizeCharsInPlate(imgThresh, listOfMatchingChars):\n strChars = \"\" # this will be the return value, the chars in the lic plate\n\n height, width = imgThresh.shape\n\n imgThreshColor = np.zeros((height, width, 3), np.uint8)\n\n listOfMatchingChars.sort(key = lambda matchingChar: matchingChar.intCenterX) # sort chars from left to right\n\n cv2.cvtColor(imgThresh, cv2.COLOR_GRAY2BGR, imgThreshColor) # make color version of threshold image so we can draw contours in color on it\n\n for currentChar in listOfMatchingChars: # for each char in plate\n pt1 = (currentChar.intBoundingRectX, currentChar.intBoundingRectY)\n pt2 = ((currentChar.intBoundingRectX + currentChar.intBoundingRectWidth), (currentChar.intBoundingRectY + currentChar.intBoundingRectHeight))\n\n cv2.rectangle(imgThreshColor, pt1, pt2, Main.SCALAR_GREEN, 2) # draw green box around the char\n\n # crop char out of threshold image\n imgROI = imgThresh[currentChar.intBoundingRectY : currentChar.intBoundingRectY + currentChar.intBoundingRectHeight,\n currentChar.intBoundingRectX : currentChar.intBoundingRectX + currentChar.intBoundingRectWidth]\n\n imgROIResized = cv2.resize(imgROI, (RESIZED_CHAR_IMAGE_WIDTH, RESIZED_CHAR_IMAGE_HEIGHT)) # resize image, this is necessary for char recognition\n\n npaROIResized = imgROIResized.reshape((1, RESIZED_CHAR_IMAGE_WIDTH * RESIZED_CHAR_IMAGE_HEIGHT)) # flatten image into 1d numpy array\n\n npaROIResized = np.float32(npaROIResized) # convert from 1d numpy array of ints to 1d numpy array of floats\n\n retval, npaResults, neigh_resp, dists = kNearest.findNearest(npaROIResized, k = 1) # finally we can call findNearest !!!\n\n strCurrentChar = str(chr(int(npaResults[0][0]))) # get character from results\n\n strChars = strChars + strCurrentChar # append current char to full string\n\n # end for\n\n if Main.showSteps == True: # show steps #######################################################\n cv2.imshow(\"10\", imgThreshColor)\n # end if # show steps #########################################################################\n\n return strChars\n# end function"
] |
[
[
"numpy.float32",
"numpy.zeros",
"numpy.loadtxt"
]
] |
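The APIs indexed for the plate-recognition record above are `numpy.float32`, `numpy.zeros`, and `numpy.loadtxt`. A minimal, hedged sketch of how those calls typically fit together in this kind of kNN character classifier follows; the training file names (`classifications.txt`, `flattened_images.txt`) and the 20×30 character size are illustrative assumptions, not values confirmed by the record.

```python
# Hedged sketch of the np.loadtxt / np.float32 / np.zeros usage behind the kNN character recogniser above.
# File names and character dimensions are assumptions for illustration.
import cv2
import numpy as np

RESIZED_CHAR_IMAGE_WIDTH, RESIZED_CHAR_IMAGE_HEIGHT = 20, 30   # assumed values

# load flattened training characters and their labels (assumed file names)
npa_classifications = np.loadtxt("classifications.txt", np.float32).reshape(-1, 1)
npa_flattened_images = np.loadtxt("flattened_images.txt", np.float32)

k_nearest = cv2.ml.KNearest_create()
k_nearest.train(npa_flattened_images, cv2.ml.ROW_SAMPLE, npa_classifications)

# classify one cropped character image (here just a blank placeholder ROI)
img_roi = np.zeros((RESIZED_CHAR_IMAGE_HEIGHT, RESIZED_CHAR_IMAGE_WIDTH), np.uint8)
npa_roi = np.float32(img_roi.reshape(1, RESIZED_CHAR_IMAGE_WIDTH * RESIZED_CHAR_IMAGE_HEIGHT))
retval, results, neigh_resp, dists = k_nearest.findNearest(npa_roi, k=1)
print(chr(int(results[0][0])))   # recognised character code -> character
```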
zaxmks/demo-data-compliance-service
|
[
"372e612c570aaf5b512bec17627f825e880add67"
] |
[
"src/tests/sources/test_structured_data_source.py"
] |
[
"import pytest\n\nimport pandas as pd\n\nfrom src.mapping.columns.column_relation import ColumnRelation\nfrom src.mapping.rows.row_mapping_configuration import RowMappingConfiguration\nfrom src.mapping.rows.row_relation import RowRelation\nfrom src.mapping.values.value_matching_configuration import ValueMatchingConfiguration\nfrom src.sources.data_source import DataSource\nfrom src.sources.structured_data_source import StructuredDataSource\n\n\ndef get_sample_directory_df():\n df = pd.DataFrame()\n df[\"name\"] = [\n \"Michael Connell\",\n \"Maura Caslin\",\n \"Steve Jordan\",\n \"Matthew Whitaker\",\n \"Bob Hope\",\n ]\n return df\n\n\ndef get_test_source():\n data = get_sample_directory_df()\n return StructuredDataSource(data, \"test_name\")\n\n\ndef test_init():\n sds = get_test_source()\n assert sds.column_relations == []\n assert sds.row_relations == []\n\n\ndef test_append_column_when_correct_length():\n sds = get_test_source()\n sds.append_column(data_to_append=[\"a\", \"b\", \"c\", \"d\", \"e\"], column_name=\"letters\")\n assert \"letters\" in sds.data.columns\n assert list(sds.data[\"letters\"].values) == [\"a\", \"b\", \"c\", \"d\", \"e\"]\n\n\ndef test_append_column_when_incorrect_length():\n sds = get_test_source()\n with pytest.raises(Exception):\n sds.append_column(data_to_append=[\"a\", \"b\"], column_name=\"letters\")\n\n\ndef test_append_column_relation_when_correct_type():\n sds = get_test_source()\n cr = ColumnRelation(\"test_source\", \"test_source_column\", \"test_target_column\", 1.0)\n sds.append_column_relation(cr)\n assert sds.column_relations == [cr]\n\n\ndef test_append_column_relation_when_incorrect_type():\n sds = get_test_source()\n with pytest.raises(Exception):\n sds.append_column_relation(\"invalid_type\")\n\n\ndef test_append_row_relation_when_correct_type():\n sds = get_test_source()\n rr = RowRelation(\"test_source\", 0, 1, 1.0)\n sds.append_row_relation(rr)\n assert sds.row_relations == [rr]\n\n\ndef test_append_row_relation_when_incorrect_type():\n sds = get_test_source()\n with pytest.raises(Exception):\n sds.append_row_relation(\"invalid_type\")\n\n\ndef test_create_column_relation():\n sds = get_test_source()\n sds.create_column_relation(\"source_column\", \"target_column\", \"target_source\")\n assert len(sds.column_relations) == 1\n assert isinstance(sds.column_relations[0], ColumnRelation)\n assert sds.column_relations\n\n\ndef test_get_column_relations():\n sds = get_test_source()\n sds.column_relations = [1, 2, 3]\n assert sds.get_column_relations() == [1, 2, 3]\n\n\ndef test_relate_columns_to():\n ds_source = DataSource(\"src/tests/test_data/sample/names.csv\")\n ds_target = DataSource(\"src/tests/test_data/sample/names.csv\")\n matching_config = ValueMatchingConfiguration(model_type=\"exact\")\n ds_source.relate_columns_to(ds_target, mapping_configuration=matching_config)\n assert len(ds_source.column_relations) == 1\n assert ds_source.column_relations[0].target_data_source == ds_target\n assert ds_source.column_relations[0].source_column_name == \"name\"\n assert ds_source.column_relations[0].target_column_name == \"name\"\n assert ds_source.column_relations[0].confidence == 1.0\n\n\ndef test_map_rows_to():\n ds_source = DataSource(\"src/tests/test_data/sample/names.csv\")\n ds_target = DataSource(\"src/tests/test_data/sample/names.csv\")\n ds_source.create_column_relation(\"name\", \"name\", ds_target)\n value_matching_config = ValueMatchingConfiguration(model_type=\"exact\")\n row_mapping_config = RowMappingConfiguration(\n 
model_type=\"weighted_linear\", weights={\"name\": 1}\n )\n ds_source.map_rows_to(ds_target, value_matching_config, row_mapping_config)\n assert len(ds_source.row_relations) == 252 # Duplicate record present, hence +2\n\n\ndef test_describe_row_relation_for_index():\n ds_source = DataSource(\"src/tests/test_data/sample/names.csv\")\n ds_target = DataSource(\"src/tests/test_data/sample/names.csv\")\n ds_source.create_column_relation(\"name\", \"name\", ds_target)\n description = ds_source.describe_row_relation_for_index(0)\n assert description == '{\"name\": \"Soo Hong\"}'\n\n\ndef test_get_column():\n sds = get_test_source()\n ref_df = get_sample_directory_df()\n assert list(sds.get_column(\"name\")) == list(ref_df[\"name\"].values)\n"
] |
[
[
"pandas.DataFrame"
]
] |
fmi-basel/dl-utils
|
[
"15bbb7672314d72abc7c3f7fc86655401cde5eb6"
] |
[
"dlutils/losses/weighted_losses.py"
] |
[
"from tensorflow.keras import backend as K\nimport tensorflow as tf\nimport abc\n\n\nclass PixelWeightedLossBase(tf.keras.losses.Loss):\n def call(self, y_true, y_pred):\n '''Computes pixel weighted loss, using the last channel of y_true as \n normalization weights\n \n Notes:\n ------\n - sum of weights == 1 is expected\n - implemented as channels_last\n '''\n\n # extract pre-computed normalization channel\n self._check_weights_stacking(y_true, y_pred)\n weights = y_true[..., -1:]\n y_true = y_true[..., 0:-1]\n\n loss = self._pixel_loss(y_true, y_pred)\n\n # sum loss over spatial and channel dims, mean over batch\n loss = tf.math.reduce_sum(loss * weights,\n axis=tuple(range(1, len(y_pred.shape))))\n return tf.math.reduce_mean(loss)\n\n def _check_weights_stacking(self, y_true, y_pred):\n if y_true.shape[-1] != y_pred.shape[-1] + 1:\n raise ValueError(\n 'Weights incorrectly stacked. Expected y_true with an extra channel for the weights got y_true, y_pred shapes: {} and {}'\n .format(y_true.shape, y_pred.shape))\n\n @abc.abstractmethod\n def _pixel_loss(self, y_true, y_pred):\n pass\n\n\nclass PixelWeightedL1Loss(PixelWeightedLossBase):\n def _pixel_loss(self, y_true, y_pred):\n return K.abs(y_pred - y_true)\n\n\nclass PixelWeightedMSE(PixelWeightedLossBase):\n def _pixel_loss(self, y_true, y_pred):\n return K.square(y_pred - y_true)\n\n\nclass PixelWeightedBinaryCrossentropy(PixelWeightedLossBase):\n def __init__(self, from_logits=False):\n super().__init__()\n self.from_logits = from_logits\n\n def _pixel_loss(self, y_true, y_pred):\n return K.binary_crossentropy(y_true,\n y_pred,\n from_logits=self.from_logits)\n"
] |
[
[
"tensorflow.keras.backend.binary_crossentropy",
"tensorflow.keras.backend.square",
"tensorflow.math.reduce_mean",
"tensorflow.keras.backend.abs"
]
] |
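The pixel-weighted losses defined in `dlutils/losses/weighted_losses.py` above expect the per-pixel normalisation weights to be stacked as the last channel of `y_true`, with the weights summing to 1 per sample. A small, hedged usage sketch of that convention, assuming a TF2 environment where the `dlutils` package is importable, is shown below; the tensor shapes are illustrative.

```python
# Hedged usage sketch: stack per-pixel weights as the extra last channel of y_true,
# which is what PixelWeightedLossBase._check_weights_stacking expects (y_true has one more channel than y_pred).
import tensorflow as tf
from dlutils.losses.weighted_losses import PixelWeightedMSE   # module path taken from the record above

targets = tf.random.uniform((2, 32, 32, 1))                    # ground truth, channels_last
weights = tf.fill((2, 32, 32, 1), 1.0 / (32 * 32))             # per-pixel weights summing to 1 per sample
y_true = tf.concat([targets, weights], axis=-1)                # append the weight channel
y_pred = tf.random.uniform((2, 32, 32, 1))

loss = PixelWeightedMSE()(y_true, y_pred)                      # weighted sum over pixels, mean over batch
print(float(loss))
```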
SeuTao/iclr-2021-factored-attention
|
[
"d189e3be2f8b2f27229a528f9f36b1c7ac072cd7"
] |
[
"train.py"
] |
[
"from argparse import ArgumentParser\nimport os\nfrom pathlib import Path\nimport random\nimport string\nimport io\n\nimport matplotlib.pyplot as plt\nimport pytorch_lightning as pl\nimport torch\nimport wandb\nimport boto3\n\nfrom mogwai.data_loading import MSADataModule, MSDataModule\nfrom mogwai.parsing import read_contacts\nfrom mogwai import models\nfrom mogwai.utils.functional import apc\nfrom mogwai.metrics import contact_auc\nfrom mogwai.plotting import (\n plot_colored_preds_on_trues,\n plot_precision_vs_length,\n)\nfrom mogwai.vocab import FastaVocab\n\nfrom loggers import WandbLoggerFrozenVal\n\ns3_client = boto3.client(\"s3\")\ns3_bucket = \"proteindata\"\n\n\ndef train():\n # Initialize parser\n parser = ArgumentParser()\n parser.add_argument(\n \"--model\",\n default=\"gremlin\",\n choices=models.MODELS.keys(),\n help=\"Which model to train.\",\n )\n parser.add_argument(\n \"--train_unaligned\",\n action=\"store_true\",\n help=\"Whether to train unaligned instead.\",\n )\n model_name = parser.parse_known_args()[0].model\n train_unaligned = parser.parse_known_args()[0].train_unaligned\n parser.add_argument(\n \"--save_model_s3\",\n action=\"store_true\",\n help=\"Whether to save the model state dict.\",\n )\n parser.add_argument(\n \"--wandb_project\",\n type=str,\n default=\"iclr2021-rebuttal\",\n help=\"W&B project used for logging.\",\n )\n parser.add_argument(\n \"--pdb\",\n type=str,\n help=\"PDB id for training\",\n )\n\n if train_unaligned:\n parser = MSDataModule.add_args(parser)\n else:\n parser = MSADataModule.add_args(parser)\n\n parser = pl.Trainer.add_argparse_args(parser)\n parser.set_defaults(\n gpus=1,\n min_steps=50,\n max_steps=1000,\n log_every_n_steps=10,\n )\n model_type = models.get(model_name)\n model_type.add_args(parser)\n args = parser.parse_args()\n\n # Modify name\n pdb = args.pdb\n args.data = \"data/npz/\" + pdb + \".npz\"\n\n # Load ms(a)\n if train_unaligned:\n msa_dm = MSDataModule.from_args(args)\n else:\n msa_dm = MSADataModule.from_args(args)\n msa_dm.setup()\n\n # Load contacts\n true_contacts = torch.from_numpy(read_contacts(args.data))\n\n # Initialize model\n num_seqs, msa_length, msa_counts = msa_dm.get_stats()\n model = model_type.from_args(\n args,\n num_seqs=num_seqs,\n msa_length=msa_length,\n msa_counts=msa_counts,\n vocab_size=len(FastaVocab),\n pad_idx=FastaVocab.pad_idx,\n true_contacts=true_contacts,\n )\n\n kwargs = {}\n randstring = \"\".join(random.choice(string.ascii_lowercase) for i in range(6))\n run_name = \"_\".join([args.model, pdb, randstring])\n logger = WandbLoggerFrozenVal(project=args.wandb_project, name=run_name)\n logger.log_hyperparams(args)\n logger.log_hyperparams(\n {\n \"pdb\": pdb,\n \"num_seqs\": num_seqs,\n \"msa_length\": msa_length,\n }\n )\n kwargs[\"logger\"] = logger\n\n # Initialize Trainer\n trainer = pl.Trainer.from_argparse_args(args, checkpoint_callback=False, **kwargs)\n\n trainer.fit(model, msa_dm)\n\n # Log and print some metrics after training.\n contacts = model.get_contacts()\n apc_contacts = apc(contacts)\n\n auc = contact_auc(contacts, true_contacts).item()\n auc_apc = contact_auc(apc_contacts, true_contacts).item()\n print(f\"AUC: {auc:0.3f}, AUC_APC: {auc_apc:0.3f}\")\n\n filename = \"top_L_contacts.png\"\n plot_colored_preds_on_trues(contacts, true_contacts, point_size=5, cutoff=1)\n plt.title(f\"Top L no APC {model.get_precision(do_apc=False)}\")\n logger.log_metrics({filename: wandb.Image(plt)})\n plt.close()\n\n filename = \"top_L_contacts_apc.png\"\n 
plot_colored_preds_on_trues(apc_contacts, true_contacts, point_size=5, cutoff=1)\n plt.title(f\"Top L APC {model.get_precision(do_apc=True)}\")\n logger.log_metrics({filename: wandb.Image(plt)})\n plt.close()\n\n filename = \"top_L_5_contacts.png\"\n plot_colored_preds_on_trues(contacts, true_contacts, point_size=5, cutoff=5)\n plt.title(f\"Top L/5 no APC {model.get_precision(do_apc=False, cutoff=5)}\")\n logger.log_metrics({filename: wandb.Image(plt)})\n plt.close()\n\n filename = \"top_L_5_contacts_apc.png\"\n plot_colored_preds_on_trues(apc_contacts, true_contacts, point_size=5, cutoff=5)\n plt.title(f\"Top L/5 APC {model.get_precision(do_apc=True, cutoff=5)}\")\n logger.log_metrics({filename: wandb.Image(plt)})\n plt.close()\n\n filename = \"precision_vs_L.png\"\n plot_precision_vs_length(apc_contacts, true_contacts)\n logger.log_metrics({filename: wandb.Image(plt)})\n plt.close()\n\n if args.save_model_s3:\n bytestream = io.BytesIO()\n torch.save(model.state_dict(), bytestream)\n bytestream.seek(0)\n key = os.path.join(\n \"iclr-2021-factored-attention\", wandb.run.path, \"model_state_dict.h5\"\n )\n response = s3_client.put_object(\n Bucket=s3_bucket, Body=bytestream, Key=key, ACL=\"public-read\"\n )\n print(f\"uploaded state dict to s3://{s3_bucket}/{key}\")\n\n\nif __name__ == \"__main__\":\n train()\n"
] |
[
[
"matplotlib.pyplot.close"
]
] |
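The only API indexed for the training script above is `matplotlib.pyplot.close`, called after each contact-map figure is logged to Weights & Biases. A hedged sketch of that log-then-close pattern follows, with a placeholder image instead of the mogwai plotting helpers and an offline W&B run so the snippet does not need credentials.

```python
# Hedged sketch of the plot -> wandb.Image -> plt.close loop from train.py; data and names are placeholders.
import matplotlib
matplotlib.use("Agg")                      # render figures off-screen
import matplotlib.pyplot as plt
import numpy as np
import wandb

run = wandb.init(project="demo-project", mode="offline")
for filename in ("top_L_contacts.png", "precision_vs_L.png"):
    plt.imshow(np.random.rand(64, 64))     # placeholder for plot_colored_preds_on_trues / plot_precision_vs_length
    plt.title(filename)
    run.log({filename: wandb.Image(plt)})  # snapshot the current pyplot figure
    plt.close()                            # free the figure before the next plot
run.finish()
```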
acrellin/cesium
|
[
"9d33edc0f9b3a79c68070826c0f390896abe294d"
] |
[
"cesium/features/tests/test_graphs.py"
] |
[
"import os\nimport numpy as np\nimport numpy.testing as npt\n\nfrom cesium import data_management\nfrom cesium.features import graphs\nfrom cesium.features.tests.util import generate_features\n\n\n# Fixed set of features w/ known values\nSCIENCE_FEATS = graphs.GENERAL_FEATS + graphs.LOMB_SCARGLE_FEATS\n\n\ndef test_feature_generation():\n \"\"\"Compare generated features to reference values.\"\"\"\n this_dir = os.path.join(os.path.dirname(__file__))\n test_files = [\n os.path.join(this_dir, 'data/257141.dat'),\n os.path.join(this_dir, 'data/245486.dat'),\n os.path.join(this_dir, 'data/247327.dat'),\n ]\n features_extracted = None\n values_computed = None\n for i, ts_data_file_path in enumerate(test_files):\n t, m, e = data_management.parse_ts_data(ts_data_file_path)\n features = generate_features(t, m, e, SCIENCE_FEATS)\n sorted_features = sorted(features.items())\n if features_extracted is None:\n features_extracted = [f[0] for f in sorted_features]\n values_computed = np.zeros((len(test_files),\n len(features_extracted)))\n values_computed[i,:] = [f[1] for f in sorted_features]\n\n def features_from_csv(filename):\n with open(filename) as f:\n feature_names = f.readline().strip().split(\",\")\n feature_values = np.loadtxt(f, delimiter=',')\n\n return feature_names, feature_values\n\n this_dir = os.path.join(os.path.dirname(__file__))\n features_expected, values_expected = features_from_csv(\n os.path.join(this_dir, \"data/expected_features.csv\"))\n\n npt.assert_equal(features_extracted, features_expected)\n npt.assert_array_almost_equal(values_computed, values_expected)\n"
] |
[
[
"numpy.testing.assert_equal",
"numpy.loadtxt",
"numpy.testing.assert_array_almost_equal"
]
] |
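The cesium test above reads a reference feature table with `numpy.loadtxt` and compares it to freshly computed values via `numpy.testing`. A hedged, self-contained sketch of that compare-against-expected pattern follows, using an in-memory CSV in place of `data/expected_features.csv`.

```python
# Hedged sketch of the features_from_csv + numpy.testing pattern used in test_feature_generation.
import io
import numpy as np
import numpy.testing as npt

csv_text = "amplitude,mean,std\n1.5,0.2,0.75\n"      # stand-in for data/expected_features.csv

buf = io.StringIO(csv_text)
features_expected = buf.readline().strip().split(",")
values_expected = np.loadtxt(buf, delimiter=",")     # reads the remaining rows

features_extracted = ["amplitude", "mean", "std"]
values_computed = np.array([1.5, 0.2, 0.7500004])    # pretend output of generate_features

npt.assert_equal(features_extracted, features_expected)
npt.assert_array_almost_equal(values_computed, values_expected)   # equal to 6 decimal places
```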
vivek-a81/Custom-DeTr
|
[
"54aa3a924e8f9d76e743630a5b5fd5c20f49e8cc"
] |
[
"detr/detect_pan.py"
] |
[
"\r\nimport argparse\r\nimport json\r\nimport os\r\nimport time\r\nfrom io import BytesIO\r\n\r\nimport cv2\r\nimport numpy as np\r\nimport torch\r\nimport torchvision.transforms as T\r\nfrom detectron2.data import DatasetCatalog, MetadataCatalog\r\nfrom detectron2.utils.visualizer import Visualizer\r\nfrom panopticapi.utils import rgb2id\r\nfrom PIL import Image\r\n\r\nfrom models import build_model\r\n\r\nnp.random.seed(111)\r\n\r\ndef get_args_parser():\r\n parser = argparse.ArgumentParser('Set transformer detector', add_help=False)\r\n parser.add_argument('--lr', default=1e-4, type=float)\r\n parser.add_argument('--lr_backbone', default=1e-5, type=float)\r\n parser.add_argument('--batch_size', default=6, type=int)\r\n parser.add_argument('--weight_decay', default=1e-4, type=float)\r\n parser.add_argument('--epochs', default=300, type=int)\r\n parser.add_argument('--lr_drop', default=200, type=int)\r\n parser.add_argument('--clip_max_norm', default=0.1, type=float,\r\n help='gradient clipping max norm')\r\n \r\n # Model parameters\r\n parser.add_argument('--frozen_weights', type=str, default=None,\r\n help=\"Path to the pretrained model. If set, only the mask head will be trained\")\r\n # * Backbone\r\n parser.add_argument('--backbone', default='resnet50', type=str,\r\n help=\"Name of the convolutional backbone to use\")\r\n parser.add_argument('--dilation', action='store_true',\r\n help=\"If true, we replace stride with dilation in the last convolutional block (DC5)\")\r\n parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),\r\n help=\"Type of positional embedding to use on top of the image features\")\r\n\r\n # * Transformer\r\n parser.add_argument('--enc_layers', default=6, type=int,\r\n help=\"Number of encoding layers in the transformer\")\r\n parser.add_argument('--dec_layers', default=6, type=int,\r\n help=\"Number of decoding layers in the transformer\")\r\n parser.add_argument('--dim_feedforward', default=2048, type=int,\r\n help=\"Intermediate size of the feedforward layers in the transformer blocks\")\r\n parser.add_argument('--hidden_dim', default=256, type=int,\r\n help=\"Size of the embeddings (dimension of the transformer)\")\r\n parser.add_argument('--dropout', default=0.1, type=float,\r\n help=\"Dropout applied in the transformer\")\r\n parser.add_argument('--nheads', default=8, type=int,\r\n help=\"Number of attention heads inside the transformer's attentions\")\r\n parser.add_argument('--num_queries', default=25, type=int,\r\n help=\"Number of query slots\")\r\n parser.add_argument('--pre_norm', action='store_true')\r\n\r\n # * Segmentation\r\n parser.add_argument('--masks', action='store_true',\r\n help=\"Train segmentation head if the flag is provided\")\r\n\r\n # # Loss\r\n parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',\r\n help=\"Disables auxiliary decoding losses (loss at each layer)\")\r\n # * Matcher\r\n parser.add_argument('--set_cost_class', default=1, type=float,\r\n help=\"Class coefficient in the matching cost\")\r\n parser.add_argument('--set_cost_bbox', default=5, type=float,\r\n help=\"L1 box coefficient in the matching cost\")\r\n parser.add_argument('--set_cost_giou', default=2, type=float,\r\n help=\"giou box coefficient in the matching cost\")\r\n # * Loss coefficients\r\n parser.add_argument('--mask_loss_coef', default=1, type=float)\r\n parser.add_argument('--dice_loss_coef', default=1, type=float)\r\n parser.add_argument('--bbox_loss_coef', default=5, type=float)\r\n 
parser.add_argument('--giou_loss_coef', default=2, type=float)\r\n parser.add_argument('--eos_coef', default=0.1, type=float,\r\n help=\"Relative classification weight of the no-object class\")\r\n\r\n # dataset parameters\r\n parser.add_argument('--dataset_file', default='coco')\r\n parser.add_argument('--data_path', default='data.test.json', type=str)\r\n parser.add_argument('--data_panoptic_path', type=str)\r\n parser.add_argument('--remove_difficult', action='store_true')\r\n\r\n parser.add_argument('--output_dir', default='',\r\n help='path where to save the results, empty for no saving')\r\n parser.add_argument('--device', default='cuda',\r\n help='device to use for training / testing')\r\n parser.add_argument('--resume', default='', help='resume from checkpoint')\r\n\r\n parser.add_argument('--thresh', default=0.85, type=float)\r\n parser.add_argument('--save_location', type=str, \r\n help=\"the loacation to save to results of mdodel\")\r\n parser.add_argument('--num_test', type=int, \r\n help=\"number of images to be tested\")\r\n return parser\r\n\r\n\r\ndef box_cxcywh_to_xyxy(x):\r\n x_c, y_c, w, h = x.unbind(1)\r\n b = [(x_c - 0.5 * w), (y_c - 0.5 * h),\r\n (x_c + 0.5 * w), (y_c + 0.5 * h)]\r\n return torch.stack(b, dim=1)\r\n\r\ndef rescale_bboxes(out_bbox, size):\r\n img_w, img_h = size\r\n b = box_cxcywh_to_xyxy(out_bbox)\r\n b = b * torch.tensor([img_w, img_h,\r\n img_w, img_h\r\n ], dtype=torch.float32)\r\n return b\r\n\r\n\r\ntransform = T.Compose([\r\n T.Resize(800),\r\n T.ToTensor(),\r\n T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\r\n])\r\n\r\[email protected]_grad()\r\ndef infer(orig_image, model, postprocessors, classes, device, meta, colors):\r\n model.eval()\r\n w, h = orig_image.size \r\n image = transform(orig_image).unsqueeze(0).to(device)\r\n\r\n out = model(image)\r\n\r\n out[\"pred_logits\"] = out[\"pred_logits\"].cpu()\r\n out[\"pred_boxes\"] = out[\"pred_boxes\"].cpu()\r\n\r\n probas = out['pred_logits'].softmax(-1)[0, :, :-1]\r\n # keep = probas.max(-1).values > 0.85\r\n keep = probas.max(-1).values > args.thresh\r\n probas = probas[keep].cpu().data.numpy()\r\n\r\n bboxes_scaled = rescale_bboxes(out['pred_boxes'][0, keep], orig_image.size)\r\n\r\n result = postprocessors['panoptic'](out, torch.as_tensor(image.shape[-2:]).unsqueeze(0))[0]\r\n segments_info = result[\"segments_info\"]\r\n # Panoptic predictions are stored in a special format png\r\n panoptic_seg = Image.open(BytesIO(result['png_string']))\r\n final_w, final_h = panoptic_seg.size\r\n # We convert the png into an segment id map\r\n panoptic_seg = np.array(panoptic_seg, dtype=np.uint8)\r\n panoptic_seg = torch.from_numpy(rgb2id(panoptic_seg))\r\n\r\n if len(bboxes_scaled)==0:\r\n return None\r\n \r\n # plotting boxes\r\n img_obj = np.array(orig_image)\r\n for p, box in zip(probas, bboxes_scaled):\r\n bbox = box.cpu().data.numpy()\r\n bbox = bbox.astype(np.int32)\r\n x,y = bbox[0], bbox[1] \r\n bbox = np.array([\r\n [bbox[0], bbox[1]],\r\n [bbox[2], bbox[1]],\r\n [bbox[2], bbox[3]],\r\n [bbox[0], bbox[3]],\r\n ])\r\n cl = p.argmax()\r\n c = tuple(map(int, colors[cl]))\r\n text = f\"{classes[cl]}: {p[cl]:.2f}\"\r\n cv2.putText(img_obj, text, (x+2,y+20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, c, 2)\r\n bbox = bbox.reshape((4, 2))\r\n cv2.polylines(img_obj, [bbox], True, c, 2)\r\n\r\n # plotting segmentation\r\n for i in range(len(segments_info)):\r\n c = segments_info[i][\"category_id\"]\r\n segments_info[i][\"category_id\"] = meta.thing_dataset_id_to_contiguous_id[c] if 
segments_info[i][\"isthing\"] else meta.stuff_dataset_id_to_contiguous_id[c]\r\n\r\n\r\n # Finally visualize the prediction\r\n v = Visualizer(np.array(orig_image.copy().resize((final_w, final_h)))[:, :, ::-1], meta, scale=1.0)\r\n v._default_font_size = 18\r\n v = v.draw_panoptic_seg_predictions(panoptic_seg, segments_info, area_threshold=0, alpha=0.4)\r\n img_pan = cv2.resize(v.get_image(), (w,h))\r\n\r\n return (img_obj, img_pan)\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser('DETR evaluation script', parents=[get_args_parser()])\r\n args = parser.parse_args()\r\n\r\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n print(\"Using: \", device)\r\n\r\n # loading model\r\n model, _, postprocessors = build_model(args)\r\n if args.frozen_weights is not None:\r\n checkpoint = torch.load(args.frozen_weights, map_location='cpu')\r\n model.detr.load_state_dict(checkpoint['model'])\r\n\r\n if args.resume:\r\n checkpoint = torch.load(args.resume, map_location='cpu')\r\n model.load_state_dict(checkpoint['model'])\r\n model.to(device)\r\n\r\n data = json.load(open(\"data/test_panoptic.json\"))\r\n im_info = data['images']\r\n id_to_class = {}\r\n for cat in data['categories']:\r\n id_to_class[cat['id']] = cat['name']\r\n \r\n colors = np.random.randint(0, 255, size=(len(id_to_class), 3), dtype=np.int)\r\n\r\n # generating META for custom dataset for visualiztion with detectron\r\n thing_classes = []\r\n stuff_classes = []\r\n thing_dataset_id_to_contiguous_id = {}\r\n stuff_dataset_id_to_contiguous_id = {}\r\n thing_count = 0\r\n stuff_count = 0\r\n for cat in data['categories']:\r\n if cat['isthing']:\r\n thing_classes.append(cat['name'])\r\n thing_dataset_id_to_contiguous_id[cat['id']] = thing_count\r\n thing_count += 1\r\n else:\r\n stuff_classes.append(cat['name'])\r\n stuff_dataset_id_to_contiguous_id[cat['id']] = stuff_count \r\n stuff_count += 1\r\n\r\n DatasetCatalog.register(\"construction_data\", lambda: data['annotations'])\r\n MetadataCatalog.get(\"construction_data\").set(thing_classes=thing_classes, stuff_classes=stuff_classes, \r\n thing_dataset_id_to_contiguous_id=thing_dataset_id_to_contiguous_id, \r\n stuff_dataset_id_to_contiguous_id=stuff_dataset_id_to_contiguous_id)\r\n construction_meta = MetadataCatalog.get(\"construction_data\")\r\n \r\n if not os.path.exists(args.save_location):\r\n os.makedirs(args.save_location)\r\n\r\n i, k = 0, 0\r\n while True:\r\n im_path = im_info[k]['file_name']\r\n im_org = Image.open(im_path)\r\n\r\n start_t = time.perf_counter()\r\n preds = infer(im_org, model, postprocessors, id_to_class, device, construction_meta, colors)\r\n end_t = time.perf_counter()\r\n\r\n k += 1\r\n \r\n if preds is not None:\r\n print(\"Processed...{} ({:.3f}s) Done:{}..\".format(im_path, end_t - start_t, i))\r\n \r\n obj_pred, pan_pred = preds\r\n im_org = cv2.copyMakeBorder(np.float32(im_org), top=15, bottom=15, left=15, right=15, \r\n borderType=cv2.BORDER_CONSTANT, value=(255,255,255))\r\n obj_pred = cv2.copyMakeBorder(np.float32(obj_pred), top=15, bottom=15, left=15, right=15, \r\n borderType=cv2.BORDER_CONSTANT, value=(255,255,255))\r\n pan_pred = cv2.copyMakeBorder(np.float32(pan_pred), top=15, bottom=15, left=15, right=15, \r\n borderType=cv2.BORDER_CONSTANT, value=(255,255,255))\r\n\r\n cv2.imwrite(f\"{args.save_location}/pred_{i}.png\", cv2.hconcat([im_org, obj_pred, pan_pred]))\r\n i += 1\r\n\r\n\r\n if i==args.num_test:\r\n break\r\n\r\n\r\n"
] |
[
[
"numpy.random.seed",
"torch.load",
"torch.tensor",
"torch.no_grad",
"torch.cuda.is_available",
"numpy.float32",
"torch.stack",
"numpy.array",
"torch.as_tensor"
]
] |
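Before drawing boxes, the detection script above rescales DETR's normalised centre-format predictions to pixel-space corner coordinates. The hedged sketch below restates that `box_cxcywh_to_xyxy` / `rescale_bboxes` step so it runs on its own, with a dummy prediction and image size.

```python
# Hedged sketch: converting DETR (cx, cy, w, h) outputs in [0, 1] to (x1, y1, x2, y2) pixel boxes,
# as done in detect_pan.py before drawing with OpenCV.
import torch

def box_cxcywh_to_xyxy(x):
    x_c, y_c, w, h = x.unbind(1)
    return torch.stack([x_c - 0.5 * w, y_c - 0.5 * h,
                        x_c + 0.5 * w, y_c + 0.5 * h], dim=1)

def rescale_bboxes(out_bbox, size):
    img_w, img_h = size
    return box_cxcywh_to_xyxy(out_bbox) * torch.tensor([img_w, img_h, img_w, img_h],
                                                       dtype=torch.float32)

pred = torch.tensor([[0.5, 0.5, 0.2, 0.4]])       # one box centred in the image
print(rescale_bboxes(pred, (640, 480)))           # tensor([[256., 144., 384., 336.]])
```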
tu-dortmund-ls12-rt/SSSEvaluation
|
[
"7e7c181fde24c98ef4432cf6ef2e91bd99911541"
] |
[
"effsstsPlot/effsstsPlot.py"
] |
[
"from __future__ import division\r\nimport sys\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport random\r\nimport math\r\n\r\ndef pickColor(ischeme):\r\n\tcolor = ''\r\n\tschemes = [\r\n\t\t'EDA','PROPORTIONAL','SEIFDA-MILP',\r\n\t\t'SCEDF','SCRM','SCAIR-RM','SCAIR-OPA','EDAGMF-OPA','MILP-ReleaseJitter','SRSR',\r\n\t\t'PASS-OPA','RSS','UDLEDF','WLAEDF','RTEDF','UNIFRAMEWORK','SUSPOBL','SUSPJIT','SUSPBLOCK','BURST-RM','UPPAAL'\r\n\t\t'NC']\r\n\tcolors = [\r\n\t\t'#0ff1ce','#696969','#bada55',\r\n\t\t'#7fe5f0','#ff0000','#ff80ed','#407294','#c39797','#420420','#133337',\r\n\t\t'#065535','#f08080','#5ac18e','#666666','#6897bb','#f7347a','#576675','#ffc0cb','#81d8d0','#ac25e2','#b4eeb4',\r\n\t\t'#008080',\r\n\t\t'#696966','#ffd700','#ffa500','#8a2be2','#00ffff','#ff7373','#40e0d0','#0000ff',\r\n\t\t'#d3ffce','#c6e2ff','#b0e0e6','#fa8072','#003366','#ffff00','#ffb6c1','#8b0000',\r\n\t\t'#800000','#800080','#7fffd4','#00ff00','#cccccc','#0a75ad','#ffff66','#000080',\r\n\t\t'#ffc3a0','#20b2aa','#333333','#66cdaa','#ff6666','#ff00ff','#ff7f50','#088da5',\r\n\t\t'#4ca3dd','#468499','#047806','#008000','#f6546a','#cbbeb5','#00ced1','#101010',\r\n\t\t'#660066','#b6fcd5','#daa520','#990000','#0e2f44','#808080',\r\n\t\t]\r\n\tif ischeme in schemes:\r\n\t\tindex = schemes.index(ischeme)\r\n\t\tcolor = colors[index]\r\n\telse:\r\n\t\tif ischeme.__contains__('SEIFDA-minD'):\r\n\t\t\tcolor = '#ffd700'\r\n\t\telif ischeme.__contains__('SEIFDA-PBminD'):\r\n\t\t\tcolor = '#c6e2ff'\r\n\t\telif ischeme.__contains__('SEIFDA-maxD'):\r\n\t\t\tcolor = '#800080'\r\n\t\telif ischeme.__contains__('Oblivious-IUB'):\r\n\t\t\tcolor = '#20b2aa'\r\n\t\telif ischeme.__contains__('Clairvoyant-SSSD'):\r\n\t\t\tcolor = '#66cdaa'\r\n\t\telif ischeme.__contains__('Oblivious-MP'):\r\n\t\t\tcolor = '#ffa500'\r\n\t\telif ischeme.__contains__('Clairvoyant-PDAB'):\r\n\t\t\tcolor = '#b0e0e6'\r\n\t\telse:\r\n\t\t\tcolor = '#0FF0F0'\r\n\treturn color\r\n\r\ndef pickMarker(ischeme):\r\n\tmarker = ''\r\n\tschemes = [\r\n\t\t'EDA','PROPORTIONAL','SEIFDA-MILP',\r\n\t\t'SCEDF','SCRM','SCAIR-RM','SCAIR-OPA','EDAGMF-OPA','MILP-ReleaseJitter','SRSR',\r\n\t\t'PASS-OPA','RSS','UDLEDF','WLAEDF','RTEDF','UNIFRAMEWORK','SUSPOBL','SUSPJIT','SUSPBLOCK',\r\n\t\t'NC']\r\n\tmarkers = [\r\n\t\t\".\",\",\",\"o\",\r\n\t\t\"v\",\"^\",\"<\",\">\",\"1\",\"2\",\"3\",\r\n\t\t\"4\",\"8\",\"s\",\"p\",\"P\",\"*\",\"h\",\"H\",\"+\",\r\n\t\t\"x\",\r\n\t\t\"X\",\"D\",\"d\",\"|\",\"_\",\".\"]\r\n\r\n\tif ischeme in schemes:\r\n\t\tindex = schemes.index(ischeme)\r\n\t\tmarker = markers[index]\r\n\telse: \r\n\t\tif ischeme.__contains__('SEIFDA-minD'):\r\n\t\t\tmarker = \"X\"\r\n\t\telif ischeme.__contains__('SEIFDA-PBminD'):\r\n\t\t\tmarker = \"D\"\r\n\t\telif ischeme.__contains__('SEIFDA-maxD'):\r\n\t\t\tmarker = \"d\"\r\n\t\telif ischeme.__contains__('PATH-minD') and ischeme.__contains__('DnD'):\r\n\t\t\tmarker = \"|\"\r\n\t\telif ischeme.__contains__('PATH-minD') and ischeme.__contains__('D=D'):\r\n\t\t\tmarker = \"_\"\r\n\t\telif ischeme.__contains__('PATH-PBminD') and ischeme.__contains__('DnD'):\r\n\t\t\tmarker = \".\"\r\n\t\telif ischeme.__contains__('PATH-PBminD') and ischeme.__contains__('D=D'):\r\n\t\t\tmarker = \",\"\r\n\t\telse:\r\n\t\t\tmarker = \"o\"\r\n\treturn marker\r\n\r\ndef pickName(ischeme):\r\n\tname = ''\r\n\tif ischeme.__contains__('PATH-minD') and ischeme.__contains__('DnD'):\r\n\t\tname = 'Clairvoyant-SSSD'\r\n\telif ischeme.__contains__('PATH-minD') and ischeme.__contains__('D=D'):\r\n\t\tname = 'Oblivious-IUB'\r\n\telif 
ischeme.__contains__('PATH-PBminD') and ischeme.__contains__('DnD'):\r\n\t\tname = 'Clairvoyant-PDAB'\r\n\telif ischeme.__contains__('PATH-PBminD') and ischeme.__contains__('D=D'):\r\n\t\tname = 'Oblivious-MP'\r\n\telse:\r\n\t\tname = ischeme\r\n\treturn name\r\n\r\ndef effsstsPlot(prefix, plotall, schemes, minsstype, maxsstype, ssofftypes, ustart, uend, ustep, numberoftasks):\r\n\t\"\"\"\r\n\tprints all plots\r\n\t\"\"\"\r\n\t# sstype= ['S','M','L','0.15']\r\n\t# ssofftypes = [2, 3, 5]\r\n\t#ssoprops = ['2', '5', '8']\r\n\r\n\t#figlabel = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']\r\n\t# prefix=\"effsstsPlot/data/\"\r\n\r\n\t# for three sub-plot, fixed\r\n\t# fig = plt.figure(figsize=(13, 4))\r\n\tfig = plt.figure()\r\n\t# create a virtual outer subsplot for putting big x-ylabel\r\n\tax = fig.add_subplot(111)\r\n\tfig.subplots_adjust(top=0.9, left=0.1, right=0.95, hspace=0.3)\r\n\r\n\tax.set_xlabel('Utilization (%)', size=15)\r\n\tax.set_ylabel('Acceptance Ratio', size=15)\r\n\tax.spines['top'].set_color('black')\r\n\tax.spines['bottom'].set_color('black')\r\n\tax.spines['left'].set_color('black')\r\n\tax.spines['right'].set_color('black')\r\n\tax.tick_params(labelcolor='black', top=False,\r\n\t\t\t\tbottom=False, left=False, right=False)\r\n\r\n\ti = 1\r\n\tfor ischeme in schemes:\r\n\t\tifile = prefix+\"/\"+str(minsstype)+\"-\"+str(maxsstype)+\"/\"+str(ssofftypes)+\"/\"+ischeme+ str(numberoftasks) +\".npy\"\r\n\t\tdata = np.load(ifile)\r\n\t\tx = data[0][0::1]\r\n\t\ty = data[1][0::1]\r\n\t\tprint(x)\r\n\t\tprint(y)\r\n\t\tax.plot(x, y,\r\n\t\t\t\t'-',\r\n\t\t\t\tcolor=pickColor(ischeme),\r\n\t\t\t\tmarker=pickMarker(ischeme),\r\n\t\t\t\tmarkersize=4,\r\n\t\t\t\tmarkevery=1,\r\n\t\t\t\tfillstyle='none',\r\n\t\t\t\tlabel=pickName(ischeme),\r\n\t\t\t\tlinewidth=1.0,\r\n\t\t\t\tclip_on=False)\r\n\t\tif i == 1:\r\n\t\t\tax.legend(bbox_to_anchor=(0.5, 1.11),\r\n\t\t\t\t\t\tloc=10,\r\n\t\t\t\t\t\tmarkerscale=1.5,\r\n\t\t\t\t\t\tncol=3,\r\n\t\t\t\t\t\tborderaxespad=0.,\r\n\t\t\t\t\t\tprop={'size': 10})\r\n\r\n\tax.set_title('No. of tasks: '+str(numberoftasks)+', Self-suspension length: ' +\r\n\t\t\t\t\tstr(minsstype)+\"-\"+str(maxsstype)+', No. 
of segments: '+str(ssofftypes), size=10, y=0.99)\r\n\tax.grid()\r\n\ti += 1\r\n\t#fig.savefig(prefix+\"/\"+isstype+\"/\"+issofftypes +\r\n\t\t# \"/\"+ischeme+\".pdf\", bbox_inches='tight')\r\n\r\n\t#plt.show()\r\n\tif plotall:\r\n\t\tfig.savefig(prefix + '/EFFSSTS[' + str(ssofftypes) + '][' + str(minsstype)+\"-\"+str(maxsstype) + '][' + str(numberoftasks) + '].pdf', bbox_inches='tight')\r\n\t\tprint('[DONE]', '/' + prefix + '/EFFSSTS[' + str(ssofftypes) + '][' + str(minsstype)+\"-\"+str(maxsstype) + '][' + str(numberoftasks) + '].pdf')\r\n\telse:\r\n\t\tfig.savefig(prefix + '/' + schemes[0] + '[' + str(ssofftypes) + '][' + str(minsstype)+\"-\"+str(maxsstype) + '][' + str(numberoftasks) + '].pdf', bbox_inches='tight')\r\n\t\tprint('[DONE]', '/' + prefix + '/' + schemes[0] + '[' + str(ssofftypes) + '][' + str(minsstype)+\"-\"+str(maxsstype) + '][' + str(numberoftasks) + '].pdf')\r\n\t#sys.exit()\r\n\r\n\r\ndef effsstsPlotmulti(prefix, plotall, id_par, par_values, schemes, minsstype, maxsstype, ssofftypes, ustart, uend, ustep, numberoftasks):\r\n\t\"\"\"\r\n\tprints all plots\r\n\t\"\"\"\r\n\t# sstype= ['S','M','L','0.15']\r\n\t# ssofftypes = [2, 3, 5]\r\n\t#ssoprops = ['2', '5', '8']\r\n\r\n\t#figlabel = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']\r\n\t# prefix=\"effsstsPlot/data/\"\r\n\r\n\t# for three sub-plot, fixed\r\n\t# fig = plt.figure(figsize=(13, 4))\r\n\t# fig = plt.figure()\r\n\t# create a virtual outer subsplot for putting big x-ylabel\r\n\t# ax = fig.add_subplot(111)\r\n\t# fig.subplots_adjust(top=0.9, left=0.1, right=0.95, hspace=0.3)\r\n\tif id_par == 'Tasks per Set':\r\n\t\tnumberoftasks = par_values\r\n\r\n\telif id_par == 'Number of Segments':\r\n\t\tssofftypes = par_values\r\n\t\tprint\r\n\t\t'ns1: ', ssofftypes[0]\r\n\telif id_par == 'Suspension Length':\r\n\t\tminsstype = par_values[0:3]\r\n\t\tmaxsstype = par_values[3:6]\r\n\r\n\tfig = plt.figure(figsize=(18, 12))\r\n\tfor c in range(3):\r\n\t\tax = fig.add_subplot(2, 3, (c + 1))\r\n\r\n\t\tax.set_xlabel('Utilization (%)', size=10)\r\n\t\tax.set_ylabel('Acceptance Ratio', size=10)\r\n\t\tax.spines['top'].set_color('black')\r\n\t\tax.spines['bottom'].set_color('black')\r\n\t\tax.spines['left'].set_color('black')\r\n\t\tax.spines['right'].set_color('black')\r\n\t\tax.tick_params(labelcolor='black', top=False,\r\n\t\t\t\t\tbottom=False, left=False, right=False)\r\n\t\ti = 1\r\n\t\tfor ischeme in schemes:\r\n\t\t\tif id_par == 'Tasks per Set':\r\n\t\t\t\tifile = prefix + \"/\" + str(minsstype) + \"-\" + str(maxsstype) + \"/\" + str(\r\n\t\t\t\t\tssofftypes) + \"/\" + ischeme + str(numberoftasks[c]) + \".npy\"\r\n\t\t\telif id_par == 'Number of Segments':\r\n\t\t\t\tifile = prefix + \"/\" + str(minsstype) + \"-\" + str(maxsstype) + \"/\" + str(\r\n\t\t\t\t\tssofftypes[c]) + \"/\" + ischeme + str(numberoftasks) + \".npy\"\r\n\t\t\telif id_par == 'Suspension Length':\r\n\t\t\t\tifile = prefix + \"/\" + str(minsstype[c]) + \"-\" + str(maxsstype[c]) + \"/\" + str(\r\n\t\t\t\t\tssofftypes) + \"/\" + ischeme + str(numberoftasks) + \".npy\"\r\n\t\t\tdata = [0]\r\n\t\t\ttry:\r\n\t\t\t\tdata = np.load(ifile)\r\n\t\t\texcept:\r\n\t\t\t\tprint(\"Data not loaded\")\r\n\t\t\tif np.all(data)==False:\r\n\t\t\t\tif id_par == 'Tasks per Set':\r\n\t\t\t\t\traise Exception(\"Run \"+str(ischeme)+\" with \"+str(numberoftasks[c])+\" \"+str(id_par)+\" first\")\r\n\t\t\t\telif id_par == 'Number of Segments':\r\n\t\t\t\t\traise Exception(\"Run \"+str(ischeme)+\" with \"+str(ssofftypes[c])+\" Segments first\")\r\n\t\t\t\telif id_par == 'Suspension 
Length':\r\n\t\t\t\t\traise Exception(\"Run \"+str(ischeme)+\" with Suspension Interval of [\"+str(minsstype[c])+\",\"+str(maxsstype[c])+\"] first\")\r\n\t\t\tx = data[0][0::1]\r\n\t\t\ty = data[1][0::1]\r\n\t\t\tus = int(math.ceil(ustart/ustep))\r\n\t\t\tue = int(math.floor(uend/ustep))\r\n\t\t\tprint(x)\r\n\t\t\tprint(y)\r\n\t\t\tx=x[us:ue+1]\r\n\t\t\ty=y[us:ue+1]\r\n\t\t\tax.plot(x, y,\r\n\t\t\t\t\t'-',\r\n\t\t\t\t\tcolor=pickColor(ischeme),\r\n\t\t\t\t\tmarker=pickMarker(ischeme),\r\n\t\t\t\t\tmarkersize=4,\r\n\t\t\t\t\tmarkevery=1,\r\n\t\t\t\t\tfillstyle='none',\r\n\t\t\t\t\tlabel=pickName(ischeme),\r\n\t\t\t\t\tlinewidth=1.0,\r\n\t\t\t\t\tclip_on=False)\r\n\t\t\tif c==1:\r\n\t\t\t\tax.legend(bbox_to_anchor=(0.5, 1.11),\r\n\t\t\t\t\t\tloc=10,\r\n\t\t\t\t\t\tmarkerscale=1.5,\r\n\t\t\t\t\t\tncol=3,\r\n\t\t\t\t\t\tborderaxespad=0.,\r\n\t\t\t\t\t\tprop={'size': 10})\r\n\t\t\tif i == 1:\r\n\t\t\t\tax.grid()\r\n\t\t\ti += 1\r\n\r\n\tfig.suptitle('No. of tasks: '+str(numberoftasks)+', Self-suspension length: ' +\r\n\t\t\t\t\tstr(minsstype)+\"-\"+str(maxsstype)+', No. of segments: '+str(ssofftypes), size=16, y=0.99)\r\n\t# ax.grid()\r\n\r\n\t#fig.savefig(prefix+\"/\"+isstype+\"/\"+issofftypes +\r\n\t\t# \"/\"+ischeme+\".pdf\", bbox_inches='tight')\r\n\r\n\t#plt.show()\r\n\tif plotall:\r\n\t\tfig.savefig(prefix + '/EFFSSTS[' + str(ssofftypes) + '][' + str(minsstype)+\"-\"+str(maxsstype) + '][' + str(numberoftasks) + '].pdf', bbox_inches='tight')\r\n\t\tprint('[DONE]', '/' + prefix + '/EFFSSTS[' + str(ssofftypes) + '][' + str(minsstype)+\"-\"+str(maxsstype) + '][' + str(numberoftasks) + '].pdf')\r\n\telse:\r\n\t\tfig.savefig(prefix + '/' + schemes[0] + '[' + str(ssofftypes) + '][' + str(minsstype)+\"-\"+str(maxsstype) + '][' + str(numberoftasks) + '].pdf', bbox_inches='tight')\r\n\t\tprint('[DONE]', '/' + prefix + '/' + schemes[0] + '[' + str(ssofftypes) + '][' + str(minsstype)+\"-\"+str(maxsstype) + '][' + str(numberoftasks) + '].pdf')\r\n\r\n\r\ndef effsstsPlotAll(prefix, plotall, schemes, minsstype, maxsstype, ssofftypes, ustart, uend, ustep, numberoftasks):\r\n\tprint('-------------------------------------------------------')\r\n\tprint(prefix, plotall, schemes, minsstype, maxsstype, ssofftypes, ustart, uend, ustep,numberoftasks)\r\n\tprint('-------------------------------------------------------')\r\n\tfor scheme in schemes:\r\n\t\teffsstsPlot(prefix, False, scheme.split(), minsstype, maxsstype, ssofftypes, ustart, uend, ustep, numberoftasks)\r\n\tif (plotall):\r\n\t\teffsstsPlot(prefix, True, schemes, minsstype, maxsstype, ssofftypes, ustart, uend, ustep, numberoftasks)\r\n\r\ndef effsstsPlotAllmulti(prefix, plotall, id_par, par_values, schemes, minsstype, maxsstype, ssofftypes, ustart, uend, ustep, numberoftasks):\r\n\tprint('-------------------------------------------------------')\r\n\tprint(prefix, plotall, schemes, minsstype, maxsstype, ssofftypes, ustart, uend, ustep,numberoftasks)\r\n\tprint('-------------------------------------------------------')\r\n\tfor scheme in schemes:\r\n\t\teffsstsPlotmulti(prefix, False, id_par, par_values, scheme.split(), minsstype, maxsstype, ssofftypes, ustart, uend, ustep, numberoftasks)\r\n\tif (plotall):\r\n\t\teffsstsPlotmulti(prefix, True, id_par, par_values, schemes, minsstype, maxsstype, ssofftypes, ustart, uend, ustep, numberoftasks)\r\n\r\nif __name__ == '__main__':\r\n\targs = sys.argv\r\n\tprint(args)\r\n\ttestSchemes = ['EDA', 'NC', 'SCEDF', 'PASS-OPA']\r\n\ttestSelfSuspendingType= ['S','M','L']\r\n\ttestNumberofSegments = 
[2]\r\n\teffsstsPlotAll(args[1], True, testSchemes, testSelfSuspendingType, testNumberofSegments, 1, 99, 5, 10,10)\r\n"
] |
[
[
"numpy.all",
"numpy.load",
"matplotlib.pyplot.figure"
]
] |
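`effsstsPlot` above expects each scheme's `.npy` file to hold a two-row array: row 0 the utilisation points and row 1 the corresponding acceptance ratios. The hedged sketch below writes and reads such a file to make that layout explicit; the file name follows the scheme-plus-task-count pattern and the numbers are made up.

```python
# Hedged sketch of the .npy layout consumed by effsstsPlot: data[0] = utilisations, data[1] = acceptance ratios.
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt

data = np.array([[5, 25, 50, 75, 95],            # utilisation (%)
                 [1.0, 1.0, 0.8, 0.3, 0.0]])     # acceptance ratio at each utilisation
np.save("EDA10.npy", data)                       # scheme "EDA", 10 tasks per set (illustrative name)

loaded = np.load("EDA10.npy")
x, y = loaded[0], loaded[1]

fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y, "-o", fillstyle="none", linewidth=1.0)
ax.set_xlabel("Utilization (%)")
ax.set_ylabel("Acceptance Ratio")
fig.savefig("EDA10.pdf", bbox_inches="tight")
```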
likedan/keras_lrp
|
[
"f2a464604e1d69d00c1feeaa7bbd752a9234bd49"
] |
[
"lrp/avgpool.py"
] |
[
"'''\n@author: Sebastian Lapuschkin\n@author: Gregoire Montavon\n@maintainer: Sebastian Lapuschkin\n@contact: [email protected], [email protected]\n@date: 14.08.2015\n@version: 1.2+\n@copyright: Copyright (c) 2015-2017, Sebastian Lapuschkin, Alexander Binder, Gregoire Montavon, Klaus-Robert Mueller, Wojciech Samek\n@license : BSD-2-Clause\n'''\n\nimport numpy as np\nimport math\n# -------------------------------\n# Sum Pooling layer\n# -------------------------------\n\nclass AvgPool:\n\n def __init__(self, layer, X):\n '''\n Constructor for the sum pooling layer object\n\n Parameters\n ----------\n\n pool : tuple (h,w)\n the size of the pooling mask in vertical (h) and horizontal (w) direction\n\n stride : tuple (h,w)\n the vertical (h) and horizontal (w) step sizes between filter applications.\n '''\n\n self.X = X\n self.pool = layer.pool_size\n self.stride = layer.strides\n\n def _simple_lrp(self,R):\n '''\n LRP according to Eq(56) in DOI: 10.1371/journal.pone.0130140\n '''\n N,H,W,D = self.X.shape\n\n hpool, wpool = self.pool\n hstride, wstride = self.stride\n\n #assume the given pooling and stride parameters are carefully chosen.\n Hout = int((H - hpool) / hstride + 1)\n Wout = int((W - wpool) / wstride + 1)\n\n Rx = np.zeros(self.X.shape)\n for i in range(Hout):\n for j in range(Wout):\n Z = self.X[:, i*hstride:i*hstride+hpool , j*wstride:j*wstride+wpool , : ] #input activations.\n Zs = Z.sum(axis=(1,2),keepdims=True)\n Zs += 1e-12*((Zs >= 0)*2-1) # add a weak numerical stabilizer to cushion an all-zero input\n\n Rx[:,i*hstride:i*hstride+hpool: , j*wstride:j*wstride+wpool: , : ] += (Z/Zs/(hpool * wpool)) * R[:,i:i+1,j:j+1,:] #distribute relevance propoprtional to input activations per layer\n\n return Rx\n\n\n def _flat_lrp(self,R):\n '''\n distribute relevance for each output evenly to the output neurons' receptive fields.\n '''\n N,H,W,D = self.X.shape\n\n hpool, wpool = self.pool\n hstride, wstride = self.stride\n\n #assume the given pooling and stride parameters are carefully chosen.\n Hout = (H - hpool) / hstride + 1\n Wout = (W - wpool) / wstride + 1\n\n Rx = np.zeros_like(self.X,dtype=np.float)\n\n for i in range(Hout):\n for j in range(Wout):\n Z = np.ones([N,hpool,wpool,D])\n Zs = Z.sum(axis=(1,2),keepdims=True)\n Rx[:,i*hstride:i*hstride+hpool , j*wstride:j*wstride+wpool,:] += (Z / Zs) * R[:,i:i+1,j:j+1,:]\n return Rx\n\n def _ww_lrp(self,R):\n '''\n due to uniform weights used for sum pooling (1), this method defaults to _flat_lrp(R)\n '''\n return self._flat_lrp(R)\n\n def _epsilon_lrp(self,R,epsilon):\n '''\n LRP according to Eq(58) in DOI: 10.1371/journal.pone.0130140\n '''\n N,H,W,D = self.X.shape\n\n hpool, wpool = self.pool\n hstride, wstride = self.stride\n\n #assume the given pooling and stride parameters are carefully chosen.\n Hout = (H - hpool) / hstride + 1\n Wout = (W - wpool) / wstride + 1\n\n Rx = np.zeros(self.X.shape)\n for i in range(Hout):\n for j in range(Wout):\n Z = self.X[:, i*hstride:i*hstride+hpool , j*wstride:j*wstride+wpool , : ] #input activations.\n Zs = Z.sum(axis=(1,2),keepdims=True)\n Zs += epsilon*((Zs >= 0)*2-1) # add a epsilon stabilizer to cushion an all-zero input\n\n Rx[:,i*hstride:i*hstride+hpool: , j*wstride:j*wstride+wpool: , : ] += (Z/Zs) * R[:,i:i+1,j:j+1,:] #distribute relevance propoprtional to input activations per layer\n\n return Rx\n\n\n # yes, we can do this. no, it will not make sense most of the time. by default, _lrp_simple will be called. 
see line 152\n def _alphabeta_lrp(self,R,alpha):\n '''\n LRP according to Eq(60) in DOI: 10.1371/journal.pone.0130140\n '''\n\n beta = 1-alpha\n\n N,H,W,D = self.X.shape\n\n hpool, wpool = self.pool\n hstride, wstride = self.stride\n\n #assume the given pooling and stride parameters are carefully chosen.\n Hout = (H - hpool) / hstride + 1\n Wout = (W - wpool) / wstride + 1\n\n #distribute the gradient towards across all inputs evenly\n Rx = np.zeros(self.X.shape)\n for i in range(Hout):\n for j in range(Wout):\n Z = self.X[:, i*hstride:i*hstride+hpool , j*wstride:j*wstride+wpool , : ] #input activations.\n\n if not alpha == 0:\n Zp = Z * (Z > 0)\n Zsp = Zp.sum(axis=(1,2),keepdims=True) +1e-16 #zero division is quite likely in sum pooling layers when using the alpha-variant\n Ralpha = (Zp/Zsp) * R[:,i:i+1,j:j+1,:]\n else:\n Ralpha = 0\n\n if not beta == 0:\n Zn = Z * (Z < 0)\n Zsn = Zn.sum(axis=(1,2),keepdims=True) - 1e-16 #zero division is quite likely in sum pooling layers when using the alpha-variant\n Rbeta = (Zn/Zsn) * R[:,i:i+1,j:j+1,:]\n else:\n Rbeta = 0\n\n Rx[:,i*hstride:i*hstride+hpool: , j*wstride:j*wstride+wpool: , : ] += Ralpha + Rbeta\n\n return Rx\n"
] |
[
[
"numpy.zeros",
"numpy.zeros_like",
"numpy.ones"
]
] |
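The LRP rules in the pooling layer above share the same core step: the relevance of a pooled output is split across its pooling window in proportion to the (stabilised) input activations, `(Z / Zs) * R`. A tiny, hedged NumPy example of that step on a single 2×2 window, mirroring `_epsilon_lrp` with a weak stabiliser, follows.

```python
# Hedged numeric example of the proportional relevance redistribution used by the pooling LRP rules:
# the relevance of one pooled output is split over its 2x2 window as (Z / Zs) * R.
import numpy as np

Z = np.array([[1.0, 3.0],
              [0.0, 4.0]])              # input activations inside one pooling window
R_out = 2.0                             # relevance arriving at the pooled output

Zs = Z.sum()
Zs += 1e-12 * ((Zs >= 0) * 2 - 1)       # weak stabiliser, as in the layer code
R_in = (Z / Zs) * R_out

print(R_in)                             # [[0.25 0.75] [0.   1.  ]]
print(R_in.sum())                       # 2.0 -> relevance is conserved across the window
```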
airacid/literate-garbanzo
|
[
"8827db2e39896eb2633440478cf6b799a9d0aebd"
] |
[
"lib/datasets/imagenet3d.py"
] |
[
"__author__ = 'yuxiang' # derived from honda.py by fyang\n\nimport datasets\nimport datasets.imagenet3d\nimport os\nimport PIL\nimport datasets.imdb\nimport numpy as np\nimport scipy.sparse\nfrom utils.cython_bbox import bbox_overlaps\nfrom utils.boxes_grid import get_boxes_grid\nimport subprocess\nimport pickle\nfrom fast_rcnn.config import cfg\nimport math\nfrom rpn_msr.generate_anchors import generate_anchors\nimport sys\n\nclass imagenet3d(datasets.imdb):\n def __init__(self, image_set, imagenet3d_path=None):\n datasets.imdb.__init__(self, 'imagenet3d_' + image_set)\n self._image_set = image_set\n self._imagenet3d_path = self._get_default_path() if imagenet3d_path is None \\\n else imagenet3d_path\n self._data_path = os.path.join(self._imagenet3d_path, 'Images')\n self._classes = ('__background__', 'aeroplane', 'ashtray', 'backpack', 'basket', \\\n 'bed', 'bench', 'bicycle', 'blackboard', 'boat', 'bookshelf', 'bottle', 'bucket', \\\n 'bus', 'cabinet', 'calculator', 'camera', 'can', 'cap', 'car', 'cellphone', 'chair', \\\n 'clock', 'coffee_maker', 'comb', 'computer', 'cup', 'desk_lamp', 'diningtable', \\\n 'dishwasher', 'door', 'eraser', 'eyeglasses', 'fan', 'faucet', 'filing_cabinet', \\\n 'fire_extinguisher', 'fish_tank', 'flashlight', 'fork', 'guitar', 'hair_dryer', \\\n 'hammer', 'headphone', 'helmet', 'iron', 'jar', 'kettle', 'key', 'keyboard', 'knife', \\\n 'laptop', 'lighter', 'mailbox', 'microphone', 'microwave', 'motorbike', 'mouse', \\\n 'paintbrush', 'pan', 'pen', 'pencil', 'piano', 'pillow', 'plate', 'pot', 'printer', \\\n 'racket', 'refrigerator', 'remote_control', 'rifle', 'road_pole', 'satellite_dish', \\\n 'scissors', 'screwdriver', 'shoe', 'shovel', 'sign', 'skate', 'skateboard', 'slipper', \\\n 'sofa', 'speaker', 'spoon', 'stapler', 'stove', 'suitcase', 'teapot', 'telephone', \\\n 'toaster', 'toilet', 'toothbrush', 'train', 'trash_bin', 'trophy', 'tub', 'tvmonitor', \\\n 'vending_machine', 'washing_machine', 'watch', 'wheelchair')\n self._class_to_ind = dict(list(zip(self.classes, list(range(self.num_classes)))))\n self._image_ext = '.JPEG'\n self._image_index = self._load_image_set_index()\n # Default to roidb handler\n if cfg.IS_RPN:\n self._roidb_handler = self.gt_roidb\n else:\n self._roidb_handler = self.region_proposal_roidb\n\n self.config = {'top_k': 100000}\n\n # statistics for computing recall\n self._num_boxes_all = np.zeros(self.num_classes, dtype=np.int)\n self._num_boxes_covered = np.zeros(self.num_classes, dtype=np.int)\n self._num_boxes_proposal = 0\n\n assert os.path.exists(self._imagenet3d_path), \\\n 'imagenet3d path does not exist: {}'.format(self._imagenet3d_path)\n assert os.path.exists(self._data_path), \\\n 'Path does not exist: {}'.format(self._data_path)\n\n def image_path_at(self, i):\n \"\"\"\n Return the absolute path to image i in the image sequence.\n \"\"\"\n return self.image_path_from_index(self.image_index[i])\n\n def image_path_from_index(self, index):\n \"\"\"\n Construct an image path from the image's \"index\" identifier.\n \"\"\"\n\n image_path = os.path.join(self._data_path, index + self._image_ext)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path\n\n def _load_image_set_index(self):\n \"\"\"\n Load the indexes listed in this dataset's image set file.\n \"\"\"\n image_set_file = os.path.join(self._imagenet3d_path, 'Image_sets', self._image_set + '.txt')\n assert os.path.exists(image_set_file), \\\n 'Path does not exist: {}'.format(image_set_file)\n\n with 
open(image_set_file) as f:\n image_index = [x.rstrip('\\n') for x in f.readlines()]\n return image_index\n\n def _get_default_path(self):\n \"\"\"\n Return the default path where imagenet3d is expected to be installed.\n \"\"\"\n return os.path.join(datasets.ROOT_DIR, 'data', 'ImageNet3D')\n\n\n def gt_roidb(self):\n \"\"\"\n Return the database of ground-truth regions of interest.\n\n This function loads/saves from/to a cache file to speed up future calls.\n \"\"\"\n\n cache_file = os.path.join(self.cache_path, self.name + '_' + cfg.SUBCLS_NAME + '_gt_roidb.pkl')\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = pickle.load(fid)\n print(('{} gt roidb loaded from {}'.format(self.name, cache_file)))\n return roidb\n\n gt_roidb = [self._load_imagenet3d_annotation(index)\n for index in self.image_index]\n\n if cfg.IS_RPN:\n # print out recall\n for i in range(1, self.num_classes):\n print(('{}: Total number of boxes {:d}'.format(self.classes[i], self._num_boxes_all[i])))\n print(('{}: Number of boxes covered {:d}'.format(self.classes[i], self._num_boxes_covered[i])))\n print(('{}: Recall {:f}'.format(self.classes[i], float(self._num_boxes_covered[i]) / float(self._num_boxes_all[i]))))\n\n with open(cache_file, 'wb') as fid:\n pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)\n print(('wrote gt roidb to {}'.format(cache_file)))\n\n return gt_roidb\n\n\n def _load_imagenet3d_annotation(self, index):\n \"\"\"\n Load image and bounding boxes info from txt file in the imagenet3d format.\n \"\"\"\n\n if self._image_set == 'test' or self._image_set == 'test_1' or self._image_set == 'test_2':\n lines = []\n else:\n filename = os.path.join(self._imagenet3d_path, 'Labels', index + '.txt')\n lines = []\n with open(filename) as f:\n for line in f:\n lines.append(line)\n\n num_objs = len(lines)\n\n boxes = np.zeros((num_objs, 4), dtype=np.float32)\n viewpoints = np.zeros((num_objs, 3), dtype=np.float32) # azimuth, elevation, in-plane rotation\n viewpoints_flipped = np.zeros((num_objs, 3), dtype=np.float32) # azimuth, elevation, in-plane rotation\n gt_classes = np.zeros((num_objs), dtype=np.int32)\n overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n\n for ix, line in enumerate(lines):\n words = line.split()\n assert len(words) == 5 or len(words) == 8, 'Wrong label format: {}'.format(index)\n cls = self._class_to_ind[words[0]]\n boxes[ix, :] = [float(n) for n in words[1:5]]\n gt_classes[ix] = cls\n overlaps[ix, cls] = 1.0\n if len(words) == 8:\n viewpoints[ix, :] = [float(n) for n in words[5:8]]\n # flip the viewpoint\n viewpoints_flipped[ix, 0] = -viewpoints[ix, 0] # azimuth\n viewpoints_flipped[ix, 1] = viewpoints[ix, 1] # elevation\n viewpoints_flipped[ix, 2] = -viewpoints[ix, 2] # in-plane rotation\n else:\n viewpoints[ix, :] = np.inf\n viewpoints_flipped[ix, :] = np.inf\n\n gt_subclasses = np.zeros((num_objs), dtype=np.int32)\n gt_subclasses_flipped = np.zeros((num_objs), dtype=np.int32)\n subindexes = np.zeros((num_objs, self.num_classes), dtype=np.int32)\n subindexes_flipped = np.zeros((num_objs, self.num_classes), dtype=np.int32)\n viewindexes_azimuth = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n viewindexes_azimuth_flipped = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n viewindexes_elevation = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n viewindexes_elevation_flipped = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n viewindexes_rotation = np.zeros((num_objs, self.num_classes), 
dtype=np.float32)\n viewindexes_rotation_flipped = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n subindexes = scipy.sparse.csr_matrix(subindexes)\n subindexes_flipped = scipy.sparse.csr_matrix(subindexes_flipped)\n viewindexes_azimuth = scipy.sparse.csr_matrix(viewindexes_azimuth)\n viewindexes_azimuth_flipped = scipy.sparse.csr_matrix(viewindexes_azimuth_flipped)\n viewindexes_elevation = scipy.sparse.csr_matrix(viewindexes_elevation)\n viewindexes_elevation_flipped = scipy.sparse.csr_matrix(viewindexes_elevation_flipped)\n viewindexes_rotation = scipy.sparse.csr_matrix(viewindexes_rotation)\n viewindexes_rotation_flipped = scipy.sparse.csr_matrix(viewindexes_rotation_flipped)\n\n if cfg.IS_RPN:\n if cfg.IS_MULTISCALE:\n # compute overlaps between grid boxes and gt boxes in multi-scales\n # rescale the gt boxes\n boxes_all = np.zeros((0, 4), dtype=np.float32)\n for scale in cfg.TRAIN.SCALES:\n boxes_all = np.vstack((boxes_all, boxes * scale))\n gt_classes_all = np.tile(gt_classes, len(cfg.TRAIN.SCALES))\n\n # compute grid boxes\n s = PIL.Image.open(self.image_path_from_index(index)).size\n image_height = s[1]\n image_width = s[0]\n boxes_grid, _, _ = get_boxes_grid(image_height, image_width)\n\n # compute overlap\n overlaps_grid = bbox_overlaps(boxes_grid.astype(np.float), boxes_all.astype(np.float))\n \n # check how many gt boxes are covered by grids\n if num_objs != 0:\n index = np.tile(list(range(num_objs)), len(cfg.TRAIN.SCALES))\n max_overlaps = overlaps_grid.max(axis = 0)\n fg_inds = []\n for k in range(1, self.num_classes):\n fg_inds.extend(np.where((gt_classes_all == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[k-1]))[0])\n index_covered = np.unique(index[fg_inds])\n\n for i in range(self.num_classes):\n self._num_boxes_all[i] += len(np.where(gt_classes == i)[0])\n self._num_boxes_covered[i] += len(np.where(gt_classes[index_covered] == i)[0])\n else:\n assert len(cfg.TRAIN.SCALES_BASE) == 1\n scale = cfg.TRAIN.SCALES_BASE[0]\n feat_stride = 16\n # faster rcnn region proposal\n base_size = 16\n ratios = cfg.TRAIN.RPN_ASPECTS\n scales = cfg.TRAIN.RPN_SCALES\n anchors = generate_anchors(base_size, ratios, scales)\n num_anchors = anchors.shape[0]\n\n # image size\n s = PIL.Image.open(self.image_path_from_index(index)).size\n image_height = s[1]\n image_width = s[0]\n\n # height and width of the heatmap\n height = np.round((image_height * scale - 1) / 4.0 + 1)\n height = np.floor((height - 1) / 2 + 1 + 0.5)\n height = np.floor((height - 1) / 2 + 1 + 0.5)\n\n width = np.round((image_width * scale - 1) / 4.0 + 1)\n width = np.floor((width - 1) / 2.0 + 1 + 0.5)\n width = np.floor((width - 1) / 2.0 + 1 + 0.5)\n\n # gt boxes\n gt_boxes = boxes * scale\n\n # 1. 
Generate proposals from bbox deltas and shifted anchors\n shift_x = np.arange(0, width) * feat_stride\n shift_y = np.arange(0, height) * feat_stride\n shift_x, shift_y = np.meshgrid(shift_x, shift_y)\n shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),\n shift_x.ravel(), shift_y.ravel())).transpose()\n # add A anchors (1, A, 4) to\n # cell K shifts (K, 1, 4) to get\n # shift anchors (K, A, 4)\n # reshape to (K*A, 4) shifted anchors\n A = num_anchors\n K = shifts.shape[0]\n all_anchors = (anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2)))\n all_anchors = all_anchors.reshape((K * A, 4))\n\n # compute overlap\n overlaps_grid = bbox_overlaps(all_anchors.astype(np.float), gt_boxes.astype(np.float))\n \n # check how many gt boxes are covered by anchors\n if num_objs != 0:\n max_overlaps = overlaps_grid.max(axis = 0)\n fg_inds = []\n for k in range(1, self.num_classes):\n fg_inds.extend(np.where((gt_classes == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[k-1]))[0])\n\n for i in range(self.num_classes):\n self._num_boxes_all[i] += len(np.where(gt_classes == i)[0])\n self._num_boxes_covered[i] += len(np.where(gt_classes[fg_inds] == i)[0])\n\n return {'boxes' : boxes,\n 'gt_classes': gt_classes,\n 'gt_viewpoints': viewpoints,\n 'gt_viewpoints_flipped': viewpoints_flipped,\n 'gt_viewindexes_azimuth': viewindexes_azimuth,\n 'gt_viewindexes_azimuth_flipped': viewindexes_azimuth_flipped,\n 'gt_viewindexes_elevation': viewindexes_elevation,\n 'gt_viewindexes_elevation_flipped': viewindexes_elevation_flipped,\n 'gt_viewindexes_rotation': viewindexes_rotation,\n 'gt_viewindexes_rotation_flipped': viewindexes_rotation_flipped,\n 'gt_subclasses': gt_subclasses,\n 'gt_subclasses_flipped': gt_subclasses_flipped,\n 'gt_overlaps' : overlaps,\n 'gt_subindexes': subindexes,\n 'gt_subindexes_flipped': subindexes_flipped,\n 'flipped' : False}\n\n\n def region_proposal_roidb(self):\n \"\"\"\n Return the database of regions of interest.\n Ground-truth ROIs are also included.\n\n This function loads/saves from/to a cache file to speed up future calls.\n \"\"\"\n cache_file = os.path.join(self.cache_path,\n self.name + '_' + cfg.REGION_PROPOSAL + '_region_proposal_roidb.pkl')\n\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = pickle.load(fid)\n print(('{} roidb loaded from {}'.format(self.name, cache_file)))\n return roidb\n\n if self._image_set != 'test':\n gt_roidb = self.gt_roidb()\n\n print('Loading region proposal network boxes...')\n model = cfg.REGION_PROPOSAL\n rpn_roidb = self._load_rpn_roidb(gt_roidb, model)\n print('Region proposal network boxes loaded')\n roidb = datasets.imdb.merge_roidbs(rpn_roidb, gt_roidb)\n else:\n print('Loading region proposal network boxes...')\n model = cfg.REGION_PROPOSAL\n roidb = self._load_rpn_roidb(None, model)\n print('Region proposal network boxes loaded')\n\n print(('{} region proposals per image'.format(self._num_boxes_proposal / len(self.image_index))))\n\n # print out recall\n if self._image_set != 'test':\n for i in range(1, self.num_classes):\n print(('{}: Total number of boxes {:d}'.format(self.classes[i], self._num_boxes_all[i])))\n print(('{}: Number of boxes covered {:d}'.format(self.classes[i], self._num_boxes_covered[i])))\n if self._num_boxes_all[i] > 0:\n print(('{}: Recall {:f}'.format(self.classes[i], float(self._num_boxes_covered[i]) / float(self._num_boxes_all[i]))))\n\n with open(cache_file, 'wb') as fid:\n pickle.dump(roidb, fid, pickle.HIGHEST_PROTOCOL)\n print(('wrote roidb to 
{}'.format(cache_file)))\n\n return roidb\n\n def _load_rpn_roidb(self, gt_roidb, model):\n\n box_list = []\n for ix, index in enumerate(self.image_index):\n filename = os.path.join(self._imagenet3d_path, 'region_proposals', model, index + '.txt')\n assert os.path.exists(filename), \\\n '{} data not found at: {}'.format(model, filename)\n raw_data = np.loadtxt(filename, dtype=float)\n if len(raw_data.shape) == 1:\n if raw_data.size == 0:\n raw_data = raw_data.reshape((0, 5))\n else:\n raw_data = raw_data.reshape((1, 5))\n\n if model == 'selective_search' or model == 'mcg':\n x1 = raw_data[:, 1].copy()\n y1 = raw_data[:, 0].copy()\n x2 = raw_data[:, 3].copy()\n y2 = raw_data[:, 2].copy()\n elif model == 'edge_boxes':\n x1 = raw_data[:, 0].copy()\n y1 = raw_data[:, 1].copy()\n x2 = raw_data[:, 2].copy() + raw_data[:, 0].copy()\n y2 = raw_data[:, 3].copy() + raw_data[:, 1].copy()\n elif model == 'rpn_caffenet' or model == 'rpn_vgg16':\n x1 = raw_data[:, 0].copy()\n y1 = raw_data[:, 1].copy()\n x2 = raw_data[:, 2].copy()\n y2 = raw_data[:, 3].copy()\n else:\n assert 1, 'region proposal not supported: {}'.format(model)\n\n inds = np.where((x2 > x1) & (y2 > y1))[0]\n raw_data[:, 0] = x1\n raw_data[:, 1] = y1\n raw_data[:, 2] = x2\n raw_data[:, 3] = y2\n raw_data = raw_data[inds,:4]\n\n self._num_boxes_proposal += raw_data.shape[0]\n box_list.append(raw_data)\n print(('load {}: {}'.format(model, index)))\n\n if gt_roidb is not None:\n # compute overlaps between region proposals and gt boxes\n boxes = gt_roidb[ix]['boxes'].copy()\n gt_classes = gt_roidb[ix]['gt_classes'].copy()\n # compute overlap\n overlaps = bbox_overlaps(raw_data.astype(np.float), boxes.astype(np.float))\n # check how many gt boxes are covered by anchors\n if raw_data.shape[0] != 0:\n max_overlaps = overlaps.max(axis = 0)\n fg_inds = []\n for k in range(1, self.num_classes):\n fg_inds.extend(np.where((gt_classes == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[k-1]))[0])\n\n for i in range(self.num_classes):\n self._num_boxes_all[i] += len(np.where(gt_classes == i)[0])\n self._num_boxes_covered[i] += len(np.where(gt_classes[fg_inds] == i)[0])\n\n return self.create_roidb_from_box_list(box_list, gt_roidb)\n\n\n def evaluate_detections(self, all_boxes, output_dir):\n\n # for each image\n for im_ind, index in enumerate(self.image_index):\n filename = os.path.join(output_dir, index + '.txt')\n print(('Writing imagenet3d results to file ' + filename))\n with open(filename, 'wt') as f:\n # for each class\n for cls_ind, cls in enumerate(self.classes):\n if cls == '__background__':\n continue\n dets = all_boxes[cls_ind][im_ind]\n if dets == []:\n continue\n # detection and viewpoint\n for k in range(dets.shape[0]):\n f.write('{:s} {:f} {:f} {:f} {:f} {:.32f} {:f} {:f} {:f}\\n'.format(\\\n cls, dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], dets[k, 4], dets[k, 6], dets[k, 7], dets[k, 8]))\n\n # write detection results into one file\n def evaluate_detections_one_file(self, all_boxes, output_dir):\n\n # for each class\n for cls_ind, cls in enumerate(self.classes):\n if cls == '__background__':\n continue\n # open results file\n filename = os.path.join(output_dir, 'detections_{}.txt'.format(cls))\n print(('Writing imagenet3d results to file ' + filename))\n with open(filename, 'wt') as f:\n # for each image\n for im_ind, index in enumerate(self.image_index):\n dets = all_boxes[cls_ind][im_ind]\n if dets == []:\n continue\n # detection and viewpoint\n for k in range(dets.shape[0]):\n f.write('{:s} {:f} {:f} {:f} {:f} {:.32f} {:f} {:f} 
{:f}\\n'.format(\\\n index, dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], dets[k, 4], dets[k, 6], dets[k, 7], dets[k, 8]))\n\n def evaluate_proposals(self, all_boxes, output_dir):\n # for each image\n for im_ind, index in enumerate(self.image_index):\n filename = os.path.join(output_dir, index + '.txt')\n print(('Writing imagenet3d results to file ' + filename))\n with open(filename, 'wt') as f:\n # for each class\n for cls_ind, cls in enumerate(self.classes):\n if cls == '__background__':\n continue\n dets = all_boxes[cls_ind][im_ind]\n if dets == []:\n continue\n for k in range(dets.shape[0]):\n f.write('{:f} {:f} {:f} {:f} {:.32f}\\n'.format(\\\n dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], dets[k, 4]))\n\n def evaluate_proposals_msr(self, all_boxes, output_dir):\n # for each image\n for im_ind, index in enumerate(self.image_index):\n filename = os.path.join(output_dir, index + '.txt')\n print(('Writing imagenet3d results to file ' + filename))\n with open(filename, 'wt') as f:\n dets = all_boxes[im_ind]\n if dets == []:\n continue\n for k in range(dets.shape[0]):\n f.write('{:f} {:f} {:f} {:f} {:.32f}\\n'.format(dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], dets[k, 4]))\n\n\nif __name__ == '__main__':\n d = datasets.imagenet3d('trainval')\n res = d.roidb\n from IPython import embed; embed()\n"
] |
[
[
"numpy.unique",
"numpy.arange",
"numpy.vstack",
"numpy.round",
"numpy.floor",
"numpy.meshgrid",
"numpy.zeros",
"numpy.where",
"numpy.loadtxt"
]
] |
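The `imagenet3d` roidb code in the record above builds its anchor grid by broadcasting A base anchors over K feature-map cell shifts, as its inline comments describe. A minimal standalone sketch of that broadcast, with purely illustrative stride, grid size, and base anchors:

```python
# Sketch of the (1, A, 4) + (K, 1, 4) -> (K*A, 4) anchor-shifting broadcast
# used in the entry above; all concrete values here are illustrative.
import numpy as np

feat_stride, height, width = 16, 3, 4                  # toy feature-map geometry
anchors = np.array([[-8, -8, 8, 8],                    # two toy base anchors (x1, y1, x2, y2)
                    [-16, -16, 16, 16]])
A = anchors.shape[0]

shift_x = np.arange(0, width) * feat_stride
shift_y = np.arange(0, height) * feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                    shift_x.ravel(), shift_y.ravel())).transpose()
K = shifts.shape[0]

all_anchors = (anchors.reshape((1, A, 4)) +
               shifts.reshape((1, K, 4)).transpose((1, 0, 2))).reshape((K * A, 4))
assert all_anchors.shape == (K * A, 4)                 # one shifted copy of each anchor per cell
```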
hirune924/kaggle-G2Net
|
[
"e1b8d39a04411435c1b7b93a67cc370594f8a7ab"
] |
[
"baseline_3_fixnorm_randwin_resize_filt.py"
] |
[
"####################\n# Import Libraries\n####################\nimport os\nimport sys\nfrom PIL import Image\nimport cv2\nimport numpy as np\nimport pandas as pd\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.metrics import Accuracy\nfrom pytorch_lightning import loggers\nfrom pytorch_lightning import seed_everything\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\n\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn import model_selection\nimport albumentations as A\nimport timm\nfrom omegaconf import OmegaConf\n\nfrom sklearn.metrics import roc_auc_score\nfrom nnAudio.Spectrogram import CQT1992v2, CQT2010v2\nfrom scipy import signal\nimport random\n\nimport pycbc\nfrom pycbc.filter import highpass_fir, lowpass_fir\nfrom scipy import signal\n####################\n# Utils\n####################\ndef get_score(y_true, y_pred):\n try:\n score = roc_auc_score(y_true, y_pred)\n except:\n score = 0.0\n return score\n\ndef load_pytorch_model(ckpt_name, model, ignore_suffix='model'):\n state_dict = torch.load(ckpt_name, map_location='cpu')[\"state_dict\"]\n new_state_dict = {}\n for k, v in state_dict.items():\n name = k\n if name.startswith(str(ignore_suffix)+\".\"):\n name = name.replace(str(ignore_suffix)+\".\", \"\", 1) # remove `model.`\n new_state_dict[name] = v\n model.load_state_dict(new_state_dict, strict=False)\n return model\n\ndef filt(waves):\n #window = signal.tukey(4096,0.1)\n waves = [pycbc.filter.resample.highpass_fir(pycbc.types.TimeSeries(w, epoch=0, delta_t=1.0/2048), frequency=20, order=100) for w in waves]\n waves = [pycbc.filter.resample.notch_fir(w, f1=30, f2=80, order=10, beta=5) for w in waves]\n waves = [pycbc.filter.resample.lowpass_fir(w, frequency=512, order=5) for w in waves]\n waves = np.array([np.array(w) for w in waves])\n #waves = np.array([np.array(w)*window for w in waves])\n return waves\n####################\n# Config\n####################\n\nconf_dict = {'batch_size': 8,#32, \n 'epoch': 30,\n 'height': 256,#640,\n 'width': 256,\n 'model_name': 'efficientnet_b0',\n 'lr': 0.001,\n 'fold': 0,\n 'drop_rate': 0.2,\n 'drop_path_rate': 0.2,\n 'data_dir': '../input/g2net-gravitational-wave-detection/',\n 'model_path': None,\n 'output_dir': './',\n 'pseudo': None,\n 'seed': 2021,\n 'trainer': {}}\nconf_base = OmegaConf.create(conf_dict)\n\n####################\n# Dataset\n####################\n\nclass G2NetDataset(Dataset):\n def __init__(self, df, transform=None, conf=None, train=True):\n self.df = df.reset_index(drop=True)\n self.dir_names = df['dir'].values\n self.labels = df['target'].values\n self.wave_transform = [\n CQT1992v2(sr=2048, fmin=10, fmax=1024, hop_length=8, bins_per_octave=8, window='flattop'),\n CQT1992v2(sr=2048, fmin=10, fmax=1024, hop_length=8, bins_per_octave=8, window='blackmanharris'),\n CQT1992v2(sr=2048, fmin=10, fmax=1024, hop_length=8, bins_per_octave=8, window='nuttall')]\n #self.wave_transform = CQT1992v2(sr=2048, fmin=10, fmax=1024, hop_length=8, bins_per_octave=8, window='flattop')\n #self.wave_transform = CQT1992v2(sr=2048, fmin=20, fmax=1024, hop_length=1, bins_per_octave=14, window='flattop')\n #self.wave_transform = CQT2010v2(sr=2048, fmin=10, fmax=1024, hop_length=32, n_bins=32, bins_per_octave=8, window='flattop')\n self.stat = [\n [0.013205823003608798,0.037445450696502146],\n [0.009606230606511236,0.02489221471650526], # 10000 sample\n 
[0.009523397709568962,0.024628402379527688],] # 10000 sample\n # hop lengthは変えてみたほうが良いかも\n self.transform = transform\n self.conf = conf\n self.train = train\n \n def __len__(self):\n return len(self.df)\n \n def apply_qtransform(self, waves, transform):\n #print(waves.shape)\n #waves = np.hstack(waves)\n #print(np.max(np.abs(waves), axis=1))\n #waves = waves / np.max(np.abs(waves), axis=1, keepdims=True)\n #waves = waves / np.max(waves)\n waves = waves / 4.6152116213830774e-20\n waves = torch.from_numpy(waves).float()\n image = transform(waves)\n return image\n\n def __getitem__(self, idx):\n img_id = self.df.loc[idx, 'id']\n file_path = os.path.join(self.dir_names[idx],\"{}/{}/{}/{}.npy\".format(img_id[0], img_id[1], img_id[2], img_id))\n waves1 = np.load(file_path)\n waves1 = filt(waves1)\n label1 = torch.tensor([self.labels[idx]]).float()\n\n\n if self.train:\n if torch.rand(1) < 0.50:\n indx = torch.randint(0,len(self.df),[1]).numpy()[0]\n img_id = self.df.loc[indx, 'id']\n file_path = os.path.join(self.dir_names[indx],\"{}/{}/{}/{}.npy\".format(img_id[0], img_id[1], img_id[2], img_id))\n waves2 = np.load(file_path)\n waves2 = filt(waves2)\n label2 = torch.tensor([self.labels[indx]]).float()\n\n #alpha = 32.0\n #lam = np.random.beta(alpha, alpha)\n #waves = waves1 * lam + waves2 * (1-lam)\n waves = waves1 + waves2\n label = label1 + label2 - (label1*label2)\n else:\n waves = waves1\n label = label1\n\n if torch.rand(1) < 0.50:\n waves = np.roll(waves, np.random.randint(waves.shape[1]), axis=1)\n\n else:\n waves = waves1\n label = label1\n\n\n #bHP, aHP = signal.butter(1, (20,750), btype='bandpass', fs=2024)\n #waves = np.array([signal.filtfilt(bHP, aHP, w) for w in waves])\n\n if self.train:\n trans_id = random.choice([0,1,2])\n image = self.apply_qtransform(waves, self.wave_transform[trans_id])\n image = (image - self.stat[trans_id][0])/self.stat[trans_id][1]\n else:\n image = self.apply_qtransform(waves, self.wave_transform[0])\n image = (image - self.stat[0][0])/self.stat[0][1]\n \n image = image.squeeze().numpy().transpose(1,2,0)\n\n image = cv2.vconcat([image[:,:,0],image[:,:,1],image[:,:,2]])\n\n #image = (image-np.mean(image, axis=(0,1),keepdims=True))/np.std(image, axis=(0,1),keepdims=True)\n #image = (image-np.mean(image, axis=1,keepdims=True))/np.std(image, axis=1,keepdims=True)\n #image = (image-np.mean(image))/np.std(image)\n #image = (image-0.013205823003608798)/0.037445450696502146\n\n #img_pl = Image.fromarray(image).resize((self.conf.height, self.conf.width), resample=Image.BICUBIC)\n #image = np.array(img_pl)\n image = cv2.resize(image, (self.conf.width, self.conf.height), interpolation=cv2.INTER_CUBIC)\n\n if self.transform is not None:\n image = self.transform(image=image)['image']\n #image = torch.from_numpy(image.transpose(2,0,1))#.unsqueeze(dim=0)\n image = torch.from_numpy(image).unsqueeze(dim=0)\n\n return image, label\n \n####################\n# Data Module\n####################\n\nclass SETIDataModule(pl.LightningDataModule):\n\n def __init__(self, conf):\n super().__init__()\n self.conf = conf \n\n # OPTIONAL, called only on 1 GPU/machine(for download or tokenize)\n def prepare_data(self):\n pass\n\n # OPTIONAL, called for every GPU/machine\n def setup(self, stage=None):\n if stage == 'fit':\n df = pd.read_csv(os.path.join(self.conf.data_dir, \"training_labels.csv\"))\n df['dir'] = os.path.join(self.conf.data_dir, \"train\")\n \n # cv split\n skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=self.conf.seed)\n for n, (train_index, val_index) in 
enumerate(skf.split(df, df['target'])):\n df.loc[val_index, 'fold'] = int(n)\n df['fold'] = df['fold'].astype(int)\n \n train_df = df[df['fold'] != self.conf.fold]\n valid_df = df[df['fold'] == self.conf.fold]\n\n if self.conf.pseudo is not None:\n pseudo_df = pd.read_csv(self.conf.pseudo)\n #pseudo_df = pseudo_df[(pseudo_df['target']<0.05)|(pseudo_df['target']>0.95)]\n\n pseudo_df['dir'] = os.path.join(self.conf.data_dir, \"test\")\n\n train_df = pd.concat([train_df, pseudo_df])\n \n train_transform = A.Compose([\n #A.Resize(height=self.conf.high, width=self.conf.width, interpolation=1), \n #A.Flip(p=0.5),\n #A.VerticalFlip(p=0.5),\n #A.HorizontalFlip(p=0.5),\n #A.ShiftScaleRotate(p=0.5),\n #A.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=10, val_shift_limit=10, p=0.7),\n #A.RandomBrightnessContrast(brightness_limit=(-0.2,0.2), contrast_limit=(-0.2, 0.2), p=0.7),\n #A.CLAHE(clip_limit=(1,4), p=0.5),\n #A.OneOf([\n # A.OpticalDistortion(distort_limit=1.0),\n # A.GridDistortion(num_steps=5, distort_limit=1.),\n # A.ElasticTransform(alpha=3),\n #], p=0.20),\n #A.OneOf([\n # A.GaussNoise(var_limit=[10, 50]),\n # A.GaussianBlur(),\n # A.MotionBlur(),\n # A.MedianBlur(),\n #], p=0.20),\n #A.Resize(size, size),\n #A.OneOf([\n # A.JpegCompression(quality_lower=95, quality_upper=100, p=0.50),\n # A.Downscale(scale_min=0.75, scale_max=0.95),\n #], p=0.2),\n #A.IAAPiecewiseAffine(p=0.2),\n #A.IAASharpen(p=0.2),\n A.Cutout(max_h_size=int(self.conf.height * 0.1), max_w_size=int(self.conf.width * 0.1), num_holes=5, p=0.5),\n #A.Normalize()\n ])\n\n #valid_transform = A.Compose([\n # A.Resize(height=self.conf.high, width=self.conf.width, interpolation=1), \n # #A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0, always_apply=False, p=1.0)\n # ])\n\n #self.train_dataset = G2NetDataset(train_df, transform=train_transform,conf=self.conf)\n self.train_dataset = G2NetDataset(train_df, transform=None,conf=self.conf, train=True)\n self.valid_dataset = G2NetDataset(valid_df, transform=None, conf=self.conf, train=False)\n \n #elif stage == 'test':\n # test_df = pd.read_csv(os.path.join(self.conf.data_dir, \"sample_submission.csv\"))\n # test_df['dir'] = os.path.join(self.conf.data_dir, \"test\")\n # test_transform = A.Compose([\n # A.Resize(height=self.conf.height, width=self.conf.width, interpolation=1, always_apply=False, p=1.0),\n # #A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0, always_apply=False, p=1.0)\n # ])\n # self.test_dataset = G2NetDataset(test_df, transform=test_transform, conf=self.conf)\n \n def train_dataloader(self):\n return DataLoader(self.train_dataset, batch_size=self.conf.batch_size, num_workers=4*4, shuffle=True, pin_memory=True, drop_last=True)\n\n def val_dataloader(self):\n return DataLoader(self.valid_dataset, batch_size=self.conf.batch_size, num_workers=4*4, shuffle=False, pin_memory=True, drop_last=True)\n\n def test_dataloader(self):\n return DataLoader(self.test_dataset, batch_size=self.conf.batch_size, num_workers=4*4, shuffle=False, pin_memory=True, drop_last=False)\n \n####################\n# Lightning Module\n####################\n\nclass LitSystem(pl.LightningModule):\n def __init__(self, conf):\n super().__init__()\n #self.conf = conf\n self.save_hyperparameters(conf)\n self.model = timm.create_model(model_name=self.hparams.model_name, num_classes=1, pretrained=True, in_chans=1,\n drop_rate=self.hparams.drop_rate, drop_path_rate=self.hparams.drop_path_rate)\n if self.hparams.model_path 
is not None:\n print(f'load model path: {self.hparams.model_path}')\n self.model = load_pytorch_model(self.hparams.model_path, self.model, ignore_suffix='model')\n self.criteria = torch.nn.BCEWithLogitsLoss()\n\n def forward(self, x):\n # use forward for inference/predictions\n return self.model(x)\n\n def configure_optimizers(self):\n\n optimizer = torch.optim.Adam(self.model.parameters(), lr=self.hparams.lr)\n\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=self.hparams.epoch)\n \n return [optimizer], [scheduler]\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n \n '''\n if self.current_epoch < self.hparams.epoch*0.8:\n # mixup\n alpha = 1.0\n lam = np.random.beta(alpha, alpha)\n batch_size = x.size()[0]\n index = torch.randperm(batch_size)\n x = lam * x + (1 - lam) * x[index, :]\n y = lam * y + (1 - lam) * y[index]\n #y = y + y[index] - (y * y[index])\n '''\n \n y_hat = self.model(x)\n loss = self.criteria(y_hat, y)\n \n return loss\n\n def validation_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self.model(x)\n loss = self.criteria(y_hat, y)\n \n return {\n \"val_loss\": loss,\n \"y\": y,\n \"y_hat\": y_hat\n }\n \n def validation_epoch_end(self, outputs):\n avg_val_loss = torch.stack([x[\"val_loss\"] for x in outputs]).mean()\n y = torch.cat([x[\"y\"] for x in outputs]).cpu().detach().numpy()\n y_hat = torch.cat([x[\"y_hat\"] for x in outputs]).cpu().detach().numpy()\n\n #preds = np.argmax(y_hat, axis=1)\n\n val_score = get_score(y, y_hat)\n\n self.log('avg_val_loss', avg_val_loss)\n self.log('val_score', val_score)\n\n \n####################\n# Train\n#################### \ndef main():\n conf_cli = OmegaConf.from_cli()\n conf = OmegaConf.merge(conf_base, conf_cli)\n print(OmegaConf.to_yaml(conf))\n seed_everything(conf.seed)\n\n tb_logger = loggers.TensorBoardLogger(save_dir=os.path.join(conf.output_dir, 'tb_log/'))\n csv_logger = loggers.CSVLogger(save_dir=os.path.join(conf.output_dir, 'csv_log/'))\n\n lr_monitor = LearningRateMonitor(logging_interval='step')\n checkpoint_callback = ModelCheckpoint(dirpath=os.path.join(conf.output_dir, 'ckpt/'), monitor='val_score', \n save_last=True, save_top_k=5, mode='max', \n save_weights_only=True, filename=f'fold{conf.fold}-'+'{epoch}-{val_score:.5f}')\n\n data_module = SETIDataModule(conf)\n\n lit_model = LitSystem(conf)\n\n trainer = Trainer(\n logger=[tb_logger, csv_logger],\n callbacks=[lr_monitor, checkpoint_callback],\n max_epochs=conf.epoch,\n gpus=-1,\n #amp_backend='native',\n #amp_level='O2',\n #precision=16,\n num_sanity_val_steps=10,\n val_check_interval=1.0,\n #sync_batchnorm=True,\n **conf.trainer\n )\n\n trainer.fit(lit_model, data_module)\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"sklearn.metrics.roc_auc_score",
"pandas.concat",
"pandas.read_csv",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.load",
"torch.cat",
"torch.utils.data.DataLoader",
"torch.from_numpy",
"sklearn.model_selection.StratifiedKFold",
"torch.tensor",
"torch.nn.BCEWithLogitsLoss",
"torch.rand",
"torch.stack",
"numpy.load",
"numpy.array",
"numpy.random.randint"
]
] |
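The G2Net training dataset in the record above augments samples by summing two raw waveforms and combining their binary targets arithmetically. A short sketch of just that step, with random arrays standing in for the loaded `.npy` waveforms (shapes are illustrative):

```python
# Waveform-mixing augmentation from the entry above: signals are superposed and
# the binary labels are OR-ed via label1 + label2 - label1*label2.
import numpy as np
import torch

waves1 = np.random.randn(3, 4096)            # stand-in for one loaded sample (3 detectors)
waves2 = np.random.randn(3, 4096)            # stand-in for a second, randomly drawn sample
label1 = torch.tensor([1.0])
label2 = torch.tensor([0.0])

waves = waves1 + waves2                       # mixed signal
label = label1 + label2 - (label1 * label2)   # 1.0 if either target is 1.0, else 0.0
```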
biemann/tropical_precooling_environment
|
[
"df5f55544e09c5ad6bb29f8c3da6c649b057aba6"
] |
[
"tropical_precooling/env.py"
] |
[
"import os\n\nimport gym\nimport numpy as np\nimport pandas as pd\n\nfrom tropical_precooling.reward import dummy_reward, dummy_comfort_reward\n\n\nclass TropicalPrecooling(gym.Env):\n\n def __init__(self):\n \"\"\"\n load measurements and parameters for env simulation.\n \"\"\"\n env_path = os.path.dirname(__file__)\n measured_data_fnp = os.path.join(\n env_path,\n \"data\",\n \"building_measurements.csv\"\n )\n building_parameters_fnp = os.path.join(\n env_path,\n \"data\",\n \"building_parameters.csv\"\n )\n self.measured_data = pd.read_csv(\n measured_data_fnp,\n index_col=0,\n parse_dates=True,\n )\n self.building_parameters = pd.read_csv(\n building_parameters_fnp,\n index_col=0,\n parse_dates=True,\n )\n\n # Comfort bounds as defined in the paper.\n # in 156 5 minutes slots, 0:36 is the time between 4am and 7am.\n self.T_min_comfort = np.zeros(156)\n self.T_min_comfort[:36] = 25\n self.T_min_comfort[36:] = 23\n\n self.T_max_comfort = np.zeros(156)\n self.T_max_comfort[:36] = 29\n self.T_max_comfort[36:] = 25\n\n # Electricity prices in $/kWh as defined in the paper.\n # Off-peak rates apply between 4am and 7am.\n self.e = np.zeros(156)\n self.e[:36] = 9.78\n self.e[36:] = 24.48\n\n # This is baseline strategy used for computing the performance\n # measure, 27°C for 4am .. 7am and 23.5°C therafter.\n self.T_zSP_baseline = np.zeros(156)\n self.T_zSP_baseline[:36] = None\n self.T_zSP_baseline[36:] = 23.5\n\n # Define which of the days for which we have data should be used\n # for training and which for testing. Check that we only use\n # those dates which are available in both files (which should be the\n # case for all entries of both files).\n dates_mdata = set(self.measured_data.index.date)\n dates_bparams = set(self.building_parameters.index.date)\n all_dates = sorted(dates_mdata.intersection(dates_bparams))\n self.train_dates = [d for d in all_dates if d.month >= 7]\n self.test_dates = [d for d in all_dates if d.month < 7]\n\n # Some objects to store outputs of step as this is required\n # to compute the performance measure later.\n self.simulated_dates = []\n self.test_actions = []\n self.test_obs = []\n\n self.low_action = 0\n self.high_action = 50\n\n self.action_dim = 2\n\n self.action_space = gym.spaces.Box(low=-1 * np.ones(self.action_dim), high=1 * np.ones(self.action_dim))\n self.observation_space = gym.spaces.Box(low=-1000, high=1500, shape=(4 * 156,))\n\n def simulate_day(self, simulated_date, T_zSP):\n \"\"\"\n Simulate the zone temperature for one day.\n\n This starts with the temperature measured at the real building at 4am\n and computes change of temperatue within the time step length of 5\n minutes, by applying equations (3), (4) and (5) from the paper.\n This is repeated until the full day horizon is simulated.\n\n Parameters\n ----------\n simulated_date : datetime.date\n The date of the day that is simulated. 
Used to lookup parameters\n and measurements for the equations.\n T_zSP : numpy array with shape (156,)\n Temperature setpoints for every 5 minute slot between 4am and 5pm.\n\n Returns\n -------\n T_z : numpy array with shape (156,)\n The zone temperature of the simulated day.\n \"\"\"\n # Extract the building paramters for this day.\n # The .values prevents that the computed values are casted to\n # pandas data types.\n day_selector = self.building_parameters.index.date == simulated_date\n bparams_day = self.building_parameters.loc[day_selector]\n k_a = bparams_day[\"k_a\"].values\n k_o1 = bparams_day[\"k_o1\"].values\n k_o2 = bparams_day[\"k_o2\"].values\n m_so = bparams_day[\"m_so\"].values\n k_c = bparams_day[\"k_c\"].values\n c_pa = bparams_day[\"c_pa\"].values\n C_z = bparams_day[\"C_z\"].values\n\n # This is the measured data of the simulated day, a pandas df.\n day_selector = self.measured_data.index.date == simulated_date\n mdata_day = self.measured_data.iloc[day_selector]\n\n # Our container to store the zone temperature.\n T_z = []\n\n # The measured zone temperature, valid from 04:00:00\n # All other values above are 1d arrays, this has to be an\n # array to to allow building the final array for T_z\n T_z_t = np.asarray([mdata_day.iloc[0][\"Zone temperature\"]])\n\n # Make some arrangements to make the notiations below follow\n # the notation in equations given in the paper.\n sim_data = pd.DataFrame(index=mdata_day.index)\n sim_data[\"T_zSP_t\"] = T_zSP\n sim_data[\"T_s_t\"] = mdata_day[\"Supply air temp\"]\n sim_data[\"T_a_t\"] = mdata_day[\"Outside air temperature\"]\n sim_data[\"theta_CO2_t\"] = mdata_day[\"CO2\"]\n\n # Iterate over rows of sim_data to conveniently get the values\n # for each of the 5 minute blocks.\n for i, row in sim_data.iterrows():\n T_zSP_t = row[\"T_zSP_t\"]\n T_s_t = row[\"T_s_t\"]\n T_a_t = row[\"T_a_t\"]\n theta_CO2_t = row[\"theta_CO2_t\"]\n\n # Store the current zone temperature first ...\n T_z.append(T_z_t)\n\n # ... and now compute the delta for the zone temperature of the\n # next timestep.\n #\n m_s_t = np.maximum(0, m_so + k_c * (T_z_t - T_zSP_t)) # (5)\n\n # m_s_t = m_so + k_c * (T_z_t - T_zSP_t)\n Q_cooling_t = c_pa * (m_s_t * (-1) *np.abs(T_s_t - T_z_t)) # (4)\n\n # Now cooling/heating if AC is switched of.\n if np.isnan(Q_cooling_t):\n Q_cooling_t = 0\n\n # (5)\n dT_dt = k_a * (T_a_t - T_z_t)\n dT_dt += k_o1 * theta_CO2_t + k_o2\n dT_dt += Q_cooling_t\n dT_dt /= C_z\n dT = dT_dt * 300 # 5 Minutes step length\n\n T_z_t = T_z_t + dT\n\n # After asarray T_z has shape (156, 1) we want (156) and flatten thus\n T_z = np.asarray(T_z).flatten()\n return np.asarray(T_z)\n\n def compute_obs(self, current_date, next_date, norm_T_zSP):\n \"\"\"\n Generate the content for obs.\n\n First compute the zone temperature for the current date (which will be\n the previous day for the agent as it would receive this data after day\n has ended). Then look up / compute the remaining data.\n\n Parameters\n ----------\n current_date : datetime.date\n The date that is used to load the data for obs content 0 to 4.\n next_date : datetime.date\n The date that is used to load the data for obs content 5 and 6.\n T_zSP : numpy array with shape (156,)\n Temperature setpoints for every 5 minute slot between 4am and 5pm.\n\n Returns\n -------\n obs : numpy array with shape (7, 156)\n Observed data from the simulated building, each quantitiy for\n every 5 minute slot between 4am and 5pm, i.e. 156 values per\n quantity. 
These are:\n 0: The zone temperature of the previous day in °C\n 1: The supply air temperature of the previous day in °C.\n 2: The ambient temperature of the previous day in °C.\n 3: The CO_2 values of the previous day in ppm.\n 4: The energy costs of the previous day in $.\n 5: The perfect ambient temperature forecast for the current\n day in °C.\n 6: The electricity costs for the current day in cents/kWh.\n\n \"\"\"\n\n T_zSP = self.low_action + (norm_T_zSP + 1.) * 0.5 * (self.high_action - self.low_action)\n augmentedT_zSP = []\n if not np.isnan(T_zSP).any():\n for i in range(self.action_dim):\n for j in range(int(156 / self.action_dim)):\n augmentedT_zSP.append(T_zSP[i])\n\n else:\n augmentedT_zSP = T_zSP\n\n # 0: The zone temperature of the previous day in °C\n T_z = self.simulate_day(\n simulated_date=current_date,\n T_zSP=augmentedT_zSP,\n )\n\n # 1, 2, 3: Retrieve the values that are just loaded from measured data.\n # This is the measured data of the simulated day, a pandas df.\n day_selector = self.measured_data.index.date == current_date\n mdata_day = self.measured_data.iloc[day_selector]\n T_s = mdata_day[\"Supply air temp\"]\n T_a = mdata_day[\"Outside air temperature\"]\n theta_CO2 = mdata_day[\"CO2\"]\n\n # 4: The energy costs of the previous day in $.\n E = self.estimate_energy_costs(\n T_z=T_z,\n T_zSP=augmentedT_zSP,\n e=self.e,\n simulated_date=current_date,\n )\n\n # 5: The perfect ambient temperature forecast for the current day in °C.\n next_day_selector = self.measured_data.index.date == next_date\n mdata_next_day = self.measured_data.iloc[next_day_selector]\n T_a_next_day = mdata_next_day[\"Outside air temperature\"]\n\n # 6: The electricity costs for the current day in cents/kWh.\n # These never change.\n e_next_day = self.e\n\n obs = np.asarray([\n T_z,\n T_s,\n T_a,\n theta_CO2,\n E,\n T_a_next_day,\n e_next_day\n ])\n return obs\n\n def get_training_data(self):\n \"\"\"\n Returns the training data, i.e. the the (baseline) actions and\n corresponding observations.\n\n Returns\n -------\n training_actions : list of numpy arrays with shape (156,)\n The actions that have been taken by the baseline agent.\n training_obs : list of numpy arrays with shape (7, 156)\n The observations that have resulted from the actions.\n See step method for details about the content of obs objects.\n \"\"\"\n training_actions = []\n training_obs = []\n for i in range(0, len(self.train_dates) - 1):\n current_date = self.train_dates[i]\n next_date = self.train_dates[i + 1]\n\n T_zSP = self.T_zSP_baseline\n obs = self.compute_obs(\n current_date=current_date,\n next_date=next_date,\n norm_T_zSP=T_zSP,\n )\n\n training_actions.append(T_zSP)\n training_obs.append(obs)\n\n return training_actions, training_obs\n\n def step(self, actions):\n \"\"\"\n Simulate one day of building operation.\n\n Parameters\n ----------\n actions : numpy array with shape (156,)\n Temperature setpoints for every 5 minute slot between 4am and 5pm.\n The actual building doesn't support setpoints below 13°C. Setpoints\n can also be set to None which is interpreted as AC off.\n\n Returns\n -------\n obs : numpy array with shape (7, 156)\n Observed data from the simulated building, each quantitiy for\n every 5 minute slot between 4am and 5pm, i.e. 156 values per\n quantity. 
These are:\n 0: The zone temperature of the previous day in °C\n 1: The supply air temperature of the previous day in °C.\n 2: The ambient temperature of the previous day in °C.\n 3: The CO_2 values of the previous day in ppm.\n 4: The energy costs of the previous day in $.\n 5: The perfect ambient temperature forecast for the current\n day in °C.\n 6: The electricity costs for the current day in cents/kWh.\n\n reward : None\n This environment emits no reward, as the building doesn't emit one\n either. This field is kept for consistency with OpenAI gym\n conventions.\n done : bool\n True after the last day has been simulated.\n info : dict\n Always an empty dict as no additional information are provided\n for the user of the environment. This field is kept for consistency\n with OpenAI gym conventions.\n \"\"\"\n\n done = False\n info = {}\n\n # Determine the date of the current day and also check if this is the\n # last day that is simulated.\n current_date = self.current_step_date\n index_current_date = self.test_dates.index(current_date)\n if index_current_date + 2 == len(self.test_dates):\n done = True\n elif index_current_date + 2 > len(self.test_dates):\n raise RuntimeError(\"Environment is done already.\")\n next_date = self.test_dates[index_current_date + 1]\n\n obs = self.compute_obs(\n current_date=current_date,\n next_date=next_date,\n norm_T_zSP=actions,\n )\n\n simp_obs = [obs[0], obs[2], obs[4], obs[6]]\n flat_obs = [obs for sub_obs in simp_obs for obs in sub_obs]\n reward = dummy_reward(simp_obs, actions)\n\n # Store the actions and obs as these are required to compute the\n # performance measure later\n self.simulated_dates.append(current_date)\n self.test_actions.append(actions)\n self.test_obs.append(obs)\n\n # Increment so next call to step advances in time.\n self.current_step_date = next_date\n\n return flat_obs, reward, done, info\n\n def reset(self):\n \"\"\"\n Reset and init the environment.\n\n Returns obs for one day following the baseline strategy. Although\n most of this information will not be of worth for the agent, it has\n the advantage that the obs format stays consistent.\n\n This function also erases the recorded values that might have been\n stored while the agent has interacted with the step function.\n\n Returns\n -------\n obs : numpy array with shape (7, 156)\n Observed data from the simulated building, each quantitiy for\n every 5 minute slot between 4am and 5pm, i.e. 156 values per\n quantity. 
These are:\n 0: The zone temperature of the previous day in °C\n 1: The supply air temperature of the previous day in °C.\n 2: The ambient temperature of the previous day in °C.\n 3: The CO_2 values of the previous day in ppm.\n 4: The energy costs of the previous day in $.\n 5: The perfect ambient temperature forecast for the current\n day in °C.\n 6: The electricity costs for the current day in cents/kWh.\n\n \"\"\"\n self.simulated_dates = []\n self.test_actions = []\n self.test_obs = []\n\n current_date = self.test_dates[0]\n next_date = self.test_dates[1]\n\n T_zSP = self.T_zSP_baseline\n n_T_zSP = -1 + (T_zSP - self.low_action) * 2 / (self.high_action - self.low_action)\n obs = self.compute_obs(\n current_date=current_date,\n next_date=next_date,\n norm_T_zSP=n_T_zSP,\n )\n\n self.current_step_date = next_date\n simp_obs = [obs[0], obs[2], obs[4], obs[6]]\n flat_obs = [obs for sub_obs in simp_obs for obs in sub_obs]\n\n return flat_obs\n\n def compute_performance_measure(self):\n \"\"\"\n Compute performance measure as in equation (7) in the paper.\n\n This loads the recorded data about actions and obs generated by\n the evaluated agent automatically.\n\n Returns\n -------\n performance_measure : float\n \"\"\"\n performance_measure = 0\n\n # Zone temperatures, Energy costs and PMV for the canidate algorthm,\n # These are arrays with shape (len(self.simulated_dates), 156).\n T_z_ca = np.asarray([a[0] for a in self.test_obs])\n E_ca = np.asarray([a[4] for a in self.test_obs])\n PMV_ca = self.estimate_pmv(\n T_z=T_z_ca,\n T_min_comfort=self.T_min_comfort,\n T_max_comfort=self.T_max_comfort,\n )\n\n # Now compute the corresponding values for the baseline, this is most\n # conveniently done by simulating the test phase with following the\n # baseline strategy.\n env_bl = TropicalPrecooling()\n done = False\n _ = env_bl.reset()\n while not done:\n norm_action = -1 + (env_bl.T_zSP_baseline - self.low_action) * 2 / (self.high_action - self.low_action)\n _, _, done, _ = env_bl.step(actions=norm_action)\n\n T_z_bl = np.asarray([a[0] for a in env_bl.test_obs])\n E_bl = np.asarray([a[4] for a in env_bl.test_obs])\n PMV_bl = self.estimate_pmv(\n T_z=T_z_bl,\n T_min_comfort=self.T_min_comfort,\n T_max_comfort=self.T_max_comfort,\n )\n\n # Apply equation (7)\n performance_measure = 1\n performance_measure -= 0.5 * E_ca.sum() / E_bl.sum()\n performance_measure -= 0.5 * abs(PMV_ca).sum() / abs(PMV_bl).sum()\n\n return performance_measure\n\n def estimate_energy_costs(self, T_z, T_zSP, e, simulated_date):\n \"\"\"\n Compute the estimated energy costs based on equation (6) from the paper.\n\n Q_cooling_t has already been computed in self.simulate_day. However,\n this method should also work for cases where the zone temperature has\n been measured, especially to compute the performance measure.\n\n Parameters\n ----------\n T_z : numpy array with shape (156,)\n The zone temperature for one or several days.\n T_zSP : numpy array with shape like T_z\n The zone setpoint temperature aka. actions.\n e : float or numpy array with shape (156,)\n The electricity prices for every 5 minute slot in $/kWh.\n simulated_date : datetime.date\n The date of the day that is simulated. 
Used to lookup parameters\n and measurements for the equations.\n\n Returns\n -------\n E : numpy array with shape of T_z\n The energy costs of AC operation for every 5 minute slot.\n \"\"\"\n # Extract the required building paramters for this day.\n day_selector = self.building_parameters.index.date == simulated_date\n bparams_day = self.building_parameters.loc[day_selector]\n m_so = bparams_day[\"m_so\"].values\n k_c = bparams_day[\"k_c\"].values\n c_pa = bparams_day[\"c_pa\"].values\n COP = bparams_day[\"COP\"].values\n\n # Get the supply air temperature from measurements.\n day_selector = self.measured_data.index.date == simulated_date\n T_s = self.measured_data.iloc[day_selector][\"Supply air temp\"].values\n\n # The variables have no trailing _t (reresenting the (t) in the\n # equations as these are arrays that hold may of these variables.\n m_s = np.maximum(0, m_so + k_c * (T_z - T_zSP)) # (5)\n # m_s = m_so + k_c * (T_z - T_zSP)\n Q_cooling = c_pa * (m_s * (T_s - T_z)) # (4)\n\n # Set cooling power to zero if AC was off.\n Q_cooling[np.isnan(Q_cooling)] = 0\n\n E = -Q_cooling * e / COP\n return E\n\n def estimate_pmv(self, T_z, T_min_comfort, T_max_comfort):\n \"\"\"\n Computes an PMV estimate from given min and max comfort temperatures.\n\n PMV is usually computed as with Fanger's equation as\n (0.303 * e^(-0.036*M) + 0.028) * L\n whereby M is the metabolic rate and L is linear proportional to the\n (indoor) air temperature. The comfort range for PMV is typically\n expected to lay within the range between -0.5 and 0.5. However,\n in our case the comfort range has already been defined by the facility\n manager. Assuming thus that the minimum comfort temperature is\n equivalent to PMV=-0.5 and the maxmimum comfort temperature is\n equivalent to PMV=0.5, we estimate PMV with linear interpolation\n between these points.\n\n Arguments:\n ----------\n T_z : float or array with shape (156,) or (n, 156).\n The zone temperature for one or several days for which PMV should\n be esimated.\n T_min_comfort : float or array with shape (156,).\n The minimum thermal comfort temperature equivalent to PMV=-0.5.\n T_max_comfort : float or array with shape (156,).\n The maximum thermal comfort temperature equivalent to PMV=0.5.\n\n Returns:\n --------\n PMV : float or array\n depending on the input of T_zone.\n \"\"\"\n # This is a simple linear fit through two points.\n PMV = (0.5 - -0.5) / (T_max_comfort - T_min_comfort) * (T_z - T_min_comfort) + -0.5\n return PMV\n"
] |
[
[
"pandas.read_csv",
"numpy.maximum",
"numpy.abs",
"numpy.asarray",
"numpy.isnan",
"pandas.DataFrame",
"numpy.ones",
"numpy.zeros"
]
] |
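The `estimate_pmv` docstring in the record above defines PMV by linear interpolation between the comfort bounds, mapping `T_min_comfort` to -0.5 and `T_max_comfort` to +0.5. A tiny worked example of that formula (the temperatures are chosen for illustration only):

```python
# Linear PMV estimate from the entry above: T_min -> -0.5, T_max -> +0.5.
import numpy as np

T_min_comfort, T_max_comfort = 23.0, 25.0
T_z = np.array([23.0, 24.0, 25.0])
PMV = (0.5 - -0.5) / (T_max_comfort - T_min_comfort) * (T_z - T_min_comfort) + -0.5
print(PMV)   # [-0.5  0.   0.5]
```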
myying/DAPPER
|
[
"2f92fcb5610f9b2d44bb077ac5ed5438ac5fd6b3"
] |
[
"dapper/mods/LotkaVolterra/__init__.py"
] |
[
"\"\"\"The generalized predator-prey model, with settings for chaotic dynamics.\n\nRefs:\n[Wiki](https://en.wikipedia.org/wiki/Competitive_Lotka-Volterra_equations),\n`bib.vano2006chaos`.\n\"\"\"\n\nimport numpy as np\n\nimport dapper.mods as modelling\nfrom dapper.mods.integration import integrate_TLM\nfrom dapper.mods.Lorenz63 import LPs\n\n__pdoc__ = {\"demo\": False}\n\nNx = 4\n\n# \"growth\" coefficients\nr = np.array([1, 0.72, 1.53, 1.27])\n\n# \"interaction\" coefficients\nA = np.array([\n [1, 1.09, 1.52, 0],\n [0, 1, 0.44, 1.36],\n [2.33, 0, 1, 0.47],\n [1.21, 0.51, 0.35, 1]\n])\n\nx0 = 0.25*np.ones(Nx)\n\n\ndef dxdt(x):\n return (r*x) * (1 - [email protected])\n\n\nstep = modelling.with_rk4(dxdt, autonom=True)\n\nTplot = 100\n\n\ndef d2x_dtdx(x):\n return np.diag(r - r*(A@x)) - (r*x)[:, None]*A\n\n\ndef dstep_dx(x, t, dt):\n return integrate_TLM(d2x_dtdx(x), dt, method='approx')\n\n\ndef LP_setup(jj): return LPs(jj, params=dict())\n"
] |
[
[
"numpy.diag",
"numpy.array",
"numpy.ones"
]
] |
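The Lotka-Volterra record above supplies both the vector field `dxdt` and its analytic Jacobian `d2x_dtdx` for the tangent-linear model. A quick central-difference check of that Jacobian at the stated initial condition, added here purely as a sanity test (it is not part of the original module):

```python
# Finite-difference verification of the analytic Jacobian from the entry above.
import numpy as np

r = np.array([1, 0.72, 1.53, 1.27])
A = np.array([[1, 1.09, 1.52, 0],
              [0, 1, 0.44, 1.36],
              [2.33, 0, 1, 0.47],
              [1.21, 0.51, 0.35, 1]])

def dxdt(x):
    return (r * x) * (1 - A @ x)

def d2x_dtdx(x):
    return np.diag(r - r * (A @ x)) - (r * x)[:, None] * A

x0 = 0.25 * np.ones(4)
eps = 1e-6
numeric = np.column_stack([(dxdt(x0 + eps * e) - dxdt(x0 - eps * e)) / (2 * eps)
                           for e in np.eye(4)])
assert np.allclose(numeric, d2x_dtdx(x0), atol=1e-6)
```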
cepheid42/CBET
|
[
"9ef85012b134f22a0f19cd09e8594b934a53a9f4"
] |
[
"plotter.py"
] |
[
"import matplotlib.pyplot as plt\nfrom constants import *\nfrom numpy import max as npmax\n\ndef plot_everything(grid, intensity_sum, variable1, a0_variable, keep_open=False):\n cmap = 'jet'\n plt.figure()\n plt.pcolormesh(grid.z, grid.x, grid.eden / ncrit, cmap=cmap)\n plt.plot(grid.z - (dz / 2), grid.x - (dx / 2), 'k--')\n plt.plot(grid.x - (dx / 2), grid.z - (dz / 2), 'k--')\n\n plt.plot(grid.z - (dz / 2), grid.x + (dx / 2), 'k--')\n plt.plot(grid.x + (dx / 2), grid.z - (dz / 2), 'k--')\n\n plt.plot(grid.z + (dz / 2), grid.x - (dx / 2), 'k--')\n plt.plot(grid.x - (dx / 2), grid.z + (dz / 2), 'k--')\n\n plt.plot(grid.z + (dz / 2), grid.x + (dx / 2), 'k--')\n plt.plot(grid.x + (dx / 2), grid.z + (dz / 2), 'k--')\n\n plt.plot(grid.z, grid.x, 'k--')\n plt.plot(grid.x, grid.z, 'k--')\n\n plt.colorbar()\n\n plt.xlabel('z (cm)')\n plt.ylabel('x (cm)')\n plt.title('n_e_/n_crit_')\n\n plt.show(block=False)\n\n '''Plot the cumulative energy deposited to the array edep, which shares the dimensions of grid.x, grid.z, grid.eden, dedendz, etc.'''\n for b in range(nbeams):\n for n in range(nrays):\n finalt = grid.finalts[b, n]\n plt.plot(grid.mysaved_z[b, n, :finalt], grid.mysaved_x[b, n, :finalt], 'm')\n\n plt.show(block=False)\n\n plt.figure()\n clo = 0.0\n chi = npmax(intensity_sum)\n plt.pcolormesh(grid.z, grid.x, intensity_sum, cmap=cmap, vmin=clo, vmax=chi)\n plt.colorbar()\n plt.xlabel('z (cm)')\n plt.ylabel('x (cm)')\n plt.title('Overlapped intensity')\n plt.show(block=False)\n\n plt.figure()\n plt.pcolormesh(grid.z, grid.x, variable1, cmap=cmap, vmin=0.0, vmax=0.021)\n plt.colorbar()\n plt.xlabel('z (cm)')\n plt.ylabel('x (cm)')\n plt.title('Total original field amplitude (a0)')\n plt.show(block=False)\n\n plt.figure()\n plt.pcolormesh(grid.z, grid.x, a0_variable, cmap=cmap, vmin=0.0, vmax=0.021)\n plt.colorbar()\n plt.xlabel('z (cm)')\n plt.ylabel('x (cm)')\n plt.title('Total CBET new field amplitude (a0)')\n plt.show(block=False)\n\n plt.figure()\n plt.plot(grid.x[0, :], a0_variable[1, :], ',-b')\n plt.plot(grid.x[0, :], a0_variable[nz - 2, :], ',-r')\n plt.plot(grid.x[0, :], a0_variable[nz // 2, :], ',-g')\n plt.xlabel('x (cm)')\n plt.ylabel('a0')\n plt.title('a0(x) at z_min, z_0, z_max')\n plt.grid(linestyle='--')\n plt.show(block=False)\n\n plt.figure()\n plt.plot(grid.z[:, 0], a0_variable[:, 1], ',-b')\n plt.plot(grid.z[:, 0], a0_variable[:, nx - 2], ',-r')\n plt.plot(grid.z[:, 0], a0_variable[:, nx // 2], ',-g')\n plt.xlabel('z (cm)')\n plt.ylabel('a0')\n plt.title('a0(z) at x_min, x_0, x_max')\n plt.grid(linestyle='--')\n plt.show(block=keep_open)\n"
] |
[
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.ylabel",
"numpy.max",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.pcolormesh",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
liyuesolo/ntopo-1
|
[
"d3e17ca4cfb1d7a71c4c4f0c965cfcdc67d53fa9"
] |
[
"ntopo/monitors.py"
] |
[
"\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\n\nclass SimulationMonitor:\n def __init__(self, n_iterations):\n self.n_iterations = n_iterations\n\n self.iter = None\n self.values = None\n\n def __iter__(self):\n self.iter = -1\n self.values = []\n return self\n\n def __next__(self):\n self.iter += 1\n if self.iter >= self.n_iterations:\n raise StopIteration()\n return self.iter\n\n def monitor(self, loss):\n assert len(loss) == 1\n loss = tf.reshape(loss, (-1, )).numpy()[0]\n\n self.values.append(loss)\n\n def save_plot(self, save_path, prefix, postfix):\n start = 0\n end = len(self.values)\n if end > 100:\n start = 50\n x = np.arange(start, end)\n y = self.values[start:end]\n fig, _ = plt.subplots()\n plt.plot(x, y)\n plt.axis([min(x), max(x), min(y), max(y)])\n fig.savefig(os.path.join(\n save_path, prefix + 'loss' + postfix + '.png'))\n plt.close(fig)\n"
] |
[
[
"numpy.arange",
"tensorflow.reshape",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close"
]
] |
leixin/lhotse
|
[
"24d713b81e8dbc0cdfec1038024a3b8a44eb54bd"
] |
[
"lhotse/audio.py"
] |
[
"import warnings\nfrom dataclasses import asdict, dataclass\nfrom io import BytesIO\nfrom math import sqrt\nfrom pathlib import Path\nfrom subprocess import PIPE, run\nfrom typing import Callable, Dict, Iterable, List, Optional, Union, Tuple\n\nimport numpy as np\n\nfrom lhotse.utils import Decibels, Pathlike, Seconds, SetContainingAnything, JsonMixin, YamlMixin, fastcopy\n\nChannels = Union[int, List[int]]\n\n\n# TODO: document the dataclasses like this:\n# https://stackoverflow.com/a/3051356/5285891\n\n\n@dataclass\nclass AudioSource:\n \"\"\"\n AudioSource represents audio data that can be retrieved from somewhere.\n Supported sources of audio are currently:\n - 'file' (formats supported by librosa, possibly multi-channel)\n - 'command' [unix pipe] (must be WAVE, possibly multi-channel)\n \"\"\"\n type: str\n channels: List[int]\n source: str\n\n def load_audio(\n self,\n offset_seconds: float = 0.0,\n duration_seconds: Optional[float] = None,\n ) -> np.ndarray:\n \"\"\"\n Load the AudioSource (both files and commands) with librosa,\n accounting for many audio formats and multi-channel inputs.\n Returns numpy array with shapes: (n_samples) for single-channel,\n (n_channels, n_samples) for multi-channel.\n \"\"\"\n assert self.type in ('file', 'command')\n\n source = self.source\n if self.type == 'command':\n if offset_seconds != 0.0 or duration_seconds is not None:\n # TODO(pzelasko): How should we support chunking for commands?\n # We risk being very inefficient when reading many chunks from the same file\n # without some caching scheme, because we'll be re-running commands.\n raise ValueError(\"Reading audio chunks from command AudioSource type is currently not supported.\")\n source = BytesIO(run(self.source, shell=True, stdout=PIPE).stdout)\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n samples, sampling_rate = read_audio(source, offset=offset_seconds, duration=duration_seconds)\n\n # explicit sanity check for duration as librosa does not complain here\n if duration_seconds is not None:\n num_samples = samples.shape[0] if len(samples.shape) == 1 else samples.shape[1]\n available_duration = num_samples / sampling_rate\n if available_duration < duration_seconds - 1e-3: # set the allowance as 1ms to avoid float error\n raise ValueError(\n f'Requested more audio ({duration_seconds}s) than available ({available_duration}s)'\n )\n\n return samples.astype(np.float32)\n\n def with_path_prefix(self, path: Pathlike) -> 'AudioSource':\n if self.type != 'file':\n return self\n return fastcopy(self, source=str(Path(path) / self.source))\n\n @staticmethod\n def from_dict(data) -> 'AudioSource':\n return AudioSource(**data)\n\n\ndef read_audio(path: Pathlike, offset: Seconds, duration: Seconds) -> Tuple[np.ndarray, int]:\n import soundfile as sf\n with sf.SoundFile(path) as sf_desc:\n sampling_rate = sf_desc.samplerate\n if offset:\n # Seek to the start of the target read\n sf_desc.seek(int(offset * sampling_rate))\n if duration is not None:\n frame_duration = int(duration * sampling_rate)\n else:\n frame_duration = -1\n # Load the target number of frames, and transpose to match librosa form\n return sf_desc.read(frames=frame_duration, dtype=np.float32, always_2d=False).T, sampling_rate\n\n\n@dataclass\nclass Recording:\n \"\"\"\n Recording represents an AudioSource along with some metadata.\n \"\"\"\n id: str\n sources: List[AudioSource]\n sampling_rate: int\n num_samples: int\n duration: Seconds\n\n @staticmethod\n def from_sphere(sph_path: Pathlike, relative_path_depth: 
Optional[int] = None) -> 'Recording':\n \"\"\"\n Read a SPHERE file's header and create the corresponding ``Recording``.\n\n :param sph_path: Path to the sphere (.sph) file.\n :param relative_path_depth: optional int specifying how many last parts of the file path\n should be retained in the ``AudioSource``. By default writes the path as is.\n :return: a new ``Recording`` instance pointing to the sphere file.\n \"\"\"\n from sphfile import SPHFile\n sph_path = Path(sph_path)\n sphf = SPHFile(sph_path)\n return Recording(\n id=sph_path.stem,\n sampling_rate=sphf.format['sample_rate'],\n num_samples=sphf.format['sample_count'],\n duration=sphf.format['sample_count'] / sphf.format['sample_rate'],\n sources=[\n AudioSource(\n type='file',\n channels=list(range(sphf.format['channel_count'])),\n source=(\n '/'.join(sph_path.parts[-relative_path_depth:])\n if relative_path_depth is not None and relative_path_depth > 0\n else str(sph_path)\n )\n )\n ]\n )\n\n @property\n def num_channels(self):\n return sum(len(source.channels) for source in self.sources)\n\n @property\n def channel_ids(self):\n return sorted(cid for source in self.sources for cid in source.channels)\n\n def load_audio(\n self,\n channels: Optional[Channels] = None,\n offset_seconds: float = 0.0,\n duration_seconds: Optional[float] = None,\n ) -> np.ndarray:\n if channels is None:\n channels = SetContainingAnything()\n elif isinstance(channels, int):\n channels = frozenset([channels])\n else:\n channels = frozenset(channels)\n\n samples_per_source = []\n for source in self.sources:\n # Case: source not requested\n if not channels.intersection(source.channels):\n continue\n samples = source.load_audio(\n offset_seconds=offset_seconds,\n duration_seconds=duration_seconds,\n )\n\n # Case: two-channel audio file but only one channel requested\n # it might not be optimal to load all channels, but IDK if there's anything we can do about it\n channels_to_remove = [\n idx for idx, cid in enumerate(source.channels)\n if cid not in channels\n ]\n if channels_to_remove:\n samples = np.delete(samples, channels_to_remove, axis=0)\n samples_per_source.append(samples)\n\n # shape: (n_channels, n_samples)\n return np.vstack(samples_per_source)\n\n def with_path_prefix(self, path: Pathlike) -> 'Recording':\n return fastcopy(self, sources=[s.with_path_prefix(path) for s in self.sources])\n\n @staticmethod\n def from_dict(data: dict) -> 'Recording':\n raw_sources = data.pop('sources')\n return Recording(sources=[AudioSource.from_dict(s) for s in raw_sources], **data)\n\n\n@dataclass\nclass RecordingSet(JsonMixin, YamlMixin):\n \"\"\"\n RecordingSet represents a dataset of recordings. 
It does not contain any annotation -\n just the information needed to retrieve a recording (possibly multi-channel, from files\n or from shell commands and pipes) and some metadata for each of them.\n\n It also supports (de)serialization to/from YAML and takes care of mapping between\n rich Python classes and YAML primitives during conversion.\n \"\"\"\n recordings: Dict[str, Recording]\n\n @staticmethod\n def from_recordings(recordings: Iterable[Recording]) -> 'RecordingSet':\n return RecordingSet(recordings={r.id: r for r in recordings})\n\n @staticmethod\n def from_dicts(data: Iterable[dict]) -> 'RecordingSet':\n return RecordingSet.from_recordings(Recording.from_dict(raw_rec) for raw_rec in data)\n\n def to_dicts(self) -> List[dict]:\n return [asdict(r) for r in self]\n\n def filter(self, predicate: Callable[[Recording], bool]) -> 'RecordingSet':\n \"\"\"\n Return a new RecordingSet with the Recordings that satisfy the `predicate`.\n\n :param predicate: a function that takes a recording as an argument and returns bool.\n :return: a filtered RecordingSet.\n \"\"\"\n return RecordingSet.from_recordings(rec for rec in self if predicate(rec))\n\n def load_audio(\n self,\n recording_id: str,\n channels: Optional[Channels] = None,\n offset_seconds: float = 0.0,\n duration_seconds: Optional[float] = None,\n ) -> np.ndarray:\n return self.recordings[recording_id].load_audio(\n channels=channels,\n offset_seconds=offset_seconds,\n duration_seconds=duration_seconds\n )\n\n def with_path_prefix(self, path: Pathlike) -> 'RecordingSet':\n return RecordingSet.from_recordings(r.with_path_prefix(path) for r in self)\n\n def num_channels(self, recording_id: str) -> int:\n return self.recordings[recording_id].num_channels\n\n def sampling_rate(self, recording_id: str) -> int:\n return self.recordings[recording_id].sampling_rate\n\n def num_samples(self, recording_id: str) -> int:\n return self.recordings[recording_id].num_samples\n\n def duration(self, recording_id: str) -> Seconds:\n return self.recordings[recording_id].duration\n\n def __getitem__(self, recording_id_or_index: Union[int, str]) -> Recording:\n if isinstance(recording_id_or_index, str):\n return self.recordings[recording_id_or_index]\n # ~100x faster than list(dict.values())[index] for 100k elements\n return next(val for idx, val in enumerate(self.recordings.values()) if idx == recording_id_or_index)\n\n def __iter__(self) -> Iterable[Recording]:\n return iter(self.recordings.values())\n\n def __len__(self) -> int:\n return len(self.recordings)\n\n def __add__(self, other: 'RecordingSet') -> 'RecordingSet':\n return RecordingSet(recordings={**self.recordings, **other.recordings})\n\n\nclass AudioMixer:\n \"\"\"\n Utility class to mix multiple raw audio into a single one.\n It pads the signals with zero samples for differing lengths and offsets.\n \"\"\"\n\n def __init__(self, base_audio: np.ndarray, sampling_rate: int):\n \"\"\"\n :param base_audio: The raw audio used to initialize the AudioMixer are a point of reference\n in terms of offset for all audios mixed into them.\n :param sampling_rate: Sampling rate of the audio.\n \"\"\"\n self.tracks = [base_audio]\n self.sampling_rate = sampling_rate\n self.reference_energy = audio_energy(base_audio)\n\n @property\n def unmixed_audio(self) -> np.ndarray:\n \"\"\"\n Return a numpy ndarray with the shape (num_tracks, num_samples), where each track is\n zero padded and scaled adequately to the offsets and SNR used in ``add_to_mix`` call.\n \"\"\"\n return np.vstack(self.tracks)\n\n @property\n 
def mixed_audio(self) -> np.ndarray:\n \"\"\"\n Return a numpy ndarray with the shape (1, num_samples) - a mono mix of the tracks\n supplied with ``add_to_mix`` calls.\n \"\"\"\n return np.sum(self.unmixed_audio, axis=0, keepdims=True)\n\n def add_to_mix(\n self,\n audio: np.ndarray,\n snr: Optional[Decibels] = None,\n offset: Seconds = 0.0,\n ):\n \"\"\"\n Add audio (only support mono-channel) of a new track into the mix.\n :param audio: An array of audio samples to be mixed in.\n :param snr: Signal-to-noise ratio, assuming `audio` represents noise (positive SNR - lower `audio` energy,\n negative SNR - higher `audio` energy)\n :param offset: How many seconds to shift `audio` in time. For mixing, the signal will be padded before\n the start with low energy values.\n :return:\n \"\"\"\n assert audio.shape[0] == 1 # TODO: support multi-channels\n assert offset >= 0.0, \"Negative offset in mixing is not supported.\"\n\n reference_audio = self.tracks[0]\n dtype = reference_audio.dtype\n num_samples_offset = round(offset * self.sampling_rate)\n current_num_samples = reference_audio.shape[1]\n\n audio_to_add = audio\n\n # When there is an offset, we need to pad before the start of the audio we're adding.\n if offset > 0:\n audio_to_add = np.hstack([\n np.zeros((1, num_samples_offset), dtype),\n audio_to_add\n ])\n\n incoming_num_samples = audio_to_add.shape[1]\n mix_num_samples = max(current_num_samples, incoming_num_samples)\n\n # When the existing samples are less than what we anticipate after the mix,\n # we need to pad after the end of the existing audio mixed so far.\n # Since we're keeping every track as a separate entry in the ``self.tracks`` list,\n # we need to pad each of them so that their shape matches when performing the final mix.\n if current_num_samples < mix_num_samples:\n for idx in range(len(self.tracks)):\n padded_audio = np.hstack([\n self.tracks[idx],\n np.zeros((1, mix_num_samples - current_num_samples), dtype)\n ])\n self.tracks[idx] = padded_audio\n\n # When the audio we're mixing in are shorter that the anticipated mix length,\n # we need to pad after their end.\n # Note: we're doing that non-efficiently, as it we potentially re-allocate numpy arrays twice,\n # during this padding and the offset padding before. If that's a bottleneck, we'll optimize.\n if incoming_num_samples < mix_num_samples:\n audio_to_add = np.hstack([\n audio_to_add,\n np.zeros((1, mix_num_samples - incoming_num_samples), dtype)\n ])\n\n # When SNR is requested, find what gain is needed to satisfy the SNR\n gain = 1.0\n if snr is not None:\n added_audio_energy = audio_energy(audio)\n target_energy = self.reference_energy * (10.0 ** (-snr / 10))\n # When mixing time-domain singals, we are working with root-power (field) quantities,\n # whereas the energy ratio applies to power quantities. To compute the gain correctly,\n # we need to take a square root of the energy ratio.\n gain = sqrt(target_energy / added_audio_energy)\n\n # self.mixed_audio = reference_audio + gain * audio_to_add\n self.tracks.append(gain * audio_to_add)\n\n\ndef audio_energy(audio: np.ndarray) -> float:\n return float(np.average(audio ** 2))\n"
] |
[
[
"numpy.delete",
"numpy.average",
"numpy.zeros",
"numpy.sum",
"numpy.vstack"
]
] |
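`AudioMixer.add_to_mix` in the record above scales the added track by `sqrt(target_energy / added_audio_energy)` so that the energy ratio matches the requested SNR. A self-contained sketch confirming that relationship with synthetic signals (the random data and lengths are illustrative):

```python
# SNR gain rule from the entry above: after scaling by sqrt(E_target / E_noise),
# 10*log10(E_reference / E_scaled_noise) equals the requested SNR in dB.
import numpy as np

def audio_energy(audio):
    return float(np.average(audio ** 2))

rng = np.random.default_rng(0)
reference = rng.normal(size=(1, 16000))
noise = rng.normal(scale=3.0, size=(1, 16000))

snr = 10.0  # requested SNR in dB
target_energy = audio_energy(reference) * (10.0 ** (-snr / 10))
gain = np.sqrt(target_energy / audio_energy(noise))
achieved = 10 * np.log10(audio_energy(reference) / audio_energy(gain * noise))
assert abs(achieved - snr) < 1e-6
```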
gwr3n/inventoryanalytics
|
[
"dd2a49386441bd51287d08ee73059cac7748c898"
] |
[
"inventoryanalytics/forecasting/holt_winters.py"
] |
[
"import numpy as np, pandas as pd\nimport matplotlib.pyplot as plt, pylab as py\nfrom statsmodels.tsa.api import ExponentialSmoothing\n#from statsmodels.tsa.statespace.exponential_smoothing import ExponentialSmoothing\n\ndef sample_seasonal_random_walk(realisations, m):\n np.random.seed(1234)\n errors = np.random.normal(0, 1, realisations)\n Xt = errors[:m]\n for t in range(m,realisations):\n Xt = np.append(Xt, Xt[t-m] + errors[t])\n return Xt\n\ndef plot(realisations, forecasts):\n f = plt.figure(1)\n plt.title(\"Holt-Winters' forecasts\")\n plt.xlabel('Period')\n first, last = next(x for x, val in enumerate(forecasts) if ~np.isnan(val)), len(forecasts)-1\n plt.axvspan(first, last, alpha=0.2, color='blue')\n plt.plot(realisations, label=\"Actual values\")\n plt.plot(forecasts, \"g\", label=\"Holt-Winters' forecasts\")\n plt.legend(loc=\"upper left\")\n plt.grid(True)\n f.show()\n\ndef plot_components(fit):\n f = plt.figure(1)\n pd.DataFrame(np.c_[fit.level,fit.slope,fit.season]).rename(\n columns={0:'level',1:'slope',2:'seasonal'}).plot(subplots=True)\n f.show()\n\n# uncomment from statsmodels.tsa.api import ExponentialSmoothing\ndef holt_winters():\n N, t, m = 100, 80, 4\n realisations = pd.Series(list(sample_seasonal_random_walk(N,m)), range(N))\n mod = ExponentialSmoothing(realisations[:t+1], seasonal_periods=4, trend='add', seasonal='add').fit(optimized=True)\n params = ['smoothing_level', 'smoothing_slope', 'smoothing_seasonal', 'initial_level', 'initial_slope']\n results=pd.DataFrame(index=[\"alpha\",\"beta\",\"gamma\",\"l_0\",\"b_0\",\"SSE\"] ,columns=[\"Holt-Winters'\"])\n results[\"Holt-Winters'\"] = [mod.params[p] for p in params] + [mod.sse]\n print(results)\n forecasts = mod.forecast(N-(t+1)).rename(r'$\\alpha=0.5$ and $\\beta=0.5$')\n plot(realisations, pd.Series(np.nan, range(t+1)).append(forecasts))\n plot_components(mod)\n py.show()\n\ndef plot_ci(realisations, forecasts, forecasts_ci):\n f = plt.figure(1)\n plt.title(\"Holt-Winters' forecasts\\n State Space Model\")\n plt.xlabel('Period')\n first, last = next(x for x, val in enumerate(forecasts) if ~np.isnan(val)), len(forecasts)-1\n plt.axvspan(first, last, alpha=0.2, color='blue')\n plt.plot(realisations, label=\"Actual values\")\n plt.plot(forecasts, \"g\", label=\"Holt-Winters' forecasts\")\n t = next(x for x, val in enumerate(forecasts) if ~np.isnan(val)) - 1\n forecast_index = np.arange(t+1, t+1 + len(forecasts_ci))\n plt.fill_between(forecast_index, forecasts_ci.iloc[:, 0], forecasts_ci.iloc[:, 1], color='r', alpha=0.2)\n plt.legend(loc=\"upper left\")\n plt.grid(True)\n f.show()\n\n# uncomment from statsmodels.tsa.statespace.exponential_smoothing import ExponentialSmoothing\ndef holt_winters_ci():\n N, t, m = 100, 80, 4\n realisations = pd.Series(list(sample_seasonal_random_walk(N,m)), range(N))\n mod = ExponentialSmoothing(realisations[:t+1], trend=True, seasonal=m, initialization_method='estimated').fit(disp=False)\n print(mod.summary())\n forecasts = mod.get_forecast(N-(t+1))\n forecasts_ci = forecasts.conf_int(alpha=0.05)\n plot_ci(realisations, pd.Series(np.nan, range(t+1)).append(forecasts.predicted_mean), forecasts_ci)\n py.show()\n\nholt_winters()\n#holt_winters_ci()"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"numpy.random.seed",
"numpy.isnan",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"numpy.random.normal",
"numpy.append",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.axvspan",
"matplotlib.pyplot.figure"
]
] |
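The Holt-Winters script in the record above aligns its forecasts with the realisations by prepending NaNs via `pd.Series(...).append(forecasts)`. `Series.append` is no longer available in pandas 2.x, so an equivalent with `pd.concat` may be needed on newer installs (the variable values below are placeholders, not data from the entry):

```python
# Equivalent NaN padding with pd.concat instead of the removed Series.append.
import numpy as np
import pandas as pd

t = 80
forecasts = pd.Series([1.0, 2.0, 3.0], index=range(t + 1, t + 4))   # placeholder forecasts
padded = pd.concat([pd.Series(np.nan, index=range(t + 1)), forecasts])
```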
calyptia/calyptia-fluentd-benchmark-aws-environment
|
[
"3900ec856d0b12d32676fab99a3e759e1229866b"
] |
[
"in_syslog_bench/visualize/plot_pandas_Usage.py"
] |
[
"#!/usr/bin/env python3\n\nimport os\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nimport argparse\n\nfrom ansible.parsing.dataloader import DataLoader\nfrom ansible.inventory.manager import InventoryManager\n\nparser = argparse.ArgumentParser(description='Visualize data as plot')\nparser.add_argument('--resource',\n choices=['cpu_s', 'rss_s', 'vms_s', 'cpu_w', 'rss_w', 'vms_w',\n 'read_bytes', 'write_bytes',\n 'recv_bytes', 'send_bytes'],\n default='cpu')\nparser.add_argument('--package-name', default=\"calyptia-fluentd\")\nargs = parser.parse_args()\n\nif args.resource == 'cpu_s':\n resource_key = \"CPU Usage(%)[\" + args.package_name.title()[:15] + \"#0]\"\n xlabel_message = 'flow rate (lines/second)'\n ylabel_message = 'CPU Usage (%)'\n ylimit = 100\n fig_title = 'CPU Usage (Supervisor) -- ' + args.package_name.title()\n fig_name = args.package_name.title() + '-CPU_usage_on_supervisor.png'\n divide_base = -1\nelif args.resource == 'rss_s':\n resource_key = \"RSS(MB)[\" + args.package_name.title()[:15] + \"#0]\"\n xlabel_message = 'flow rate (lines/second)'\n ylabel_message = 'RSS Usage (MB) '\n ylimit = 100\n fig_title = 'RSS Usage (Supervisor) -- ' + args.package_name.title()\n fig_name = args.package_name.title() + '-RSS_usage_on_supervisor.png'\n divide_base = -1\nelif args.resource == 'vms_s':\n resource_key = \"VMS(MB)[\" + args.package_name.title()[:15] + \"#0]\"\n xlabel_message = 'flow rate (lines/second)'\n ylabel_message = 'VMS Usage (MB)'\n ylimit = 1200\n fig_title = 'VMS Usage (Supervisor) -- ' + args.package_name.title()\n fig_name = args.package_name.title() + '-VMS_usage_on_supervisor.png'\n divide_base = -1\nelif args.resource == 'cpu_w':\n resource_key = \"CPU Usage(%)[Ruby#0]\"\n xlabel_message = 'flow rate (lines/second)'\n ylabel_message = 'CPU Usage (%)'\n ylimit = 100\n fig_title = 'CPU Usage (Worker) -- ' + args.package_name.title()\n fig_name = args.package_name.title() + '-CPU_usage_on_worker.png'\n divide_base = -1\nelif args.resource == 'rss_w':\n resource_key = \"RSS(MB)[Ruby#0]\"\n xlabel_message = 'flow rate (lines/second)'\n ylabel_message = 'RSS Usage (MB) '\n ylimit = 200\n fig_title = 'RSS Usage (Worker) -- ' + args.package_name.title()\n fig_name = args.package_name.title() + '-RSS_usage_on_worker.png'\n divide_base = -1\nelif args.resource == 'vms_w':\n resource_key = \"VMS(MB)[Ruby#0]\"\n xlabel_message = 'flow rate (lines/second)'\n ylabel_message = 'VMS Usage (MB)'\n ylimit = 1200\n fig_title = 'VMS Usage (Worker) -- ' + args.package_name.title()\n fig_name = args.package_name.title() + '-VMS_usage_on_worker.png'\n divide_base = -1\nelif args.resource == 'read_bytes':\n resource_key = \"read bytes(KiB/sec)\"\n xlabel_message = 'flow rate (lines/second)'\n ylabel_message = 'Disk Read Usage (bytes)'\n ylimit = 2500\n fig_title = 'Disk Read Usage -- ' + args.package_name.title()\n fig_name = args.package_name.title() + '-Disk_Read_usage.png'\n divide_base = -1\nelif args.resource == 'write_bytes':\n resource_key = \"write bytes(KiB/sec)\"\n xlabel_message = 'flow rate (lines/second)'\n ylabel_message = 'Disk Write Usage (KiB)'\n ylimit = 3500\n fig_title = 'Disk Write Usage -- ' + args.package_name.title()\n fig_name = args.package_name.title() + '-Disk_Write_usage.png'\n divide_base = -1\nelif args.resource == 'recv_bytes':\n resource_key = \"recv bytes(/sec)\"\n xlabel_message = 'flow rate (lines/second)'\n ylabel_message = 'Receive Usage (Bytes)'\n ylimit = 450000\n fig_title = 'Receive 
Bytes Usage -- ' + args.package_name.title()\n fig_name = args.package_name.title() + '-Receive_Bytes_usage.png'\n divide_base = -1\nelif args.resource == 'send_bytes':\n resource_key = \"send bytes(/sec)\"\n xlabel_message = 'flow rate (lines/second)'\n ylabel_message = 'Send Usage (Bytes)'\n ylimit = 3000000\n fig_title = 'Send Bytes Usage -- ' + args.package_name.title()\n fig_name = args.package_name.title() + '-Send_Bytes_usage.png'\n divide_base = -1\n\npwd = os.path.dirname(os.path.realpath(__file__))\ninventory_file_name = os.path.join(pwd, '..', 'ansible/hosts')\ndata_loader = DataLoader()\ninventory = InventoryManager(loader=data_loader,\n sources=[inventory_file_name])\n\ncollector = inventory.get_groups_dict()['collector'][0]\n\ntfvars = {}\nwith open(\"terraform.tfvars\") as tfvarfile:\n for line in tfvarfile:\n name, var = line.partition(\"=\")[::2]\n tfvars[name.strip()] = var\n\nprint(tfvars)\nenvironment = tfvars[\"environment\"].strip(\" \\\"\\n\")\nif environment == \"rhel\":\n username = \"ec2-user\"\nelse:\n username = \"centos\"\n\nprint(collector)\n\nsns.set()\nsns.set_style('whitegrid')\nsns.set_palette('Set3')\n\nbase_path = os.path.join(pwd, '..', \"ansible\", \"output\", collector, \"home\", username)\nprint(base_path)\n\nrate_0 = pd.read_csv(os.path.join(base_path, 'usage-'+ args.package_name + '-0.tsv'), sep='\\t', na_values='.')\nrate_500 = pd.read_csv(os.path.join(base_path, 'usage-'+ args.package_name + '-500.tsv'), sep='\\t', na_values='.')\nrate_1000 = pd.read_csv(os.path.join(base_path, 'usage-'+ args.package_name + '-1000.tsv'), sep='\\t', na_values='.')\nrate_1500 = pd.read_csv(os.path.join(base_path, 'usage-'+ args.package_name + '-1500.tsv'), sep='\\t', na_values='.')\n\ndf = pd.DataFrame({\n 0: rate_0[resource_key],\n 500: rate_500[resource_key],\n 1000: rate_1000[resource_key],\n 1500: rate_1500[resource_key],\n})\nif divide_base > 1:\n df = df.divide(divide_base)\n\nmedians = {0: np.round(df[0].median(), 2),\n 500: np.round(df[500].median(), 2),\n 1000: np.round(df[1000].median(), 2),\n 1500: np.round(df[1500].median(), 2)}\nmedian_labels = [str(np.round(s, 2)) for s in medians]\n\nprint(medians)\ndf_melt = pd.melt(df)\nprint(df_melt.head())\n\nfig, ax = plt.subplots(figsize=(8, 6))\nax.set_title(fig_title)\nax.set_ylim(0, ylimit)\nplot = sns.boxplot(x='variable', y='value', data=df_melt, showfliers=False, ax=ax, showmeans=True)\nplot.set(\n xlabel=xlabel_message,\n ylabel=ylabel_message\n)\n\npos = range(len(medians))\ndata_range = [0, 500, 1000, 1500]\ntick = 0\nfor item in data_range:\n plot.text(tick+0.1, medians[item], medians[item],\n color='w', weight='semibold', size=10, bbox=dict(facecolor='#445A64'))\n tick = tick + 1\nsns.stripplot(x='variable', y='value', data=df_melt, jitter=False, color='black', ax=ax\n).set(\n xlabel=xlabel_message,\n ylabel=ylabel_message\n)\n\nplt.savefig(fig_name)\n"
] |
[
[
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"numpy.round",
"pandas.melt"
]
] |
Rabbit1010/TensorFlow2.0-Tutorial
|
[
"902d9dd55fc87c2bb0a3335002d668b49f9ab3ab"
] |
[
"Topic 3/train.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 5 15:33:24 2019\n\n@author: Wei-Hsiang, Shen\n\"\"\"\n\nimport tensorflow as tf\nfrom model import AOI_model\nfrom generate_data import Get_AOI_DS\n\n\nif __name__ == '__main__':\n print(\"Version: \", tf.__version__)\n print(\"GPU is\", \"available\" if tf.test.is_gpu_available() else \"NOT AVAILABLE\")\n\n BATCH_SIZE = 32\n EPOCH = 50\n\n # Get the dataset\n train_ds, val_ds, data_size = Get_AOI_DS(BATCH_SIZE)\n\n # Initialize the model\n model = AOI_model()\n\n # Setup call backs\n checkpoint_path = \"./checkpoints/resnet50_{epoch:03d}_{val_acc:.5f}.h5\"\n save_checkpoint = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, verbose=1, save_weights_only=True)\n csv_logger = tf.keras.callbacks.CSVLogger('./checkpoints/training.log')\n\n model.fit(x=train_ds, validation_data=val_ds,\n epochs=EPOCH, verbose=1,\n steps_per_epoch=tf.math.ceil(data_size*0.8/BATCH_SIZE).numpy(),\n validation_steps=tf.math.ceil(data_size*0.2/BATCH_SIZE).numpy(),\n callbacks=[save_checkpoint, csv_logger])\n\n"
] |
[
[
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.math.ceil",
"tensorflow.keras.callbacks.CSVLogger",
"tensorflow.test.is_gpu_available"
]
] |
SherwinGroup/Stele
|
[
"9bb7da0b406a801975e21c9f7ce05d369ae661e5"
] |
[
"src/Stele/processing/processing_hsg/ccd_collection/high_sideband_ccd.py"
] |
[
"import os\nimport errno\nimport copy\nimport json\nimport numpy as np\nfrom scipy.optimize import curve_fit\nimport matplotlib.pyplot as plt\nfrom .ccd import CCD\nfrom Stele.processing.processing_hsg.helper_functions import gauss\nfrom .helper_functions import calc_laser_frequencies\n\nnp.set_printoptions(linewidth=500)\n\n\nclass HighSidebandCCD(CCD):\n def __init__(\n self, hsg_thing, parameter_dict=None, spectrometer_offset=None):\n \"\"\"\n This will read the appropriate file. The header needs to be fixed to\n reflect the changes to the output header from the Andor file. Because\n another helper file will do the cleaning and background subtraction,\n those are no longer part of this init. This also turns all wavelengths\n from nm (NIR ones) or cm-1 (THz ones) into eV.\n\n OR, if an array is thrown in there, it'll handle the array and dict\n\n Input:\n For post-processing analysis:\n hsg_thing = file name of the hsg spectrum from CCD superclass\n spectrometer_offset = number of nanometers the spectrometer is\n off by, should be 0.0...but can be 0.2 or 1.0\n For Live-software:\n hsg_thing = np array of spectrum from camera\n parameter_dict = equipment dict generated by software\n\n Internal:\n self.hsg_thing = the filename\n self.parameters = string with all the relevant experimental perameters\n self.description = the description we added to the file as the data\n was being taken\n self.proc_data = processed data that has gone is frequency\n vs counts/pulse\n self.dark_stdev = this is not currently handled appropriately\n self.addenda = the list of things that have been added to the file, in\n form of [constant, *spectra_added]\n self.subtrahenda = the list of spectra that have been subtracted from\n the file. Constant subtraction is dealt with with\n self.addenda\n\n :param hsg_thing: file name for the file to be opened. OR the actually\n hsg np.ndarray. Fun!\n :type hsg_thing: str OR np.ndarray\n :param parameter_dict: If being loaded through the data acquisition\n GUI, throw the dict in here\n :type parameter_dict: dict\n :param spectrometer_offset: Number of nm the spectrometer is off by\n :type spectrometer_offset: float\n :return: None, technically\n \"\"\"\n if isinstance(hsg_thing, str):\n super(HighSidebandCCD, self).__init__(\n hsg_thing, spectrometer_offset=spectrometer_offset)\n # TODO: fix addenda bullshit\n self.addenda = []\n self.subtrahenda = []\n elif isinstance(hsg_thing, np.ndarray):\n # Probably shouldn't shoehorn this in this way\n self.parameters = parameter_dict.copy()\n self.addenda = []\n self.subtrahenda = []\n self.ccd_data = np.array(hsg_thing)\n self.ccd_data[:, 0] = 1239.84 / self.ccd_data[:, 0]\n # This data won't have an error column, so attached a column of 1s\n self.ccd_data = np.column_stack((\n self.ccd_data, np.ones_like(self.ccd_data[:, 1])))\n # Because turning into eV switches direction\n self.ccd_data = np.flipud(self.ccd_data)\n self.fname = \"Live Data\"\n else:\n raise Exception(\n \"I don't know what this file type is {}, type: {}\".format(\n hsg_thing, type(hsg_thing)))\n self.proc_data = np.array(self.ccd_data)\n # proc_data is now a 1600 long array with [frequency (eV),\n # signal (counts / FEL pulse), S.E. 
of signal mean]\n\n # self.parameters[\"nir_freq\"] = 1239.84\n # / float(self.parameters[\"nir_lambda\"])\n self.parameters[\"nir_freq\"] = 1239.84 / float(self.parameters.get(\n \"nir_lambda\", -1))\n # self.parameters[\"thz_freq\"] = 0.000123984 *\n # float(self.parameters[\"fel_lambda\"])\n self.parameters[\"thz_freq\"] = 0.000123984 * float(self.parameters.get(\n \"fel_lambda\", -1))\n # self.parameters[\"nir_power\"] = float(self.parameters[\"nir_power\"])\n self.parameters[\"nir_power\"] = float(self.parameters.get(\n \"nir_power\", -1))\n try: # This is the new way of doing things. Also, now it's power\n self.parameters[\"thz_energy\"] = float(\n self.parameters[\"pulseEnergies\"][\"mean\"])\n self.parameters[\"thz_energy_std\"] = float(\n self.parameters[\"pulseEnergies\"][\"std\"])\n except Exception: # This is the old way TODO: DEPRECATE THIS\n self.parameters[\"thz_energy\"] = float(self.parameters.get(\n \"fel_power\", -1))\n\n # things used in fitting/guessing\n self.sb_list = np.array([])\n self.sb_index = np.array([])\n self.sb_dict = {}\n self.sb_results = np.array([])\n self.full_dict = {}\n\n def __add__(self, other):\n \"\"\"\n Add together the image data from self.proc_data, or add a constant to\n that np.array. It will then combine the addenda and subtrahenda lists,\n as well as add the fel_pulses together. If type(other) is a CCD\n object, then it will add the errors as well.\n\n Input:\n self = CCD-like object\n other = int, float or CCD object\n\n Internal:\n ret.proc_data = the self.proc_data + other(.proc_data)\n ret.addenda = combination of two input addenda lists\n\n This raises a FutureWarning because these were designed early on and\n haven't been used much.\n\n :param other: The thing to be added, it's either a int/float or a\n HighSidebandCCD object\n :type other: int/float or HighSidebandCCD\n :return: Sum of self and other\n :rtype: HighSidebandCCD\n \"\"\"\n raise FutureWarning\n ret = copy.deepcopy(self)\n # Add a constant offset to the data\n if type(other) in (int, float):\n ret.proc_data[:, 1] = self.proc_data[:, 1] + other\n ret.addenda[0] = ret.addenda[0] + other\n\n # or add the data of two hsg_spectra together\n else:\n if np.isclose(ret.parameters['center_lambda'],\n other.parameters['center_lambda']):\n ret.proc_data[:, 1] = (\n self.proc_data[:, 1] + other.proc_data[:, 1])\n ret.proc_data[:, 2] = np.sqrt(\n self.proc_data[:, 1] ** 2 + other.proc_data[:, 1] ** 2)\n ret.addenda[0] = ret.addenda[0] + other.addenda[0]\n ret.addenda.extend(other.addenda[1:])\n ret.subtrahenda.extend(other.subtrahenda)\n ret.parameters['fel_pulses'] += other.parameters['fel_pulses']\n else:\n raise Exception(\n 'Source: Spectrum.__add__:\\n' +\n 'These are not from the same grating settings')\n return ret\n\n def __sub__(self, other):\n \"\"\"\n This subtracts constants or other data sets between self.proc_data. 
I\n think it even keeps track of what data sets are in the file and how\n they got there.\n\n See how __add__ works for more information.\n\n This raises a FutureWarning because these were designed early on and\n haven't been used much.\n\n :param other: The thing to be subtracted, it's either a int/float or a\n HighSidebandCCD object\n :type other: int/float or HighSidebandCCD\n :return: Sum of self and other\n :rtype: HighSidebandCCD\n \"\"\"\n raise FutureWarning\n ret = copy.deepcopy(self)\n # Subtract a constant offset to the data\n if type(other) in (int, float):\n # Need to choose a name\n ret.proc_data[:, 1] = self.proc_data[:, 1] - other\n ret.addenda[0] = ret.addenda[0] - other\n\n # Subtract the data of two hsg_spectra from each other\n else:\n if np.isclose(ret.proc_data[0, 0], other.proc_data[0, 0]):\n ret.proc_data[:, 1] = (\n self.proc_data[:, 1] - other.proc_data[:, 1])\n ret.proc_data[:, 2] = np.sqrt(\n self.proc_data[:, 1] ** 2 + other.proc_data[:, 1] ** 2)\n ret.subtrahenda.extend(other.addenda[1:])\n ret.addenda.extend(other.subtrahenda)\n else:\n raise Exception(\n 'Source: Spectrum.__sub__:\\n' +\n 'These are not from the same grating settings')\n return ret\n\n def __repr__(self):\n \"\"\"\n This returns a string of filename, series, spectrometer step,\n and the wavelengths of FEL and NIR lasers.\n \"\"\"\n base = \"\"\"\n fname: {},\n Series: {series},\n spec_step: {spec_step},\n fel_lambda: {fel_lambda},\n nir_lambda: {nir_lambda}\"\"\".format(\n os.path.basename(self.fname), **self.parameters)\n return base\n\n __str__ = __repr__\n\n def calc_approx_sb_order(self, test_nir_freq):\n \"\"\"\n This simple method will simply return a float approximating the order\n of the frequency input. We need this because the CCD wavelength\n calibration is not even close to perfect. And it shifts by half a nm\n sometimes.\n\n :param test_nir_freq: the frequency guess of the nth sideband\n :type test_nir_freq: float\n :return: The approximate order of the sideband in question\n :rtype: float\n \"\"\"\n nir_freq = self.parameters['nir_freq']\n thz_freq = self.parameters['thz_freq']\n # If thz = 0, prevent error\n if not thz_freq:\n thz_freq = 1\n approx_order = (test_nir_freq - nir_freq) / thz_freq\n return approx_order\n\n # TODO: break the following definition into multiple parts, possibly files\n def guess_sidebands(self, cutoff=4.5, verbose=False, plot=False, **kwargs):\n \"\"\"\n Update 05/24/18:\n Hunter had two different loops for negative order sidebands,\n then positive order sidebands. They're done pretty much identically,\n so I've finally merged them into one.\n\n Finds the locations of all the sidebands in the proc_data array to be\n able to seed the fitting method. This works by finding the maximum\n data value in the array and guessing what sideband it is. It creates\n an array that includes this information. It will then step down,\n initially by one THz frequency, then by twos after it hasn't found any\n odd ones. It then goes up from the max and finds everything above in\n much the same way.\n\n There is currently no rhyme or reason to a cutoff of 8. I don't know\n what it should be changed to, though.\n\n Input:\n cutoff = signal-to-noise threshold to count a sideband candidate.\n\n kwargs:\n window_size: how big of a window (in pixels) to use for checking for\n sidebands. 
Specified in half-width\n default: 15\n\n\n Internal:\n self.sb_list = List of all of the orders the method found\n self.sb_index = index of all of the peaks of the sidebands\n self.sb_guess = three-part list including the frequency, amplitude and\n error guesses for each sideband\n \"\"\"\n # TODO: this isn't commented appropriately.\n # Will it be made more readable first?\n\n if \"cutoff\" in self.parameters:\n cutoff = self.parameters[\"cutoff\"]\n else:\n self.parameters['cutoff for guess_sidebands'] = cutoff\n\n if verbose:\n print(\"=\" * 15)\n print()\n print(\"Guessing CCD Sideband parameters\")\n print(os.path.basename(self.fname))\n print(\"\\tCutoff = {}\".format(cutoff))\n print()\n print(\"=\" * 15)\n x_axis = np.array(self.proc_data[:, 0])\n y_axis = np.array(self.proc_data[:, 1])\n try:\n error = np.array(self.proc_data[:, 2])\n except IndexError:\n # Happens on old data where spectra weren't calculated in the live\n # software.\n error = np.ones_like(x_axis)\n\n min_sb = int(self.calc_approx_sb_order(x_axis[0])) + 1\n try:\n max_sb = int(self.calc_approx_sb_order(x_axis[-1]))\n except ValueError:\n print(x_axis)\n\n nir_freq = self.parameters[\"nir_freq\"]\n thz_freq = self.parameters[\"thz_freq\"]\n\n if verbose:\n print(\"min_sb: {} | max_sb: {}\".format(min_sb, max_sb))\n\n # Find max strength sideband and it's order\n global_max = np.argmax(y_axis)\n order_init = int(round(self.calc_approx_sb_order(x_axis[global_max])))\n # if verbose:\n # print \"The global max is at index\", global_max\n if global_max < 15:\n check_y = y_axis[:global_max + 15]\n check_y = np.concatenate((np.zeros(15 - global_max), check_y))\n elif global_max > 1585:\n check_y = y_axis[global_max - 15:]\n check_y = np.concatenate((check_y, np.zeros(global_max - 1585)))\n else:\n check_y = y_axis[global_max - 15:global_max + 15]\n\n check_max_index = np.argmax(check_y)\n check_max_area = np.sum(\n check_y[check_max_index - 2:check_max_index + 3])\n\n check_ave = np.mean(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])\n check_stdev = np.std(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])\n check_ratio = (check_max_area - 3 * check_ave) / check_stdev\n\n if verbose:\n print((\"{:^16}\" * 5).format(\n \"global_max idx\", \"check_max_area\", \"check_ave\", \"check_stdev\",\n \"check_ratio\"))\n print((\"{:^16.5g}\" * 5).format(\n global_max, check_max_area, check_ave,\n check_stdev, check_ratio))\n\n if check_ratio > cutoff:\n self.sb_list = [order_init]\n self.sb_index = [global_max]\n sb_freq_guess = [x_axis[global_max]]\n sb_amp_guess = [y_axis[global_max]]\n sb_error_est = [\n np.sqrt(sum(\n [i ** 2 for i in error[global_max - 2:global_max + 3]]))\n / (check_max_area - 5 * check_ave)]\n else:\n print(\"There are no sidebands in\", self.fname)\n raise RuntimeError\n\n if verbose:\n print(\"\\t Looking for sidebands with f < {:.6f}\".format(\n sb_freq_guess[0]))\n last_sb = sb_freq_guess[0]\n index_guess = global_max\n # keep track of how many consecutive sidebands we've skipped. Sometimes\n # one's noisy or something, so we keep looking after skipping one\n consecutive_null_sb = 0\n consecutive_null_odd = 0\n no_more_odds = False\n break_condition = False\n for order in range(order_init - 1, min_sb - 1, -1):\n # Check to make sure we're not looking at an odd when\n # we've decided to skip them.\n if no_more_odds is True and order % 2 == 1:\n last_sb = last_sb - thz_freq\n if verbose:\n print(\"I skipped\", order)\n continue\n\n # Window size to look for next sideband. 
Needs to be order\n # dependent because higher orders get wider, so we need to look at\n # more. Values are arbitrary.\n window_size = 0.45 + 0.0004 * order # used to be last_sb?\n lo_freq_bound = last_sb - thz_freq * (\n 1 + window_size) # Not sure what to do about these\n hi_freq_bound = last_sb - thz_freq * (1 - window_size)\n\n if verbose:\n print(\"\\nSideband\", order)\n print(\"\\t{:.4f} < f_{} < {:.4f}\".format(lo_freq_bound, order,\n hi_freq_bound))\n\n # Get the indices tenergies lie within the bounds for this SB\n sliced_indices = \\\n np.where((x_axis > lo_freq_bound)\n & (x_axis < hi_freq_bound))[0]\n start_index, end_index = sliced_indices.min(), sliced_indices.max()\n\n # Get a slice of the y_data which is only in the region of interest\n check_y = y_axis[sliced_indices]\n\n check_max_index = np.argmax(\n check_y) # This assumes that two floats won't be identical\n # Calculate the \"area\" of the sideband by looking at the peak value\n # within the range, and the pixel above/below it\n check_max_area = np.sum(\n check_y[check_max_index - 1:check_max_index + 2])\n\n if verbose and plot:\n plt.figure(\"CCD data\")\n plt.plot(\n [lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')\n plt.plot(\n [hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')\n plt.plot(\n [lo_freq_bound, hi_freq_bound], [check_y[check_max_index]]\n * 2, 'b', label=\"{} Box\".format(order))\n plt.text(\n (lo_freq_bound + hi_freq_bound) / 2,\n check_y[check_max_index], order)\n\n # get slice that doesn't have the peak in it to compare statistics\n check_region = np.append(check_y[:check_max_index - 1],\n check_y[check_max_index + 2:])\n check_ave = check_region.mean()\n check_stdev = check_region.std()\n\n # Calculate an effective SNR, where check_ave is roughly the\n # background level\n check_ratio = (check_max_area - 3 * check_ave) / check_stdev\n\n # This raises the barrier for odd sideband detection\n if order % 2 == 1:\n check_ratio = check_ratio / 1.5\n if verbose:\n print(\"\\t\" + (\"{:^14}\" * 4).format(\n \"check_max_area\", \"check_ave\",\n \"check_stdev\", \"check_ratio\"))\n print(\"\\t\" + (\"{:^14.5g}\" * 4).format(\n check_max_area, check_ave, check_stdev, check_ratio))\n\n if check_ratio > cutoff:\n found_index = check_max_index + start_index\n self.sb_index.append(found_index)\n last_sb = x_axis[found_index]\n\n if verbose:\n print(\"I just found\", last_sb)\n\n sb_freq_guess.append(x_axis[found_index])\n sb_amp_guess.append(check_max_area - 3 * check_ave)\n error_est = np.sqrt(\n sum(\n [i ** 2 for i in error[\n found_index - 1:found_index + 2]]\n )) / (check_max_area - 3 * check_ave)\n if verbose:\n print(\"My error estimate is:\", error_est)\n sb_error_est.append(error_est)\n self.sb_list.append(order)\n consecutive_null_sb = 0\n if order % 2 == 1:\n consecutive_null_odd = 0\n else:\n # print \"I could not find sideband with order\", order\n last_sb = last_sb - thz_freq\n consecutive_null_sb += 1\n if order % 2 == 1:\n consecutive_null_odd += 1\n if consecutive_null_odd == 1 and no_more_odds is False:\n # print \"I'm done looking for odd sidebands\"\n no_more_odds = True\n if consecutive_null_sb == 2:\n # print \"I can't find any more sidebands\"\n break\n\n # Look for higher sidebands\n if verbose:\n print(\"\\nLooking for higher energy sidebands\")\n\n last_sb = sb_freq_guess[0]\n index_guess = global_max\n consecutive_null_sb = 0\n consecutive_null_odd = 0\n no_more_odds = False\n break_condition = False\n for order in range(order_init + 1, max_sb + 1):\n if no_more_odds is 
True and order % 2 == 1:\n last_sb = last_sb + thz_freq\n continue\n window_size = 0.45 + 0.001 * order # used to be 0.28 and 0.0004\n lo_freq_bound = last_sb + thz_freq * (\n 1 - window_size) # Not sure what to do about these\n hi_freq_bound = last_sb + thz_freq * (1 + window_size)\n\n start_index = False\n end_index = False\n\n if verbose:\n print(\"\\nSideband\", order)\n # print \"The low frequency bound is\", lo_freq_bound\n # print \"The high frequency bound is\", hi_freq_bound\n print(\"\\t{:.4f} < f_{} < {:.4f}\".format(lo_freq_bound, order,\n hi_freq_bound))\n for i in range(index_guess, 1600):\n if start_index is False and i == 1599:\n # print \"I'm all out of space, captain!\"\n break_condition = True\n break\n elif start_index is False and x_axis[i] > lo_freq_bound:\n # print \"start_index is\", i\n start_index = i\n elif i == 1599:\n end_index = 1599\n # print \"hit end of data, end_index is 1599\"\n elif end_index is False and x_axis[i] > hi_freq_bound:\n end_index = i\n # print \"end_index is\", i\n index_guess = i\n break\n if break_condition:\n break\n check_y = y_axis[start_index:end_index]\n\n # This assumes that two floats won't be identical\n check_max_index = np.argmax(check_y)\n\n # To be able to break down check_y into eighths\n octant = len(check_y) // 8\n if octant < 1:\n octant = 1\n\n check_max_area = np.sum(\n check_y[check_max_index - octant - 1:\n check_max_index + octant + 1])\n\n if verbose and plot:\n plt.figure(\"CCD data\")\n plt.plot(\n [lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')\n plt.plot(\n [hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')\n plt.plot(\n [lo_freq_bound, hi_freq_bound],\n [check_y[check_max_index]] * 2, 'b', label=order)\n plt.text(\n (lo_freq_bound + hi_freq_bound) / 2,\n check_y[check_max_index], order)\n\n no_peak = (2 * len(\n check_y)) // 6 # The denominator is in flux, used to be 5\n # if verbose: print \"\\tcheck_y length\", len(check_y)\n\n check_ave = np.mean(np.take(check_y, np.concatenate(\n (np.arange(no_peak), np.arange(-no_peak, 0)))))\n check_stdev = np.std(np.take(check_y, np.concatenate(\n (np.arange(no_peak), np.arange(-no_peak, 0)))))\n\n check_ratio = (\n check_max_area - (2 * octant + 1) * check_ave) / check_stdev\n\n if verbose:\n print(\"\\tIndices: {}->{} (d={})\".format(start_index, end_index,\n len(check_y)))\n # print \"check_y is\", check_y\n # print \"\\ncheck_max_area is\", check_max_area\n # print \"check_ave is\", check_ave\n # print \"check_stdev is\", check_stdev\n # print \"check_ratio is\", check_ratio\n\n print(\"\\t\" + (\"{:^14}\" * 4).format(\n \"check_max_area\", \"check_ave\",\n \"check_stdev\", \"check_ratio\"))\n print(\"\\t\" + (\"{:^14.6g}\" * 4).format(\n check_max_area, check_ave, check_stdev, check_ratio))\n\n # This raises the barrier for odd sideband detection\n if order % 2 == 1:\n check_ratio = check_ratio / 2\n if check_ratio > cutoff:\n found_index = check_max_index + start_index\n self.sb_index.append(found_index)\n last_sb = x_axis[found_index]\n\n# print \"\\tI found\", order, \"at index\", found_index, \"at freq\", last_sb\n if verbose:\n print(\n \"\\tI'm counting this SB at index {} (f={:.4f})\".format(\n found_index, last_sb),\n end=' ')\n\n sb_freq_guess.append(\n x_axis[found_index])\n sb_amp_guess.append(\n check_max_area - (2 * octant + 1) * check_ave)\n # This error is a relative error.\n error_est = (\n np.sqrt(sum([i ** 2 for i in error[\n found_index - octant:found_index + octant]]))\n / (check_max_area - (2 * octant + 1) * check_ave))\n if 
verbose:\n print(\". Err = {:.3g}\".format(error_est))\n # print \"\\tMy error estimate is:\", error_est\n # print \"My relative error is:\", error_est / sb_amp_guess\n sb_error_est.append(error_est)\n self.sb_list.append(order)\n consecutive_null_sb = 0\n if order % 2 == 1:\n consecutive_null_odd = 0\n else:\n # print \"I could not find sideband with order\", order\n last_sb = last_sb + thz_freq\n consecutive_null_sb += 1\n if order % 2 == 1:\n consecutive_null_odd += 1\n if verbose:\n print(\"\\t\\tI did not count this sideband\")\n if consecutive_null_odd == 1 and no_more_odds is False:\n # print \"I'm done looking for odd sidebands\"\n no_more_odds = True\n if consecutive_null_sb == 2:\n # print \"I can't find any more sidebands\"\n break\n\n if verbose:\n print(\"I found these sidebands:\", self.sb_list)\n print('-' * 15)\n print()\n print()\n\n # self.sb_guess = [frequency guess, amplitude guess,\n # relative error of amplitude] for each sideband.\n self.sb_guess = np.array([np.asarray(sb_freq_guess),\n np.asarray(sb_amp_guess),\n np.asarray(sb_error_est)]).T\n\n# TODO: altar guess_sidebands and guess_sidebandsOld to share functions\n def guess_sidebandsOld(\n self, cutoff=4.5, verbose=False, plot=False, **kwargs):\n \"\"\"\n 05/24/18\n Old code from Hunter's days (or nearly, I've already started cleaning\n some stuff up). keeping it around in case I break too much stuff\n\n Finds the locations of all the sidebands in the proc_data array to be\n able to seed the fitting method. This works by finding the maximum\n data value in the array and guessing what sideband it is. It creates\n an array that includes this information. It will then step down,\n initially by one THz frequency, then by twos after it hasn't found any\n odd ones. It then goes up from the max and finds everything above in\n much the same way.\n\n There is currently no rhyme or reason to a cutoff of 8. I don't know\n what it should be changed to, though.\n\n Input:\n cutoff = signal-to-noise threshold to count a sideband candidate.\n\n kwargs:\n window_size: how big of a window (in pixels) to use for checking for\n sidebands. 
Specified in half-width\n default: 15\n\n\n Internal:\n self.sb_list = List of all of the orders the method found\n self.sb_index = index of all of the peaks of the sidebands\n self.sb_guess = three-part list including the frequency, amplitude and\n error guesses for each sideband\n \"\"\"\n # TODO: this isn't commented appropriately.\n # Will it be made more readable first?\n\n if \"cutoff\" in self.parameters:\n cutoff = self.parameters[\"cutoff\"]\n else:\n self.parameters['cutoff for guess_sidebands'] = cutoff\n\n if verbose:\n print(\"=\" * 15)\n print()\n print(\"Guessing CCD Sideband parameters\")\n print(os.path.basename(self.fname))\n print(\"\\tCutoff = {}\".format(cutoff))\n print()\n print(\"=\" * 15)\n x_axis = np.array(self.proc_data[:, 0])\n y_axis = np.array(self.proc_data[:, 1])\n error = np.array(self.proc_data[:, 2])\n\n min_sb = int(self.calc_approx_sb_order(x_axis[0])) + 1\n try:\n max_sb = int(self.calc_approx_sb_order(x_axis[-1]))\n except ValueError:\n print(x_axis)\n\n nir_freq = self.parameters[\"nir_freq\"]\n thz_freq = self.parameters[\"thz_freq\"]\n\n if verbose:\n print(\"min_sb: {} | max_sb: {}\".format(min_sb, max_sb))\n\n # Find max strength sideband and it's order\n global_max = np.argmax(y_axis)\n order_init = int(round(self.calc_approx_sb_order(x_axis[global_max])))\n # if verbose:\n # print \"The global max is at index\", global_max\n if global_max < 15:\n check_y = y_axis[:global_max + 15]\n check_y = np.concatenate((np.zeros(15 - global_max), check_y))\n elif global_max > 1585:\n check_y = y_axis[global_max - 15:]\n check_y = np.concatenate((check_y, np.zeros(global_max - 1585)))\n else:\n check_y = y_axis[global_max - 15:global_max + 15]\n\n check_max_index = np.argmax(check_y)\n check_max_area = np.sum(\n check_y[check_max_index - 2:check_max_index + 3])\n\n check_ave = np.mean(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])\n check_stdev = np.std(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])\n check_ratio = (check_max_area - 3 * check_ave) / check_stdev\n\n if verbose:\n print((\"{:^16}\" * 5).format(\n \"global_max idx\", \"check_max_area\", \"check_ave\", \"check_stdev\",\n \"check_ratio\"))\n print((\"{:^16.5g}\" * 5).format(\n global_max, check_max_area, check_ave,\n check_stdev, check_ratio))\n\n if check_ratio > cutoff:\n self.sb_list = [order_init]\n self.sb_index = [global_max]\n sb_freq_guess = [x_axis[global_max]]\n sb_amp_guess = [y_axis[global_max]]\n sb_error_est = [\n np.sqrt(sum([i ** 2 for i in error[\n global_max - 2:global_max + 3]]))\n / (check_max_area - 5 * check_ave)]\n else:\n print(\"There are no sidebands in\", self.fname)\n raise RuntimeError\n\n if verbose:\n print(\"\\t Looking for sidebands with f < {:.6f}\".format(\n sb_freq_guess[0]))\n last_sb = sb_freq_guess[0]\n index_guess = global_max\n # keep track of how many consecutive sidebands we've skipped. Sometimes\n # one's noisy or something, so we'd keep looking after skipping one\n consecutive_null_sb = 0\n consecutive_null_odd = 0\n no_more_odds = False\n break_condition = False\n for order in range(order_init - 1, min_sb - 1, -1):\n # Check to make sure we're not looking at an odd when\n # we've decided to skip them.\n if no_more_odds is True and order % 2 == 1:\n last_sb = last_sb - thz_freq\n if verbose:\n print(\"I skipped\", order)\n continue\n\n # Window size to look for next sideband. Needs to be order\n # dependent because higher orders get wider, so we need to look at\n # more. 
Values are arbitrary.\n window_size = 0.45 + 0.0004 * order # used to be last_sb?\n lo_freq_bound = last_sb - thz_freq * (\n 1 + window_size) # Not sure what to do about these\n hi_freq_bound = last_sb - thz_freq * (1 - window_size)\n\n if verbose:\n print(\"\\nSideband\", order)\n print(\"\\t{:.4f} < f_{} < {:.4f}\".format(lo_freq_bound, order,\n hi_freq_bound))\n\n# Get the indices where the energies lie within the bounds for this SB\n sliced_indices = \\\n np.where((x_axis > lo_freq_bound)\n & (x_axis < hi_freq_bound))[0]\n start_index, end_index = sliced_indices.min(), sliced_indices.max()\n\n # Get a slice of the y_data which is only in the region of interest\n check_y = y_axis[sliced_indices]\n\n check_max_index = np.argmax(\n check_y) # This assumes that two floats won't be identical\n # Calculate the \"area\" of the sideband by looking at the peak value\n # within the range, and the pixel above/below it\n check_max_area = np.sum(\n check_y[check_max_index - 1:check_max_index + 2])\n\n if verbose and plot:\n plt.figure(\"CCD data\")\n plt.plot(\n [lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')\n plt.plot(\n [hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')\n plt.plot(\n [lo_freq_bound, hi_freq_bound], [check_y[check_max_index]]\n * 2, 'b', label=\"{} Box\".format(order))\n plt.text(\n (lo_freq_bound + hi_freq_bound) / 2,\n check_y[check_max_index], order)\n\n # get slice that doesn't have the peak in it to compare statistics\n check_region = np.append(check_y[:check_max_index - 1],\n check_y[check_max_index + 2:])\n check_ave = check_region.mean()\n check_stdev = check_region.std()\n\n # Calculate an effective SNR, where check_ave is roughly the\n # background level\n check_ratio = (check_max_area - 3 * check_ave) / check_stdev\n\n # This raises the barrier for odd sideband detection\n if order % 2 == 1:\n check_ratio = check_ratio / 1.5\n if verbose:\n print(\"\\t\" + (\"{:^14}\" * 4).format(\n \"check_max_area\", \"check_ave\",\n \"check_stdev\", \"check_ratio\"))\n print(\"\\t\" + (\"{:^14.5g}\" * 4).format(\n check_max_area, check_ave, check_stdev, check_ratio))\n\n if check_ratio > cutoff:\n found_index = check_max_index + start_index\n self.sb_index.append(found_index)\n last_sb = x_axis[found_index]\n\n if verbose:\n print(\"I just found\", last_sb)\n\n sb_freq_guess.append(x_axis[found_index])\n sb_amp_guess.append(check_max_area - 3 * check_ave)\n error_est = np.sqrt(\n sum(\n [i ** 2 for i in error[\n found_index - 1:found_index + 2]]\n )) / (check_max_area - 3 * check_ave)\n if verbose:\n print(\"My error estimate is:\", error_est)\n sb_error_est.append(error_est)\n self.sb_list.append(order)\n consecutive_null_sb = 0\n if order % 2 == 1:\n consecutive_null_odd = 0\n else:\n # print \"I could not find sideband with order\", order\n last_sb = last_sb - thz_freq\n consecutive_null_sb += 1\n if order % 2 == 1:\n consecutive_null_odd += 1\n if consecutive_null_odd == 1 and no_more_odds is False:\n # print \"I'm done looking for odd sidebands\"\n no_more_odds = True\n if consecutive_null_sb == 2:\n # print \"I can't find any more sidebands\"\n break\n\n # Look for higher sidebands\n if verbose:\n print(\"\\nLooking for higher energy sidebands\")\n\n last_sb = sb_freq_guess[0]\n index_guess = global_max\n consecutive_null_sb = 0\n consecutive_null_odd = 0\n no_more_odds = False\n break_condition = False\n for order in range(order_init + 1, max_sb + 1):\n if no_more_odds is True and order % 2 == 1:\n last_sb = last_sb + thz_freq\n continue\n window_size = 0.45 + 
0.001 * order # used to be 0.28 and 0.0004\n lo_freq_bound = last_sb + thz_freq * (\n 1 - window_size) # Not sure what to do about these\n hi_freq_bound = last_sb + thz_freq * (1 + window_size)\n\n start_index = False\n end_index = False\n\n if verbose:\n print(\"\\nSideband\", order)\n # print \"The low frequency bound is\", lo_freq_bound\n # print \"The high frequency bound is\", hi_freq_bound\n print(\"\\t{:.4f} < f_{} < {:.4f}\".format(lo_freq_bound, order,\n hi_freq_bound))\n for i in range(index_guess, 1600):\n if start_index is False and i == 1599:\n # print \"I'm all out of space, captain!\"\n break_condition = True\n break\n elif start_index is False and x_axis[i] > lo_freq_bound:\n # print \"start_index is\", i\n start_index = i\n elif i == 1599:\n end_index = 1599\n # print \"hit end of data, end_index is 1599\"\n elif end_index is False and x_axis[i] > hi_freq_bound:\n end_index = i\n # print \"end_index is\", i\n index_guess = i\n break\n if break_condition:\n break\n check_y = y_axis[start_index:end_index]\n\n check_max_index = np.argmax(\n check_y) # This assumes that two floats won't be identical\n # To be able to break down check_y into eighths\n octant = len(check_y) // 8\n if octant < 1:\n octant = 1\n\n check_max_area = np.sum(\n check_y[check_max_index - octant - 1:\n check_max_index + octant + 1])\n\n if verbose and plot:\n plt.figure(\"CCD data\")\n plt.plot(\n [lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')\n plt.plot(\n [hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')\n plt.plot(\n [lo_freq_bound, hi_freq_bound],\n [check_y[check_max_index]] * 2, 'b', label=order)\n plt.text(\n (lo_freq_bound + hi_freq_bound) / 2,\n check_y[check_max_index], order)\n\n no_peak = (2 * len(\n check_y)) // 6 # The denominator is in flux, used to be 5\n # if verbose: print \"\\tcheck_y length\", len(check_y)\n\n check_ave = np.mean(np.take(check_y, np.concatenate(\n (np.arange(no_peak), np.arange(-no_peak, 0)))))\n check_stdev = np.std(np.take(check_y, np.concatenate(\n (np.arange(no_peak), np.arange(-no_peak, 0)))))\n\n check_ratio = ((check_max_area - (2 * octant + 1) * check_ave)\n / check_stdev)\n\n if verbose:\n print(\"\\tIndices: {}->{} (d={})\".format(start_index, end_index,\n len(check_y)))\n # print \"check_y is\", check_y\n # print \"\\ncheck_max_area is\", check_max_area\n # print \"check_ave is\", check_ave\n # print \"check_stdev is\", check_stdev\n # print \"check_ratio is\", check_ratio\n\n print(\"\\t\" + (\"{:^14}\" * 4).format(\n \"check_max_area\", \"check_ave\",\n \"check_stdev\", \"check_ratio\"))\n print(\"\\t\" + (\"{:^14.6g}\" * 4).format(\n check_max_area, check_ave, check_stdev, check_ratio))\n\n # This raises the barrier for odd sideband detection\n if order % 2 == 1:\n check_ratio = check_ratio / 2\n if check_ratio > cutoff:\n found_index = check_max_index + start_index\n self.sb_index.append(found_index)\n last_sb = x_axis[found_index]\n\n# print \"\\tI found\", order, \"at index\", found_index, \"at freq\", last_sb\n if verbose:\n print(\n \"\\tI'm counting this SB at index {} (f={:.4f})\".format(\n found_index, last_sb), end=' ')\n\n sb_freq_guess.append(x_axis[found_index])\n sb_amp_guess.append(\n check_max_area - (2 * octant + 1) * check_ave)\n error_est = (\n np.sqrt(sum([i ** 2 for i in error[\n found_index - octant:found_index + octant]]))\n / (check_max_area - (2 * octant + 1) * check_ave))\n # This error is a relative error.\n if verbose:\n print(\". 
Err = {:.3g}\".format(error_est))\n # print \"\\tMy error estimate is:\", error_est\n # print \"My relative error is:\", error_est / sb_amp_guess\n sb_error_est.append(error_est)\n self.sb_list.append(order)\n consecutive_null_sb = 0\n if order % 2 == 1:\n consecutive_null_odd = 0\n else:\n # print \"I could not find sideband with order\", order\n last_sb = last_sb + thz_freq\n consecutive_null_sb += 1\n if order % 2 == 1:\n consecutive_null_odd += 1\n if verbose:\n print(\"\\t\\tI did not count this sideband\")\n if consecutive_null_odd == 1 and no_more_odds is False:\n # print \"I'm done looking for odd sidebands\"\n no_more_odds = True\n if consecutive_null_sb == 2:\n # print \"I can't find any more sidebands\"\n break\n\n if verbose:\n print(\"I found these sidebands:\", self.sb_list)\n print('-' * 15)\n print()\n print()\n self.sb_guess = np.array(\n [np.asarray(sb_freq_guess), np.asarray(sb_amp_guess),\n np.asarray(sb_error_est)]).T\n # self.sb_guess = [frequency guess, amplitude guess,\n # relative error of amplitude] for each sideband.\n\n def fit_sidebands(self, plot=False, verbose=False):\n \"\"\"\n This takes self.sb_guess and fits to each maxima to get the details of\n each sideband. It's really ugly, but it works. The error of the\n sideband area is approximated from the data, not the curve fit. All\n else is from the curve fit. Which is definitely underestimating the\n error, but we don't care too much about those errors (at this point).\n\n self.sb_guess = [frequency guess, amplitude guess, relative error of\n amplitude] for each sideband.\n\n Temporary stuff:\n sb_fits = holder of the fitting results until all spectra have been fit\n window = an integer that determines the \"radius\" of the fit window,\n proportional to thz_freq.\n\n Attributes created:\n self.sb_results = the money maker. Column order:\n [sb number, Freq (eV), Freq error (eV), Gauss area (arb.),\n Area error, Gauss linewidth (eV), Linewidth error (eV)]\n [ 0 , 1 , 2, , 3 ,\n 4 , 5 , 6 ]\n self.full_dict = a dictionary similar to sb_results, but now the keys\n are the sideband orders. 
Column ordering is otherwise the same.\n :param plot: Do you want to see the fits plotted with the data?\n :type plot: bool\n :param verbose: Do you want to see the details\n AND the initial guess fits?\n :type verbose: bool\n :return: None\n \"\"\"\n # print \"Trying to fit these\"\n sb_fits = []\n\n if verbose:\n print(\"=\" * 15)\n print()\n print(\"Fitting CCD Sidebands\")\n print(os.path.basename(self.fname))\n print()\n print(\"=\" * 15)\n # pretty sure you want this up here so things don't break\n # when no sidebands found\n self.full_dict = {}\n thz_freq = self.parameters[\"thz_freq\"]\n\n# Adjust the fit window based on the sideband spacing The 15's are based on\n# empirical knowledge that for 540 GHz (2.23 meV), the best window size is 30\n# and that it seems like the window size should grow slowly?\n\n window = 15 + int(15 * thz_freq / 0.0022)\n # Have to do this because guess_sidebands doesn't out put data in the\n # most optimized way\n for elem, peakIdx in enumerate(self.sb_index):\n if peakIdx < window:\n data_temp = self.proc_data[:peakIdx + window, :]\n elif (1600 - peakIdx) < window:\n data_temp = self.proc_data[peakIdx - window:, :]\n else:\n data_temp = self.proc_data[\n peakIdx - window:peakIdx + window, :]\n # so the width guess gets wider as order goes up\n width_guess = 0.0001 + 0.000001 * self.sb_list[elem]\n p0 = np.array([self.sb_guess[elem, 0],\n self.sb_guess[elem, 1] * width_guess,\n width_guess,\n 0.1])\n # print \"Let's fit this shit!\"\n if verbose:\n # TODO: check that . operator can carry to next line\n print(\n \"Fitting SB {}. Peak index: {}, {}th peak in spectra\".\n format(self.sb_list[elem], peakIdx, elem))\n # print \"\\nnumber:\", elem, num\n # print \"data_temp:\", data_temp\n # print \"p0:\", p0\n print(' '*20 + \"p0 = \" + np.array_str(p0, precision=4))\n # This is to disable plotting the guess function\n # plot_guess = True\n if verbose and plot:\n plt.figure('CCD data')\n linewidth = 3\n x_vals = np.linspace(\n data_temp[0, 0], data_temp[-1, 0], num=500)\n if elem != 0:\n try:\n plt.plot(x_vals, gauss(x_vals, *p0),\n # I don't really know. 
Mostly\n plt.gca().get_lines()[-1].get_color() + '--',\n # just looked around at what functions\n # matplotlib has...\n linewidth=linewidth)\n # to prevent weird mac issues with the matplotlib things?\n except Exception:\n plt.plot(\n x_vals, gauss(x_vals, *p0), '--',\n linewidth=linewidth)\n\n else:\n plt.plot(\n x_vals, gauss(x_vals, *p0), '--',\n linewidth=linewidth)\n\n try:\n # 11/1/16\n # had to bump maxfev up to 2k since a sideband wasn't being fit\n # Fix for sb 106\n # 05-23 Loren 10nm\\hsg_640_Perp352seq_spectrum.txt\n\n # TODO: find new name for guass parameter and correct code\n coeff, var_list = curve_fit(\n gauss, data_temp[:, 0], data_temp[:, 1],\n p0=p0, maxfev=2000)\n except Exception as e:\n if verbose:\n print(\"\\tThe fit failed:\")\n print(\"\\t\\t\", e)\n print(\"\\tFitting region: {}->{}\".format(\n peakIdx-window, peakIdx+window))\n # print \"I couldn't fit\", elem\n # print \"It's sideband\", num\n # print \"In file\", self.fname\n # print \"because\", e\n # print \"wanted to fit xindx\", peakIdx, \"+-\", window\n self.sb_list[elem] = None\n # This will ensure the rest of the loop is not run without\n # an actual fit.\n continue\n\n # The amplitude could be negative if the linewidth is negative\n coeff[1] = abs(coeff[1])\n # The linewidth shouldn't be negative\n coeff[2] = abs(coeff[2])\n if verbose:\n print(\"\\tFit successful: \", end=' ')\n print(\"p = \" + np.array_str(coeff, precision=4))\n # print \"coeffs:\", coeff\n # print \"sigma for {}: {}\".format(self.sb_list[elem], coeff[2])\n if 10e-4 > coeff[2] > 10e-6:\n try:\n sb_fits.append(np.hstack((\n self.sb_list[elem], coeff,\n np.sqrt(np.diag(var_list)))))\n except RuntimeWarning:\n sb_fits.append(np.hstack((\n self.sb_list[elem], coeff,\n np.sqrt(np.abs(np.diag(var_list))))))\n\n # the var_list wasn't approximating the error well enough, even\n # when using sigma and absoluteSigma self.sb_guess[elem, 2] is\n # the relative error as calculated by the guess_sidebands\n # method coeff[1] is the area from the fit. Therefore, the\n # product should be the absolute error of the integrated area\n # of the sideband. The other errors are still underestimated.\n #\n # 1/12/18 note: So it looks like what hunter did is calculate\n # an error estimate for the strength/area by the quadrature sum\n # of errors of the points in the peak\n # (from like 813 in guess_sidebands:\n # error_est = np.sqrt(sum([i ** 2 for i in error[\n # found_index - 1:found_index + 2]])) / (\n # Where the error is what comes from the CCD by averaging 4\n # spectra. As far as I can tell, it doesn't currently pull in\n # the dark counts or anything like that, except maybe\n # indirectly since it'll cause the variations in the peaks\n sb_fits[-1][6] = self.sb_guess[elem, 2] * coeff[1]\n if verbose:\n print(\n \"\\tRel.Err: {:.4e} | Abs.Err: {:.4e}\".format(\n self.sb_guess[elem, 2],\n coeff[1] * self.sb_guess[elem, 2]))\n print()\n # print \"The rel. error guess is\",\n # self.sb_guess[elem, 2]\n # print \"The abs. error guess is\",\n # coeff[1] * self.sb_guess[elem, 2]\n\n # The error from self.sb_guess[elem, 2] is a relative error\n if plot and verbose:\n plt.figure('CCD data')\n linewidth = 5\n x_vals = np.linspace(\n data_temp[0, 0], data_temp[-1, 0], num=500)\n if elem != 0:\n try:\n plt.plot(x_vals, gauss(x_vals, *coeff),\n plt.gca().get_lines()[-1].get_color() + '--',\n # I don't really know. 
Mostly\n # just looked around at what functions\n # matplotlib has...\n linewidth=linewidth)\n # to prevent weird mac issues with the matplotlib things?\n except Exception:\n plt.plot(\n x_vals, gauss(x_vals, *coeff), '--',\n linewidth=linewidth)\n\n else:\n plt.plot(\n x_vals, gauss(x_vals, *coeff), '--',\n linewidth=linewidth)\n sb_fits_temp = np.asarray(sb_fits)\n reorder = [0, 1, 5, 2, 6, 3, 7, 4, 8]\n # Reorder the list to put the error of the i-th parameter as the i+1th.\n try:\n sb_fits = sb_fits_temp[:, reorder]\n # if verbose: print \"The abs. error guess is\", sb_fits[:, 0:5]\n except Exception:\n raise RuntimeError(\"No sidebands to fit?\")\n\n # Going to label the appropriate row with the sideband\n self.sb_list = sorted(list([x for x in self.sb_list if x is not None]))\n sb_names = np.vstack(self.sb_list)\n\n # Sort by SB order\n sorter = np.argsort(sb_fits[:, 0])\n self.sb_results = np.array(sb_fits[sorter, :7])\n\n if verbose:\n print(\"\\tsb_results:\")\n print(\n \"\\t\\t\" + (\"{:^5s}\" + (\"{:^12s}\")*(self.sb_results.shape[1]-1)).\n format(\"SB\", \"Cen.En.\", \"\", \"Area\", \"\", \"Width\", \"\"))\n for line in self.sb_results:\n print(\n '\\t\\t[' + (\"{:^5.0f}\" + \"{:<12.4g}\"*(line.size-1)).format(\n *line) + ']')\n print('-'*19)\n self.full_dict = {}\n for sb in self.sb_results:\n self.full_dict[sb[0]] = np.asarray(sb[1:])\n\n def infer_frequencies(\n self, nir_units=\"wavenumber\", thz_units=\"GHz\", bad_points=-2):\n \"\"\"\n This guy tries to fit the results from fit_sidebands to a line to get\n the relevant frequencies\n :param nir_units: What units do you want this to output?\n :type nir_units: 'nm', 'wavenumber', 'eV', 'THz'\n :param thz_units: What units do you want this to output for the THz?\n :type thz_units: 'GHz', 'wavenumber', 'meV'\n :param bad_points: How many more-positive order sidebands shall this\n ignore?\n :type bad_points: int\n :return: freqNIR, freqTHz, the frequencies in the appropriate units\n \"\"\"\n # force same units for in dict\n freqNIR, freqTHz = calc_laser_frequencies(\n self, \"wavenumber\", \"wavenumber\", bad_points)\n\n self.parameters[\"calculated NIR freq (cm-1)\"] = \"{}\".format(\n freqNIR, nir_units)\n self.parameters[\"calculated THz freq (cm-1)\"] = \"{}\".format(\n freqTHz, freqTHz)\n freqNIR, freqTHz = calc_laser_frequencies(\n self, nir_units, thz_units, bad_points)\n return freqNIR, freqTHz\n\n def save_processing(\n self, file_name, folder_str, marker='', index='', verbose=''):\n \"\"\"\n This will save all of the self.proc_data and the results from the\n fitting of this individual file.\n\n Format:\n spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'\n fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'\n\n Inputs:\n file_name = the beginning of the file name to be saved\n folder_str = the location of the folder where the file will be saved,\n will create the folder, if necessary.\n marker = I...I don't know what this was originally for\n index = used to keep these files from overwriting themselves when in a\n list\n\n Outputs:\n Two files:\n self.proc_data = the continuous spectrum\n self.sb_results = the individual sideband details\n\n :param file_name: The base name for the saved file\n :type file_name: str\n :param folder_str: The full name for the folder hte file is saved it.\n Folder can be created\n :type folder_str: str\n :param marker: Marker for the file, appended to file_name, often the\n self.parameters['series']\n :type marker: str\n :param index: used to keep 
these files from overwriting themselves when\n marker is the same\n :type index: str or int\n :return: None\n \"\"\"\n try:\n os.mkdir(folder_str)\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n temp = np.array(self.sb_results)\n\n # But [:, 3] is already area?\n ampli = np.array([temp[:, 3] / temp[:, 5]])\n # (The old name was area)\n # I think it must be amplitude\n temp[:, 5:7] = temp[:, 5:7] * 1000 # For meV linewidths\n if verbose:\n print(\"sb_results\", self.sb_results.shape)\n print(\"ampli\", ampli.shape)\n save_results = np.hstack((temp, ampli.T))\n\n spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'\n fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'\n self.save_name = spectra_fname\n\n self.parameters['addenda'] = self.addenda\n self.parameters['subtrahenda'] = self.subtrahenda\n try:\n parameter_str = json.dumps(\n self.parameters, sort_keys=True, indent=4,\n separators=(',', ': '))\n except Exception:\n print(\"Source: EMCCD_image.save_images\\nJSON FAILED\")\n print(\"Here is the dictionary that broke JSON:\\n\", self.parameters)\n return\n parameter_str = parameter_str.replace('\\n', '\\n#')\n\n # Make the number of lines constant so importing is easier\n num_lines = parameter_str.count('#')\n # for num in range(99 - num_lines): parameter_str += '\\n#'\n parameter_str += '\\n#' * (99 - num_lines)\n origin_import_spec = (\n '\\nNIR frequency,Signal,Standard error\\neV,arb. u.,arb. u.')\n spec_header = '#' + parameter_str + origin_import_spec\n\n origin_import_fits = (\n # TODO: ensure splitting lines with a + for concatenation works\n '\\nSideband,Center energy,error,Sideband strength,error,'\n + 'Linewidth,error,Amplitude')\n origin_import_fits += '\\norder,eV,,arb. u.,,meV,,arb. u.'\n origin_import_fits += \"\\n{},,,{},,,\".format(marker, marker)\n fits_header = '#' + parameter_str + origin_import_fits\n\n # print \"DEBUG: in saving\", folder_str, \",\", spectra_fname\n\n np.savetxt(\n os.path.join(folder_str, spectra_fname), self.proc_data,\n delimiter=',', header=spec_header, comments='', fmt='%0.6e')\n np.savetxt(\n os.path.join(folder_str, fit_fname), save_results,\n delimiter=',', header=fits_header, comments='', fmt='%0.6e')\n if verbose:\n print(\"Save image.\\nDirectory: {}\".format(os.path.join(\n folder_str, spectra_fname)))\n"
] |
[
[
"numpy.diag",
"numpy.sqrt",
"numpy.linspace",
"numpy.asarray",
"numpy.flipud",
"matplotlib.pyplot.plot",
"numpy.mean",
"numpy.where",
"scipy.optimize.curve_fit",
"numpy.hstack",
"matplotlib.pyplot.gca",
"numpy.ones_like",
"numpy.arange",
"numpy.std",
"numpy.argmax",
"numpy.array_str",
"matplotlib.pyplot.text",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.isclose",
"numpy.append",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"numpy.set_printoptions",
"numpy.vstack"
]
] |
urlocal12/tensorflow
|
[
"3f70de82668c0e61ecedf1bc458ab8b6d5f9b1f0"
] |
[
"tensorflow/python/keras/mixed_precision/experimental/loss_scale_optimizer.py"
] |
[
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains the loss scaling optimizer class.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.distribute import collective_all_reduce_strategy\nfrom tensorflow.python.distribute import distribution_strategy_context\nfrom tensorflow.python.distribute import mirrored_strategy\nfrom tensorflow.python.distribute import one_device_strategy\nfrom tensorflow.python.distribute import tpu_strategy\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import smart_cond\nfrom tensorflow.python.keras import backend\nfrom tensorflow.python.keras import optimizers\nfrom tensorflow.python.keras.mixed_precision.experimental import loss_scale as keras_loss_scale_module\nfrom tensorflow.python.keras.optimizer_v2 import optimizer_v2\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.training.experimental import loss_scale as loss_scale_module\nfrom tensorflow.python.training.experimental import mixed_precision\nfrom tensorflow.python.util.tf_export import keras_export\n\n\nclass _UnwrapPreventer(object):\n \"\"\"Wrapper that DistributionStrategy will not unwrap.\n\n Typically, DistributionStrategy will unwrap values when going from a cross-\n replica context to a replica context via `call_for_each_replica`. This class\n is a wrapper that DistributionStrategy will not unwrap, so it can be used to\n prevent it from unwrapping a value.\n\n TODO(reedwm): Find/implement a better way of preventing values from being\n unwrapped by DistributionStrategy\n \"\"\"\n\n def __init__(self, value):\n self.value = value\n\n\n@keras_export('keras.mixed_precision.experimental.LossScaleOptimizer')\nclass LossScaleOptimizer(optimizer_v2.OptimizerV2):\n \"\"\"An optimizer that applies loss scaling.\n\n Loss scaling is a process that multiplies the loss by a multiplier called the\n loss scale, and divides each gradient by the same multiplier. The pseudocode\n for this process is:\n\n ```\n loss = ...\n loss *= loss_scale\n grads = gradients(loss, vars)\n grads /= loss_scale\n ```\n\n Mathematically, loss scaling has no effect, but can help avoid numerical\n underflow in intermediate gradients when float16 tensors are used. By\n multiplying the loss, each intermediate gradient will have the same multiplier\n applied.\n\n The loss scale can either be a fixed constant, chosen by the user, or be\n dynamically determined. Dynamically determining the loss scale is convenient\n as a loss scale does not have to be explicitly chosen. However it reduces\n performance.\n\n This optimizer wraps another optimizer and applies loss scaling to it via a\n `LossScale`. 
Loss scaling is applied whenever gradients are\n computed, either through `minimize()` or `get_gradients()`. The loss scale is\n updated via `LossScale.update()` whenever gradients are applied, either\n through `minimize()` or `apply_gradients()`. For example:\n\n >>> opt = tf.keras.optimizers.SGD(0.25)\n >>> opt = tf.keras.mixed_precision.experimental.LossScaleOptimizer(opt,\n ... \"dynamic\")\n >>> var = tf.Variable(1.)\n >>> loss_fn = lambda: var ** 2\n >>> # 'minimize' applies loss scaling to the loss and updates the loss sale.\n >>> opt.minimize(loss_fn, var_list=var)\n >>> var.numpy()\n 0.5\n\n If a `tf.GradientTape` is used to compute gradients instead of\n `LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, the loss\n and gradients must be scaled manually. This can be done by calling\n `LossScaleOptimizer.get_scaled_loss` before passing the loss to\n `tf.GradientTape`, and `LossScaleOptimizer.get_unscaled_gradients` after\n computing the gradients with `tf.GradientTape`. For example:\n\n >>> with tf.GradientTape() as tape:\n ... loss = loss_fn()\n ... scaled_loss = opt.get_scaled_loss(loss)\n >>> scaled_grad = tape.gradient(scaled_loss, var)\n >>> (grad,) = opt.get_unscaled_gradients([scaled_grad])\n >>> opt.apply_gradients([(grad, var)]) # Loss scale is updated here\n >>> var.numpy()\n 0.25\n \"\"\"\n\n _HAS_AGGREGATE_GRAD = True\n\n def __init__(self, optimizer, loss_scale):\n \"\"\"Initializes this loss scale optimizer.\n\n Args:\n optimizer: The Optimizer instance to wrap.\n loss_scale: The loss scale to scale the loss and gradients. This can\n either be an int/float to use a fixed loss scale, the string \"dynamic\"\n to use dynamic loss scaling, or an instance of a LossScale. The string\n \"dynamic\" equivalent to passing `DynamicLossScale()`, and passing an\n int/float is equivalent to passing a FixedLossScale with the given loss\n scale.\n \"\"\"\n if not isinstance(optimizer, optimizer_v2.OptimizerV2):\n raise ValueError('\"optimizer\" must be an instance of OptimizerV2, but '\n 'got: %s' % optimizer)\n if optimizer.clipnorm is not None:\n raise ValueError('LossScaleOptimizer does not support wrapping '\n 'optimizers with a clipnorm. Optimizer %s has clipnorm '\n '%s' % (optimizer, optimizer.clipnorm))\n\n if optimizer.clipvalue is not None:\n raise ValueError('LossScaleOptimizer does not support wrapping '\n 'optimizers with a clipvalue. Optimizer %s has '\n 'clipvalue %s' % (optimizer, optimizer.clipvalue))\n self._raise_if_strategy_unsupported()\n\n self.clipnorm = None\n self.clipvalue = None\n\n self._optimizer = optimizer\n self._loss_scale = keras_loss_scale_module.get(loss_scale)\n if self._loss_scale is None:\n raise ValueError('loss_scale cannot be None.')\n for weight in loss_scale_module.get_loss_scale_weights(self._loss_scale):\n # We cannot call `track_variable` in the LossScale class itself, because a\n # file outside of Keras cannot depend on a Keras file. 
Calling it here\n # instead is OK, because a variable only needs to be tracked if used with\n # a Keras class, and the only way to use LossScale with a Keras class is\n # through the LossScaleOptimizer.\n backend.track_variable(weight)\n self._track_trackable(self._optimizer, 'base_optimizer')\n self._track_trackable(self._loss_scale, 'loss_scale')\n\n # Needed because the superclass's __getattribute__ checks this.\n self._hyper = {}\n\n @property\n def loss_scale(self):\n \"\"\"The `LossScale` instance associated with this optimizer.\"\"\"\n return self._loss_scale\n\n def get_scaled_loss(self, loss):\n \"\"\"Scales the loss by the loss scale.\n\n This method is only needed if you compute gradients manually, e.g. with\n `tf.GradientTape`. In that case, call this method to scale the loss before\n passing the loss to `tf.GradientTape`. If you use\n `LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, loss\n scaling is automatically applied and this method is unneeded.\n\n If this method is called, `get_unscaled_gradients` should also be called.\n See the `tf.keras.mixed_precision.experimental.LossScaleOptimizer` doc for\n an example.\n\n Args:\n loss: The loss, which will be multiplied by the loss scale. Can either be\n a tensor or a callable returning a tensor.\n\n Returns:\n `loss` multiplied by `LossScaleOptimizer.loss_scale()`.\n \"\"\"\n loss_scale = self._loss_scale()\n if callable(loss):\n def new_loss():\n loss_val = loss()\n return loss_val * math_ops.cast(loss_scale, loss_val.dtype)\n return new_loss\n else:\n return loss * math_ops.cast(loss_scale, loss.dtype)\n\n def get_unscaled_gradients(self, grads):\n \"\"\"Unscales the gradients by the loss scale.\n\n This method is only needed if you compute gradients manually, e.g. with\n `tf.GradientTape`. In that case, call this method to unscale the gradients\n after computing them with `tf.GradientTape`. If you use\n `LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, loss\n scaling is automatically applied and this method is unneeded.\n\n If this method is called, `get_scaled_loss` should also be called. See\n the `tf.keras.mixed_precision.experimental.LossScaleOptimizer` doc for an\n example.\n\n Args:\n grads: A list of tensors, each which will be divided by the loss scale.\n Can have None values, which are ignored.\n\n Returns:\n A new list the same size as `grads`, where every non-None value in `grads`\n is divided by `LossScaleOptimizer.loss_scale()`.\n \"\"\"\n loss_scale = self._loss_scale()\n loss_scale_reciprocal = 1. 
/ loss_scale\n return [\n _multiply_gradient(g, loss_scale_reciprocal) if g is not None else None\n for g in grads\n ]\n\n def _compute_gradients(self, loss, var_list, grad_loss=None):\n loss = self.get_scaled_loss(loss)\n grads_and_vars = self._optimizer._compute_gradients(loss, var_list, # pylint: disable=protected-access\n grad_loss)\n grads = [g for g, _ in grads_and_vars]\n variables = [v for _, v in grads_and_vars]\n unscaled_grads = self.get_unscaled_gradients(grads)\n return list(zip(unscaled_grads, variables))\n\n def get_gradients(self, loss, params):\n loss = self.get_scaled_loss(loss)\n grads = self._optimizer.get_gradients(loss, params)\n return self.get_unscaled_gradients(grads)\n\n def _create_all_weights(self, var_list):\n self._optimizer._create_all_weights(var_list) # pylint: disable=protected-access\n\n def apply_gradients(self,\n grads_and_vars,\n name=None,\n experimental_aggregate_gradients=True):\n if distribution_strategy_context.in_cross_replica_context():\n raise ValueError('apply_gradients() must be called in a replica context.')\n # We check for the strategy here despite already checking in the constructor\n # as frequently the optimizer is created outside the strategy's scope.\n self._raise_if_strategy_unsupported()\n\n grads_and_vars = tuple(grads_and_vars)\n return distribution_strategy_context.get_replica_context().merge_call(\n self._apply_gradients_cross_replica,\n args=(grads_and_vars, name, experimental_aggregate_gradients))\n\n def _apply_gradients_cross_replica(self, distribution, grads_and_vars, name,\n experimental_aggregate_gradients):\n grads = [g for g, _ in grads_and_vars]\n loss_scale_update_op, should_apply_grads = self._loss_scale.update(grads)\n\n def apply_fn():\n # We do not want DistributionStrategy to unwrap any MirroredVariables in\n # grads_and_vars, because even in a replica context, the wrapped optimizer\n # expects mirrored variables. 
So we wrap the variables with an\n # _UnwrapPreventer, preventing DistributionStrategy from unwrapping the\n # MirroredVariables.\n wrapped_vars = _UnwrapPreventer([v for _, v in grads_and_vars])\n return distribution.extended.call_for_each_replica(\n self._apply_gradients,\n args=(grads, wrapped_vars, name, experimental_aggregate_gradients))\n\n # Note: We must call this cond() in a cross-replica context.\n # DistributionStrategy does not support having a cond in a replica context\n # with a branch that calls `merge_call`, and self._optimizer.apply_gradients\n # calls `merge_call`.\n maybe_apply_op = smart_cond.smart_cond(should_apply_grads,\n apply_fn,\n control_flow_ops.no_op)\n return control_flow_ops.group(maybe_apply_op, loss_scale_update_op)\n\n def _apply_gradients(self, grads, wrapped_vars, name,\n experimental_aggregate_gradients):\n # TODO(reedwm): This will raise a fairly cryptic error message if\n # self._optimizer.apply_gradients does not take\n # experimental_aggregate_gradients.\n return self._optimizer.apply_gradients(\n list(zip(grads, wrapped_vars.value)), name,\n experimental_aggregate_gradients=experimental_aggregate_gradients)\n\n def get_config(self):\n serialized_optimizer = optimizers.serialize(self._optimizer)\n serialized_loss_scale = keras_loss_scale_module.serialize(self._loss_scale)\n return {\n 'optimizer': serialized_optimizer,\n 'loss_scale': serialized_loss_scale,\n }\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n config = config.copy() # Make a copy, since we mutate config\n config['optimizer'] = optimizers.deserialize(\n config['optimizer'], custom_objects=custom_objects)\n config['loss_scale'] = keras_loss_scale_module.deserialize(\n config['loss_scale'], custom_objects=custom_objects)\n return cls(**config)\n\n def _raise_if_strategy_unsupported(self):\n if not strategy_supports_loss_scaling():\n strategy = distribution_strategy_context.get_strategy()\n if isinstance(strategy,\n (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)):\n raise ValueError(\n 'Loss scaling is not supported with TPUStrategy. Loss scaling is '\n 'unnecessary with TPUs, since they support bfloat16 instead of '\n 'float16 and bfloat16 does not require loss scaling. You should '\n 'remove the use of the LossScaleOptimizer when TPUs are used.')\n else:\n raise ValueError('Loss scaling is not supported with the '\n 'tf.distribute.Strategy: %s. Try using a different '\n 'Strategy, e.g. a MirroredStrategy' %\n strategy.__class__.__name__)\n\n # Delegations: We delegate most OptimizerV2 methods to the wrapped optimizer\n # below.\n\n @property\n def iterations(self):\n return self._optimizer.iterations\n\n @iterations.setter\n def iterations(self, variable):\n self._optimizer.iterations = variable\n\n def get_slot_names(self):\n return self._optimizer.get_slot_names()\n\n def variables(self):\n return self._optimizer.variables()\n\n @property\n def weights(self):\n return self._optimizer.weights\n\n def get_weights(self):\n return self._optimizer.get_weights()\n\n def set_weights(self, weights):\n return self._optimizer.set_weights(weights)\n\n def _aggregate_gradients(self, grads_and_vars):\n return self._optimizer._aggregate_gradients(grads_and_vars) # pylint: disable=protected-access\n\n # For the most part, we only expose methods in the base OptimizerV2, not\n # individual subclasses like Adam. 
However, although \"learning_rate\" and \"lr\"\n # properties are not part of the base OptimizerV2 class, they are part of most\n # subclasses, so we expose them here for convenience.\n\n @property\n def learning_rate(self):\n return self._optimizer.learning_rate\n\n @learning_rate.setter\n def learning_rate(self, lr):\n self._optimizer.learning_rate = lr\n\n @property\n def lr(self):\n return self._optimizer.lr\n\n @lr.setter\n def lr(self, lr):\n self._optimizer.lr = lr\n\n def get_slot(self, var, slot_name):\n # We cannot implement get_slot for the following reason: When saving a\n # checkpoint, two optimizers cannot share slot variables. Since both the\n # LossScaleOptimizer and the wrapped optimizer (self and self._optimizer\n # respectively) are checkpointed, we cannot expose the wrapped optimizer's\n # slots in the LossScaleOptimizer. Otherwise, a checkpoint would believe\n # both optimizers share slot variables.\n raise AttributeError(\n 'You cannot call get_slot on a LossScaleOptimizer. This limitation '\n 'will be removed in the future.')\n\n def add_slot(self, var, slot_name, initializer='zeros'):\n # We disallow adding a slot for consistency with `get_slot`.\n raise AttributeError(\n 'You cannot call add_slot on a LossScaleOptimizer. This limitation '\n 'will be removed in the future.')\n\n # We do not override some OptimizerV2 methods. For each, we describe why we do\n # not delegate them to self._optimizer:\n # * get_updates: get_updates() calls get_gradients(). Since we override\n # get_gradients(), we cannot delegate get_updates() to self._optimizer,\n # otherwise the overridden get_gradients() method would not be called.\n # Luckily, get_updates() does not access any OptimizerV2 fields, so\n # inheriting the OptimizerV2 version works fine.\n # * minimize: We don't delegate for a similar as get_updates(): it calls\n # both self._compute_gradients() and self.apply_gradients(), and both need\n # to have the LossScaleOptimizer version called.\n\n # TODO(reedwm): Maybe merge this class's functionality into OptimizerV2.\n\n # TODO(reedwm): Maybe throw an error if mixed precision is used without this\n # optimizer being used.\n\n\n# pylint: disable=protected-access\nmixed_precision._register_wrapper_optimizer_cls(optimizer_v2.OptimizerV2,\n LossScaleOptimizer)\n\n\ndef _multiply_gradient(gradient, scale):\n \"\"\"Multiply a (possibly sparse) gradient by the given scale factor.\"\"\"\n scale = math_ops.cast(scale, gradient.dtype)\n if isinstance(gradient, ops.IndexedSlices):\n return ops.IndexedSlices(\n gradient.values * scale,\n gradient.indices,\n dense_shape=gradient.dense_shape)\n else:\n return gradient * scale\n\n\ndef strategy_supports_loss_scaling():\n \"\"\"Returns True if the current Strategy supports loss scaling.\"\"\"\n if not distribution_strategy_context.has_strategy():\n return True\n strategy = distribution_strategy_context.get_strategy()\n # Strategies are supported if either there is only one replica or if variables\n # are replicated per device. Otherwise, the current model.fit() implementation\n # and most custom training loops incorrectly unscale the gradients. Currently,\n # gradients are unscaled once per compute replica, but they should be unscaled\n # once per variable replica. 
When there is one variable replica for each\n # compute replica, this works fine, but otherwise issues will occur.\n # TODO(reedwm): Support all strategies.\n return isinstance(strategy, (\n collective_all_reduce_strategy.CollectiveAllReduceStrategy,\n collective_all_reduce_strategy.CollectiveAllReduceStrategyV1,\n one_device_strategy.OneDeviceStrategy,\n one_device_strategy.OneDeviceStrategyV1,\n mirrored_strategy.MirroredStrategy,\n mirrored_strategy.MirroredStrategyV1,\n ))\n"
] |
[
[
"tensorflow.python.keras.mixed_precision.experimental.loss_scale.serialize",
"tensorflow.python.distribute.distribution_strategy_context.in_cross_replica_context",
"tensorflow.python.keras.backend.track_variable",
"tensorflow.python.keras.optimizers.deserialize",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.keras.optimizers.serialize",
"tensorflow.python.util.tf_export.keras_export",
"tensorflow.python.framework.ops.IndexedSlices",
"tensorflow.python.keras.mixed_precision.experimental.loss_scale.deserialize",
"tensorflow.python.distribute.distribution_strategy_context.get_strategy",
"tensorflow.python.distribute.distribution_strategy_context.has_strategy",
"tensorflow.python.keras.mixed_precision.experimental.loss_scale.get",
"tensorflow.python.training.experimental.mixed_precision._register_wrapper_optimizer_cls",
"tensorflow.python.framework.smart_cond.smart_cond",
"tensorflow.python.training.experimental.loss_scale.get_loss_scale_weights",
"tensorflow.python.distribute.distribution_strategy_context.get_replica_context",
"tensorflow.python.ops.math_ops.cast"
]
] |
ThomasMullen/NPP
|
[
"b3ef0bac451841a60edcb60340d2af36ec395f90"
] |
[
"utils/hdf5_tools.py"
] |
[
"import os\nimport h5py\nimport mat73\nimport numpy as np\n\n\"\"\"\nView HDF5 data structure\n------------------------\n\"\"\"\n\n\ndef traverse_datasets(hdf_file):\n \"\"\"\n Peak into matlab file and print the Key, Shape, Data type.\n :param hdf_file:\n :return:\n \"\"\"\n\n def h5py_dataset_iterator(g, prefix=''):\n \"\"\"\n iterate through the HDF5 file and search through the nested datasets\n :param g: .mat filepath\n :param prefix:\n :return: prints out the directory/subdirectory, shape, and dtype in the HDF5 file\n \"\"\"\n for key in g.keys():\n item = g[key]\n path = f'{prefix}/{key}'\n if isinstance(item, h5py.Dataset): # test for dataset\n yield path, item\n elif isinstance(item, h5py.Group): # test for group (go down)\n yield from h5py_dataset_iterator(item, path)\n\n for path, _ in h5py_dataset_iterator(hdf_file):\n yield path\n\n\ndef view_hdf_structure(filepath, print_labels=False):\n \"\"\"\n Looks through the structure and prints information about the structure.\n :param filepath: filepath of .mat\n :return:\n \"\"\"\n vol_labels = []\n with h5py.File(filepath, 'r') as f:\n for dataset in traverse_datasets(f):\n if print_labels:\n print(f'Path: {dataset}\\tShape: {f[dataset].shape}\\tData type: {f[dataset].dtype}')\n vol_labels.append(dataset)\n return vol_labels[:-1]\n\n\ndef list_experiment_directories(experiment_parent_directory):\n \"\"\"\n :param experiment_parent_directory: the directory which contains the folders of fish experiments.\n :return: a list of directories, each directory containing 3 .mat file i.e. log files that need preprocessing.\n \"\"\"\n list_experiment_directories = next(os.walk(experiment_parent_directory))[1]\n print('Experiments:')\n [print(i) for i in list_experiment_directories]\n return list_experiment_directories\n\n\ndef extract_dataset(filepath, dataset_name=''):\n \"\"\"\n extracts the dataset of the dataset you are interested in\n :param filepath: the .mat filepath\n :param dataset_name: the name of the dataset you are interested in\n :return: a n-dimensional array for the dataset.\n \"\"\"\n # print(dataset_name)\n with h5py.File(filepath, 'r') as f:\n data = np.array(f[dataset_name][:])\n return data\n\n\ndef pull_frames(mat_filepath, plane_number, frame_range, start_frame=0, sample_rate=10):\n labels = view_hdf_structure(mat_filepath)\n # extract shape of plane\n plane_shape = extract_dataset(mat_filepath, labels[0]).shape[:2]\n frames_shape = (int(frame_range / sample_rate),) + plane_shape\n frames = np.zeros(frames_shape)\n\n for i, frame in enumerate(range(start_frame, start_frame + frame_range, sample_rate)):\n frames[i, :, :] = extract_dataset(mat_filepath, labels[frame])[:, :, plane_number, 0, 0]\n return frames\n\n\ndef compute_total_frames(volumes_dir, number_chars=3, is_mat_file=False):\n vol_list = os.listdir(volumes_dir)\n\n try:\n last_frame = max([int(name[:-number_chars]) for name in vol_list])\n except:\n raise print(\"Ensure only integer named .h5 file volumes are in directory:\", volumes_dir)\n if is_mat_file:\n return last_frame\n return last_frame\n\n\ndef list_frames_numbers(volumes_dir, number_chars=3):\n vol_list = os.listdir(volumes_dir)\n\n try:\n frames = np.array([int(name[:-number_chars]) for name in vol_list])\n except:\n raise print(\"Ensure only integer named .h5 file volumes are in directory:\", volumes_dir)\n return np.sort(frames)\n\n\ndef export_numpy_2_h5(array, filepath, to_compress=True):\n # store original volume shape\n vol_shape = array.shape\n\n # reshape volume into 2D array\n array = 
array.reshape(vol_shape[0], -1)\n\n # export as hdf5 file\n file = h5py.File(filepath, 'w')\n if to_compress:\n file.create_dataset(\"vol\", shape=vol_shape, data=array, compression=\"gzip\", compression_opts=9)\n else:\n file.create_dataset(\"vol\", shape=vol_shape, data=array)\n file.close()\n return\n\n\nif __name__ == '__main__':\n path_dir = \"/Volumes/LSM4/tomData/01062021/Fish4/tiff_stacks/20210601_7dpf_HUC_H2B_fish4_run1/\"\n file_name = \"dataSkewCorrected.mat\"\n\n labels = view_hdf_structure(path_dir + file_name)\n\n # # extract plane\n # plane_160 = np.zeros((100, 262, 710))\n # for i, frame in enumerate(range(0, 1000, 10)):\n # plane_160[i, :, :] = extract_dataset(path_dir + file_name, labels[frame])[:, :, 160, 0, 0]\n\n # extract volume\n full_vol = extract_dataset(path_dir + file_name, labels[100])[:, :, :, 0, 0]\n"
] |
[
[
"numpy.array",
"numpy.zeros",
"numpy.sort"
]
] |
Frenzoid/labs
|
[
"3552c604445d1a9c79ec9f53b274b3890c23991e"
] |
[
"MACHINE_LEARNING/1_stadistics_review.py"
] |
[
"\nimport numpy as np\n\ndata = [15, 16, 18, 19, 22, 24, 29, 30, 34]\n\nprint(\"mean:\", np.mean(data))\nprint(\"median:\", np.median(data))\nprint(\"50th percentile (median):\", np.percentile(data, 50))\nprint(\"25th percentile:\", np.percentile(data, 25))\nprint(\"75th percentile:\", np.percentile(data, 75))\nprint(\"standard deviation:\", np.std(data))\nprint(\"variance:\", np.var(data))\n"
] |
[
[
"numpy.median",
"numpy.percentile",
"numpy.std",
"numpy.mean",
"numpy.var"
]
] |
GauravSahani1417/Unsupervised-Learning
|
[
"5fb4188061b950f62c1cfd8c0fc0a24bf2d4f913"
] |
[
"Density-Based Clustering.py"
] |
[
"import numpy as np\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.preprocessing import StandardScaler\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets.samples_generator import make_blobs\n\n# Create random data and store in feature matrix X and response vector y\nX, y = make_blobs(n_samples=1500, centers=[[2, 1], [-4, -2], [1, -4]], cluster_std=0.7)\n\n# Standardize features by removing the mean and scaling to unit variance\nX = StandardScaler().fit_transform(X)\n\n\n\"\"\" \nDefine function to change parameters and make it simple- \n - epsilon is a float that describes the maximum distance between two samples for them to be considered as in same \n neighbourhood.\n - minimum_samples is number of samples in a neighbourhood for a point to be considered as a core point.\n - data is our dataset\n\"\"\"\n\n\ndef display(epsilon, minimum_samples, data):\n\n # Initialize DBSCAN with specified epsilon and min. samples. Fit the model with feature matrix X\n db = DBSCAN(eps=epsilon, min_samples=minimum_samples).fit(data)\n\n # Create an array of booleans using the labels from db\n core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n\n # Replace all elements with 'True' in core_samples_mask that are in cluster, 'False' if points are outliers\n core_samples_mask[db.core_sample_indices_] = True\n labels = db.labels_\n\n # Number of clusters in labels, ignoring noise if present\n n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n\n # Black color is removed and used for noise instead.\n # Remove repetition in labels by turning it into a set.\n unique_labels = set(labels)\n\n # Create colors for the clusters.\n colors = plt.get_cmap('Spectral')(np.linspace(0, 1, len(unique_labels)))\n\n # Plot the points with colors\n for k, col in zip(unique_labels, colors):\n if k == -1:\n # Black used for noise.\n col = 'k'\n\n class_member_mask = (labels == k)\n\n # Plot the data points that are clustered\n xy = data[class_member_mask & core_samples_mask]\n plt.plot(xy[:, 0],\n xy[:, 1],\n 'o',\n markerfacecolor=col,\n markeredgecolor='k',\n markersize=14)\n\n # Plot the outliers\n xy = data[class_member_mask & ~core_samples_mask]\n plt.plot(xy[:, 0],\n xy[:, 1],\n 'o',\n markerfacecolor=col,\n markeredgecolor='k',\n markersize=6)\n\n plt.title('Estimated number of clusters: %d' % n_clusters_)\n plt.show()\n\n\n# Function object\ndisplay(0.25, 6, X)\n\n# Acknowledgement\nprint('-----------------------------Program Complete---------------------------------')\n"
] |
[
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.get_cmap",
"sklearn.cluster.DBSCAN",
"matplotlib.pyplot.plot",
"numpy.zeros_like",
"sklearn.datasets.samples_generator.make_blobs",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.show"
]
] |
sanja7s/MedRed
|
[
"0d9bc5be603dbbab7807b01b00f15822e0a944c6"
] |
[
"code/validation/multiclass_predict_diseases.py"
] |
[
"# import spacy\nfrom collections import defaultdict\n# nlp = spacy.load('en_core_web_lg')\n\nimport pandas as pd\nimport seaborn as sns\nimport random\nimport pickle\nimport numpy as np\n\nfrom xgboost import XGBClassifier\n\nimport matplotlib.pyplot as plt\nfrom collections import Counter\n\nimport sklearn\n#from sklearn.pipeline import Pipeline\nfrom sklearn import linear_model\n#from sklearn import svm\n#from sklearn.ensemble import GradientBoostingClassifier, AdaBoostClassifier\n\nfrom sklearn.model_selection import KFold #cross_validate, cross_val_score\nfrom sklearn.metrics import classification_report, accuracy_score, precision_recall_fscore_support\nfrom sklearn.metrics import precision_score, f1_score, recall_score\nfrom sklearn import metrics\nfrom sklearn.model_selection import StratifiedKFold\n\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=sklearn.exceptions.UndefinedMetricWarning)\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n\n\nall_sr = ['bpd', 'cfs','crohnsdisease', 'dementia', 'depression',\\\n 'diabetes', 'dysautonomia', 'gastroparesis','hypothyroidism', 'ibs', \\\n 'interstitialcystitis', 'kidneystones', 'menieres', 'multiplesclerosis',\\\n 'parkinsons', 'psoriasis', 'rheumatoid', 'sleepapnea']\n\n\nall_dis = {el:i for i, el in enumerate(all_sr)}\ndisease_values_dict = all_dis\n# these will be used to take disease names for each prediction task\ndisease_names = list(disease_values_dict.keys())\ndisease_labels = list(disease_values_dict.values())\n\n\netype=\"DL\"\n\nplt.rcParams[\"font.weight\"] = \"bold\"\nplt.rcParams[\"axes.labelweight\"] = \"bold\"\nplt.rcParams.update({'font.size': 16})\n\n\nfeatures_file = \"data/features/{}_embdedded_features.pckl\".format(etype)\nresults_file = \"results/{}_multiclasscm.csv\".format(etype)\n\nword_emb_len = 300\n\n\ndef sample_all_diseases(df, n=1):\n\n if etype == \"DL\":\n smallest_disease=all_dis['parkinsons']\n else:\n smallest_disease=all_dis['gastroparesis']\n \n def merge_rows(row):\n if n == 1:\n return row\n res_row = np.zeros(len(row[0]))\n for i in range(n):\n res_row = res_row+row[i]\n return res_row / n\n \n df = df.sample(frac=1).reset_index(drop=True)\n \n dis_size = len(df[df['disease']==smallest_disease])\n sample_size = int(dis_size/n)*n\n \n print(dis_size, sample_size)\n \n df_sample= pd.DataFrame()\n for disease in all_dis:\n df_dis = df[df['disease'] == all_dis[disease]]\n df_dis = df_dis.sample(n=sample_size, random_state=11).reset_index()\n if n > 1:\n df_dis = df_dis.groupby(df_dis.index // n).agg(lambda x: list(x))\n df_dis['disease'] = all_dis[disease]\n df_sample = pd.concat([df_dis, df_sample])\n \n \n if n > 1:\n df_sample['features'] = df_sample['features'].apply(lambda row: merge_rows(row))\n df_sample = df_sample.drop(columns=['index'])\n \n return df_sample\n\n\n\ndef prepare_training_data_for_multi_disease(features, n=1):\n \n dis_sample = sample_all_diseases(features, n)\n print(\"Subsampled all diseases for \", len(dis_sample), \" posts\")\n \n training = dis_sample.copy()\n training = training.reset_index(drop=True)\n\n return training\n\n\ndef XGBoost_cross_validate():\n\n features = pd.read_pickle(features_file)\n\n features.rename(columns={'vec':'features'}, inplace=True)\n features = features.drop(columns=['subreddit', 'entities'])\n\n disease = features['disease']\n print (\"Post per subreddit \")\n print (features.groupby('disease').size())\n\n # print('Distribution before imbalancing: {}'.format(Counter(disease)))\n\n training = 
prepare_training_data_for_multi_disease(features)\n print(training.tail())\n \n training_labels = training[\"disease\"].astype(int)\n training_labels.head()\n\n training_features = pd.DataFrame(training[\"features\"].tolist())\n training_features.head()\n \n # XGBoost\n AUC_results = []\n f1_results = []\n results = []\n\n cm_all = []\n\n\n kf = StratifiedKFold(n_splits=10, random_state=11, shuffle=True)\n\n for train_index, test_index in kf.split(training_features,training_labels):\n X_train = training_features.loc[train_index]\n y_train = training_labels.loc[train_index]\n\n X_test = training_features.loc[test_index]\n y_test = training_labels.loc[test_index]\n\n\n model = XGBClassifier(n_estimators=100, n_jobs=11, max_depth=4) # 1000 200\n model.fit(X_train, y_train.values.ravel())\n predictions = model.predict(X_test)\n\n results.append(precision_recall_fscore_support(y_test, predictions))\n f1_results.append(f1_score(y_true=y_test, y_pred=predictions, average='weighted'))\n\n cm_cv = sklearn.metrics.confusion_matrix(y_true=y_test, y_pred=predictions, labels=disease_labels)\n cm_all.append(cm_cv)\n\n print (\"Accuracy : %.4g\" % metrics.accuracy_score(y_test, predictions))\n\n\n f1_results_avg = [pd.np.mean(f1_results), pd.np.std(f1_results)]\n #AUC_results_avg = [pd.np.mean(AUC_results), pd.np.std(AUC_results)]\n\n print (f1_results_avg)\n \n return f1_results, results, model, cm_all\n\n\ndef plot_confusion_matrix():\n\n f1_results, results, model, cm_all = XGBoost_cross_validate()\n\n results_avg = pd.np.mean(results, axis=0)\n f1 = results_avg[2]\n per_dis_f1 = [ str(disease_names[i]) + ' F1: ' + \"{0:.2f}\".format(f1[i]) for i in range (len(f1)) ]\n\n cms = np.array(cm_all)\n cms2 = cms.sum(axis=0)\n\n from matplotlib.colors import LogNorm \n from matplotlib import cm\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10,10))\n\n sns.set_style('darkgrid')\n\n syn = 'royalblue'\n sem = 'darkorange'\n join = 'forestgreen'\n\n # normalize confusion matrix\n #cms2 = np.round(cms2.astype('float') / cms2.sum(axis=1)[:, np.newaxis],2)\n\n\n viridis = cm.get_cmap('viridis', 12)\n a = sns.heatmap(cms2, square=True, cbar=0,\n #normalize=True,\n #norm=LogNorm(vmin=cms2.min(), vmax=cms2.max()),\n cmap=viridis,\n xticklabels=disease_names,\n yticklabels=per_dis_f1, annot=True, fmt='1g', ax=ax, annot_kws={\"size\": 13, \"weight\": \"bold\"})\n # a.xaxis.tick_top()\n # a.title.\n # a.xaxis.\n #ax.set_title(i)\n \n\n plt.tight_layout()\n\n fig.savefig('results/multiclass/classifier_for_' + etype + '_cm_bold_v4.png')\n\n\n results_std = pd.np.std(results, axis=0)\n f1_std = results_std[2]\n\n\n per_dis_f1_dict = {str(disease_names[i]): f1[i] for i in range (len(f1)) }\n per_dis_f1_dict_std = {str(disease_names[i]): f1_std[i] for i in range (len(f1)) }\n\n print (per_dis_f1_dict)\n print(per_dis_f1_dict_std)\n\n\ndef save_confusion_matrix():\n\n\n f1_results, results, model, cm_all = XGBoost_cross_validate()\n\n cms = np.array(cm_all)\n cms2 = cms.sum(axis=0)\n\n np.savetxt(results_file, cms2)\n\n\nplot_confusion_matrix()"
] |
[
[
"numpy.array",
"pandas.concat",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"sklearn.model_selection.StratifiedKFold",
"pandas.DataFrame",
"sklearn.metrics.confusion_matrix",
"sklearn.metrics.precision_recall_fscore_support",
"sklearn.metrics.f1_score",
"matplotlib.cm.get_cmap",
"numpy.savetxt",
"matplotlib.pyplot.rcParams.update",
"pandas.read_pickle",
"pandas.np.std",
"pandas.np.mean",
"sklearn.metrics.accuracy_score"
]
] |
louise-scherrer/sot-talos-balance
|
[
"e1d2c853439902955f15e30fa15c0ce4fd6811a0",
"e1d2c853439902955f15e30fa15c0ce4fd6811a0"
] |
[
"unittest/python/test_ft_calibration.py",
"unittest/python/test_dcm_controller_talos.py"
] |
[
"import numpy as np\nfrom numpy.testing import assert_almost_equal as assertApprox\n\nimport sot_talos_balance.talos.ft_calibration_conf as conf\nfrom sot_talos_balance.ft_calibration import FtCalibration\n\nrobot_name = 'robot'\nftc = FtCalibration('ftc')\nftc.init(robot_name)\nrfw = conf.rfw\nlfw = conf.lfw\nftc.setLeftFootWeight(lfw)\nftc.setRightFootWeight(rfw)\nprint('Weights of both feet set to {0}'.format(rfw))\n\nprint('The robot should be in the air.')\n\nftc.right_foot_force_in.value = [1, 1, 8, 1, 1, 1]\nftc.left_foot_force_in.value = [1, 1, 6, 1, 1, 1]\n\nprint(\"Let's calibrate the ft sensors...\")\nftc.calibrateFeetSensor()\nfor i in range(2, 1003):\n ftc.right_foot_force_in.value = np.random.randn(6) * 0.0000001 + [1, 1, 8, 1, 1, 1]\n ftc.left_foot_force_in.value = np.random.randn(6) * 0.0000001 + [1, 1, 6, 1, 1, 1]\n ftc.right_foot_force_out.recompute(i)\n ftc.left_foot_force_out.recompute(i)\n\nassertApprox(ftc.right_foot_force_out.value, np.array((0, 0, -rfw, 0, 0, 0)), 5)\nassertApprox(ftc.left_foot_force_out.value, np.array((0, 0, -lfw, 0, 0, 0)), 5)\n\nprint(\"Ft sensors calibrated!\")\nprint(\"Value outputed after calibration:\")\nprint(ftc.right_foot_force_out.value)\nprint(ftc.left_foot_force_out.value)\n",
"from math import sqrt\n\nimport numpy as np\nimport pinocchio as pin\nfrom numpy.testing import assert_almost_equal as assertApprox\nfrom rospkg import RosPack\n\nimport sot_talos_balance.talos.parameter_server_conf as param_server_conf\nfrom sot_talos_balance.create_entities_utils import DcmController, create_parameter_server\n\n# --- General ---\nprint(\"--- General ---\")\n\ndt = 0.001\nrobot_name = 'robot'\n\nhalfSitting = [\n 0.0,\n 0.0,\n 1.018213,\n 0.00,\n 0.0,\n 0.0,\n 1.0, # Free flyer\n 0.0,\n 0.0,\n -0.411354,\n 0.859395,\n -0.448041,\n -0.001708, # Left Leg\n 0.0,\n 0.0,\n -0.411354,\n 0.859395,\n -0.448041,\n -0.001708, # Right Leg\n 0.0,\n 0.006761, # Chest\n 0.25847,\n 0.173046,\n -0.0002,\n -0.525366,\n 0.0,\n -0.0,\n 0.1,\n -0.005, # Left Arm\n -0.25847,\n -0.173046,\n 0.0002,\n -0.525366,\n 0.0,\n 0.0,\n 0.1,\n -0.005, # Right Arm\n 0.,\n 0. # Head\n]\n\nq = np.matrix(halfSitting).T\nprint(\"q:\")\nprint(q.flatten().tolist()[0])\n\nrospack = RosPack()\nurdfPath = rospack.get_path('talos_data') + \"/urdf/talos_reduced.urdf\"\nurdfDir = [rospack.get_path('talos_data') + \"/../\"]\n\nmodel = pin.buildModelFromUrdf(urdfPath, pin.JointModelFreeFlyer())\ndata = model.createData()\ncom = pin.centerOfMass(model, data, q)\npin.updateFramePlacements(model, data)\nm = data.mass[0]\nh = float(com[2])\ng = 9.81\nomega = sqrt(g / h)\n\nleftName = param_server_conf.footFrameNames['Left']\nleftId = model.getFrameId(leftName)\nleftPos = data.oMf[leftId]\n\nrightName = param_server_conf.footFrameNames['Right']\nrightId = model.getFrameId(rightName)\nrightPos = data.oMf[rightId]\n\ncenterTranslation = (data.oMf[rightId].translation + data.oMf[leftId].translation) / 2 + np.matrix(\n param_server_conf.rightFootSoleXYZ).T\ncenterPos = pin.SE3(rightPos.rotation, centerTranslation)\ncomRel = centerPos.actInv(com)\n\nfz = m * g\nforce = [0.0, 0.0, fz]\ntau = np.cross(comRel, np.matrix(force).T, axis=0)\nwrench = force + tau.flatten().tolist()\n\nprint(\"desired wrench: %s\" % str(wrench))\n\n# --- Desired CoM, DCM and ZMP\ncomDes = tuple(comRel.flatten().tolist()[0])\ndcmDes = comDes\nzmpDes = comDes[:2] + (0.0, )\n\n# --- Parameter server ---\nprint(\"--- Parameter server ---\")\n\nparam_server = create_parameter_server(param_server_conf, dt)\n\n# --- DCM controller\nKp_dcm = [0.0, 0.0, 0.0]\nKi_dcm = [0.0, 0.0, 0.0]\ngamma_dcm = 0.2\n\ndcm_controller = DcmController(\"dcmCtrl\")\n\ndcm_controller.Kp.value = Kp_dcm\ndcm_controller.Ki.value = Ki_dcm\ndcm_controller.decayFactor.value = gamma_dcm\ndcm_controller.mass.value = m\ndcm_controller.omega.value = omega\n\ndcm_controller.com.value = comDes\ndcm_controller.dcm.value = comDes\n\ndcm_controller.zmpDes.value = zmpDes\ndcm_controller.dcmDes.value = dcmDes\n\ndcm_controller.init(dt)\n\ndcm_controller.wrenchRef.recompute(0)\n\nprint(\"reference wrench: %s\" % str(dcm_controller.wrenchRef.value))\nassertApprox(wrench, dcm_controller.wrenchRef.value, 3)\n"
] |
[
[
"numpy.array",
"numpy.random.randn"
],
[
"numpy.matrix",
"numpy.testing.assert_almost_equal"
]
] |
bioidiap/bob.bio.base
|
[
"44b8d192e957eb328591c8110cf0113f602292ef"
] |
[
"bob/bio/base/test/dummy/preprocessor.py"
] |
[
"from bob.bio.base.preprocessor import Preprocessor\nfrom bob.bio.base.database import BioFile\nimport numpy\nnumpy.random.seed(10)\n\n\nclass DummyPreprocessor (Preprocessor):\n def __init__(self, return_none=False, probability_of_none=1, **kwargs):\n Preprocessor.__init__(self)\n self.return_none = return_none\n self.probability_of_none = probability_of_none\n\n def __call__(self, data, annotation):\n \"\"\"Does nothing, simply converts the data type of the data, ignoring any annotation.\"\"\"\n if self.return_none:\n return numpy.random.choice([None, data], p=[self.probability_of_none, 1-self.probability_of_none])\n \n return data\n\npreprocessor = DummyPreprocessor()\n\n\nclass DummyPreprocessorMetadata (DummyPreprocessor):\n\n def __call__(self, data, annotation, metadata=None):\n \"\"\"Does nothing, simply converts the data type of the data, ignoring any annotation.\"\"\"\n assert isinstance(metadata, BioFile)\n return super(DummyPreprocessorMetadata, self).__call__(data, annotation)\n\npreprocessor_metadata = DummyPreprocessorMetadata()\n"
] |
[
[
"numpy.random.seed",
"numpy.random.choice"
]
] |
fakufaku/doamm
|
[
"66c7124573fb2a2c705335f2f7e877378e585042"
] |
[
"tests/test_norm.py"
] |
[
"import numpy as np\n\n\ndef extract_off_diagonal(X):\n \"\"\"\n Parameters\n ----------\n X: array_like, shape (..., M, M)\n A multi dimensional array\n\n Returns\n -------\n Y: array_like, shape (..., M * (M - 1) / 2)\n The linearized entries under the main diagonal\n \"\"\"\n # we need to format the sensors\n M = X.shape[-1]\n assert X.shape[-2] == M\n indices = np.arange(M)\n mask = np.ravel_multi_index(np.where(indices[:, None] > indices[None, :]), (M, M))\n print(indices[:, None] > indices[None, :])\n print(mask)\n\n return X.reshape(X.shape[:-2] + (X.shape[-2] * X.shape[-1],))[..., mask]\n return X[..., mask]\n\n\nif __name__ == \"__main__\":\n\n n_dim = 3\n n_mics = 4\n n_subsp = 2\n w = 2 * np.pi * 20.0 / 343.0 # 440 Hz @ 343 m/s\n\n # direction vector\n q = np.random.randn(n_dim)\n q /= np.linalg.norm(q)\n\n # microphones\n I = np.arange(n_dim * n_mics)\n np.random.shuffle(I)\n L = I.reshape((n_dim, n_mics))\n\n # noise subspace matrix\n E = np.random.randn(n_mics, n_subsp) + 1j * np.random.randn(n_mics, n_subsp)\n E /= np.linalg.norm(E, axis=0, keepdims=True)\n\n steervec = np.exp(1j * w * L.T @ q)\n\n # computation 1\n\n ell1 = np.linalg.norm(np.conj(E).T @ steervec) ** 2\n\n # computation 2\n V = E @ np.conj(E.T)\n V = 0.5 * (V + np.conj(V.T))\n diag_fact = np.trace(np.abs(V))\n diag_fact2 = np.sum(np.linalg.norm(E, axis=1) ** 2)\n Ld = extract_off_diagonal(L[:, :, None] - L[:, None, :])\n E_vec_cpx = extract_off_diagonal(V)\n E_vec = np.r_[np.real(E_vec_cpx), np.imag(E_vec_cpx)]\n\n e = w * Ld.T @ q\n steervec_real = np.r_[np.cos(e), np.sin(e)]\n\n ell2 = diag_fact + 2 * np.inner(steervec_real, E_vec)\n\n # computation 3\n steervec_3 = np.exp(-1j * w * Ld.T @ q)\n ell3 = diag_fact + 2 * np.sum(np.real(steervec_3 * E_vec_cpx))\n\n # computation 4\n ell4 = np.real(np.conj(steervec) @ V @ steervec)\n\n # computation 5\n ell5 = np.real(np.trace((steervec[:, None] @ np.conj(steervec[None, :])) @ V))\n\n print(f\"diag_fact 1: {diag_fact} 2: {diag_fact2}\")\n print(f\"ell 1: {ell1} 2: {ell2} 3: {ell3} 4: {ell4} 5: {ell5}\")\n"
] |
[
[
"numpy.imag",
"numpy.conj",
"numpy.abs",
"numpy.inner",
"numpy.arange",
"numpy.linalg.norm",
"numpy.random.shuffle",
"numpy.cos",
"numpy.sin",
"numpy.real",
"numpy.random.randn",
"numpy.exp",
"numpy.where"
]
] |
pihvi/edutime
|
[
"c8f16e96b1c8b199dd1146c203084898040222b9"
] |
[
"scrap/groupedViz.py"
] |
[
"import pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\n\ndf = pd.read_csv('./data/sigite2014-difficulty-data.csv', sep=';')\n\n\ndef save_plot(title):\n file = 'plots/' + title + '.png'\n plt.savefig(file)\n plt.clf()\n plt.cla()\n plt.close()\n return file\n\n\nmax_weeks = 6\nmax_assigments = 200\nwith open('reports/seconds_spent.md', 'w') as report:\n for week in range(1, max_weeks + 1):\n report.write('### Week ' + str(week) + ' \\n')\n prefix = 'SECONDS_SPENT_ON_viikko0' + str(week) + '_'\n for col in df.filter(regex=prefix + '.*', axis=1).columns[:max_assigments]:\n name = col.replace(prefix, '')\n data = df[col]\n med = data.median()\n std = data.std()\n data = data[data < med + std]\n data.hist(bins=50)\n plt.axvline(med, color='red', linestyle='dashed', linewidth=2)\n filename = save_plot(name)\n\n data = df['DIFFICULTY_viikko0' + str(week) + '_' + name]\n data.hist(bins=5)\n filename2 = save_plot(name + '_difficulty')\n\n data = df['EDUCATIONAL_VALUE_viikko0' + str(week) + '_' + name]\n data.hist(bins=5)\n filename3 = save_plot(name + '_eduvalue')\n\n report.write('#### ' + name + ' \\n')\n report.write('times | difficulty | educational value \\n')\n report.write('--- | --- | --- \\n')\n report.write(' |  |  \\n')\n"
] |
[
[
"pandas.read_csv",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.close"
]
] |
factzero/pytorch_jaguarface_examples
|
[
"f248ff8899b8fe9d41a1e8ac095ed5b6688987ed"
] |
[
"retinaFace/detect_video.py"
] |
[
"# -*- coding: UTF-8 -*-\nimport argparse\nimport cv2\nimport numpy as np\nimport time\nimport torch\nfrom core.config import cfg_mnet\nfrom core.retinaface import RetinaFace\nfrom utils.prior_box import PriorBox\nfrom utils.box_utils import decode, decode_landm, nms\n\n\nparser = argparse.ArgumentParser(description='Retinaface')\nparser.add_argument('--trained_model', default='./weights/mobilenet0.25_Final.pth', type=str,\n help='Trained state_dict file path to open')\nparser.add_argument('--network', default='mobilenet0.25', help='Backbone network mobilenet0.25')\nparser.add_argument('--cpu', action=\"store_true\", default=False, help='Use cpu inference')\nparser.add_argument('--confidence_threshold', default=0.02, type=float, help='confidence_threshold')\nparser.add_argument('--top_k', default=5000, type=int, help='top_k')\nparser.add_argument('--nms_threshold', default=0.4, type=float, help='nms_threshold')\nparser.add_argument('--keep_top_k', default=750, type=int, help='keep_top_k')\nparser.add_argument('--vis_thres', default=0.6, type=float, help='visualization_threshold')\nparser.add_argument('--video_path', default='', type=str, help='video path')\nargs = parser.parse_args()\n\n\ndef check_keys(model, pretrained_state_dict):\n ckpt_keys = set(pretrained_state_dict.keys())\n model_keys = set(model.state_dict().keys())\n used_pretrained_keys = model_keys & ckpt_keys\n unused_pretrained_keys = ckpt_keys - model_keys\n missing_keys = model_keys - ckpt_keys\n print('Missing keys:{}'.format(len(missing_keys)))\n print('Unused checkpoint keys:{}'.format(len(unused_pretrained_keys)))\n print('Used keys:{}'.format(len(used_pretrained_keys)))\n assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint'\n return True\n\n\ndef remove_prefix(state_dict, prefix):\n ''' Old style model is stored with all names of parameters sharing common prefix 'module.' 
'''\n print('remove prefix \\'{}\\''.format(prefix))\n def f(x): return x.split(prefix, 1)[-1] if x.startswith(prefix) else x\n return {f(key): value for key, value in state_dict.items()}\n\n\ndef load_model(model, pretrained_path, load_to_cpu):\n print('Loading pretrained model from {}'.format(pretrained_path))\n if load_to_cpu:\n pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage)\n else:\n device = torch.cuda.current_device()\n pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device))\n if \"state_dict\" in pretrained_dict.keys():\n pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')\n else:\n pretrained_dict = remove_prefix(pretrained_dict, 'module.')\n check_keys(model, pretrained_dict)\n model.load_state_dict(pretrained_dict, strict=False)\n return model\n\n\nif __name__ == \"__main__\":\n torch.set_grad_enabled(False)\n\n cfg = None\n if args.network == \"mobilenet0.25\":\n cfg = cfg_mnet\n net = RetinaFace(cfg=cfg, phase='test')\n net = load_model(net, args.trained_model, args.cpu)\n net.eval()\n print('Finished loading model!')\n device = torch.device(\"cpu\" if args.cpu else \"cuda\")\n net = net.to(device)\n \n im_proc_w, im_proc_h = 640, 360\n priorbox = PriorBox(cfg, image_size=(im_proc_h, im_proc_w))\n priors = priorbox.forward()\n priors = priors.to(device)\n prior_data = priors.data\n\n video_path = 0 if args.video_path == '' else args.video_path\n vid = cv2.VideoCapture(video_path)\n while True:\n _, img_raw = vid.read()\n img_raw = cv2.resize(img_raw, (im_proc_w, im_proc_h))\n img = np.float32(img_raw)\n im_height, im_width, _ = img.shape\n scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])\n img -= (104, 117, 123)\n img = img.transpose(2, 0, 1)\n img = torch.from_numpy(img).unsqueeze(0)\n img = img.to(device)\n scale = scale.to(device)\n\n tic = time.time()\n loc, conf, landms = net(img) # forward pass\n print('net forward time: {:.4f}'.format(time.time() - tic))\n\n boxes = decode(loc.data.squeeze(0), prior_data, cfg['variance'])\n boxes = boxes * scale\n boxes = boxes.cpu().numpy()\n scores = conf.squeeze(0).data.cpu().numpy()[:, 1]\n landms = decode_landm(landms.data.squeeze(0), prior_data, cfg['variance'])\n scale1 = torch.Tensor([img.shape[3], img.shape[2], img.shape[3], img.shape[2],\n img.shape[3], img.shape[2], img.shape[3], img.shape[2],\n img.shape[3], img.shape[2]])\n scale1 = scale1.to(device)\n landms = landms * scale1\n landms = landms.cpu().numpy()\n\n # ignore low scores\n inds = np.where(scores > args.confidence_threshold)[0]\n boxes = boxes[inds]\n landms = landms[inds]\n scores = scores[inds]\n\n # keep top-K before NMS\n order = scores.argsort()[::-1][:args.top_k]\n boxes = boxes[order]\n landms = landms[order]\n scores = scores[order]\n\n # do NMS\n dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)\n keep = nms(dets, args.nms_threshold)\n dets = dets[keep, :]\n landms = landms[keep]\n\n # keep top-K faster NMS\n dets = dets[:args.keep_top_k, :]\n landms = landms[:args.keep_top_k, :]\n\n dets = np.concatenate((dets, landms), axis=1)\n\n for b in dets:\n if b[4] < args.vis_thres:\n continue\n text = \"{:.4f}\".format(b[4])\n b = list(map(int, b))\n cv2.rectangle(img_raw, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2)\n cx = b[0]\n cy = b[1] + 12\n cv2.putText(img_raw, text, (cx, cy), cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))\n\n # landms\n cv2.circle(img_raw, (b[5], b[6]), 1, (0, 0, 255), 
4)\n cv2.circle(img_raw, (b[7], b[8]), 1, (0, 255, 255), 4)\n cv2.circle(img_raw, (b[9], b[10]), 1, (255, 0, 255), 4)\n cv2.circle(img_raw, (b[11], b[12]), 1, (0, 255, 0), 4)\n cv2.circle(img_raw, (b[13], b[14]), 1, (255, 0, 0), 4)\n \n cv2.imshow('output', img_raw)\n if cv2.waitKey(1) == ord('q'):\n break\n \n cv2.destroyAllWindows()\n\n"
] |
[
[
"numpy.hstack",
"torch.Tensor",
"torch.cuda.current_device",
"torch.load",
"torch.from_numpy",
"numpy.concatenate",
"torch.set_grad_enabled",
"numpy.float32",
"torch.device",
"numpy.where"
]
] |
JLLeitschuh/TIPL
|
[
"89c5d82932f89a2b4064d5d86ac83045ce9bc7d5"
] |
[
"snippets/Python/edgegrowing.py"
] |
[
"\"\"\" A script which implements the : try to segment the pores using a region growing \n/ k-means clustering - like algorithm where neighboring pores which have similar \nenough (a threshold value) orientations are grouped together. \nThat might make the data easier to visualize and ideally the layers we sometimes see will become even clearer\"\"\"\nimport tracktools as tt\nimport os, sys\nimport numpy as np\n\n# class\nkeys2dict = lambda keys, defaultVal=[]: dict(map(lambda x: (x, defaultVal), keys))\nfullset = lambda inlist: np.unique(\n map(lambda x: x[0], inlist) + map(lambda x: x[1], inlist)\n) # all elements in a dictionary / list\n\n\ndef run(\n objFile,\n edgeFile,\n objVars=[\"PCA3_X\", \"PCA3_Y\", \"PCA3_Z\"],\n distEq=lambda x, y: abs(x[0] * y[0] + x[1] * y[1] + x[2] * y[2]),\n distThresh=5e-2,\n):\n \"\"\"The main function in the script, lacFile and edgeFile are the input data files for the lacuna and the edge list respectively\n\tlacVars is the list of variables used for the distance metric that are to be saved in that order with the data\n\tdistEq is the equation based on lacVars for computing the distance from lacuna x to lacuna y\"\"\"\n objData = tt.lacunData(objFile, rotMat=-1)\n edgeData = tt.edgeData(edgeFile)\n\n cxnDict = cxnListToDict(edgeData.lkeys)\n # grpLabels is a dictionary for objects where each object has a label and a score\n # the score is not used now but could be used to allow for collapsing groups into the simplist catagories\n keepVars = map(lambda x: objData.lheader[x], objVars) # translate names to indices\n extVars = lambda inRow: map(lambda y: float(inRow[y]), keepVars)\n grpLabels = dict(\n map(lambda x: (x, [x, extVars(objData.ldata[x]), -1]), objData.lkeys)\n )\n joinList = [1]\n while len(joinList) > 0:\n joinList = connectGroups(grpLabels, cxnDict, distEq, distThresh)\n print(\"Connecting\", len(grpLabels), \"with \", len(joinList), \" joins\")\n grpLabels = processJoin(grpLabels, joinList)\n uniqueGrps = np.unique(map(lambda x: x[0], grpLabels.values()))\n print(len(grpLabels), \" in \", len(uniqueGrps), \" unique groups\")\n return (objData, grpLabels)\n\n\ndef runEdges(\n objFile,\n edgeFile,\n objVars=[\"PCA3_X\", \"PCA3_Y\", \"PCA3_Z\"],\n distEq=lambda x, y: abs(x[0] * y[0] + x[1] * y[1] + x[2] * y[2]),\n distThresh=5e-2,\n):\n \"\"\"The main function in the script, lacFile and edgeFile are the input data files for the lacuna and the edge list respectively\n\tlacVars is the list of variables used for the distance metric that are to be saved in that order with the data\n\tdistEq is the equation based on lacVars for computing the distance from lacuna x to lacuna y\"\"\"\n objData = tt.lacunData(objFile, rotMat=-1)\n edgeList = tt.edgeData(edgeFile).lkeys # never load the whole data\n\n # grpLabels is a dictionary for objects where each object has a label and a score\n # the score is not used now but could be used to allow for collapsing groups into the simplist catagories\n keepVars = map(lambda x: objData.lheader[x], objVars) # translate names to indices\n extVars = lambda inRow: map(lambda y: float(inRow[y]), keepVars)\n grpLabels = dict(\n map(lambda x: (x, [x, extVars(objData.ldata[x]), -1]), objData.lkeys)\n )\n joinList = [1]\n while len(joinList) > 0:\n joinList = connectGroupsEdges(grpLabels, edgeList, distEq, distThresh)\n print(\"Connecting\", len(grpLabels), \"with \", len(joinList), \" joins\")\n grpLabels = processJoin(grpLabels, joinList)\n uniqueGrps = np.unique(map(lambda x: x[0], grpLabels.values()))\n 
print(len(grpLabels), \" in \", len(uniqueGrps), \" unique groups\")\n grpInfo = groupInfo(grpList, objData)\n nGrpLabels = swapGroups(grpLabels, edgeList, grpInfo, distEq, distThresh)\n return (objData, jGrpLabels)\n\n\ndef connectImproveGroups(grpLabels, edgeList, grpMetric, distFunc, distThresh):\n \"\"\" iterate over the edges instead of the objects \"\"\"\n\n joinList = {}\n minmax = lambda a, b: (min(a, b), max(a, b))\n for (i, j) in edgeList:\n (iGrp, iGrpVec, iScore) = grpLabels[i]\n (jGrp, jGrpVec, jScore) = grpLabels[j]\n if iGrp != jGrp: # not already in the same group\n gKey = minmax(iGrp, jGrp) # order correctly\n if not (joinList.has_key(gKey)): # not already a pending join\n cDist = distFunc(iGrpVec, jGrpVec)\n if cDist < distThresh:\n joinList[gKey] = 1\n\n return joinList.keys()\n\n\ndef connectGroupsEdges(grpLabels, edgeList, distFunc, distThresh):\n \"\"\" iterate over the edges instead of the objects \"\"\"\n joinList = {}\n minmax = lambda a, b: (min(a, b), max(a, b))\n for (i, j) in edgeList:\n (iGrp, iGrpVec, iScore) = grpLabels[i]\n (jGrp, jGrpVec, jScore) = grpLabels[j]\n if iGrp != jGrp: # not already in the same group\n gKey = minmax(iGrp, jGrp) # order correctly\n if not (joinList.has_key(gKey)): # not already a pending join\n cDist = distFunc(iGrpVec, jGrpVec)\n if cDist < distThresh:\n joinList[gKey] = 1\n\n return joinList.keys()\n\n\ndef swapGroups(grpLabels, edgeList, grpInfo, distFunc, distThresh):\n \"\"\" iterate over the edges instead of the objects \"\"\"\n newGrpLabels = {}\n for (i, j) in edgeList:\n (iGrp, iGrpVec, iScore) = grpLabels[i]\n (jGrp, jGrpVec, jScore) = grpLabels[j]\n if (iGrp != jGrp) & (iGrp > 0) & (jGrp > 0): # not already in the same group\n newIgrp = iGrp\n newJgrp = jGrp\n iGrpMeanVec = grpInfo[iGrp][2].v0()\n jGrpMeanVec = grpInfo[jGrp][2].v0()\n distItoImean = distFunc(iGrpVec, iGrpMeanVec)\n distItoJmean = distFunc(iGrpVec, jGrpMeanVec)\n distJtoImean = distFunc(jGrpVec, iGrpMeanVec)\n distJtoJmean = distFunc(jGrpVec, jGrpMeanVec)\n if distItoImean > distThresh:\n distItoImean = distThresh\n newIgrp = 0\n if distJtoJmean > distThresh:\n newJgrp = 0\n distJtoJmean = distThresh\n if distItoJmean < distItoImean: # swap i\n newIgrp = jGrp\n distItoImean = distItoJmean\n if distJtoImean < distJtoJmean: # swap j\n newJgrp = iGrp\n distJtoJmean = distJtoImean\n iVals = newGrpLabels.get(i, (iGrp, iGrpVec, distThresh * 20))\n if distItoImean < iVals[2]:\n newGrpLabels[i] = (newIgrp, iGrpVec, distItoImean)\n jVals = newGrpLabels.get(j, (jGrp, jGrpVec, distThresh * 20))\n if distJtoJmean < jVals[2]:\n newGrpLabels[j] = (newJgrp, jGrpVec, distJtoJmean)\n\n return newGrpLabels\n\n\ndef connectGroups(grpLabels, cxnDict, distFunc, distThresh):\n joinList = []\n for (i, cVal) in grpLabels.items():\n (grpId, objVec, score) = cVal\n for cCxn in cxnDict[i]: # all neighbors\n (otGrpId, otObjVec, otScore) = grpLabels[cCxn]\n if otGrpId != grpId:\n if distFunc(objVec, otObjVec) < distThresh:\n joinList += [(grpId, otGrpId)]\n\n return joinList\n\n\ndef dictlistContains(dictDict, cKey):\n for (iKey, iList) in dictDict.items():\n if cKey in iList:\n return iKey\n return None\n\n\ndef joinToMap(joinList):\n \"\"\" converts a list of join operations into a mapping from the original values to the new values \"\"\"\n newLabels = {}\n for (x, y) in joinList:\n xdd = dictlistContains(newLabels, x)\n ydd = dictlistContains(newLabels, y)\n if (xdd is None) & (ydd is None):\n newLabels[min(x, y)] = [x, y]\n elif xdd is not None:\n newLabels[xdd] += [y]\n elif 
ydd is not None:\n newLabels[ydd] += [x]\n selfMap = {}\n for (jnk, cVals) in newLabels.items():\n cMin = min(cVals)\n for cVal in cVals:\n selfMap[cVal] = cMin\n return selfMap\n\n\ndef processJoin(grpLabels, joinList):\n selfMap = joinToMap(joinList)\n print(\"Map Down\", len(selfMap.keys()), \" to \", len(np.unique(selfMap.values())))\n # Applys the mapping to the group if it's not there identify map\n applyMap = lambda x: (x[0], [selfMap.get(x[1][0], x[1][0])] + x[1][1:])\n return dict(map(applyMap, grpLabels.items()))\n\n\ndef cxnListToDict(cxnlist):\n keys = fullset(cxnlist)\n sDict = keys2dict(keys)\n for (id1, id2) in cxnlist:\n sDict[id1] += [id2]\n sDict[id2] += [id1]\n return sDict\n\n\nclass AlignTensor:\n \"\"\" Include the logic for reading in the alignment tensor so it can be easily swapped out for other things\n\t>>> tempTensor=AlignTensor(lacData=None)\n\t>>> for i in range(100): tempTensor.addpt([0,0,(-1)**i])\n\t>>> int(tempTensor.align()*100)\n\t100\n\t>>> tempTensor=AlignTensor(lacData=None)\n\t>>> for i in range(100): tempTensor.addpt([0,sqrt(2)/2*(-1)**i,sqrt(2)/2*(-1)**i])\n\t>>> for i in range(100): tempTensor.addpt([sqrt(2)/2*(-1)**i,0,sqrt(2)/2*(-1)**(i+1)])\n\t>>> int(tempTensor.align()*100)\n\t100\n\t>>> tempTensor=AlignTensor(lacData=None)\n\t>>> for i in range(100): tempTensor.addpt([0,sqrt(2)/2*(-1)**i,sqrt(2)/2*(-1)**i])\n\t>>> for i in range(100): tempTensor.addpt([sqrt(2)/2*(-1)**i,0,sqrt(2)/2*(-1)**(i+1)])\n\t>>> for i in range(100): tempTensor.addpt([0,(-1)**i,0])\n\t>>> print int(tempTensor.align()*100)\n\t>>> int(tempTensor.align()*100)\n\t92\n\t\"\"\"\n\n import numpy as np\n\n def __init__(self, lacData, startList=None, objVars=[\"PCA3_X\", \"PCA3_Y\", \"PCA3_Z\"]):\n self.vars = objVars\n if lacData is not None:\n self.objCols = map(lambda x: lacData.lheader[x], objVars)\n if startList is None:\n self.list = []\n else:\n self.list = startList\n self._cov = None\n\n def addpt(self, pt):\n self.list += [pt]\n return self\n\n def add(self, lacData, row):\n self.addpt(map(lambda cCol: float(lacData.ldata[row][cCol]), self.objCols))\n return self\n\n def cov(self):\n if self._cov is None:\n tu = np.matrix(self.list)\n self._cov = (np.dot(tu.T, tu.conj()) * 1.0 / tu.shape[0]).squeeze()\n (self.evals, self.evecs) = np.linalg.eigh(self._cov)\n return self._cov\n\n def v0(self):\n self.cov()\n mDex = list(self.evals).index(max(self.evals))\n return self.evecs[mDex]\n\n def align(self):\n self.cov()\n return (max(self.evals) - min(self.evals)) / max(self.evals)\n\n\ndef groupInfo(grpList, lacData, objVars=[\"PCA3_X\", \"PCA3_Y\", \"PCA3_Z\"]):\n grpInfo = {}\n addObjects = lambda x, a: (x[0] + a[0], x[1] + a[1], x[2].add(lacData, a[2]))\n volCol = lacData.lheader[\"VOLUME\"]\n objCols = map(lambda x: lacData.lheader[x], objVars)\n for (i, grpVals) in grpList.items():\n gV = grpVals[0]\n colMapper = lambda cCol: float(lacData.ldata[i][cCol])\n grpInfo[gV] = addObjects(\n grpInfo.get(gV, (0, 0, AlignTensor(lacData, objVars=objVars))),\n (1, colMapper(volCol), i),\n ) #\n\n return grpInfo\n\n\ndef writeOutput(outFile, lacData, grpList, colName=\"Group\"):\n fOut = open(outFile, \"w\")\n fOut.write(lacData.preamble + \"\\n\")\n ordCols = map(\n lambda y: dict(map(lambda x: (x[1], x[0]), lacData.lheader.items()))[y],\n sorted(lacData.lheader.values()),\n )\n grpInfo = groupInfo(grpList, lacData)\n fOut.write(\",\".join(ordCols + [colName, colName + \"_CNT\", colName + \"_VOL\"]) + \"\\n\")\n for (i, grpVals) in grpList.items():\n fOut.write(\n \",\".join(\n 
lacData.ldata[i]\n + [\n str(grpVals[0]),\n str(grpInfo[grpVals[0]][0]),\n str(grpInfo[grpVals[0]][1]),\n ]\n )\n + \"\\n\"\n )\n fOut.close()\n\n\nif __name__ == \"__main__\":\n lacFile = sys.argv[1]\n edgeFile = sys.argv[2]\n outFile = lacFile + \"_rg.csv\"\n (a, b) = runEdges(lacFile, edgeFile, distThresh=0.1)\n writeOutput(outFile, a, b)\n"
] |
[
[
"numpy.matrix",
"numpy.linalg.eigh"
]
] |
jchen42703/understanding-clouds-kaggle
|
[
"6972deb25cdf363ae0d9a9ad26d538280613fc94"
] |
[
"clouds/models/clf_models.py"
] |
[
"import torch.nn as nn\nimport torch\n\nimport pretrainedmodels\n\nclass Pretrained(nn.Module):\n \"\"\"\n A generalized class for fetching a pretrained model from Cadene/pretrainedmodels\n From: https://github.com/catalyst-team/mlcomp/blob/master/mlcomp/contrib/model/pretrained.py\n \"\"\"\n def __init__(self, variant, num_classes, pretrained=True, activation=None):\n super().__init__()\n params = {'num_classes': 1000}\n if not pretrained:\n params['pretrained'] = None\n model = pretrainedmodels.__dict__[variant](**params)\n\n if \"se_res\" in variant:\n model.avg_pool = nn.AdaptiveAvgPool2d((1, 1))\n\n if variant == \"inceptionresnetv2\":\n model.avgpool_1a = nn.AdaptiveAvgPool2d((1, 1))\n\n self.need_refactor = False\n if 'resnet' in variant:\n self.need_refactor = True\n\n if self.need_refactor:\n self.l1 = nn.Sequential(*list(model.children())[:-1])\n if torch.cuda.is_available():\n self.l1 = self.l1.to('cuda:0')\n self.last = nn.Linear(model.last_linear.in_features, num_classes)\n else:\n self.model = model\n linear = self.model.last_linear\n if isinstance(linear, nn.Linear):\n self.model.last_linear = nn.Linear(\n model.last_linear.in_features,\n num_classes\n )\n elif isinstance(linear, nn.Conv2d):\n self.model.last_linear = nn.Conv2d(\n linear.in_channels,\n num_classes,\n kernel_size=linear.kernel_size,\n bias=True\n )\n\n if callable(activation) or activation is None:\n self.activation = activation\n elif activation == 'softmax':\n self.activation = nn.Softmax(dim=1)\n elif activation == 'sigmoid':\n self.activation = nn.Sigmoid()\n else:\n raise ValueError(\n 'Activation should be \"sigmoid\"/\"softmax\"/callable/None')\n\n def forward(self, x):\n if not self.need_refactor:\n res = self.model(x)\n if isinstance(res, tuple):\n return res[0]\n return res\n x = self.l1(x)\n x = x.view(x.size()[0], -1)\n x = self.last(x)\n if self.activation:\n x = self.activation(x)\n return x\n\n__all__ = [\"Pretrained\"]\n"
] |
[
[
"torch.nn.Softmax",
"torch.nn.Conv2d",
"torch.nn.Sigmoid",
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool2d",
"torch.cuda.is_available"
]
] |
mzj14/mesh
|
[
"bf04d24e7a9c54733dea014b82e5985a039da67c"
] |
[
"examples/rnn-bench.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The Mesh TensorFlow Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"MNIST using Mesh TensorFlow and TF Estimator.\n\nThis is an illustration, not a good model.\n\n# python rnn-bench.py --hidden_size=200 --train_epochs=1 --epochs_between_evals=1 --log_steps=1 --eval_steps=2 --mesh_shape=\"b1:2;b2:2\" --layout=\"hidden_1:b1;hidden_2:b2\"\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport mesh_tensorflow as mtf\nimport mnist_dataset as dataset # local file import\nimport tensorflow as tf\nimport time\n\ntf.flags.DEFINE_string(\"data_dir\", \"data-source\",\n \"Path to directory containing the MNIST dataset\")\ntf.flags.DEFINE_string(\"model_dir\", \"model\", \"Estimator model_dir\")\ntf.flags.DEFINE_integer(\"batch_size\", 200,\n \"Mini-batch size for the training. Note that this \"\n \"is the global batch size and not the per-shard batch.\")\ntf.flags.DEFINE_integer(\"hidden_size\", 128, \"Size of each hidden layer.\")\ntf.flags.DEFINE_integer(\"train_epochs\", 1, \"Total number of training epochs.\")\ntf.flags.DEFINE_integer(\"epochs_between_evals\", 1,\n \"# of epochs between evaluations.\")\ntf.flags.DEFINE_integer(\"log_steps\", 10, \"Number of log steps as a logging unit\")\ntf.flags.DEFINE_integer(\"eval_steps\", 10000,\n \"Total number of evaluation steps. 
If `0`, evaluation \"\n \"after training is skipped.\")\ntf.flags.DEFINE_string(\"mesh_shape\", \"b1:2;b2:2\", \"mesh shape\")\ntf.flags.DEFINE_string(\"layout\", \"hidden_1:b1;classes:b2\",\n \"layout rules\")\n\nFLAGS = tf.flags.FLAGS\n\n\ndef mnist_model(image, labels, mesh, hs_t):\n \"\"\"The model.\n\n Args:\n image: tf.Tensor with shape [batch, 28*28]\n labels: a tf.Tensor with shape [batch] and dtype tf.int32\n mesh: a mtf.Mesh\n hs_t: a mtf.Tensor with shape [batch, hidden_1]\n Returns:\n logits: a mtf.Tensor with shape [batch, 10]\n loss: a mtf.Tensor with shape []\n hs_t: an updated mtf.Tensor\n \"\"\"\n input_num = 28\n timesteps_num = 28\n classes_num = 10\n\n batch_dim = mtf.Dimension(\"batch\", FLAGS.batch_size)\n input_dim = mtf.Dimension(\"input\", input_num)\n timesteps_dim = mtf.Dimension(\"timesteps\", timesteps_num)\n classes_dim = mtf.Dimension(\"classes\", classes_num)\n hidden_dim_1 = mtf.Dimension(\"hidden_1\", FLAGS.hidden_size)\n hidden_dim_2 = mtf.Dimension(\"hidden_2\", FLAGS.hidden_size)\n\n x = mtf.import_tf_tensor(mesh, tf.reshape(image, [FLAGS.batch_size, 28, 28]), [batch_dim, timesteps_dim, input_dim])\n y = mtf.import_tf_tensor(mesh, tf.reshape(labels, [FLAGS.batch_size]), [batch_dim])\n hs_t = mtf.import_tf_tensor(mesh, hs_t, [batch_dim, hidden_dim_1])\n\n Wxh = mtf.get_variable(mesh, \"Wxh\", [input_dim, hidden_dim_2])\n Whh = mtf.get_variable(mesh, \"Whh\", [hidden_dim_1, hidden_dim_2])\n Why = mtf.get_variable(mesh, \"Why\", [hidden_dim_2, classes_dim])\n bh = mtf.get_variable(mesh, \"bh\", [hidden_dim_2])\n by = mtf.get_variable(mesh, \"by\", [classes_dim])\n\n x_list = mtf.unstack(x, timesteps_dim)\n\n for xs_t in x_list:\n hs_t = mtf.tanh(mtf.einsum([xs_t, Wxh], [batch_dim, hidden_dim_2]) + mtf.einsum([hs_t, Whh], [batch_dim, hidden_dim_2]) + bh)\n logits = mtf.einsum([hs_t, Why], [batch_dim, classes_dim]) + by\n\n if labels is None:\n loss = None\n else:\n loss = mtf.layers.softmax_cross_entropy_with_logits(\n logits, mtf.one_hot(y, classes_dim), classes_dim)\n loss = mtf.reduce_mean(loss)\n return logits, loss, hs_t\n\n\ndef model_fn(features, labels, mode, params):\n \"\"\"The model_fn argument for creating an Estimator.\"\"\"\n tf.logging.info(\"features = %s labels = %s mode = %s params=%s\" %\n (features, labels, mode, params))\n global_step = tf.train.get_global_step()\n graph = mtf.Graph()\n # wrapped graph named \"my_mesh\"\n mesh = mtf.Mesh(graph, \"my_mesh\")\n hs_t = tf.constant(0, dtype=tf.float32, shape=[FLAGS.batch_size, FLAGS.hidden_size])\n logits, loss, hs_t = mnist_model(features, labels, mesh, hs_t)\n # dimension \"b1\" is 2; dimension \"b2\" is 2;\n mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape)\n # 1st dimension of tensor is split by \"b1\"; 2nd by \"b2\"\n layout_rules = mtf.convert_to_layout_rules(FLAGS.layout)\n mesh_size = mesh_shape.size\n print(\"mesh_shape.size = \", mesh_shape.size)\n mesh_devices = [\"\"] * mesh_size\n mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(\n mesh_shape, layout_rules, mesh_devices)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n var_grads = mtf.gradients(\n [loss], [v.outputs[0] for v in graph.trainable_variables])\n optimizer = mtf.optimize.AdafactorOptimizer()\n update_ops = optimizer.apply_grads(var_grads, graph.trainable_variables)\n\n lowering = mtf.Lowering(graph, {mesh: mesh_impl})\n restore_hook = mtf.MtfRestoreHook(lowering)\n\n tf_logits = lowering.export_to_tf_tensor(logits)\n if mode != tf.estimator.ModeKeys.PREDICT:\n tf_loss = lowering.export_to_tf_tensor(loss)\n 
tf.summary.scalar(\"loss\", tf_loss)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n tf_update_ops = [lowering.lowered_operation(op) for op in update_ops]\n tf_update_ops.append(tf.assign_add(global_step, 1))\n train_op = tf.group(tf_update_ops)\n saver = tf.train.Saver(\n tf.global_variables(),\n sharded=True,\n max_to_keep=10,\n keep_checkpoint_every_n_hours=2,\n defer_build=False, save_relative_paths=True)\n tf.add_to_collection(tf.GraphKeys.SAVERS, saver)\n saver_listener = mtf.MtfCheckpointSaverListener(lowering)\n saver_hook = tf.train.CheckpointSaverHook(\n FLAGS.model_dir,\n save_steps=1000,\n saver=saver,\n listeners=[saver_listener])\n\n accuracy = tf.metrics.accuracy(\n labels=labels, predictions=tf.argmax(tf_logits, axis=1))\n\n # Name tensors to be logged with LoggingTensorHook.\n tf.identity(tf_loss, \"cross_entropy\")\n tf.identity(accuracy[1], name=\"train_accuracy\")\n\n # Save accuracy scalar to Tensorboard output.\n tf.summary.scalar(\"train_accuracy\", accuracy[1])\n\n # restore_hook must come before saver_hook\n return tf.estimator.EstimatorSpec(\n tf.estimator.ModeKeys.TRAIN, loss=tf_loss, train_op=train_op,\n training_chief_hooks=[restore_hook, saver_hook])\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"classes\": tf.argmax(tf_logits, axis=1),\n \"probabilities\": tf.nn.softmax(tf_logits),\n }\n return tf.estimator.EstimatorSpec(\n mode=tf.estimator.ModeKeys.PREDICT,\n predictions=predictions,\n prediction_hooks=[restore_hook],\n export_outputs={\n \"classify\": tf.estimator.export.PredictOutput(predictions)\n })\n if mode == tf.estimator.ModeKeys.EVAL:\n return tf.estimator.EstimatorSpec(\n mode=tf.estimator.ModeKeys.EVAL,\n loss=tf_loss,\n evaluation_hooks=[restore_hook],\n eval_metric_ops={\n \"accuracy\":\n tf.metrics.accuracy(\n labels=labels, predictions=tf.argmax(tf_logits, axis=1)),\n })\n\n\ndef run_mnist():\n \"\"\"Run MNIST training and eval loop.\"\"\"\n mnist_classifier = tf.estimator.Estimator(\n model_fn=model_fn,\n model_dir=FLAGS.model_dir,\n config=tf.estimator.RunConfig(log_step_count_steps=FLAGS.log_steps))\n\n # Set up training and evaluation input functions.\n def train_input_fn():\n \"\"\"Prepare data for training.\"\"\"\n\n # When choosing shuffle buffer sizes, larger sizes result in better\n # randomness, while smaller sizes use less memory. MNIST is a small\n # enough dataset that we can easily shuffle the full epoch.\n ds = dataset.train(FLAGS.data_dir)\n # ds_batched = ds.cache().shuffle(buffer_size=50000).batch(FLAGS.batch_size)\n ds_batched = ds.cache().batch(FLAGS.batch_size)\n # Iterate through the dataset a set number (`epochs_between_evals`) of times\n # during each training session.\n ds = ds_batched.repeat(FLAGS.epochs_between_evals)\n return ds\n\n def eval_input_fn():\n return dataset.test(FLAGS.data_dir).batch(FLAGS.batch_size).repeat()\n\n # Train and evaluate model.\n for _ in range(FLAGS.train_epochs // FLAGS.epochs_between_evals):\n mnist_classifier.train(input_fn=train_input_fn, hooks=None)\n eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn, steps=FLAGS.eval_steps)\n print(\"\\nEvaluation results:\\n\\t%s\\n\" % eval_results)\n\n\ndef main(_):\n run_mnist()\n\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run()\n"
] |
[
[
"tensorflow.global_variables",
"tensorflow.estimator.RunConfig",
"tensorflow.group",
"tensorflow.summary.scalar",
"tensorflow.assign_add",
"tensorflow.estimator.export.PredictOutput",
"tensorflow.train.get_global_step",
"tensorflow.logging.set_verbosity",
"tensorflow.argmax",
"tensorflow.app.run",
"tensorflow.identity",
"tensorflow.logging.info",
"tensorflow.add_to_collection",
"tensorflow.flags.DEFINE_integer",
"tensorflow.nn.softmax",
"tensorflow.constant",
"tensorflow.flags.DEFINE_string",
"tensorflow.reshape",
"tensorflow.train.CheckpointSaverHook",
"tensorflow.estimator.EstimatorSpec"
]
] |
scikit-learn-contrib/vectorizers
|
[
"45751ca46f8e7b4e042ed3edd4917e38818da28a"
] |
[
"vectorizers/timed_token_cooccurrence_vectorizer.py"
] |
[
"from .preprocessing import (\n preprocess_timed_token_sequences,\n)\nfrom collections.abc import Iterable\nfrom .base_cooccurrence_vectorizer import BaseCooccurrenceVectorizer\nfrom .preprocessing import preprocess_timed_token_sequences\nfrom .coo_utils import (\n coo_append,\n coo_sum_duplicates,\n CooArray,\n merge_all_sum_duplicates,\n em_update_matrix,\n)\nimport numpy as np\nimport numba\nfrom ._window_kernels import (\n _TIMED_KERNEL_FUNCTIONS,\n window_at_index,\n)\n\n\[email protected](nogil=True)\ndef numba_build_skip_grams(\n token_sequences,\n window_size_array,\n window_reversals,\n kernel_functions,\n kernel_args,\n mix_weights,\n normalize_windows,\n n_unique_tokens,\n array_lengths,\n):\n \"\"\"Generate a matrix of (weighted) counts of co-occurrences of tokens within\n windows in a set of sequences of tokens. Each sequence in the collection of\n sequences provides an effective boundary over which skip-grams may not pass\n (such as sentence boundaries in an NLP context). This is done for a collection\n of different window and kernel types simultaneously.\n\n Parameters\n ----------\n token_sequences: Iterable of Iterables\n The collection of (token, time_stamp) sequences to generate skip-gram data for.\n\n n_unique_tokens: int\n The number of unique tokens in the token_dictionary.\n\n window_size_array: numpy.ndarray(float, size = (n_windows, n_unique_tokens))\n A collection of window sizes per vocabulary index per window function\n\n window_reversals: numpy.array(bool, size = (n_windows,))\n Array indicating whether the window is after or not.\n\n kernel_functions: kernel_functions: tuple\n The n-tuple of kernel functions\n\n kernel_args: tuple of tuples\n Arguments to pass through to the kernel functions per function\n\n mix_weights: numpy.array(bool, size = (n_windows,))\n The scalars values used to combine the values of the kernel functions\n\n normalize_windows: bool\n Indicates whether or nor to L_1 normalize the kernel values per window occurrence\n\n array_lengths: numpy.array(int, size = (n_windows,))\n The lengths of the arrays per window used to the store the coo matrix triples.\n\n Returns\n -------\n cooccurrence_matrix: CooArray\n Weight counts of values (kernel weighted counts) that token_head[i] cooccurred with token_tail[i]\n \"\"\"\n\n n_windows = window_size_array.shape[0]\n array_mul = n_windows * n_unique_tokens + 1\n\n coo_data = [\n CooArray(\n np.zeros(array_lengths[i], dtype=np.int32),\n np.zeros(array_lengths[i], dtype=np.int32),\n np.zeros(array_lengths[i], dtype=np.float32),\n np.zeros(array_lengths[i], dtype=np.int64),\n np.zeros(1, dtype=np.int64),\n np.zeros(2 * np.int64(np.ceil(np.log2(array_lengths[i]))), dtype=np.int64),\n np.zeros(1, dtype=np.int64),\n )\n for i in range(n_windows)\n ]\n\n for d_i, seq in enumerate(token_sequences):\n for w_i, target_pair in enumerate(seq):\n windows = []\n kernels = []\n target_word = np.int32(target_pair[0])\n target_time = target_pair[1]\n for i in range(n_windows):\n win = window_at_index(\n seq,\n window_size_array[i, target_word],\n w_i,\n reverse=window_reversals[i],\n )\n\n this_window = np.array([w[0] for w in win], dtype=np.int32)\n time_deltas = np.array(\n [np.abs(w[1] - target_time) for w in win], dtype=np.float32\n )\n this_kernel = mix_weights[i] * kernel_functions[i](\n this_window, time_deltas, *kernel_args[i]\n )\n windows.append(this_window)\n kernels.append(this_kernel)\n\n total = 0\n if normalize_windows:\n sums = np.array([np.sum(ker) for ker in kernels])\n total = np.sum(sums)\n if 
total <= 0:\n total = 1\n\n for i, window in enumerate(windows):\n this_ker = kernels[i]\n for j, context in enumerate(window):\n val = np.float32(this_ker[j] / total)\n if val > 0:\n row = target_word\n col = context + i * n_unique_tokens\n key = col + array_mul * row\n coo_data[i] = coo_append(coo_data[i], (row, col, val, key))\n\n for coo in coo_data:\n coo_sum_duplicates(coo)\n merge_all_sum_duplicates(coo)\n\n return coo_data\n\n\[email protected](nogil=True)\ndef numba_em_cooccurrence_iteration(\n token_sequences,\n window_size_array,\n window_reversals,\n kernel_functions,\n kernel_args,\n mix_weights,\n n_unique_tokens,\n prior_indices,\n prior_indptr,\n prior_data,\n):\n \"\"\"\n Performs one round of EM on the given (hstack of) n cooccurrence matrices provided in csr format.\n\n Note: The algorithm assumes the matrix is an hstack of cooccurrence matrices with the same vocabulary,\n with kernel and window parameters given in the same order.\n\n Parameters\n ----------\n\n token_sequences: Iterable of Iterables\n The collection of (token, time_stamp) sequences to generate skip-gram data for.\n\n window_size_array : numpy.ndarray of shape(n, n_vocab)\n The collection of window sizes per token per directed cooccurrence\n\n window_reversals: numpy.array(bool)\n The collection of indicators whether or not the window is after the target token.\n\n kernel_functions: tuple\n The n-tuple of kernel functions\n\n kernel_args: tuple(tuples)\n The n-tuple of update_kernel args per kernel function\n\n mix_weights: tuple\n The n-tuple of mix weights to apply to the kernel functions\n\n n_unique_tokens: int\n The number of unique tokens\n\n prior_indices: numpy.array\n The csr indices of the hstacked cooccurrence matrix\n\n prior_indptr: numpy.array\n The csr indptr of the hstacked cooccurrence matrix\n\n prior_data: numpy.array\n The csr data of the hstacked cooccurrence matrix\n\n Returns\n -------\n posterior_data: numpy.array\n The data of the updated csr matrix after one iteration of EM.\n\n \"\"\"\n\n posterior_data = np.zeros_like(prior_data)\n n_windows = window_size_array.shape[0]\n window_reversal_const = np.zeros(len(window_reversals)).astype(np.int32)\n window_reversal_const[window_reversals] = 1\n\n for d_i, seq in enumerate(token_sequences):\n for w_i, target_pair in enumerate(seq):\n windows = []\n kernels = []\n target_word = np.int32(target_pair[0])\n target_time = target_pair[1]\n for i in range(n_windows):\n win = window_at_index(\n seq,\n window_size_array[i, target_word],\n w_i,\n reverse=window_reversals[i],\n )\n\n this_window = np.array([w[0] for w in win], dtype=np.int32)\n time_deltas = np.array([np.abs(w[1] - target_time) for w in win])\n this_kernel = mix_weights[i] * kernel_functions[i](\n this_window, time_deltas, *kernel_args[i]\n )\n windows.append(this_window)\n kernels.append(this_kernel)\n\n posterior_data = em_update_matrix(\n posterior_data,\n prior_indices,\n prior_indptr,\n prior_data,\n n_unique_tokens,\n target_word,\n windows,\n kernels,\n )\n\n return posterior_data\n\n\nclass TimedTokenCooccurrenceVectorizer(BaseCooccurrenceVectorizer):\n \"\"\"Given a sequence, or list of sequences of (tokens, timestamp) pairs produce a collection of directed\n co-occurrence count matrix of tokens, where timestamps are an int or float type. If passed a single\n sequence of tokens it will use windows to determine co-occurrence. 
If passed a list of sequences of\n tokens it will use windows within each sequence in the list -- with windows not extending beyond the\n boundaries imposed by the individual sequences in the list.\n\n Upon the construction of the count matrices, it will hstack them together and run\n n_iter iterations of EM to update the counts.\n\n Parameters\n ----------\n token_dictionary: dictionary or None (optional, default=None)\n A fixed dictionary mapping tokens to indices, or None if the dictionary\n should be learned from the training data.\n\n max_unique_tokens: int or None (optional, default=None)\n The maximal number of elements contained in the vocabulary. If not None, this is\n will prune the vocabulary to the top 'max_vocabulary_size' most frequent remaining tokens\n after other possible preprocessing.\n\n min_occurrences: int or None (optional, default=None)\n The minimal number of occurrences of a token for it to be considered and\n counted. If None then there is no constraint, or the constraint is\n determined by min_frequency.\n\n max_occurrences int or None (optional, default=None)\n The maximal number of occurrences of a token for it to be considered and\n counted. If None then there is no constraint, or the constraint is\n determined by max_frequency.\n\n min_frequency: float or None (optional, default=None)\n The minimal frequency of occurrence of a token for it to be considered and\n counted. If None then there is no constraint, or the constraint is\n determined by min_occurrences.\n\n max_frequency: float or None (optional, default=None)\n The maximal frequency of occurrence of a token for it to be considered and\n counted. If None then there is no constraint, or the constraint is\n determined by max_occurrences.\n\n min_document_occurrences: int or None (optional, default=None)\n The minimal number of documents with an occurrences of a token for the token to be considered and\n counted. If None then there is no constraint, or the constraint is\n determined by min_document_frequency.\n\n max_document_occurrences int or None (optional, default=None)\n The maximal number of documents with an occurrences of a token for the token to be considered and\n counted. If None then there is no constraint, or the constraint is\n determined by max_document_frequency.\n\n min_document_frequency: float or None (optional, default=None)\n The minimal frequency of documents with an occurrences of a token for the token to be considered and\n counted. If None then there is no constraint, or the constraint is\n determined by min_document_occurrences.\n\n max_document_frequency: float or None (optional, default=None)\n The maximal frequency documents with an occurrences of a token for the token to be considered and\n counted. If None then there is no constraint, or the constraint is\n determined by max_document_occurrences.\n\n excluded_tokens: set or None (optional, default=None)\n A set of tokens that should be ignored entirely. 
If None then no tokens will\n be ignored in this fashion.\n\n excluded_token_regex: str or None (optional, default=None)\n The regular expression by which tokens are ignored if re.fullmatch returns True.\n\n window_functions: (Iterable of) numba.jitted callable or str (optional, default=['fixed'])\n Functions producing a sequence of window radii given a window_radius parameter and term frequencies.\n The string options are ['fixed', 'variable'] for using pre-defined functions.\n\n kernel_functions: (Iterable of) numba.jitted callable or str (optional, default=['flat'])\n Functions producing weights given a window of tokens and a window_radius.\n The string options are ['flat', 'harmonic', 'geometric'] for using pre-defined functions.\n\n window_radii: (Iterable of) int (optional, default=[5])\n Argument to pass through to the window function. Outside of boundary cases,\n this is the expected width of the (directed) windows produced by the window function.\n\n window_args: (Iterable of) dicts (optional, default = None)\n Optional arguments for the window functions\n\n kernel_args: (Iterable of) tuple of dicts (optional, default = None)\n Optional arguments for the kernel functions, including 'normalize' which L1 normalizes\n the kernel for each window.\n\n window_orientations: (Iterable of) strings (['before', 'after', 'directional'])\n The orientations of the cooccurrence windows. Whether to return all the tokens that\n occurred within a window before, after, or on either side separately.\n\n mix_weights: (Iterable of) tuple of float (optional, default = None)\n The mix weights to combine the values from the kernel function on each window.\n The default provides no additional rescaling (equivalent to a uniform mixture).\n\n normalize_windows: bool (optional, default = True)\n Perform L1 normalization on the combined mixture of kernel functions per window.\n\n n_threads: int (optional, default=1)\n When processing token sequences to build the matrix, break the list of sequences into\n n_threads equal sized chunks to process in parallel with Dask.\n\n validate_data: bool (optional, default=True)\n Check whether the data is valid (e.g. of homogeneous token type).\n\n mask_string: str (optional, default=None)\n Prunes the filtered tokens when None, otherwise replaces them with the\n provided mask_string.\n\n nullify_mask: bool (optional, default=False)\n Sets all cooccurrences with the mask_string equal to zero by skipping over them\n during processing.\n\n n_iter: int (optional, default = 0)\n Number of EM iterations to perform\n\n epsilon: float32 (optional default = 0)\n Sets values in the cooccurrence matrix (after l_1 normalizing the columns)\n less than epsilon to zero\n\n coo_initial_memory: str (optional, default = \"0.5 GiB\")\n This value, giving a memory size in k, M, G or T, describes how much memory\n to initialize for accumulating the (row, col, val) triples of larger data sets.\n Optimizations to use significantly less memory are made for data sets with small expected numbers of\n non zeros. 
More memory will be allocated during processing if need be.\n\n \"\"\"\n\n def __init__(\n self,\n token_dictionary=None,\n max_unique_tokens=None,\n min_occurrences=None,\n max_occurrences=None,\n min_frequency=None,\n max_frequency=None,\n min_document_occurrences=None,\n max_document_occurrences=None,\n min_document_frequency=None,\n max_document_frequency=None,\n excluded_tokens=None,\n excluded_token_regex=None,\n window_functions=\"fixed\",\n kernel_functions=\"flat\",\n window_args=None,\n kernel_args=None,\n window_radii=5,\n mix_weights=None,\n window_orientations=\"directional\",\n n_threads=1,\n validate_data=True,\n mask_string=None,\n nullify_mask=False,\n normalize_windows=True,\n n_iter=0,\n epsilon=0,\n coo_initial_memory=\"0.5 GiB\",\n ):\n super().__init__(\n token_dictionary=token_dictionary,\n max_unique_tokens=max_unique_tokens,\n min_occurrences=min_occurrences,\n max_occurrences=max_occurrences,\n min_frequency=min_frequency,\n max_frequency=max_frequency,\n min_document_occurrences=min_document_occurrences,\n max_document_occurrences=max_document_occurrences,\n min_document_frequency=min_document_frequency,\n max_document_frequency=max_document_frequency,\n excluded_tokens=excluded_tokens,\n excluded_token_regex=excluded_token_regex,\n window_functions=window_functions,\n kernel_functions=kernel_functions,\n window_args=window_args,\n kernel_args=kernel_args,\n window_radii=window_radii,\n mix_weights=mix_weights,\n window_orientations=window_orientations,\n n_threads=n_threads,\n validate_data=validate_data,\n mask_string=mask_string,\n nullify_mask=nullify_mask,\n normalize_windows=normalize_windows,\n n_iter=n_iter,\n epsilon=epsilon,\n coo_initial_memory=coo_initial_memory,\n )\n self.delta_mean_ = None\n self._preprocessing = preprocess_timed_token_sequences\n\n def _get_default_kernel_functions(self):\n return _TIMED_KERNEL_FUNCTIONS\n\n def _em_cooccurrence_iteration(self, token_sequences, cooccurrence_matrix):\n # call the numba function to return the new matrix.data\n return numba_em_cooccurrence_iteration(\n token_sequences=token_sequences,\n n_unique_tokens=len(self.token_label_dictionary_),\n window_size_array=self._window_len_array,\n window_reversals=self._window_reversals,\n kernel_functions=self._kernel_functions,\n kernel_args=self._full_kernel_args,\n mix_weights=self._mix_weights,\n prior_data=cooccurrence_matrix.data,\n prior_indices=cooccurrence_matrix.indices,\n prior_indptr=cooccurrence_matrix.indptr,\n )\n\n def _build_skip_grams(self, token_sequences):\n # call the numba function for returning the list of CooArrays\n return numba_build_skip_grams(\n token_sequences=token_sequences,\n window_size_array=self._window_len_array,\n window_reversals=self._window_reversals,\n kernel_functions=self._kernel_functions,\n kernel_args=self._full_kernel_args,\n mix_weights=self._mix_weights,\n normalize_windows=self.normalize_windows,\n n_unique_tokens=len(self.token_label_dictionary_),\n array_lengths=self._coo_sizes,\n )\n\n def _set_additional_params(self, token_sequences):\n self.delta_mean_ = 0.0\n total_t = 0.0\n for doc in token_sequences:\n seq = np.array([pair[1] for pair in doc])\n self.delta_mean_ += np.sum(seq[1:] - seq[:-1])\n total_t += len(seq) - 1\n self.delta_mean_ /= total_t\n\n def _set_full_kernel_args(self):\n # Set the full kernel args\n self._full_kernel_args = numba.typed.List([])\n for i, args in enumerate(self._kernel_args):\n default_kernel_array_args = {\n \"delta\": self.delta_mean_,\n \"mask_index\": self._mask_index,\n 
\"normalize\": False,\n \"offset\": 0,\n }\n default_kernel_array_args.update(args)\n self._full_kernel_args.append(tuple(default_kernel_array_args.values()))\n"
] |
[
[
"numpy.log2",
"numpy.abs",
"numpy.int32",
"numpy.zeros_like",
"numpy.float32",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] |
vlukes/io3d
|
[
"34d048b7f737a5e56610879f6ab103128e8f0750"
] |
[
"tests/qt_test.py"
] |
[
"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nfrom loguru import logger\n\n# import funkcí z jiného adresáře\nimport os.path\nimport os.path as op\n\n# import copy\n\nimport unittest\nimport pytest\n\n# sample_data_path = os.path.dirname(os.path.abspath(__file__))\n# sample_data_path\n# sys.path.append(os.path.join(path_to_script, \"../extern/pyseg_base/src/\"))\n# sys.path.append(os.path.join(path_to_script, \"../extern/py3DSeedEditor/\"))\n# sys.path.append(os.path.join(path_to_script, \"../src/\"))\n\n# from PyQt4.QtGui import QFileDialog, QApplication, QMainWindow\n\nimport numpy as np\n\ntry:\n import dicom\n\n dicom.debug(False)\nexcept:\n import pydicom as dicom\n\n dicom.config.debug(False)\n\n#\nimport sys\nimport io3d\nimport io3d.dcmreaddata as dcmr\n\n# sample_data_path = \"~/data/medical/orig/sample_data/\"\n# sample_data_path = op.expanduser(sample_data_path)\n\nfrom PyQt5.QtWidgets import QApplication\nimport io3d.outputqt\nimport io3d.datareaderqt\nimport io3d.datasets\n\n\nclass QtTest(unittest.TestCase):\n interactivetTest = False\n\n # def setUp(self):\n # import imtools\n # import imtools.sample_data\n # imtools.sample_data.get_sample_data([\"jatra_5mm\", \"volumetrie\"], SAMPLE_DATA_DIR)\n # def setUp(self):\n # self.dcmdir = os.path.join(path_to_script, '../sample_data/jatra_5mm')\n # self.data3d, self.metadata = dcmr.dcm_read_from_dir(self.dcmdir)\n # reader = dcmr.DicomReader(self.dcmdir)\n # self.data3d = reader.get_3Ddata()\n # self.metadata = reader.get_metaData()\n def test_select_ouput_path(self):\n app = QApplication(sys.argv)\n sopw = io3d.outputqt.SelectOutputPathWidget(\n widget_label=\"widget label\", path=\"~/lisa_data/sample.{}.pkl\"\n )\n sopw.show()\n\n in_path = \"~/sample{}.vtk\"\n sopw.set_path(in_path)\n\n out_path = sopw.get_path()\n home = op.expanduser(\"~\")\n\n rp1 = op.relpath(home, in_path)\n rp2 = op.relpath(home, out_path)\n # app.exec_()\n self.assertEqual(rp1, rp2)\n\n def test_select_ouput_filename(self):\n app = QApplication(sys.argv)\n sopw = io3d.outputqt.SelectOutputPathWidget(\n widget_label=\"widget label\", path=\"~/lisa_data/sample.{}.pkl\"\n )\n sopw.show()\n\n in_path = \"~/sample{}.vtk\"\n sopw.set_path(in_path)\n\n out_path = sopw.get_filename()\n # app.exec_()\n self.assertEqual(out_path, \"sample{}.vtk\")\n\n def test_read_datareader(self):\n sdp = io3d.datasets.join_path(\"sample_data\")\n dp = io3d.datasets.join_path(\"sample_data/jatra_5mm/\")\n app = QApplication(sys.argv)\n\n drw = io3d.datareaderqt.DataReaderWidget(loaddir=sdp, qt_app=app)\n # (widget_label=\"widget label\", path=\"~/lisa_data/sample.{}.pkl\")\n\n drw.show()\n drw.datapath = dp\n drw.read_data_from_prepared_datapath()\n # print(drw.datap[\"data3d\"].shape)\n error = np.sum(\n np.abs(np.asarray([93, 512, 512]) - np.asarray(drw.datap[\"data3d\"].shape))\n )\n # app.exec_()\n self.assertEqual(error, 0)\n\n def test_read_datareader_get_path_dialog_surround_with_inject_dirpath(self):\n sdp = io3d.datasets.join_path(\"medical\", \"orig\", \"sample_data\", get_root=True)\n dp = io3d.datasets.join_path(\n \"medical/orig/sample_data/jatra_5mm/\", get_root=True\n )\n app = QApplication(sys.argv)\n\n drw = io3d.datareaderqt.DataReaderWidget(loaddir=sdp, qt_app=app)\n # (widget_label=\"widget label\", path=\"~/lisa_data/sample.{}.pkl\")\n\n drw.show()\n drw.datapath = dp\n drw._skip_get_path_dialog_for_tests = True\n drw.read_data_dir_dialog()\n # drw.read_data_from_prepared_datapath()\n # print(drw.datap[\"data3d\"].shape)\n error = np.sum(\n 
np.abs(np.asarray([93, 512, 512]) - np.asarray(drw.datap[\"data3d\"].shape))\n )\n # app.exec_()\n self.assertEqual(error, 0)\n\n def test_read_datareader_get_path_dialog_surround_with_inject_filepath(self):\n sdp = io3d.datasets.join_path(\"medical\", \"orig\", \"sample_data\", get_root=True)\n # dp = io3d.datasets.join_path(\"medical/orig/sample_data/ct_head.rawiv\", get_root=True)\n dp = io3d.datasets.join_path(\"medical/orig/liver-orig001.mhd\", get_root=True)\n app = QApplication(sys.argv)\n\n drw = io3d.datareaderqt.DataReaderWidget(loaddir=sdp, qt_app=app)\n # (widget_label=\"widget label\", path=\"~/lisa_data/sample.{}.pkl\")\n\n drw.show()\n drw.datapath = dp\n drw._skip_get_path_dialog_for_tests = True\n drw.read_data_dir_dialog()\n # drw.read_data_from_prepared_datapath()\n # print(drw.datap[\"data3d\"].shape)\n data3d = drw.datap[\"data3d\"]\n error = np.sum(np.array_equal(np.asarray([183, 512, 512]), data3d.shape))\n # app.exec_()\n\n @pytest.mark.interactive\n def test_read_datareader_interactive(self):\n sdp = io3d.datasets.join_path(\"sample_data\")\n dp = io3d.datasets.join_path(\"sample_data/jatra_5mm/\")\n app = QApplication(sys.argv)\n\n drw = io3d.datareaderqt.DataReaderWidget(loaddir=sdp, qt_app=app)\n # (widget_label=\"widget label\", path=\"~/lisa_data/sample.{}.pkl\")\n\n drw.show()\n # drw.datapath = dp\n # drw.read_data_from_prepared_datapath()\n # error = np.sum(np.abs(np.asarray([93, 512, 512]) - np.asarray(drw.datap[\"data3d\"].shape)))\n app.exec_()\n # self.assertEqual(error, 0)\n\n def test_qstring(self):\n if sys.version_info.major == 2:\n from PyQt5.QtCore import QString\n\n text = QString(\"i am qstring\")\n else:\n text = \"i am str\"\n\n txt = io3d.datareaderqt.get_str_py2_compatibility(text)\n self.assertTrue(type(txt) is str)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"numpy.asarray"
]
] |
gaze-abyss/HMS
|
[
"18de6c5db86f7b24b8e07ec1321df50d0382dad2"
] |
[
"train/models/utils.py"
] |
[
"import os\nfrom collections import *\nfrom itertools import *\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom . import constants\n\nbinding_data = \"binding.npy\"\ndomain_sequences = \"dseq_mtx.npy\"\npeptide_sequences = \"pseq_mtx.npy\"\ninteraction_sequences = \"iseqs_mtx.npy\"\n\nInteractionDataTuple = namedtuple(\"InteractionDataTuple\", [\"binds\", \"domain_seqs\", \"peptide_seqs\", \"interaction_seqs\"])\ndef split_data(data_directory, validation_chunk, include_all=False, excluded_chunks=None, n_folds=constants.DEF_n_folds, seed=constants.DEF_fold_seed):\n files = [constants.binding_file, constants.domain_file, constants.peptide_file, constants.interaction_file]\n data = [np.load(os.path.join(data_directory, f)) for f in files]\n \n data_sz = data[0].shape[0]\n \n np.random.seed(seed)\n randomized = np.random.permutation(data_sz)\n chunks = np.array_split(randomized, n_folds)\n \n vindices = chunks[validation_chunk]\n \n validation_data = InteractionDataTuple(*[mtx[vindices] for mtx in data])\n\n if include_all:\n _excluded = []\n else:\n _excluded = [validation_chunk, *(excluded_chunks if excluded_chunks is not None else [])]\n \n tindices = [i for cidx, chunk in enumerate(chunks) for i in chunk if cidx not in _excluded]\n train_data = InteractionDataTuple(*[mtx[tindices] for mtx in data])\n\n np.random.seed()\n \n return train_data, validation_data\n\ndef _data_chunker(data, nchunks):\n randomized = np.random.permutation(data.binds.shape[0])\n splits = np.array_split(randomized, nchunks)\n\n for spl in splits:\n binds_spl = data.binds[spl]\n seqs_spl = [s[spl] for s in data[1:]]\n\n yield seqs_spl, binds_spl\n\ndef training_iterator(interaction_data, chunk_size=None, nchunks=None):\n if chunk_size is None and nchunks is None: \n raise ValueError(\"Need to pass a chunk size or num. of chunks\")\n\n if nchunks is None:\n nchunks = interaction_data.binds.shape[0] // chunk_size + 1\n\n while True:\n yield _data_chunker(interaction_data, nchunks) \n\ndef make_sparse(sequences, domain_length, peptide_length, n_amino_acids):\n def _sparsify(arr, ncols):\n nrows = arr.shape[0]\n\n ridxes, compressed_cidxes = np.where(arr >= 0)\n cidxes = arr[ridxes, compressed_cidxes]\n \n vals = np.ones(ridxes.size)\n \n idxes = np.vstack([ridxes, cidxes]).T\n \n shape = [nrows, ncols]\n \n return tf.SparseTensorValue(\n indices = idxes, \n values = vals,\n dense_shape = shape\n )\n \n col_sizes = [domain_length * n_amino_acids, peptide_length * n_amino_acids, domain_length * peptide_length * n_amino_acids * n_amino_acids]\n return [_sparsify(m, ncols) for m, ncols in zip(sequences, col_sizes)]\n\n"
] |
[
[
"numpy.random.seed",
"numpy.ones",
"numpy.random.permutation",
"tensorflow.SparseTensorValue",
"numpy.array_split",
"numpy.where",
"numpy.vstack"
]
] |
rokroskar/GPy
|
[
"0f8dbba56d480902c86cfe8bad9e79d9eabae009",
"0f8dbba56d480902c86cfe8bad9e79d9eabae009"
] |
[
"GPy/inference/gradient_descent_update_rules.py",
"GPy/kern/parts/rbf_inv.py"
] |
[
"'''\nCreated on 24 Apr 2013\n\n@author: maxz\n'''\nimport numpy\n\nclass GDUpdateRule():\n _gradnat = None\n _gradnatold = None\n def __init__(self, initgrad, initgradnat=None):\n self.grad = initgrad\n if initgradnat:\n self.gradnat = initgradnat\n else:\n self.gradnat = initgrad\n # self.grad, self.gradnat\n def _gamma(self):\n raise NotImplemented(\"\"\"Implement gamma update rule here, \n you can use self.grad and self.gradold for parameters, as well as\n self.gradnat and self.gradnatold for natural gradients.\"\"\")\n def __call__(self, grad, gradnat=None, si=None, *args, **kw):\n \"\"\"\n Return gamma for given gradients and optional natural gradients\n \"\"\"\n if not gradnat:\n gradnat = grad\n self.gradold = self.grad\n self.gradnatold = self.gradnat\n self.grad = grad\n self.gradnat = gradnat\n self.si = si\n return self._gamma(*args, **kw)\n\nclass FletcherReeves(GDUpdateRule):\n '''\n Fletcher Reeves update rule for gamma\n '''\n def _gamma(self, *a, **kw):\n tmp = numpy.dot(self.grad.T, self.gradnat)\n if tmp:\n return tmp / numpy.dot(self.gradold.T, self.gradnatold)\n return tmp\n\nclass PolakRibiere(GDUpdateRule):\n '''\n Fletcher Reeves update rule for gamma\n '''\n def _gamma(self, *a, **kw):\n tmp = numpy.dot((self.grad - self.gradold).T, self.gradnat)\n if tmp:\n return tmp / numpy.dot(self.gradold.T, self.gradnatold)\n return tmp\n",
"# Copyright (c) 2012, GPy authors (see AUTHORS.txt).\n# Licensed under the BSD 3-clause license (see LICENSE.txt)\n\n\nfrom rbf import RBF\nimport numpy as np\nimport hashlib\nfrom scipy import weave\nfrom ...util.linalg import tdot\nfrom ...util.config import *\n\n\nclass RBFInv(RBF):\n \"\"\"\n Radial Basis Function kernel, aka squared-exponential, exponentiated quadratic or Gaussian kernel. It only\n differs from RBF in that here the parametrization is wrt the inverse lengthscale:\n\n .. math::\n\n k(r) = \\sigma^2 \\exp \\\\bigg(- \\\\frac{1}{2} r^2 \\\\bigg) \\ \\ \\ \\ \\ \\\\text{ where } r^2 = \\sum_{i=1}^d \\\\frac{ (x_i-x^\\prime_i)^2}{\\ell_i^2}\n\n where \\ell_i is the lengthscale, \\sigma^2 the variance and d the dimensionality of the input.\n\n :param input_dim: the number of input dimensions\n :type input_dim: int\n :param variance: the variance of the kernel\n :type variance: float\n :param lengthscale: the vector of lengthscale of the kernel\n :type lengthscale: array or list of the appropriate size (or float if there is only one lengthscale parameter)\n :param ARD: Auto Relevance Determination. If equal to \"False\", the kernel is isotropic (ie. one single lengthscale parameter \\ell), otherwise there is one lengthscale parameter per dimension.\n :type ARD: Boolean\n :rtype: kernel object\n\n .. Note: this object implements both the ARD and 'spherical' version of the function\n \"\"\"\n\n def __init__(self, input_dim, variance=1., inv_lengthscale=None, ARD=False):\n self.input_dim = input_dim\n self.name = 'rbf_inv'\n self.ARD = ARD\n if not ARD:\n self.num_params = 2\n if inv_lengthscale is not None:\n inv_lengthscale = np.asarray(inv_lengthscale)\n assert inv_lengthscale.size == 1, \"Only one lengthscale needed for non-ARD kernel\"\n else:\n inv_lengthscale = np.ones(1)\n else:\n self.num_params = self.input_dim + 1\n if inv_lengthscale is not None:\n inv_lengthscale = np.asarray(inv_lengthscale)\n assert inv_lengthscale.size == self.input_dim, \"bad number of lengthscales\"\n else:\n inv_lengthscale = np.ones(self.input_dim)\n\n self._set_params(np.hstack((variance, inv_lengthscale.flatten())))\n\n # initialize cache\n self._Z, self._mu, self._S = np.empty(shape=(3, 1))\n self._X, self._X2, self._params = np.empty(shape=(3, 1))\n\n # a set of optional args to pass to weave\n weave_options_openmp = {'headers' : ['<omp.h>'],\n 'extra_compile_args': ['-fopenmp -O3'],\n 'extra_link_args' : ['-lgomp'],\n 'libraries': ['gomp']}\n weave_options_noopenmp = {'extra_compile_args': ['-O3']}\n\n if config.getboolean('parallel', 'openmp'):\n self.weave_options = weave_options_openmp\n self.weave_support_code = \"\"\"\n #include <omp.h>\n #include <math.h>\n \"\"\"\n else:\n self.weave_options = weave_options_noopenmp\n self.weave_support_code = \"\"\"\n #include <math.h>\n \"\"\"\n\n def _get_params(self):\n return np.hstack((self.variance, self.inv_lengthscale))\n\n def _set_params(self, x):\n assert x.size == (self.num_params)\n self.variance = x[0]\n self.inv_lengthscale = x[1:]\n self.inv_lengthscale2 = np.square(self.inv_lengthscale)\n # TODO: We can rewrite everything with inv_lengthscale and never need to do the below\n self.lengthscale = 1. 
/ self.inv_lengthscale\n self.lengthscale2 = np.square(self.lengthscale)\n # reset cached results\n self._X, self._X2, self._params = np.empty(shape=(3, 1))\n self._Z, self._mu, self._S = np.empty(shape=(3, 1)) # cached versions of Z,mu,S\n\n def _get_param_names(self):\n if self.num_params == 2:\n return ['variance', 'inv_lengthscale']\n else:\n return ['variance'] + ['inv_lengthscale%i' % i for i in range(self.inv_lengthscale.size)]\n\n # TODO: Rewrite computations so that lengthscale is not needed (but only inv. lengthscale)\n def dK_dtheta(self, dL_dK, X, X2, target):\n self._K_computations(X, X2)\n target[0] += np.sum(self._K_dvar * dL_dK)\n if self.ARD:\n dvardLdK = self._K_dvar * dL_dK\n var_len3 = self.variance / np.power(self.lengthscale, 3)\n len2 = self.lengthscale2\n if X2 is None:\n # save computation for the symmetrical case\n dvardLdK = dvardLdK + dvardLdK.T\n code = \"\"\"\n int q,i,j;\n double tmp;\n for(q=0; q<input_dim; q++){\n tmp = 0;\n for(i=0; i<num_data; i++){\n for(j=0; j<i; j++){\n tmp += (X(i,q)-X(j,q))*(X(i,q)-X(j,q))*dvardLdK(i,j);\n }\n }\n target(q+1) += var_len3(q)*tmp*(-len2(q));\n }\n \"\"\"\n num_data, num_inducing, input_dim = int(X.shape[0]), int(X.shape[0]), int(self.input_dim)\n weave.inline(code, arg_names=['num_data', 'num_inducing', 'input_dim', 'X', 'X2', 'target', 'dvardLdK', 'var_len3', 'len2'], type_converters=weave.converters.blitz, **self.weave_options)\n else:\n code = \"\"\"\n int q,i,j;\n double tmp;\n for(q=0; q<input_dim; q++){\n tmp = 0;\n for(i=0; i<num_data; i++){\n for(j=0; j<num_inducing; j++){\n tmp += (X(i,q)-X2(j,q))*(X(i,q)-X2(j,q))*dvardLdK(i,j);\n }\n }\n target(q+1) += var_len3(q)*tmp*(-len2(q));\n }\n \"\"\"\n num_data, num_inducing, input_dim = int(X.shape[0]), int(X2.shape[0]), int(self.input_dim)\n # [np.add(target[1+q:2+q],var_len3[q]*np.sum(dvardLdK*np.square(X[:,q][:,None]-X2[:,q][None,:])),target[1+q:2+q]) for q in range(self.input_dim)]\n weave.inline(code, arg_names=['num_data', 'num_inducing', 'input_dim', 'X', 'X2', 'target', 'dvardLdK', 'var_len3', 'len2'], type_converters=weave.converters.blitz, **self.weave_options)\n else:\n target[1] += (self.variance / self.lengthscale) * np.sum(self._K_dvar * self._K_dist2 * dL_dK) * (-self.lengthscale2)\n\n def dK_dX(self, dL_dK, X, X2, target):\n self._K_computations(X, X2)\n if X2 is None:\n _K_dist = 2*(X[:, None, :] - X[None, :, :])\n else:\n _K_dist = X[:, None, :] - X2[None, :, :] # don't cache this in _K_computations because it is high memory. 
If this function is being called, chances are we're not in the high memory arena.\n dK_dX = (-self.variance * self.inv_lengthscale2) * np.transpose(self._K_dvar[:, :, np.newaxis] * _K_dist, (1, 0, 2))\n target += np.sum(dK_dX * dL_dK.T[:, :, None], 0)\n\n def dKdiag_dX(self, dL_dKdiag, X, target):\n pass\n\n\n #---------------------------------------#\n # PSI statistics #\n #---------------------------------------#\n\n # def dpsi1_dtheta(self, dL_dpsi1, Z, mu, S, target):\n # self._psi_computations(Z, mu, S)\n # denom_deriv = S[:, None, :] / (self.lengthscale ** 3 + self.lengthscale * S[:, None, :])\n # d_length = self._psi1[:, :, None] * (self.lengthscale * np.square(self._psi1_dist / (self.lengthscale2 + S[:, None, :])) + denom_deriv)\n # target[0] += np.sum(dL_dpsi1 * self._psi1 / self.variance)\n # dpsi1_dlength = d_length * dL_dpsi1[:, :, None]\n # if not self.ARD:\n # target[1] += dpsi1_dlength.sum()*(-self.lengthscale2)\n # else:\n # target[1:] += dpsi1_dlength.sum(0).sum(0)*(-self.lengthscale2)\n # #target[1:] = target[1:]*(-self.lengthscale2)\n\n def dpsi1_dtheta(self, dL_dpsi1, Z, mu, S, target):\n self._psi_computations(Z, mu, S)\n tmp = 1 + S[:, None, :] * self.inv_lengthscale2\n # d_inv_length_old = -self._psi1[:, :, None] * ((self._psi1_dist_sq - 1.) / (self.lengthscale * self._psi1_denom) + self.inv_lengthscale) / self.inv_lengthscale2\n d_length = -(self._psi1[:, :, None] * ((np.square(self._psi1_dist) * self.inv_lengthscale) / (tmp ** 2) + (S[:, None, :] * self.inv_lengthscale) / (tmp)))\n # d_inv_length = -self._psi1[:, :, None] * ((self._psi1_dist_sq - 1.) / self._psi1_denom + self.lengthscale)\n target[0] += np.sum(dL_dpsi1 * self._psi1 / self.variance)\n dpsi1_dlength = d_length * dL_dpsi1[:, :, None]\n if not self.ARD:\n target[1] += dpsi1_dlength.sum() # *(-self.lengthscale2)\n else:\n target[1:] += dpsi1_dlength.sum(0).sum(0) # *(-self.lengthscale2)\n # target[1:] = target[1:]*(-self.lengthscale2)\n\n def dpsi1_dZ(self, dL_dpsi1, Z, mu, S, target):\n self._psi_computations(Z, mu, S)\n dpsi1_dZ = -self._psi1[:, :, None] * ((self.inv_lengthscale2 * self._psi1_dist) / self._psi1_denom)\n target += np.sum(dL_dpsi1[:, :, None] * dpsi1_dZ, 0)\n\n def dpsi1_dmuS(self, dL_dpsi1, Z, mu, S, target_mu, target_S):\n self._psi_computations(Z, mu, S)\n tmp = (self._psi1[:, :, None] * self.inv_lengthscale2) / self._psi1_denom\n target_mu += np.sum(dL_dpsi1[:, :, None] * tmp * self._psi1_dist, 1)\n target_S += np.sum(dL_dpsi1[:, :, None] * 0.5 * tmp * (self._psi1_dist_sq - 1), 1)\n\n def dpsi2_dtheta(self, dL_dpsi2, Z, mu, S, target):\n \"\"\"Shape N,num_inducing,num_inducing,Ntheta\"\"\"\n self._psi_computations(Z, mu, S)\n d_var = 2.*self._psi2 / self.variance\n # d_length = 2.*self._psi2[:, :, :, None] * (self._psi2_Zdist_sq * self._psi2_denom + self._psi2_mudist_sq + S[:, None, None, :] / self.lengthscale2) / (self.lengthscale * self._psi2_denom)\n d_length = -2.*self._psi2[:, :, :, None] * (self._psi2_Zdist_sq * self._psi2_denom + self._psi2_mudist_sq + S[:, None, None, :] * self.inv_lengthscale2) / (self.inv_lengthscale * self._psi2_denom)\n target[0] += np.sum(dL_dpsi2 * d_var)\n dpsi2_dlength = d_length * dL_dpsi2[:, :, :, None]\n if not self.ARD:\n target[1] += dpsi2_dlength.sum() # *(-self.lengthscale2)\n else:\n target[1:] += dpsi2_dlength.sum(0).sum(0).sum(0) # *(-self.lengthscale2)\n # target[1:] = target[1:]*(-self.lengthscale2)\n\n def dpsi2_dZ(self, dL_dpsi2, Z, mu, S, target):\n self._psi_computations(Z, mu, S)\n term1 = self._psi2_Zdist * self.inv_lengthscale2 # 
num_inducing, num_inducing, input_dim\n term2 = (self._psi2_mudist * self.inv_lengthscale2) / self._psi2_denom # N, num_inducing, num_inducing, input_dim\n dZ = self._psi2[:, :, :, None] * (term1[None] + term2)\n target += (dL_dpsi2[:, :, :, None] * dZ).sum(0).sum(0)\n\n def dpsi2_dmuS(self, dL_dpsi2, Z, mu, S, target_mu, target_S):\n \"\"\"Think N,num_inducing,num_inducing,input_dim \"\"\"\n self._psi_computations(Z, mu, S)\n tmp = (self.inv_lengthscale2 * self._psi2[:, :, :, None]) / self._psi2_denom\n target_mu += -2.*(dL_dpsi2[:, :, :, None] * tmp * self._psi2_mudist).sum(1).sum(1)\n target_S += (dL_dpsi2[:, :, :, None] * tmp * (2.*self._psi2_mudist_sq - 1)).sum(1).sum(1)\n\n #---------------------------------------#\n # Precomputations #\n #---------------------------------------#\n\n def _K_computations(self, X, X2):\n if not (np.array_equal(X, self._X) and np.array_equal(X2, self._X2) and np.array_equal(self._params , self._get_params())):\n self._X = X.copy()\n self._params = self._get_params().copy()\n if X2 is None:\n self._X2 = None\n X = X * self.inv_lengthscale\n Xsquare = np.sum(np.square(X), 1)\n self._K_dist2 = -2.*tdot(X) + (Xsquare[:, None] + Xsquare[None, :])\n else:\n self._X2 = X2.copy()\n X = X * self.inv_lengthscale\n X2 = X2 * self.inv_lengthscale\n self._K_dist2 = -2.*np.dot(X, X2.T) + (np.sum(np.square(X), 1)[:, None] + np.sum(np.square(X2), 1)[None, :])\n self._K_dvar = np.exp(-0.5 * self._K_dist2)\n\n def _psi_computations(self, Z, mu, S):\n # here are the \"statistics\" for psi1 and psi2\n if not np.array_equal(Z, self._Z):\n # Z has changed, compute Z specific stuff\n self._psi2_Zhat = 0.5 * (Z[:, None, :] + Z[None, :, :]) # M,M,Q\n self._psi2_Zdist = 0.5 * (Z[:, None, :] - Z[None, :, :]) # M,M,Q\n self._psi2_Zdist_sq = np.square(self._psi2_Zdist * self.inv_lengthscale) # M,M,Q\n\n if not (np.array_equal(Z, self._Z) and np.array_equal(mu, self._mu) and np.array_equal(S, self._S)):\n # something's changed. recompute EVERYTHING\n\n # psi1\n self._psi1_denom = S[:, None, :] * self.inv_lengthscale2 + 1.\n self._psi1_dist = Z[None, :, :] - mu[:, None, :]\n self._psi1_dist_sq = (np.square(self._psi1_dist) * self.inv_lengthscale2) / self._psi1_denom\n self._psi1_exponent = -0.5 * np.sum(self._psi1_dist_sq + np.log(self._psi1_denom), -1)\n self._psi1 = self.variance * np.exp(self._psi1_exponent)\n\n # psi2\n self._psi2_denom = 2.*S[:, None, None, :] * self.inv_lengthscale2 + 1. 
# N,M,M,Q\n self._psi2_mudist, self._psi2_mudist_sq, self._psi2_exponent, _ = self.weave_psi2(mu, self._psi2_Zhat)\n # self._psi2_mudist = mu[:,None,None,:]-self._psi2_Zhat #N,M,M,Q\n # self._psi2_mudist_sq = np.square(self._psi2_mudist)/(self.lengthscale2*self._psi2_denom)\n # self._psi2_exponent = np.sum(-self._psi2_Zdist_sq -self._psi2_mudist_sq -0.5*np.log(self._psi2_denom),-1) #N,M,M,Q\n self._psi2 = np.square(self.variance) * np.exp(self._psi2_exponent) # N,M,M,Q\n\n # store matrices for caching\n self._Z, self._mu, self._S = Z, mu, S\n\n def weave_psi2(self, mu, Zhat):\n N, input_dim = int(mu.shape[0]), int(mu.shape[1])\n num_inducing = int(Zhat.shape[0])\n\n mudist = np.empty((N, num_inducing, num_inducing, input_dim))\n mudist_sq = np.empty((N, num_inducing, num_inducing, input_dim))\n psi2_exponent = np.zeros((N, num_inducing, num_inducing))\n psi2 = np.empty((N, num_inducing, num_inducing))\n\n psi2_Zdist_sq = self._psi2_Zdist_sq\n _psi2_denom = self._psi2_denom.squeeze().reshape(N, self.input_dim)\n half_log_psi2_denom = 0.5 * np.log(self._psi2_denom).squeeze().reshape(N, self.input_dim)\n variance_sq = float(np.square(self.variance))\n if self.ARD:\n inv_lengthscale2 = self.inv_lengthscale2\n else:\n inv_lengthscale2 = np.ones(input_dim) * self.inv_lengthscale2\n\n if config.getboolean('parallel', 'openmp'):\n pragma_string = '#pragma omp parallel for private(tmp)'\n else:\n pragma_string = ''\n\n code = \"\"\"\n double tmp;\n\n %s\n for (int n=0; n<N; n++){\n for (int m=0; m<num_inducing; m++){\n for (int mm=0; mm<(m+1); mm++){\n for (int q=0; q<input_dim; q++){\n //compute mudist\n tmp = mu(n,q) - Zhat(m,mm,q);\n mudist(n,m,mm,q) = tmp;\n mudist(n,mm,m,q) = tmp;\n\n //now mudist_sq\n tmp = tmp*tmp*inv_lengthscale2(q)/_psi2_denom(n,q);\n mudist_sq(n,m,mm,q) = tmp;\n mudist_sq(n,mm,m,q) = tmp;\n\n //now psi2_exponent\n tmp = -psi2_Zdist_sq(m,mm,q) - tmp - half_log_psi2_denom(n,q);\n psi2_exponent(n,mm,m) += tmp;\n if (m !=mm){\n psi2_exponent(n,m,mm) += tmp;\n }\n //psi2 would be computed like this, but np is faster\n //tmp = variance_sq*exp(psi2_exponent(n,m,mm));\n //psi2(n,m,mm) = tmp;\n //psi2(n,mm,m) = tmp;\n }\n }\n }\n }\n\n \"\"\" % pragma_string\n\n weave.inline(code, support_code=self.weave_support_code,\n arg_names=['N', 'num_inducing', 'input_dim', 'mu', 'Zhat', 'mudist_sq', 'mudist', 'inv_lengthscale2', '_psi2_denom', 'psi2_Zdist_sq', 'psi2_exponent', 'half_log_psi2_denom', 'psi2', 'variance_sq'],\n type_converters=weave.converters.blitz, **self.weave_options)\n\n return mudist, mudist_sq, psi2_exponent, psi2\n"
] |
[
[
"numpy.dot"
],
[
"numpy.square",
"numpy.hstack",
"numpy.dot",
"numpy.log",
"numpy.array_equal",
"numpy.power",
"numpy.asarray",
"numpy.ones",
"scipy.weave.inline",
"numpy.transpose",
"numpy.exp",
"numpy.zeros",
"numpy.sum",
"numpy.empty"
]
] |
liuxfiu/qmodels
|
[
"f04d28923c623495d7d1cc3962fb8cac61dc2685"
] |
[
"qmodels/notyet/mm1-evt.py"
] |
[
"# for random distributions, random number generators, statistics\nimport random\nimport numpy as np\nimport scipy.stats as stats\n\n# for simulation\nimport simulus\n\ndef exp_generator(mean, seed):\n rv = stats.expon(scale=mean)\n rv.random_state = np.random.RandomState(seed)\n while True:\n # 100 random numbers as a batch\n for x in rv.rvs(100):\n yield x\n\ndef arrive():\n global num_in_system\n print('%g: customer arrives (num_in_system=%d->%d)' %\n (sim.now, num_in_system, num_in_system+1))\n \n # increment the total number of customers in system\n num_in_system += 1\n \n # schedule next customer's arrival\n sim.sched(arrive, offset=next(inter_arrival_time))\n \n # the arrived customer is the only one in system\n if num_in_system == 1:\n # schedule the customer's departure\n sim.sched(depart, offset=next(service_time))\n\ndef depart():\n global num_in_system\n print('%g: customer departs (num_in_system=%d->%d)' %\n (sim.now, num_in_system, num_in_system-1))\n \n # decrement the total number of customers in system\n num_in_system -= 1\n \n # there are remaining customers in system\n if num_in_system > 0:\n # schedule the next customer's departure\n sim.sched(depart, offset=next(service_time))\n\nrandom.seed(13579) # global random seed\n\nsim = simulus.simulator('ssq')\ninter_arrival_time = exp_generator(1.2, sim.rng().randrange(2**32))\nservice_time = exp_generator(0.8, sim.rng().randrange(2**32))\n\nnum_in_system = 0\nsim.sched(arrive, offset=next(inter_arrival_time))\nsim.run(10)\n"
] |
[
[
"scipy.stats.expon",
"numpy.random.RandomState"
]
] |
chemistry-scripts/cclib
|
[
"e8e0ea9b3e9b7091f8dfc4dd52d5e5e84a1cc258",
"e8e0ea9b3e9b7091f8dfc4dd52d5e5e84a1cc258"
] |
[
"cclib/parser/jaguarparser.py",
"test/method/testvolume.py"
] |
[
"# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2020, the cclib development team\n#\n# This file is part of cclib (http://cclib.github.io) and is distributed under\n# the terms of the BSD 3-Clause License.\n\n\"\"\"Parser for Jaguar output files\"\"\"\n\n\nimport re\n\nimport numpy\n\nfrom cclib.parser import logfileparser\nfrom cclib.parser import utils\n\n\nclass Jaguar(logfileparser.Logfile):\n \"\"\"A Jaguar output file\"\"\"\n\n def __init__(self, *args, **kwargs):\n\n # Call the __init__ method of the superclass\n super(Jaguar, self).__init__(logname=\"Jaguar\", *args, **kwargs)\n\n def __str__(self):\n \"\"\"Return a string representation of the object.\"\"\"\n return \"Jaguar output file %s\" % (self.filename)\n\n def __repr__(self):\n \"\"\"Return a representation of the object.\"\"\"\n return 'Jaguar(\"%s\")' % (self.filename)\n\n def normalisesym(self, label):\n \"\"\"Normalise the symmetries used by Jaguar.\n\n To normalise, three rules need to be applied:\n (1) To handle orbitals of E symmetry, retain everything before the /\n (2) Replace two p's by \"\n (2) Replace any remaining single p's by '\n \"\"\"\n ans = label.split(\"/\")[0].replace(\"pp\", '\"').replace(\"p\", \"'\")\n return ans\n\n def before_parsing(self):\n\n # We need to track whether we are inside geometry optimization in order\n # to parse SCF targets/values correctly.\n self.geoopt = False\n\n def after_parsing(self):\n\n # This is to make sure we always have optdone after geometry optimizations,\n # even if it is to be empty for unconverged runs. We have yet to test this\n # with a regression for Jaguar, though.\n if self.geoopt and not hasattr(self, 'optdone'):\n self.optdone = []\n\n def extract(self, inputfile, line):\n \"\"\"Extract information from the file object inputfile.\"\"\"\n\n # Extract the package version number.\n if \"Jaguar version\" in line:\n tokens = line.split()\n base_version = tokens[3][:-1]\n package_version = \"{}+{}\".format(base_version, tokens[5])\n self.metadata[\"package_version\"] = package_version\n self.metadata[\"legacy_package_version\"] = base_version\n\n # Extract the basis set name\n if line[2:12] == \"basis set:\":\n self.metadata[\"basis_set\"] = line.split()[2]\n\n # Extract charge and multiplicity\n if line[2:22] == \"net molecular charge\":\n self.set_attribute('charge', int(line.split()[-1]))\n self.set_attribute('mult', int(next(inputfile).split()[-1]))\n\n # The Gaussian basis set information is printed before the geometry, and we need\n # to do some indexing to get this into cclib format, because fn increments\n # for each engular momentum, but cclib does not (we have just P instead of\n # all three X/Y/Z with the same parameters. 
On the other hand, fn enumerates\n # the atomic orbitals correctly, so use it to build atombasis.\n #\n # Gaussian basis set information\n #\n # renorm mfac*renorm\n # atom fn prim L z coef coef coef\n # -------- ----- ---- --- ------------- ----------- ----------- -----------\n # C1 1 1 S 7.161684E+01 1.5433E-01 2.7078E+00 2.7078E+00\n # C1 1 2 S 1.304510E+01 5.3533E-01 2.6189E+00 2.6189E+00\n # ...\n # C1 3 6 X 2.941249E+00 2.2135E-01 1.2153E+00 1.2153E+00\n # 4 Y 1.2153E+00\n # 5 Z 1.2153E+00\n # C1 2 8 S 2.222899E-01 1.0000E+00 2.3073E-01 2.3073E-01\n # C1 3 7 X 6.834831E-01 8.6271E-01 7.6421E-01 7.6421E-01\n # ...\n # C2 6 1 S 7.161684E+01 1.5433E-01 2.7078E+00 2.7078E+00\n # ...\n #\n if line.strip() == \"Gaussian basis set information\":\n\n self.skip_lines(inputfile, ['b', 'renorm', 'header', 'd'])\n\n # This is probably the only place we can get this information from Jaguar.\n self.gbasis = []\n\n atombasis = []\n line = next(inputfile)\n fn_per_atom = []\n while line.strip():\n\n if len(line.split()) > 3:\n\n aname = line.split()[0]\n fn = int(line.split()[1])\n prim = int(line.split()[2])\n L = line.split()[3]\n z = float(line.split()[4])\n coef = float(line.split()[5])\n\n # The primitive count is reset for each atom, so use that for adding\n # new elements to atombasis and gbasis. We could also probably do this\n # using the atom name, although that perhaps might not always be unique.\n if prim == 1:\n atombasis.append([])\n fn_per_atom = []\n self.gbasis.append([])\n\n # Remember that fn is repeated when functions are contracted.\n if not fn-1 in atombasis[-1]:\n atombasis[-1].append(fn-1)\n\n # Here we use fn only to know when a new contraction is encountered,\n # so we don't need to decrement it, and we don't even use all values.\n # What's more, since we only wish to save the parameters for each subshell\n # once, we don't even need to consider lines for orbitals other than\n # those for X*, making things a bit easier.\n if not fn in fn_per_atom:\n fn_per_atom.append(fn)\n label = {'S': 'S', 'X': 'P', 'XX': 'D', 'XXX': 'F'}[L]\n self.gbasis[-1].append((label, []))\n igbasis = fn_per_atom.index(fn)\n self.gbasis[-1][igbasis][1].append([z, coef])\n\n else:\n\n fn = int(line.split()[0])\n L = line.split()[1]\n\n # Some AO indices are only printed in these lines, for L > 0.\n if not fn-1 in atombasis[-1]:\n atombasis[-1].append(fn-1)\n\n line = next(inputfile)\n\n # The indices for atombasis can also be read later from the molecular orbital output.\n self.set_attribute('atombasis', atombasis)\n\n # This length of atombasis should always be the number of atoms.\n self.set_attribute('natom', len(self.atombasis))\n\n # Effective Core Potential\n #\n # Atom Electrons represented by ECP\n # Mo 36\n # Maximum angular term 3\n # F Potential 1/r^n Exponent Coefficient\n # ----- -------- -----------\n # 0 140.4577691 -0.0469492\n # 1 89.4739342 -24.9754989\n # ...\n # S-F Potential 1/r^n Exponent Coefficient\n # ----- -------- -----------\n # 0 33.7771969 2.9278406\n # 1 10.0120020 34.3483716\n # ...\n # O 0\n # Cl 10\n # Maximum angular term 2\n # D Potential 1/r^n Exponent Coefficient\n # ----- -------- -----------\n # 1 94.8130000 -10.0000000\n # ...\n if line.strip() == \"Effective Core Potential\":\n\n self.skip_line(inputfile, 'blank')\n line = next(inputfile)\n assert line.split()[0] == \"Atom\"\n assert \" \".join(line.split()[1:]) == \"Electrons represented by ECP\"\n\n self.coreelectrons = []\n line = next(inputfile)\n while line.strip():\n if len(line.split()) == 2:\n 
self.coreelectrons.append(int(line.split()[1]))\n line = next(inputfile)\n\n if line[2:14] == \"new geometry\" or line[1:21] == \"Symmetrized geometry\" or line.find(\"Input geometry\") > 0:\n # Get the atom coordinates\n if not hasattr(self, \"atomcoords\") or line[1:21] == \"Symmetrized geometry\":\n # Wipe the \"Input geometry\" if \"Symmetrized geometry\" present\n self.atomcoords = []\n p = re.compile(r\"(\\D+)\\d+\") # One/more letters followed by a number\n atomcoords = []\n atomnos = []\n angstrom = next(inputfile)\n title = next(inputfile)\n line = next(inputfile)\n while line.strip():\n temp = line.split()\n element = p.findall(temp[0])[0]\n atomnos.append(self.table.number[element])\n atomcoords.append(list(map(float, temp[1:])))\n line = next(inputfile)\n self.atomcoords.append(atomcoords)\n self.atomnos = numpy.array(atomnos, \"i\")\n self.set_attribute('natom', len(atomcoords))\n\n # Hartree-Fock energy after SCF\n if line[1:18] == \"SCFE: SCF energy:\":\n self.metadata[\"methods\"].append(\"HF\")\n if not hasattr(self, \"scfenergies\"):\n self.scfenergies = []\n temp = line.strip().split()\n scfenergy = float(temp[temp.index(\"hartrees\") - 1])\n scfenergy = utils.convertor(scfenergy, \"hartree\", \"eV\")\n self.scfenergies.append(scfenergy)\n\n # Energy after LMP2 correction\n if line[1:18] == \"Total LMP2 Energy\":\n self.metadata[\"methods\"].append(\"LMP2\")\n if not hasattr(self, \"mpenergies\"):\n self.mpenergies = [[]]\n lmp2energy = float(line.split()[-1])\n lmp2energy = utils.convertor(lmp2energy, \"hartree\", \"eV\")\n self.mpenergies[-1].append(lmp2energy)\n\n if line[15:45] == \"Geometry optimization complete\":\n if not hasattr(self, 'optdone'):\n self.optdone = []\n self.optdone.append(len(self.geovalues) - 1)\n\n if line.find(\"number of occupied orbitals\") > 0:\n # Get number of MOs\n occs = int(line.split()[-1])\n line = next(inputfile)\n virts = int(line.split()[-1])\n self.nmo = occs + virts\n self.homos = numpy.array([occs-1], \"i\")\n\n self.unrestrictedflag = False\n\n if line[1:28] == \"number of occupied orbitals\":\n self.homos = numpy.array([float(line.strip().split()[-1])-1], \"i\")\n\n if line[2:27] == \"number of basis functions\":\n nbasis = int(line.strip().split()[-1])\n self.set_attribute('nbasis', nbasis)\n\n if line.find(\"number of alpha occupied orb\") > 0:\n # Get number of MOs for an unrestricted calc\n\n aoccs = int(line.split()[-1])\n line = next(inputfile)\n avirts = int(line.split()[-1])\n line = next(inputfile)\n boccs = int(line.split()[-1])\n line = next(inputfile)\n bvirt = int(line.split()[-1])\n\n self.nmo = aoccs + avirts\n self.homos = numpy.array([aoccs-1, boccs-1], \"i\")\n self.unrestrictedflag = True\n\n if line[0:4] == \"etot\":\n # Get SCF convergence information\n if not hasattr(self, \"scfvalues\"):\n self.scfvalues = []\n self.scftargets = [[5E-5, 5E-6]]\n values = []\n while line[0:4] == \"etot\":\n # Jaguar 4.2\n # etot 1 N N 0 N -382.08751886450 2.3E-03 1.4E-01\n # etot 2 Y Y 0 N -382.27486023153 1.9E-01 1.4E-03 5.7E-02\n # Jaguar 6.5\n # etot 1 N N 0 N -382.08751881733 2.3E-03 1.4E-01\n # etot 2 Y Y 0 N -382.27486018708 1.9E-01 1.4E-03 5.7E-02\n temp = line.split()[7:]\n if len(temp) == 3:\n denergy = float(temp[0])\n else:\n denergy = 0 # Should really be greater than target value\n # or should we just ignore the values in this line\n ddensity = float(temp[-2])\n maxdiiserr = float(temp[-1])\n if not self.geoopt:\n values.append([denergy, ddensity])\n else:\n values.append([ddensity])\n try:\n line = 
next(inputfile)\n except StopIteration:\n self.logger.warning('File terminated before end of last SCF! Last error: {}'.format(maxdiiserr))\n break\n self.scfvalues.append(values)\n\n # MO energies and symmetries.\n # Jaguar 7.0: provides energies and symmetries for both\n # restricted and unrestricted calculations, like this:\n # Alpha Orbital energies/symmetry label:\n # -10.25358 Bu -10.25353 Ag -10.21931 Bu -10.21927 Ag\n # -10.21792 Bu -10.21782 Ag -10.21773 Bu -10.21772 Ag\n # ...\n # Jaguar 6.5: prints both only for restricted calculations,\n # so for unrestricted calculations the output it looks like this:\n # Alpha Orbital energies:\n # -10.25358 -10.25353 -10.21931 -10.21927 -10.21792 -10.21782\n # -10.21773 -10.21772 -10.21537 -10.21537 -1.02078 -0.96193\n # ...\n # Presence of 'Orbital energies' is enough to catch all versions.\n if \"Orbital energies\" in line:\n\n # Parsing results is identical for restricted/unrestricted\n # calculations, just assert later that alpha/beta order is OK.\n spin = int(line[2:6] == \"Beta\")\n\n # Check if symmetries are printed also.\n issyms = \"symmetry label\" in line\n\n if not hasattr(self, \"moenergies\"):\n self.moenergies = []\n if issyms and not hasattr(self, \"mosyms\"):\n self.mosyms = []\n\n # Grow moeneriges/mosyms and make sure they are empty when\n # parsed multiple times - currently cclib returns only\n # the final output (ex. in a geomtry optimization).\n if len(self.moenergies) < spin+1:\n self.moenergies.append([])\n self.moenergies[spin] = []\n if issyms:\n if len(self.mosyms) < spin+1:\n self.mosyms.append([])\n self.mosyms[spin] = []\n\n line = next(inputfile).split()\n while len(line) > 0:\n if issyms:\n energies = [float(line[2*i]) for i in range(len(line)//2)]\n syms = [line[2*i+1] for i in range(len(line)//2)]\n else:\n energies = [float(e) for e in line]\n energies = [utils.convertor(e, \"hartree\", \"eV\") for e in energies]\n self.moenergies[spin].extend(energies)\n if issyms:\n syms = [self.normalisesym(s) for s in syms]\n self.mosyms[spin].extend(syms)\n line = next(inputfile).split()\n\n line = next(inputfile)\n\n # The second trigger string is in the version 8.3 unit test and the first one was\n # encountered in version 6.x and is followed by a bit different format. In particular,\n # the line with occupations is missing in each block. 
Here is a fragment of this block\n # from version 8.3:\n #\n # *****************************************\n #\n # occupied + virtual orbitals: final wave function\n #\n # *****************************************\n #\n #\n # 1 2 3 4 5\n # eigenvalues- -11.04064 -11.04058 -11.03196 -11.03196 -11.02881\n # occupations- 2.00000 2.00000 2.00000 2.00000 2.00000\n # 1 C1 S 0.70148 0.70154 -0.00958 -0.00991 0.00401\n # 2 C1 S 0.02527 0.02518 0.00380 0.00374 0.00371\n # ...\n #\n if line.find(\"Occupied + virtual Orbitals- final wvfn\") > 0 or \\\n line.find(\"occupied + virtual orbitals: final wave function\") > 0:\n\n self.skip_lines(inputfile, ['b', 's', 'b', 'b'])\n\n if not hasattr(self, \"mocoeffs\"):\n self.mocoeffs = []\n\n aonames = []\n lastatom = \"X\"\n\n readatombasis = False\n if not hasattr(self, \"atombasis\"):\n self.atombasis = []\n for i in range(self.natom):\n self.atombasis.append([])\n readatombasis = True\n\n offset = 0\n\n spin = 1 + int(self.unrestrictedflag)\n for s in range(spin):\n mocoeffs = numpy.zeros((len(self.moenergies[s]), self.nbasis), \"d\")\n\n if s == 1: # beta case\n self.skip_lines(inputfile, ['s', 'b', 'title', 'b', 's', 'b', 'b'])\n\n for k in range(0, len(self.moenergies[s]), 5):\n self.updateprogress(inputfile, \"Coefficients\")\n\n # All known version have a line with indices followed by the eigenvalues.\n self.skip_lines(inputfile, ['numbers', 'eigens'])\n\n # Newer version also have a line with occupation numbers here.\n line = next(inputfile)\n if \"occupations-\" in line:\n line = next(inputfile)\n\n for i in range(self.nbasis):\n\n info = line.split()\n\n # Fill atombasis only first time around.\n if readatombasis and k == 0:\n orbno = int(info[0])\n atom = info[1]\n if atom[1].isalpha():\n atomno = int(atom[2:])\n else:\n atomno = int(atom[1:])\n self.atombasis[atomno-1].append(orbno-1)\n\n if not hasattr(self, \"aonames\"):\n if lastatom != info[1]:\n scount = 1\n pcount = 3\n dcount = 6 # six d orbitals in Jaguar\n\n if info[2] == 'S':\n aonames.append(\"%s_%i%s\" % (info[1], scount, info[2]))\n scount += 1\n\n if info[2] == 'X' or info[2] == 'Y' or info[2] == 'Z':\n aonames.append(\"%s_%iP%s\" % (info[1], pcount / 3, info[2]))\n pcount += 1\n\n if info[2] == 'XX' or info[2] == 'YY' or info[2] == 'ZZ' or \\\n info[2] == 'XY' or info[2] == 'XZ' or info[2] == 'YZ':\n\n aonames.append(\"%s_%iD%s\" % (info[1], dcount / 6, info[2]))\n dcount += 1\n\n lastatom = info[1]\n\n for j in range(len(info[3:])):\n mocoeffs[j+k, i] = float(info[3+j])\n\n line = next(inputfile)\n\n if not hasattr(self, \"aonames\"):\n self.aonames = aonames\n\n offset += 5\n self.mocoeffs.append(mocoeffs)\n\n # Atomic charges from Mulliken population analysis:\n #\n # Atom C1 C2 C3 C4 C5\n # Charge 0.00177 -0.06075 -0.05956 0.00177 -0.06075\n #\n # Atom H6 H7 H8 C9 C10\n # ...\n if line.strip() == \"Atomic charges from Mulliken population analysis:\":\n\n if not hasattr(self, 'atomcharges'):\n self.atomcharges = {}\n\n charges = []\n self.skip_line(inputfile, \"blank\")\n line = next(inputfile)\n while \"sum of atomic charges\" not in line:\n assert line.split()[0] == \"Atom\"\n line = next(inputfile)\n assert line.split()[0] == \"Charge\"\n charges.extend([float(c) for c in line.split()[1:]])\n self.skip_line(inputfile, \"blank\")\n line = next(inputfile)\n\n self.atomcharges['mulliken'] = charges\n\n if (line[2:6] == \"olap\") or (line.strip() == \"overlap matrix:\"):\n\n if line[6] == \"-\":\n return\n # This was continue (in loop) before parser refactoring.\n # continue # 
avoid \"olap-dev\"\n self.aooverlaps = numpy.zeros((self.nbasis, self.nbasis), \"d\")\n\n for i in range(0, self.nbasis, 5):\n self.updateprogress(inputfile, \"Overlap\")\n\n self.skip_lines(inputfile, ['b', 'header'])\n\n for j in range(i, self.nbasis):\n temp = list(map(float, next(inputfile).split()[1:]))\n self.aooverlaps[j, i:(i+len(temp))] = temp\n self.aooverlaps[i:(i+len(temp)), j] = temp\n\n if line[2:24] == \"start of program geopt\":\n if not self.geoopt:\n # Need to keep only the RMS density change info\n # if this is a geooptz\n self.scftargets = [[self.scftargets[0][0]]]\n if hasattr(self, \"scfvalues\"):\n self.scfvalues[0] = [[x[0]] for x in self.scfvalues[0]]\n self.geoopt = True\n else:\n self.scftargets.append([5E-5])\n\n # Get Geometry Opt convergence information\n #\n # geometry optimization step 7\n # energy: -382.30219111487 hartrees\n # [ turning on trust-radius adjustment ]\n # ** restarting optimization from step 6 **\n #\n #\n # Level shifts adjusted to satisfy step-size constraints\n # Step size: 0.0360704\n # Cos(theta): 0.8789215\n # Final level shift: -8.6176299E-02\n #\n # energy change: 2.5819E-04 . ( 5.0000E-05 )\n # gradient maximum: 5.0947E-03 . ( 4.5000E-04 )\n # gradient rms: 1.2996E-03 . ( 3.0000E-04 )\n # displacement maximum: 1.3954E-02 . ( 1.8000E-03 )\n # displacement rms: 4.6567E-03 . ( 1.2000E-03 )\n #\n if line[2:28] == \"geometry optimization step\":\n\n if not hasattr(self, \"geovalues\"):\n self.geovalues = []\n self.geotargets = numpy.zeros(5, \"d\")\n\n gopt_step = int(line.split()[-1])\n\n energy = next(inputfile)\n blank = next(inputfile)\n\n # A quick hack for messages that show up right after the energy\n # at this point, which include:\n # ** restarting optimization from step 2 **\n # [ turning on trust-radius adjustment ]\n # as found in regression file ptnh3_2_H2O_2_2plus.out and other logfiles.\n restarting_from_1 = False\n while blank.strip():\n if blank.strip() == \"** restarting optimization from step 1 **\":\n restarting_from_1 = True\n blank = next(inputfile)\n\n # One or more blank lines, depending on content.\n line = next(inputfile)\n while not line.strip():\n line = next(inputfile)\n\n # Note that the level shift message is followed by a blank, too.\n if \"Level shifts adjusted\" in line:\n while line.strip():\n line = next(inputfile)\n line = next(inputfile)\n\n # The first optimization step does not produce an energy change, and\n # ther is also no energy change when the optimization is restarted\n # from step 1 (since step 1 had no change).\n values = []\n target_index = 0\n if (gopt_step == 1) or restarting_from_1:\n values.append(0.0)\n target_index = 1\n while line.strip():\n if len(line) > 40 and line[41] == \"(\":\n # A new geo convergence value\n values.append(float(line[26:37]))\n self.geotargets[target_index] = float(line[43:54])\n target_index += 1\n line = next(inputfile)\n self.geovalues.append(values)\n\n # IR output looks like this:\n # frequencies 72.45 113.25 176.88 183.76 267.60 312.06\n # symmetries Au Bg Au Bu Ag Bg\n # intensities 0.07 0.00 0.28 0.52 0.00 0.00\n # reduc. mass 1.90 0.74 1.06 1.42 1.19 0.85\n # force const 0.01 0.01 0.02 0.03 0.05 0.05\n # C1 X 0.00000 0.00000 0.00000 -0.05707 -0.06716 0.00000\n # C1 Y 0.00000 0.00000 0.00000 0.00909 -0.02529 0.00000\n # C1 Z 0.04792 -0.06032 -0.01192 0.00000 0.00000 0.11613\n # C2 X 0.00000 0.00000 0.00000 -0.06094 -0.04635 0.00000\n # ... etc. 
...\n # This is a complete ouput, some files will not have intensities,\n # and older Jaguar versions sometimes skip the symmetries.\n if line[2:23] == \"start of program freq\":\n\n self.skip_line(inputfile, 'blank')\n\n # Version 8.3 has two blank lines here, earlier versions just one.\n line = next(inputfile)\n if not line.strip():\n line = next(inputfile)\n\n self.vibfreqs = []\n self.vibdisps = []\n self.vibrmasses = []\n forceconstants = False\n intensities = False\n while line.strip():\n if \"force const\" in line:\n forceconstants = True\n if \"intensities\" in line:\n intensities = True\n line = next(inputfile)\n\n # In older version, the last block had an extra blank line after it,\n # which could be caught. This is not true in newer version (including 8.3),\n # but in general it would be better to bound this loop more strictly.\n freqs = next(inputfile)\n while freqs.strip() and not \"imaginary frequencies\" in freqs:\n\n # Number of modes (columns printed in this block).\n nmodes = len(freqs.split())-1\n\n # Append the frequencies.\n self.vibfreqs.extend(list(map(float, freqs.split()[1:])))\n line = next(inputfile).split()\n\n # May skip symmetries (older Jaguar versions).\n if line[0] == \"symmetries\":\n if not hasattr(self, \"vibsyms\"):\n self.vibsyms = []\n self.vibsyms.extend(list(map(self.normalisesym, line[1:])))\n line = next(inputfile).split()\n if intensities:\n if not hasattr(self, \"vibirs\"):\n self.vibirs = []\n self.vibirs.extend(list(map(float, line[1:])))\n line = next(inputfile).split()\n self.vibrmasses.extend(list(map(float, line[2:])))\n if forceconstants:\n line = next(inputfile).split()\n if not hasattr(self, \"vibfconsts\"):\n self.vibfconsts = []\n self.vibfconsts.extend(list(map(float, line[2:])))\n\n # Start parsing the displacements.\n # Variable 'q' holds up to 7 lists of triplets.\n q = [[] for i in range(7)]\n for n in range(self.natom):\n # Variable 'p' holds up to 7 triplets.\n p = [[] for i in range(7)]\n for i in range(3):\n line = next(inputfile)\n disps = [float(disp) for disp in line.split()[2:]]\n for j in range(nmodes):\n p[j].append(disps[j])\n for i in range(nmodes):\n q[i].append(p[i])\n\n self.vibdisps.extend(q[:nmodes])\n\n self.skip_line(inputfile, 'blank')\n freqs = next(inputfile)\n\n # Convert new data to arrays.\n self.vibfreqs = numpy.array(self.vibfreqs, \"d\")\n self.vibdisps = numpy.array(self.vibdisps, \"d\")\n self.vibrmasses = numpy.array(self.vibrmasses, \"d\")\n if hasattr(self, \"vibirs\"):\n self.vibirs = numpy.array(self.vibirs, \"d\")\n if hasattr(self, \"vibfconsts\"):\n self.vibfconsts = numpy.array(self.vibfconsts, \"d\")\n\n # Parse excited state output (for CIS calculations).\n # Jaguar calculates only singlet states.\n if line[2:15] == \"Excited State\":\n if not hasattr(self, \"etenergies\"):\n self.etenergies = []\n if not hasattr(self, \"etoscs\"):\n self.etoscs = []\n if not hasattr(self, \"etsecs\"):\n self.etsecs = []\n self.etsyms = []\n etenergy = float(line.split()[3])\n etenergy = utils.convertor(etenergy, \"eV\", \"wavenumber\")\n self.etenergies.append(etenergy)\n\n self.skip_lines(inputfile, ['line', 'line', 'line', 'line'])\n\n line = next(inputfile)\n self.etsecs.append([])\n # Jaguar calculates only singlet states.\n self.etsyms.append('Singlet-A')\n while line.strip() != \"\":\n fromMO = int(line.split()[0])-1\n toMO = int(line.split()[2])-1\n coeff = float(line.split()[-1])\n self.etsecs[-1].append([(fromMO, 0), (toMO, 0), coeff])\n line = next(inputfile)\n # Skip 3 lines\n for i in 
range(4):\n line = next(inputfile)\n strength = float(line.split()[-1])\n self.etoscs.append(strength)\n\n if line[:20] == ' Total elapsed time:' \\\n or line[:18] == ' Total cpu seconds':\n self.metadata['success'] = True\n",
"# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2018, the cclib development team\n#\n# This file is part of cclib (http://cclib.github.io) and is distributed under\n# the terms of the BSD 3-Clause License.\n\n\"\"\"Test the Volume and related methods in cclib\"\"\"\n\nimport os, sys\nimport unittest\nimport numpy\n\nfrom cclib.method import volume\nfrom cclib.parser import Gaussian, Psi4\n\nsys.path.insert(1, \"..\")\n\nfrom ..test_data import getdatafile\nfrom numpy.testing import assert_allclose\n\n\nclass VolumeTest(unittest.TestCase):\n def test_scinotation(self):\n \"\"\"Does the scientific notation writer work as expected?\"\"\"\n\n self.assertEqual(volume.scinotation(1.0 / 654), \" 1.52905E-03\")\n self.assertEqual(volume.scinotation(-1.0 / 654), \"-1.52905E-03\")\n\n def test_wavefunction(self):\n \"\"\"Does the volume occupied by the HOMO integrate to the correct\n values?\n \"\"\"\n\n data_basis, _ = getdatafile(Gaussian, \"basicGaussian09\", [\"dvb_sp.out\"])\n data_sp, _ = getdatafile(Gaussian, \"basicGaussian09\", [\"dvb_sp.out\"])\n\n vol = volume.Volume((-3.0, -6.0, -2.0), (3.0, 6.0, 2.0), (0.25, 0.25, 0.25))\n\n wavefn = volume.wavefunction(data_sp, vol, data_sp.mocoeffs[0][data_sp.homos[0]])\n integral = wavefn.integrate()\n integral_square = wavefn.integrate_square()\n\n self.assertAlmostEqual(integral, 0, delta=1e-6) # not necessarily true for all wavefns\n self.assertAlmostEqual(integral_square, 1.00, delta=1e-2) # true for all wavefns\n print(integral, integral_square)\n\n def test_density(self):\n \"\"\"Does the volume occupied by the combined electron density integrate\n to the correct value?\n \"\"\"\n\n data_basis, _ = getdatafile(Gaussian, \"basicGaussian09\", [\"dvb_sp.out\"])\n data_sp, _ = getdatafile(Gaussian, \"basicGaussian09\", [\"dvb_sp.out\"])\n\n vol = volume.Volume((-3.0, -6.0, -2.0), (3.0, 6.0, 2.0), (0.25, 0.25, 0.25))\n\n frontierorbs = [data_sp.mocoeffs[0][(data_sp.homos[0] - 3) : (data_sp.homos[0] + 1)]]\n density = volume.electrondensity(data_sp, vol, frontierorbs)\n integral = density.integrate()\n\n self.assertTrue(abs(integral - 8.00) < 1e-2)\n print(\"Combined Density of 4 Frontier orbitals=\", integral)\n\n def test_cube(self):\n \"\"\"Does the cube file written out for electron density on a Cartesian grid match\n expected values?\n \"\"\"\n\n data, logfile = getdatafile(Psi4, \"basicPsi4-1.2.1\", [\"water_mp2.out\"])\n\n # Reference values were calculated using cubegen method in Psi4.\n # First six rows are information about the coordinates of the grid and comments.\n tmp = []\n\n with open(os.path.dirname(os.path.realpath(__file__)) + \"/water_mp2.cube\") as f:\n lines = f.readlines()\n for line in lines[6 : len(lines)]:\n tmp.extend(line.split())\n tmp = numpy.asanyarray(tmp, dtype=float)\n refcube = numpy.resize(tmp, (13, 13, 13))\n\n # Values for the grid below are constructed to match Psi4 cube file.\n vol = volume.Volume(\n (-1.587532, -1.587532, -1.356299),\n (1.58754, 1.58754, 1.81877),\n (0.26458860545, 0.26458860545, 0.26458860545),\n )\n density = volume.electrondensity(data, vol, [data.mocoeffs[0][: data.homos[0]]])\n\n assert_allclose(density.data, refcube, atol=0.5, rtol=0.1)\n\n def test_roundtrip_cube(self):\n \"\"\"Write a cube file and then read it back. 
Check if the volume object contains\n identical information on each grid point\"\"\"\n\n data, logfile = getdatafile(Psi4, \"basicPsi4-1.2.1\", [\"water_mp2.out\"])\n vol = volume.Volume((-1, -1, -1), (1, 1, 1), (0.4, 0.4, 0.4))\n density = volume.electrondensity(data, vol, [data.mocoeffs[0][: data.homos[0]]])\n\n density.writeascube(\"coarsewater.cube\")\n density_recovered = volume.read_from_cube(\"coarsewater.cube\")\n\n assert_allclose(density.data, density_recovered.data, rtol=0.05)\n"
] |
[
[
"numpy.array",
"numpy.zeros"
],
[
"numpy.asanyarray",
"numpy.resize",
"numpy.testing.assert_allclose"
]
] |
ssg-aero/pyvista
|
[
"5150b062cf835c6c6a44f6aefa4d53a1ad832ba3"
] |
[
"pyvista/jupyter/notebook.py"
] |
[
"\"\"\"\nSupport dynamic or static jupyter notebook plotting.\n\nIncludes:\n\n* ``ipyvtklink``\n* ``panel``\n* ``pythreejs``\n* ``ipygany``\n\n\"\"\"\nimport warnings\nimport os\n\nimport numpy as np\n\n# This module should not be imported at the __init__ level, only as a\n# lazy import when trying to plot using jupyter notebooks\ntry:\n import IPython\n from IPython import display\nexcept ImportError: # pragma: no cover\n raise ImportError('Install IPython to display an image in a notebook')\n\nfrom pyvista import _vtk\n\nPANEL_EXTENSION_SET = [False]\n\n\ndef handle_plotter(plotter, backend=None, screenshot=None,\n return_viewer=False, **kwargs):\n \"\"\"Show the ``pyvista`` plot in a jupyter environment.\n\n Parameters\n ----------\n return_viewer : bool, optional\n Return the jupyterlab viewer, scene, or display object\n when plotting with jupyter notebook.\n\n Returns\n -------\n IPython Widget\n IPython widget when ``return_viewer==True``. Otherwise, ``None``.\n\n \"\"\"\n if screenshot is False:\n screenshot = None\n\n try:\n if backend == 'pythreejs':\n return show_pythreejs(plotter, return_viewer, **kwargs)\n if backend == 'ipyvtklink':\n return show_ipyvtk(plotter, return_viewer)\n if backend == 'panel':\n return show_panel(plotter, return_viewer)\n if backend == 'ipygany':\n from pyvista.jupyter.pv_ipygany import show_ipygany\n return show_ipygany(plotter, return_viewer, **kwargs)\n except ImportError as e:\n warnings.warn(f'Failed to use notebook backend: \\n\\n{e}\\n\\n'\n 'Falling back to a static output.')\n\n return show_static_image(plotter, screenshot, return_viewer)\n\n\ndef show_static_image(plotter, screenshot, return_viewer):\n \"\"\"Display a static image to be displayed within a jupyter notebook.\"\"\"\n import PIL.Image\n\n if plotter.last_image is None:\n # Must render here, otherwise plotter will segfault.\n plotter.render()\n plotter.last_image = plotter.screenshot(screenshot, return_img=True)\n image = PIL.Image.fromarray(plotter.last_image)\n\n # close plotter as this will be a static image and there is no\n # point to keeping the plotter around.\n plotter.close()\n\n # Simply display the result: either ipyvtklink object or image display\n if return_viewer:\n return image\n display.display(image)\n\n\ndef show_ipyvtk(plotter, return_viewer):\n \"\"\"Display an interactive viewer widget using ``ipyvtklink``.\"\"\"\n if any('SPYDER' in name for name in os.environ):\n warnings.warn('``use_ipyvtk`` is incompatible with Spyder.\\n'\n 'Use notebook=False for interactive '\n 'plotting within spyder or disable it globally with:\\n'\n 'pyvista.set_jupyter_backend(None)')\n\n try:\n from ipyvtklink.viewer import ViewInteractiveWidget\n except ImportError: # pragma: no cover\n raise ImportError('Please install `ipyvtklink` to use this feature: '\n 'https://github.com/Kitware/ipyvtklink')\n\n # Have to leave the Plotter open for the widget to use\n disp = ViewInteractiveWidget(plotter.ren_win, on_close=plotter.close,\n transparent_background=plotter.image_transparent_background)\n\n for renderer in plotter.renderers:\n renderer.AddObserver(_vtk.vtkCommand.ModifiedEvent, lambda *args: disp.update_canvas())\n\n if return_viewer:\n return disp\n display.display_html(disp)\n\n\ndef show_panel(plotter, return_viewer):\n \"\"\"Take the active renderer(s) from a plotter and show them using ``panel``.\"\"\"\n try:\n import panel as pn\n except ImportError: # pragma: no cover\n raise ImportError('Install ``panel`` to use this feature')\n\n # check if panel extension has been set\n 
if not PANEL_EXTENSION_SET[0]:\n pn.extension('vtk')\n PANEL_EXTENSION_SET[0] = True\n\n # only set window size if explicitly set within the plotter\n sizing = {}\n if not plotter._window_size_unset:\n width, height = plotter.window_size\n sizing = {'width': width,\n 'height': height}\n\n axes_enabled = plotter.renderer.axes_enabled\n pan = pn.panel(plotter.ren_win,\n sizing_mode='stretch_width',\n orientation_widget=axes_enabled,\n enable_keybindings=False, **sizing)\n\n # if plotter.renderer.axes_enabled:\n # pan.axes = build_panel_axes()\n\n if hasattr(plotter.renderer, 'cube_axes_actor'):\n pan.axes = build_panel_bounds(plotter.renderer.cube_axes_actor)\n\n if return_viewer:\n return pan\n display.display_html(pan)\n\n\ndef build_panel_bounds(actor):\n \"\"\"Build a panel bounds actor using the plotter cube_axes_actor.\"\"\"\n bounds = {}\n\n n_ticks = 5\n if actor.GetXAxisVisibility():\n xmin, xmax = actor.GetXRange()\n bounds['xticker'] = {'ticks': np.linspace(xmin, xmax, n_ticks)}\n\n if actor.GetYAxisVisibility():\n ymin, ymax = actor.GetYRange()\n bounds['yticker'] = {'ticks': np.linspace(ymin, ymax, n_ticks)}\n\n if actor.GetZAxisVisibility():\n zmin, zmax = actor.GetZRange()\n bounds['zticker'] = {'ticks': np.linspace(zmin, zmax, n_ticks)}\n\n bounds['origin'] = [xmin, ymin, zmin]\n bounds['grid_opacity'] = 0.5\n bounds['show_grid'] = True\n bounds['digits'] = 3\n bounds['fontsize'] = actor.GetLabelTextProperty(0).GetFontSize()\n\n return bounds\n\n\ndef show_pythreejs(plotter, return_viewer, **kwargs):\n \"\"\"Show a pyvista plotting scene using pythreejs.\"\"\"\n from .pv_pythreejs import convert_plotter\n renderer = convert_plotter(plotter)\n if return_viewer:\n return renderer\n display.display_html(renderer)\n\n"
] |
[
[
"numpy.linspace"
]
] |
pgabriela/dqn-jitsi-autoscaler
|
[
"b39eb335e584095ef66a9941dbe0b2ea21a02d4a"
] |
[
"simulation/dqn-simulation/r11/dqn-delta-test.py"
] |
[
"import math\nimport random\nimport time\nimport calendar\nimport os\nimport pickle\nimport requests\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom collections import namedtuple\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\n\n\n\n#############\n# Q-Network #\n#############\nclass DQN(nn.Module):\n def __init__(self, num_in_features, num_out_features):\n super(DQN, self).__init__()\n self.linear1 = nn.Linear(num_in_features, 32)\n self.ln1 = nn.LayerNorm(32)\n self.linear2 = nn.Linear(32, 64)\n self.ln2 = nn.LayerNorm(64)\n self.linear3 = nn.Linear(64, 64)\n self.ln3 = nn.LayerNorm(64)\n self.linear4 = nn.Linear(64, 32)\n self.ln4 = nn.LayerNorm(32)\n self.out_layer = nn.Linear(32, num_out_features)\n\n\n # Called with either one element to determine next action, or a batch\n # during optimization. Returns tensor([[add_class,remove_class, maintain_class]]).\n def forward(self, x):\n x = F.leaky_relu(self.ln1(self.linear1(x)))\n x = F.leaky_relu(self.ln2(self.linear2(x)))\n x = F.leaky_relu(self.ln3(self.linear3(x)))\n x = F.leaky_relu(self.ln4(self.linear4(x)))\n return self.out_layer(x)\n\n\n\n###############################\n# Hyperparameters & Utilities #\n###############################\n# if gpu is to be used\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\ndf = pd.read_csv('../dbV3.csv')\ntimeseries = pd.read_csv('../timeseries.csv')\nMIN_JVB_NUM = 1\nMAX_JVB_NUM = 50\nW1 = 3000\nACTION_COOLDOWN = 15\nLOOKBACK = 5\n\n# Q-Network paramteres\nN_FEATURES = 5\nN_ACTIONS = 3\n\n# Initialize\ncurr_time = time.time()\nprint(\"Loading Pre-trained model ...\")\npolicy_net = DQN(N_FEATURES, N_ACTIONS).to(device)\npolicy_net.load_state_dict(torch.load(\"parameters\"))\nprint(f\"Pre-trained model loaded in {time.time() - curr_time} seconds\")\n\n# define reward function\ndef calc_reward(state, action):\n curr_loss = state[0][4].item()\n if action == 0:\n jvb_num_delta = 1\n elif action == 1:\n jvb_num_delta = -1\n else:\n jvb_num_delta = 0\n reward = math.exp(-W1 * curr_loss) - 1e-2 * jvb_num_delta\n return reward\n\n# Loss approximation\ndef loss_from_nearest_points(c, p, tj, ij):\n PARTITIONS = 3\n losses = []\n #conf_partitions = [0, 1, 2, 3]\n part_partitions = [1, 5, 9, 13]\n tj_partitions = [1, 3, 5, 7]\n ij_partitions = [0, 2, 4, 7]\n\n for i in range(PARTITIONS):\n #curr_c = conf_partitions[i]\n #d = df[df['conferences'] == curr_c]\n flag = True\n for curr_p in range(part_partitions[i], part_partitions[i+1]):\n if not flag:\n break\n d1 = df[df['participants'] == curr_p]\n for curr_tj in range(tj_partitions[i], tj_partitions[i+1]):\n if not flag:\n break\n d2 = d1[d1['jvb_num'] == curr_tj]\n for curr_ij in range(ij_partitions[i], ij_partitions[i+1]):\n d3 = d2[d2['zero_conf'] == curr_ij]\n if len(d3) > 0:\n loss = d3['loss'].mean()\n participants_scale = p / curr_p\n curr_active_jvb_count = curr_tj - curr_ij\n if (tj - ij) == 0 or curr_active_jvb_count == 0:\n continue\n active_jvbs_scale = (tj - ij) / curr_active_jvb_count\n loss_scale = participants_scale / active_jvbs_scale\n estimated_loss = loss * loss_scale\n losses.append(estimated_loss)\n flag = False\n break\n\n return np.mean(losses)\n\n\n##############\n# Simulation #\n##############\ncurr_time = time.time()\nprint(\"Starting simulation...\")\n\n# list of [jvb id, conference count] pair of currently running JVBs\n# selected with round-robin, removed with graceful shutdown\ncurr_jvbs = [[0, 0], 
]\nis_shutting_down = []\nprev_state = np.array([0, 1, 1, 0])\nlatest_losses = []\njvb_num_history = []\nidle_jvb_history = []\nrewards_history = []\nlosses_history = []\nlosses_dict = pickle.load(open('../losses_dict.pkl', 'rb'))\n\nconf_count_over_time = timeseries['conference_count']\npart_count_over_time = timeseries['participant_count']\n\nfor i in range(len(conf_count_over_time)):\n c1 = int(conf_count_over_time[i])\n p1 = int(part_count_over_time[i])\n\n # update conferences\n try:\n new_c = c1 - int(conf_count_over_time[i-1])\n except:\n new_c = c1\n if new_c > 0:\n # assign conferences\n for c in range(new_c):\n jvb_conferences = [x[1] if x[0] not in is_shutting_down else 1e10 for x in curr_jvbs]\n least_loaded_idx = np.argmin(jvb_conferences)\n curr_jvbs[least_loaded_idx][1] += 1\n elif new_c < 0:\n # remove conferences\n for c in range(abs(new_c)):\n for j in curr_jvbs:\n if j[1] > 0:\n j[1] -= 1\n break\n\n # update jvbs (check shutting down jvbs)\n for idx in range(len(is_shutting_down) - 1, -1, -1):\n for j in curr_jvbs:\n if j[0] == is_shutting_down[idx] and j[1] == 0:\n curr_jvbs.remove(j)\n is_shutting_down.pop(idx)\n break\n\n j1 = len(curr_jvbs)\n jvb_num_history.append(j1)\n z1 = len(list(filter(lambda x: x[1] == 0, curr_jvbs)))\n idle_jvb_history.append(z1)\n avg_loss = losses_dict.get(c1, {}).get(p1, {}).get(j1, {}).get(z1, -1)\n if avg_loss == -1:\n avg_loss = df[\n (df['conferences'] == c1)\n & (df['participants'] == p1)\n & (df['jvb_num'] == j1)\n & (df['zero_conf'] == z1)\n ]['loss'].mean()\n if pd.isna(avg_loss):\n if c1 == 0 or p1 == 0:\n avg_loss = 0\n else:\n avg_loss = df[\n (df['conferences'] >= c1-1) & (df['conferences'] <= c1+1)\n & (df['participants'] >= p1-1) & (df['participants'] <= p1+1)\n & (df['jvb_num'] >= j1-1) & (df['jvb_num'] <= j1+1)\n & (df['zero_conf'] >= z1-1) & (df['zero_conf'] <= z1+1)\n ]['loss'].mean()\n if pd.isna(avg_loss):\n avg_loss = loss_from_nearest_points(c1, p1, j1, z1)\n losses_dict.setdefault(c1, {}).setdefault(p1, {}).setdefault(j1, {})[z1] = avg_loss\n latest_losses.append(avg_loss)\n losses_history.append(avg_loss)\n\n assert j1 <= MAX_JVB_NUM and j1 >= MIN_JVB_NUM\n assert z1 <= MAX_JVB_NUM and j1 >= 0\n assert z1 <= j1\n\n if (i+1) % ACTION_COOLDOWN == 0:\n # Cooldown finished, Agent act\n l1 = np.mean(latest_losses[-LOOKBACK:])\n latest_losses = []\n curr_state = np.array([p1, j1, z1, l1])\n\n state = curr_state - prev_state\n \n p_delta = state[0]\n j_delta = state[1]\n z_delta = state[2]\n l_delta = state[3]\n state = [[p_delta, j_delta, z_delta, l_delta, l1]]\n state = torch.tensor(state, dtype=torch.float)\n\n # select action\n with torch.no_grad():\n # t.max(1) will return largest column value of each row.\n # second column on max result is index of where max element was\n # found, so we pick action with the larger expected reward.\n policy_net.eval()\n curr_action = policy_net(state).max(1)[1].view(1, 1).item()\n\n # apply action\n if curr_action == 0:\n # 'Add' class\n if len(curr_jvbs) < MAX_JVB_NUM:\n curr_jvbs.append([i+1, 0])\n elif curr_action == 1:\n # 'Remove' class\n if len(curr_jvbs) - len(is_shutting_down) > MIN_JVB_NUM:\n jvb_pair = None\n for j in curr_jvbs:\n if j[1] == 0:\n jvb_pair = j\n break\n if jvb_pair:\n curr_jvbs.remove(jvb_pair)\n else:\n if curr_jvbs[0][0] not in is_shutting_down:\n is_shutting_down.append(curr_jvbs[0][0])\n else:\n # 'Maintain' class\n pass\n \n # calculate reward\n reward = calc_reward(state, curr_action)\n rewards_history.append(reward)\n\n # Save current state & action\n 
prev_state = curr_state\n\n if (i+1) % 500 == 0:\n print(f\"Timesteps passed: {i+1}\", end=\"\\r\")\nprint(f\"\\nSimulation finished in {time.time() - curr_time} seconds\")\n\n\n#################\n# Visualization #\n#################\nplt.figure(figsize=(16, 9))\nplt.subplot(511)\nplt.title(\"Conferences\")\nplt.plot(np.arange(len(conf_count_over_time)), conf_count_over_time)\nplt.subplot(512)\nplt.title(\"Participants\")\nplt.plot(np.arange(len(part_count_over_time)), part_count_over_time)\nax = plt.subplot(513)\nplt.title(\"JVB Count\")\nplt.plot(np.arange(len(jvb_num_history)), jvb_num_history)\nplt.text(0.95, 0.95, 'Total JVB Count = ' + str(sum(jvb_num_history)),\n horizontalalignment='right',\n verticalalignment='top',\n transform=ax.transAxes)\nplt.text(0.95, 0.85, 'Average JVB Count = ' + str(np.mean(jvb_num_history)),\n horizontalalignment='right',\n verticalalignment='top',\n transform=ax.transAxes)\npickle.dump(jvb_num_history, open('jvb_num_history.pkl', 'wb'))\npickle.dump(idle_jvb_history, open('idle_jvb_history.pkl', 'wb'))\nax = plt.subplot(514)\nplt.title(\"Rewards\")\nplt.plot(np.arange(len(rewards_history)), rewards_history)\nplt.text(0.95, 0.95, 'Total Reward = ' + str(sum(rewards_history)),\n horizontalalignment='right',\n verticalalignment='top',\n transform=ax.transAxes)\nplt.text(0.95, 0.85, 'Average Reward = ' + str(np.mean(rewards_history)),\n horizontalalignment='right',\n verticalalignment='top',\n transform=ax.transAxes)\nax = plt.subplot(515)\nplt.title(\"Losses\")\nplt.plot(np.arange(len(losses_history)), losses_history)\nplt.text(0.95, 0.95, 'Total Loss = ' + str(sum(losses_history)),\n horizontalalignment='right',\n verticalalignment='top',\n transform=ax.transAxes)\nplt.text(0.95, 0.85, 'Average Loss = ' + str(np.mean(losses_history)),\n horizontalalignment='right',\n verticalalignment='top',\n transform=ax.transAxes)\npickle.dump(losses_history, open('losses_history.pkl', 'wb'))\nplt.show()\n"
] |
[
[
"pandas.read_csv",
"matplotlib.pyplot.title",
"torch.load",
"torch.nn.LayerNorm",
"torch.tensor",
"torch.nn.Linear",
"matplotlib.pyplot.subplot",
"numpy.mean",
"numpy.argmin",
"torch.cuda.is_available",
"torch.no_grad",
"pandas.isna",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
sx14/open-relation.pytorch
|
[
"3fe52a0c6129a80abbc84df53903d13b7dea05d6",
"d9ed2f0c3394e435374cf3ab5afeb47a6a56ed9a"
] |
[
"lib/model/rpn/rpn.py",
"open_relation/dataset/vrd/process/reformat_anno.py"
] |
[
"# coding: utf-8\n\nfrom __future__ import absolute_import\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom lib.model.utils.config import cfg\nfrom .proposal_layer import _ProposalLayer\nfrom .anchor_target_layer import _AnchorTargetLayer\nfrom lib.model.utils.net_utils import _smooth_l1_loss\n\nimport numpy as np\nimport math\nimport pdb\nimport time\n\nclass _RPN(nn.Module):\n \"\"\" region proposal network \"\"\"\n def __init__(self, din):\n super(_RPN, self).__init__()\n \n self.din = din # get depth of input feature map, e.g., 512\n self.anchor_scales = cfg.ANCHOR_SCALES\n self.anchor_ratios = cfg.ANCHOR_RATIOS\n self.feat_stride = cfg.FEAT_STRIDE[0]\n\n # define the convrelu layers processing input feature map\n self.RPN_Conv = nn.Conv2d(self.din, 512, 3, 1, 1, bias=True)\n\n # 前景/背景 得分向量\n # ATTENTION: 二维特征图上每一个点均对应 2 * 9个得分\n # define bg/fg classifcation score layer\n self.nc_score_out = len(self.anchor_scales) * len(self.anchor_ratios) * 2 # 2(bg/fg) * 9 (anchors)\n self.RPN_cls_score = nn.Conv2d(512, self.nc_score_out, 1, 1, 0)\n\n # box 偏移量\n # define anchor box offset prediction layer\n self.nc_bbox_out = len(self.anchor_scales) * len(self.anchor_ratios) * 4 # 4(coords) * 9 (anchors)\n self.RPN_bbox_pred = nn.Conv2d(512, self.nc_bbox_out, 1, 1, 0)\n\n # define proposal layer\n self.RPN_proposal = _ProposalLayer(self.feat_stride, self.anchor_scales, self.anchor_ratios)\n\n # define anchor target layer\n self.RPN_anchor_target = _AnchorTargetLayer(self.feat_stride, self.anchor_scales, self.anchor_ratios)\n\n self.rpn_loss_cls = 0\n self.rpn_loss_box = 0\n\n @staticmethod\n def reshape(x, d):\n input_shape = x.size()\n x = x.view(\n input_shape[0],\n int(d),\n int(float(input_shape[1] * input_shape[2]) / float(d)),\n input_shape[3]\n )\n return x\n\n def forward(self, base_feat, im_info, gt_boxes, num_boxes):\n\n batch_size = base_feat.size(0)\n\n # return feature map after convrelu layer\n rpn_conv1 = F.relu(self.RPN_Conv(base_feat), inplace=True)\n # get rpn classification score\n rpn_cls_score = self.RPN_cls_score(rpn_conv1)\n\n rpn_cls_score_reshape = self.reshape(rpn_cls_score, 2)\n rpn_cls_prob_reshape = F.softmax(rpn_cls_score_reshape, 1)\n rpn_cls_prob = self.reshape(rpn_cls_prob_reshape, self.nc_score_out)\n\n # get rpn offsets to the anchor boxes\n rpn_bbox_pred = self.RPN_bbox_pred(rpn_conv1)\n\n # proposal layer\n cfg_key = 'TRAIN' if self.training else 'TEST'\n\n rois = self.RPN_proposal((rpn_cls_prob.data, rpn_bbox_pred.data,\n im_info, cfg_key))\n\n self.rpn_loss_cls = 0\n self.rpn_loss_box = 0\n\n # generating training labels and build the rpn loss\n if self.training:\n assert gt_boxes is not None\n\n rpn_data = self.RPN_anchor_target((rpn_cls_score.data, gt_boxes, im_info, num_boxes))\n\n # compute classification loss\n rpn_cls_score = rpn_cls_score_reshape.permute(0, 2, 3, 1).contiguous().view(batch_size, -1, 2)\n rpn_label = rpn_data[0].view(batch_size, -1)\n\n rpn_keep = Variable(rpn_label.view(-1).ne(-1).nonzero().view(-1))\n rpn_cls_score = torch.index_select(rpn_cls_score.view(-1,2), 0, rpn_keep)\n rpn_label = torch.index_select(rpn_label.view(-1), 0, rpn_keep.data)\n rpn_label = Variable(rpn_label.long())\n self.rpn_loss_cls = F.cross_entropy(rpn_cls_score, rpn_label)\n fg_cnt = torch.sum(rpn_label.data.ne(0))\n\n rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = rpn_data[1:]\n\n # compute bbox regression loss\n rpn_bbox_inside_weights = Variable(rpn_bbox_inside_weights)\n 
rpn_bbox_outside_weights = Variable(rpn_bbox_outside_weights)\n rpn_bbox_targets = Variable(rpn_bbox_targets)\n\n self.rpn_loss_box = _smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights,\n rpn_bbox_outside_weights, sigma=3, dim=[1,2,3])\n\n return rois, self.rpn_loss_cls, self.rpn_loss_box\n",
"import os\nimport json\nimport numpy as np\nfrom open_relation.dataset.dataset_config import DatasetConfig\n\n\ndef load_list(list_path):\n if not os.path.exists(list_path):\n print('\"%s\" not exists.' % list_path)\n with open(list_path, 'r') as f:\n index2label = f.readlines()\n for i in range(len(index2label)):\n index2label[i] = index2label[i].strip()\n return index2label\n\n\ndef rlt_reformat(rlt_anno, obj_ind2label, pre_ind2label):\n\n def obj_reformat(obj_anno, obj_ind2label):\n obj = dict()\n obj['name'] = obj_ind2label[int(obj_anno['category'])]\n obj['ymin'] = int(obj_anno['bbox'][0])\n obj['ymax'] = int(obj_anno['bbox'][1])\n obj['xmin'] = int(obj_anno['bbox'][2])\n obj['xmax'] = int(obj_anno['bbox'][3])\n return obj\n\n sbj_anno = rlt_anno['subject']\n obj_anno = rlt_anno['object']\n sbj = obj_reformat(sbj_anno, obj_ind2label)\n obj = obj_reformat(obj_anno, obj_ind2label)\n pre = dict()\n pre['name'] = pre_ind2label[int(rlt_anno['predicate'])]\n # predicate box is union of obj box and sbj box\n pre['ymin'] = min(obj['ymin'], sbj['ymin'])\n pre['ymax'] = max(obj['ymax'], sbj['ymax'])\n pre['xmin'] = min(obj['xmin'], sbj['xmin'])\n pre['xmax'] = max(obj['xmax'], sbj['xmax'])\n new_rlt = dict()\n new_rlt['object'] = obj\n new_rlt['subject'] = sbj\n new_rlt['predicate'] = pre\n return new_rlt\n\n\n\ndef reformat_anno():\n dataset_config = DatasetConfig('vrd')\n org_anno_root = dataset_config.data_config['dirty_anno_root']\n dst_anno_root = dataset_config.data_config['clean_anno_root']\n\n # load vrd label list\n obj_label_list_path = os.path.join(dataset_config.dataset_root, 'object_labels.txt')\n obj_ind2label = load_list(obj_label_list_path)\n\n pre_label_list_path = os.path.join(dataset_config.dataset_root, 'predicate_labels.txt')\n pre_ind2label = load_list(pre_label_list_path)\n\n # all dirty annotation files\n anno_list = os.listdir(org_anno_root)\n for i, anno_name in enumerate(anno_list):\n print('processing [%d/%d]' % (len(anno_list), i+1))\n\n org_anno_path = os.path.join(org_anno_root, anno_name)\n org_anno = json.load(open(org_anno_path, 'r'))\n\n # for removing redundant objects from predicate\n obj_label_boxes = []\n\n # clean anno collection\n rlts = []\n for rlt in org_anno:\n # convert predicate anno\n new_rlt = rlt_reformat(rlt, obj_ind2label, pre_ind2label)\n rlts.append(new_rlt)\n\n obj_sbj = [rlt['object'], rlt['subject']]\n for obj in obj_sbj:\n # left top, right bottom\n # ymin, ymax, xmin, xmax, category\n label_box = obj['bbox']\n label_box.append(obj['category'])\n obj_label_boxes.append(label_box)\n\n objs = []\n # remove redundant objects\n if len(obj_label_boxes) > 0:\n obj_label_boxes = np.array(obj_label_boxes)\n unique_label_boxes = np.unique(obj_label_boxes, axis=0)\n for label_box in unique_label_boxes:\n obj = dict()\n obj['name'] = obj_ind2label[int(label_box[4])].strip()\n obj['ymin'] = int(label_box[0])\n obj['ymax'] = int(label_box[1])\n obj['xmin'] = int(label_box[2])\n obj['xmax'] = int(label_box[3])\n objs.append(obj)\n\n new_anno = dict()\n new_anno['objects'] = objs\n new_anno['relations'] = rlts\n save_path = os.path.join(dst_anno_root, anno_name)\n json.dump(new_anno, open(save_path, 'w'))\n"
] |
[
[
"torch.nn.functional.cross_entropy",
"torch.nn.functional.softmax",
"torch.nn.Conv2d",
"torch.autograd.Variable"
],
[
"numpy.array",
"numpy.unique"
]
] |
mpetroff/accessible-color-cycles
|
[
"473ad74eab8fb8926ea8ea15240f9ed5784e73bd"
] |
[
"aesthetic-models/numpy-version/convert_weights_to_numpy.py"
] |
[
"#!/usr/bin/env python3\n\n\"\"\"\nCopyright (c) 2021 Matthew Petroff\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\nimport gzip\nimport os\nimport shutil\nimport numpy as np\nimport h5py\n\nENSEMBLE_COUNT = 100\n\n# Set model\nweights = {\"ensemble_count\": ENSEMBLE_COUNT}\nfor i in range(ENSEMBLE_COUNT):\n with gzip.open(f\"../weights/set_model_weights_{i:03d}.h5.gz\", \"rb\") as infile:\n weight_file = h5py.File(infile, \"r\")\n for key in [\"l1j\", \"l2j\", \"l1a\", \"l2a\", \"l1b\", \"l2b\"]:\n kernel = np.array(weight_file[key][key][\"kernel:0\"])\n bias = np.array(weight_file[key][key][\"bias:0\"])\n weights[key[1:] + f\"_{i:03d}_kernel\"] = kernel\n weights[key[1:] + f\"_{i:03d}_bias\"] = bias\n for key in [\"l3j\", \"l4j\", \"l5j\", \"l3a\", \"l4a\", \"l5a\", \"l3b\", \"l4b\", \"l5b\"]:\n depthwise_kernel = np.array(weight_file[key][key][\"depthwise_kernel:0\"])\n pointwise_kernel = np.array(weight_file[key][key][\"pointwise_kernel:0\"])\n bias = np.array(weight_file[key][key][\"bias:0\"])\n\n # Adjust format\n depthwise_kernel = np.flipud(depthwise_kernel)\n depthwise_kernel = np.transpose(depthwise_kernel, (1, 2, 0))\n pointwise_kernel = np.flipud(pointwise_kernel)\n pointwise_kernel = np.transpose(pointwise_kernel, (1, 2, 0))\n bias = np.expand_dims(bias, 0).T\n\n weights[key[1:] + f\"_{i:03d}_depthwise_kernel\"] = depthwise_kernel\n weights[key[1:] + f\"_{i:03d}_pointwise_kernel\"] = pointwise_kernel\n weights[key[1:] + f\"_{i:03d}_bias\"] = bias\n\nnpz_filename = \"set_model_weights.npz\"\nnp.savez(npz_filename, **weights)\n# Recompressing the NPZ file reduces its file size considerably since it contains\n# many small arrays, which are otherwise only compressed separately.\nwith open(npz_filename, \"rb\") as infile:\n with gzip.open(npz_filename + \".gz\", \"wb\") as outfile:\n shutil.copyfileobj(infile, outfile)\nos.remove(npz_filename)\n\n\n# Cycle model\nweights = {\"ensemble_count\": ENSEMBLE_COUNT}\nfor i in range(ENSEMBLE_COUNT):\n with gzip.open(f\"../weights/cycle_model_weights_{i:03d}.h5.gz\", \"rb\") as infile:\n weight_file = h5py.File(infile, \"r\")\n for key in [\"l1\", \"l2\"]:\n kernel = np.array(weight_file[key][key][\"kernel:0\"])\n bias = np.array(weight_file[key][key][\"bias:0\"])\n weights[key[1:] + f\"_{i:03d}_kernel\"] = kernel\n weights[key[1:] + f\"_{i:03d}_bias\"] = bias\n for key in [\"l3\", \"l4\", \"l5\"]:\n depthwise_kernel = np.array(weight_file[key][key][\"depthwise_kernel:0\"])\n pointwise_kernel = np.array(weight_file[key][key][\"pointwise_kernel:0\"])\n 
bias = np.array(weight_file[key][key][\"bias:0\"])\n\n # Adjust format\n depthwise_kernel = np.flipud(depthwise_kernel)\n depthwise_kernel = np.transpose(depthwise_kernel, (1, 2, 0))\n pointwise_kernel = np.flipud(pointwise_kernel)\n pointwise_kernel = np.transpose(pointwise_kernel, (1, 2, 0))\n bias = np.expand_dims(bias, 0).T\n\n weights[key[1:] + f\"_{i:03d}_depthwise_kernel\"] = depthwise_kernel\n weights[key[1:] + f\"_{i:03d}_pointwise_kernel\"] = pointwise_kernel\n weights[key[1:] + f\"_{i:03d}_bias\"] = bias\n\nnpz_filename = \"cycle_model_weights.npz\"\nnp.savez(npz_filename, **weights)\n# Recompressing the NPZ file reduces its file size considerably since it contains\n# many small arrays, which are otherwise only compressed separately.\nwith open(npz_filename, \"rb\") as infile:\n with gzip.open(npz_filename + \".gz\", \"wb\") as outfile:\n shutil.copyfileobj(infile, outfile)\nos.remove(npz_filename)\n"
] |
[
[
"numpy.savez",
"numpy.expand_dims",
"numpy.flipud",
"numpy.transpose",
"numpy.array"
]
] |
XeBoris/strax
|
[
"4157f6b3fc5732d44ecd5e0f83d92d90187997d9"
] |
[
"strax/io.py"
] |
[
"\"\"\"Read/write numpy arrays to/from compressed files or file-like objects\n\"\"\"\nfrom functools import partial\nimport bz2\nimport os\n\nimport numpy as np\nimport blosc\nimport zstd\n\nimport strax\nexport, __all__ = strax.exporter()\n\nblosc.set_releasegil(True)\n\n\nCOMPRESSORS = dict(\n bz2=dict(\n compress=bz2.compress,\n decompress=bz2.decompress),\n zstd=dict(\n compress=zstd.compress,\n decompress=zstd.decompress),\n blosc=dict(\n compress=partial(blosc.compress, shuffle=False),\n decompress=blosc.decompress),\n)\n\n\n@export\ndef load_file(f, compressor, dtype):\n \"\"\"Read and return data from file\n\n :param f: file name or handle to read from\n :param compressor: compressor to use for decompressing. If not passed,\n will try to load it from json metadata file.\n :param dtype: numpy dtype of data to load\n \"\"\"\n if isinstance(f, str):\n with open(f, mode='rb') as f:\n return _load_file(f, compressor, dtype)\n else:\n return _load_file(f, compressor, dtype)\n\n\ndef _load_file(f, compressor, dtype):\n try:\n data = f.read()\n if not len(data):\n return np.zeros(0, dtype=dtype)\n\n data = COMPRESSORS[compressor]['decompress'](data)\n return np.frombuffer(data, dtype=dtype)\n\n except Exception:\n raise strax.DataCorrupted(\n f\"Fatal Error while reading file {f}: \"\n + strax.utils.formatted_exception())\n\n\n@export\ndef save_file(f, data, compressor='zstd'):\n \"\"\"Save data to file and return number of bytes written\n\n :param f: file name or handle to save to\n :param data: data (numpy array) to save\n :param compressor: compressor to use\n \"\"\"\n if isinstance(f, str):\n final_fn = f\n temp_fn = f + '_temp'\n with open(temp_fn, mode='wb') as f:\n result = _save_file(f, data, compressor)\n os.rename(temp_fn, final_fn)\n return result\n else:\n return _save_file(f, data, compressor)\n\n\ndef _save_file(f, data, compressor='zstd'):\n assert isinstance(data, np.ndarray), \"Please pass a numpy array\"\n d_comp = COMPRESSORS[compressor]['compress'](data)\n f.write(d_comp)\n return len(d_comp)\n"
] |
[
[
"numpy.frombuffer",
"numpy.zeros"
]
] |
TanweerulHaque/hermes
|
[
"d24770d7f8b5a7d84f98795079360b5264862297"
] |
[
"hermes/custom/FC.py"
] |
[
"import tensorflow as tf\nimport pandas as pd\nfrom collections import defaultdict\n\n\nclass FileCallback(tf.keras.callbacks.Callback):\n def __init__(self, save_format=\"\", filepath=\"\"):\n super(FileCallback, self).__init__()\n self.save_format = save_format\n self.dfparams = defaultdict(list)\n self.filepath = filepath\n\n def on_epoch_end(self, epoch, logs=None):\n keys = list(logs.keys())\n for i in keys:\n self.dfparams[i].append(logs[i])\n\n def on_train_end(self, logs=None):\n df = pd.DataFrame(self.dfparams)\n s = f\"\"\"df.to_{self.save_format}('hardcoded.csv')\"\"\"\n eval(s)\n"
] |
[
[
"pandas.DataFrame"
]
] |
nnthanh101/realtime-fraud-detection-with-gnn-on-dgl
|
[
"520747ac5f4d760cd614543f45417e885509d15e"
] |
[
"src/sagemaker/FD_SL_DGL/code/fd_sl_deployment_entry_point.py"
] |
[
"#-*- coding:utf-8 -*-\n\n# Author:james Zhang\n# Datetime: Jan-10th 2021 19:10\n# Project: GCR Fraud_detection_on_DGL Solution\n\"\"\"\n This is the entry point of SageMaker inference endpoint, it fulfills:\n 1. Receive request and parse out the heterogeneous graph and target node\n 2. But a DGL heterogeneous graph\n 3. Use the RGCN model to perform inference\n 4. Send the score back to requesters\n\"\"\"\n\n\nimport os\nimport json\nimport dgl\nfrom datetime import datetime as dt\nimport pickle\n\nimport torch as th\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport dgl.function as fn\nimport numpy as np\n\nINPUT_SIZE = 390\nHIDDEN_SIZE = int(os.getenv('HIDDEN_SIZE', '16'))\nN_LAYERS = 2\nOUT_SIZE = 2\nEMBEDDING_SIZE = 390\nBASE_PATH = '/opt/ml/model/code/'\nTARGET_FEAT_MEAN = None\nTARGET_FEAT_STD = None\n\n\ndef load_train_graph_info(file_path):\n\n with open(file_path, 'rb') as f:\n info_dict = pickle.load(f)\n\n etypes = [can_etype for src_type, can_etype, dst_type in info_dict['etypes']]\n ntype_dict = info_dict['ntype_cnt']\n\n global TARGET_FEAT_MEAN\n TARGET_FEAT_MEAN = info_dict['feat_mean']\n global TARGET_FEAT_STD\n TARGET_FEAT_STD = info_dict['feat_std']\n\n return etypes, ntype_dict\n\n\n# Initialize model construction arguments\ndef initialize_arguments(metadata_file):\n\n etypes, ntype_dict = load_train_graph_info(metadata_file)\n\n input_size = INPUT_SIZE\n hidden_size = HIDDEN_SIZE\n n_layers = N_LAYERS\n out_size = OUT_SIZE\n embedding_size = EMBEDDING_SIZE\n\n return ntype_dict, etypes, input_size, hidden_size, out_size, n_layers, embedding_size\n\n\n# RGCN models\nclass HeteroRGCNLayer(nn.Module):\n def __init__(self, in_size, out_size, etypes):\n super(HeteroRGCNLayer, self).__init__()\n # W_r for each relation\n self.weight = nn.ModuleDict({\n name: nn.Linear(in_size, out_size) for name in etypes\n })\n\n def forward(self, G, feat_dict):\n # The input is a dictionary of node features for each type\n funcs = {}\n for srctype, etype, dsttype in G.canonical_etypes:\n # Compute W_r * h\n if srctype in feat_dict:\n Wh = self.weight[etype](feat_dict[srctype])\n # Save it in graph for message passing\n G.nodes[srctype].data['Wh_%s' % etype] = Wh\n # Specify per-relation message passing functions: (message_func, reduce_func).\n funcs[etype] = (fn.copy_u('Wh_%s' % etype, 'm'), fn.mean('m', 'h'))\n # Trigger message passing of multiple types.\n G.multi_update_all(funcs, 'sum')\n # return the updated node feature dictionary\n return {ntype: G.nodes[ntype].data['h'] for ntype in G.ntypes if 'h' in G.nodes[ntype].data}\n\n\nclass HeteroRGCN(nn.Module):\n def __init__(self, ntype_dict, etypes, in_size, hidden_size, out_size, n_layers, embedding_size):\n super(HeteroRGCN, self).__init__()\n # Use trainable node embeddings as featureless inputs.\n embed_dict = {ntype: nn.Parameter(th.Tensor(num_nodes, in_size))\n for ntype, num_nodes in ntype_dict.items() if ntype != 'target'}\n for key, embed in embed_dict.items():\n nn.init.xavier_uniform_(embed)\n self.embed = nn.ParameterDict(embed_dict)\n # create layers\n self.layers = nn.ModuleList()\n self.layers.append(HeteroRGCNLayer(embedding_size, hidden_size, etypes))\n # hidden layers\n for i in range(n_layers - 1):\n self.layers.append(HeteroRGCNLayer(hidden_size, hidden_size, etypes))\n\n # output layer\n self.layers.append(nn.Linear(hidden_size, out_size))\n\n def forward(self, g, features):\n\n # To use in real-time case, need to set embedding with input embeddings that are extracted from GrahpDB.\n # h_dict = 
self.embed\n h_dict = features\n\n # pass through all layers\n for i, layer in enumerate(self.layers[:-1]):\n if i != 0:\n h_dict = {k: F.leaky_relu(h) for k, h in h_dict.items()}\n h_dict = layer(g, h_dict)\n\n # get user binary logits\n bin_logist = self.layers[-1](h_dict['target'])\n\n # compute softmax value of binary logits\n softmax_logits = bin_logist.softmax(dim=-1)\n\n # return the probability to be One\n return softmax_logits\n\n\n# SageMaker inference functions\ndef model_fn(model_dir):\n\n print('------------------ Loading model -------------------')\n # --- load saved model ---\n s_t = dt.now()\n\n ntype_dict, etypes, in_size, hidden_size, out_size, n_layers, embedding_size = \\\n initialize_arguments(os.path.join(BASE_PATH, 'metadata.pkl'))\n\n rgcn_model = HeteroRGCN(ntype_dict, etypes, in_size, hidden_size, out_size, n_layers, embedding_size)\n\n stat_dict = th.load('model.pth')\n\n rgcn_model.load_state_dict(stat_dict)\n\n e_t = dt.now()\n print('--Load Model: {}'.format((e_t - s_t).microseconds))\n\n return rgcn_model\n\n\ndef recreate_grpha_data(graph_dict, n_feats, target_id):\n \"\"\"\n From the graph dictionary, build the input graph and node features for model.\n\n :param\n graph_dict: a Python dictionary, where key is a tuple containing source type and destination type, like ('target',\n 'card1'), and the value is a tuple of two Python lists, containing the original ids of source and\n destination nodes.\n n_feats: a Python dictionary, where key is node type string, and value is another dictionary with node ids as key and\n value is a list of 390 dimension floats.\n target_id: an id of a node in the graph to be inferred.\n\n :return:\n graph: a DGL heterogeneous graph, including reversed edges.\n\n new_n_feats: a Tensor in the order of new id nodes.\n\n new_pred_target_id: an integer for the target node in the new graph\n\n \"\"\"\n print('------------------ Convert to DLG Graph -------------------')\n # --- Step 1: collect all types of nodes together\n rel_list = []\n node_id_list = {}\n for can_etype, src_dst_tuple in graph_dict.items():\n\n src_type, dst_type = can_etype.split('<>')\n src_origin, dst_origin = np.array(src_dst_tuple[0]), np.array(src_dst_tuple[1])\n\n rel_list.append(((src_type, dst_type), (src_origin, dst_origin)))\n # rel_list.append(((dst_type, dst_type + '<>' + src_type, src_type), (dst_origin, src_origin)))\n\n if node_id_list.get(src_type) is not None:\n node_id_list[src_type] = np.append(node_id_list.get(src_type), src_origin)\n else:\n node_id_list[src_type] = src_origin\n\n if node_id_list.get(dst_type) is not None:\n node_id_list[dst_type] = np.append(node_id_list.get(dst_type), dst_origin)\n else:\n node_id_list[dst_type] = dst_origin\n\n # --- Step 2: for each type of node, unique their IDs and store\n node_new_list = {}\n for ntype, nid_list in node_id_list.items():\n # get new id\n nid_old, nid_new = np.unique(nid_list, return_inverse=True)\n node_new_list[ntype] = (nid_old, nid_new)\n\n # --- Step 3: map new node IDs to old node IDs\n rel_dict = {}\n node_type_idx = {}\n for rel in rel_list:\n src_type, dst_type = rel[0]\n src, dst = rel[1]\n\n _, nid_new = node_new_list[src_type]\n if node_type_idx.get(src_type) is not None:\n src_new = nid_new[node_type_idx.get(src_type):node_type_idx.get(src_type) + src.size]\n node_type_idx[src_type] = node_type_idx.get(src_type) + src.size\n else:\n src_new = nid_new[0: 0 + src.size]\n node_type_idx[src_type] = 0 + src.size\n\n _, nid_new = node_new_list[dst_type]\n if 
node_type_idx.get(dst_type) is not None:\n dst_new = nid_new[node_type_idx.get(dst_type):node_type_idx.get(dst_type) + dst.size]\n node_type_idx[dst_type] = node_type_idx.get(dst_type) + dst.size\n else:\n dst_new = nid_new[0: 0 + dst.size]\n node_type_idx[dst_type] = 0 + dst.size\n\n rel_dict[(src_type, src_type + '<>' + dst_type, dst_type)] = (th.from_numpy(src_new), th.from_numpy(dst_new))\n rel_dict[(dst_type, dst_type + '<>' + src_type, src_type)] = (th.from_numpy(dst_new), th.from_numpy(src_new))\n\n # Add target self-loop\n target_nid_old = node_new_list['target'][0]\n target_nid_new = np.arange(target_nid_old.shape[0])\n rel_dict[('target', 'self_relation', 'target')] = (th.from_numpy(target_nid_new),\n th.from_numpy(target_nid_new))\n\n # Extract the new target node id\n new_pred_target_id = th.tensor(np.searchsorted(target_nid_old, target_id)).long()\n\n print(\"New target node id: {}\".format(new_pred_target_id))\n\n # --- Step 4: process n_feats dictionary to get feature tensor\n new_n_feats = {}\n for in_ntype, in_feat_dict in n_feats.items():\n old_ids, _ = node_new_list[in_ntype]\n\n feats = []\n for old_id in old_ids:\n feats.append(in_feat_dict[str(old_id)])\n\n if in_ntype == 'target':\n global TARGET_FEAT_MEAN, TARGET_FEAT_STD\n np_feats = np.array(feats).astype(np.float32)\n th_feat = th.from_numpy(np_feats)\n norm_feat = (th_feat - TARGET_FEAT_MEAN) / TARGET_FEAT_STD\n\n new_n_feats[in_ntype] = norm_feat\n else:\n new_n_feats[in_ntype] = th.Tensor(feats)\n\n # --- Step 5: build DGL graph\n graph = dgl.heterograph(rel_dict)\n print(graph)\n\n return graph, new_n_feats, new_pred_target_id\n\n\ndef input_fn(request_body, request_content_type='application/json'):\n \"\"\"\n Preprocessing request_body that is in JSON format.\n :param request_body:\n :param request_content_type:\n :return:\n \"\"\"\n print('--START a session... 
')\n\n # --------------------- receive request ------------------------------------------------ #\n input_data = json.loads(request_body)\n\n s_t = dt.now()\n\n subgraph_dict = input_data['graph']\n n_feats = input_data['n_feats']\n target_id = input_data['target_id']\n\n # print(n_feats)\n\n graph, new_n_feats, new_pred_target_id = recreate_grpha_data(subgraph_dict, n_feats, target_id)\n\n e_t = dt.now()\n print('--DP: {}'.format((e_t - s_t).microseconds))\n\n return (graph, new_n_feats, new_pred_target_id)\n\n\ndef predict_fn(input_data, model):\n\n # --------------------- Inference ------------------------------------------------ #\n s_t = dt.now()\n\n graph, new_n_feats, new_pred_target_id = input_data\n\n with th.no_grad():\n logits = model(graph, new_n_feats)\n res = logits[new_pred_target_id].cpu().detach().numpy()\n\n e_t = dt.now()\n print('--MI: {} --END'.format((e_t - s_t).microseconds))\n\n return res[1]\n\n\nif __name__ == '__main__':\n # method for local testing\n\n # --- load saved model ---\n # s_t = dt.now()\n #\n # model = model_fn('../')\n #\n # e_t = dt.now()\n # print('--Load Model: {}'.format((e_t - s_t).microseconds))\n\n # --- load subgraph data ---\n s_t = dt.now()\n\n subgraph_file = 'subgraph_100_101.pkl'\n with open('../clients_python/subgraph_100_101.pkl', 'rb') as f:\n subgraph_dict = pickle.load(f)\n\n e_t = dt.now()\n print('--Load Graph Data: {}'.format((e_t - s_t).microseconds))\n\n # --- build a new subgraph ---\n s_t = dt.now()\n\n g, n_feats, new_pred_target_id = recreate_grpha_data(subgraph_dict, None, 100)\n\n\n e_t = dt.now()\n print('--Convert Graph: {}'.format((e_t - s_t).microseconds))\n\n # --- use saved model to run prediction ---\n # print('------------------ Predict Logits -------------------')\n # s_t = dt.now()\n #\n # logits = model(g, n_feats)\n #\n # e_t = dt.now()\n # print('--Convert Graph: {}'.format((e_t - s_t).microseconds))\n #\n # print(logits[new_pred_target_id])\n"
] |
[
[
"torch.Tensor",
"numpy.unique",
"torch.load",
"numpy.arange",
"torch.nn.ParameterDict",
"torch.nn.ModuleList",
"torch.from_numpy",
"torch.nn.Linear",
"torch.no_grad",
"numpy.searchsorted",
"torch.nn.init.xavier_uniform_",
"torch.nn.functional.leaky_relu",
"numpy.array"
]
] |
swederik/structurefunction
|
[
"5c5583bb26d6092fa3b7a630192d8e79199f8df0"
] |
[
"coma/workflows/precoth.py"
] |
[
"import os\nimport os.path as op\nimport nipype.interfaces.io as nio # Data i/o\nimport nipype.interfaces.utility as util # utility\nimport nipype.pipeline.engine as pe # pypeline engine\nimport nipype.interfaces.fsl as fsl\nimport nipype.interfaces.freesurfer as fs\nimport nipype.interfaces.mrtrix as mrtrix\nimport nipype.interfaces.cmtk as cmtk\nfrom nipype.workflows.misc.utils import select_aparc\n\nfsl.FSLCommand.set_default_output_type('NIFTI')\n\nfrom coma.interfaces import RegionalValues, nonlinfit_fn, CMR_glucose\n\ndef summarize_precoth(dwi_network_file, fdg_stats_file, subject_id):\n import os.path as op\n import scipy.io as sio\n import networkx as nx\n\n fdg = sio.loadmat(fdg_stats_file)\n dwi_ntwk = nx.read_gpickle(dwi_network_file)\n\n # Thal L-1 R-2\n # Cortex 3 and 4\n # Prec L-5 R-6\n titles = [\"subjid\"]\n fdg_avg = [\"LTh_CMR_avg\",\"RTh_CMR_avg\",\"LCo_CMR_avg\",\"RCo_CMR_avg\",\"LPre_CMR_avg\",\"RPre_CMR_avg\"]\n f_avg = [fdg[\"func_mean\"][0][0],fdg[\"func_mean\"][1][0],fdg[\"func_mean\"][2][0],\n fdg[\"func_mean\"][3][0],fdg[\"func_mean\"][4][0],fdg[\"func_mean\"][5][0]]\n\n fdg_max = [\"LTh_CMR_max\",\"RTh_CMR_max\",\"LCo_CMR_max\",\"RCo_CMR_max\",\"LPre_CMR_max\",\"RPre_CMR_max\"]\n f_max = [fdg[\"func_max\"][0][0],fdg[\"func_max\"][1][0],fdg[\"func_max\"][2][0],\n fdg[\"func_max\"][3][0],fdg[\"func_max\"][4][0],fdg[\"func_max\"][5][0]]\n\n fdg_min = [\"LTh_CMR_min\",\"RTh_CMR_min\",\"LCo_CMR_min\",\"RCo_CMR_min\",\"LPre_CMR_min\",\"RPre_CMR_min\"]\n f_min = [fdg[\"func_min\"][0][0],fdg[\"func_min\"][1][0],fdg[\"func_min\"][2][0],\n fdg[\"func_min\"][3][0],fdg[\"func_min\"][4][0],fdg[\"func_min\"][5][0]]\n\n fdg_std = [\"LTh_CMR_std\",\"RTh_CMR_std\",\"LCo_CMR_std\",\"RCo_CMR_std\",\"LPre_CMR_std\",\"RPre_CMR_std\"]\n f_std = [fdg[\"func_stdev\"][0][0],fdg[\"func_stdev\"][1][0],fdg[\"func_stdev\"][2][0],\n fdg[\"func_stdev\"][3][0],fdg[\"func_stdev\"][4][0],fdg[\"func_stdev\"][5][0]]\n\n fdg_titles = fdg_avg + fdg_max + fdg_min + fdg_std\n\n dwi = nx.to_numpy_matrix(dwi_ntwk, weight=\"weight\")\n\n l_thal = [\"LTh_RTh\",\"LTh_LCo\",\"LTh_RCo\",\"LTh_LPre\",\"LTh_RPre\"]\n l_th = [dwi[0,1], dwi[0,2], dwi[0,3], dwi[0,4], dwi[0,5]]\n r_thal = [\"RTh_LCo\",\"RTh_RCo\",\"RTh_LPre\",\"RTh_RPre\"]\n r_th = [dwi[1,2], dwi[1,3], dwi[1,4], dwi[1,5]]\n l_co = [\"LCo_RCo\",\"LCo_LPre\",\"LCo_RPre\"]\n l_cor = [dwi[2,3], dwi[2,4], dwi[2,5]]\n r_co = [\"RCo_LPre\",\"RCo_RPre\"]\n r_cor = [dwi[3,4], dwi[3,5]]\n l_pre = [\"LPre_RPre\"]\n l_prec = [dwi[4,5]]\n conn_titles = l_thal + r_thal + l_co + r_co + l_pre\n\n all_titles = titles + fdg_titles + conn_titles\n volume_titles = [\"VoxLTh\",\"VoxRTh\",\"VoxLCo\", \"VoxRCo\", \"VoxLPre\", \"VoxRPre\"]\n all_titles = all_titles + volume_titles\n volumes = fdg[\"number_of_voxels\"]\n\n all_data = f_avg + f_max + f_min + f_std + l_th + r_th + l_cor + r_cor + l_prec + volumes[:,0].tolist()\n\n out_file = op.abspath(subject_id + \"_precoth.csv\")\n f = open(out_file, \"w\")\n title_str = \",\".join(all_titles) + \"\\n\"\n f.write(title_str)\n all_data = map(float, all_data)\n data_str = subject_id + \",\" + \",\".join(format(x, \"10.5f\") for x in all_data) + \"\\n\"\n f.write(data_str)\n f.close()\n return out_file\n\ndef extract_PreCoTh(in_file, out_filename):\n from nipype.utils.filemanip import split_filename\n import nibabel as nb\n import numpy as np\n import os.path as op\n in_image = nb.load(in_file)\n in_header = in_image.get_header()\n in_data = in_image.get_data()\n\n # Left, Right -> Now\n # Thalamus are 71 and 35\n # Thal 
L-1 R-2\n # Precuneus are 61 and 20\n # Prec L-5 R-6\n # Cortex 3 and 42\n # Cortex 3 and 4\n\n MAPPING = [\n [4, 2012], [4, 2019], [4, 2032], [4, 2014], [4, 2020], [4, 2018],\n [4, 2027], [4, 2028], [4, 2003], [4, 2024], [4, 2017], [4, 2026],\n [4, 2002], [4, 2023], [4, 2010], [4, 2022], [4, 2031], [4, 2029],\n [4, 2008], [4, 2005], [4, 2021], [4, 2011],\n [4, 2013], [4, 2007], [4, 2016], [4, 2006], [4, 2033], [4, 2009],\n [4, 2015], [4, 2001], [4, 2030], [4, 2034], [4, 2035],\n\n [3, 1012], [3, 1019], [3, 1032], [3, 1014], [3, 1020], [3, 1018],\n [3, 1027], [3, 1028], [3, 1003], [3, 1024], [3, 1017], [3, 1026],\n [3, 1002], [3, 1023], [3, 1010], [3, 1022], [3, 1031],\n [3, 1029], [3, 1008], [3, 1005], [3, 1021], [3, 1011], [3,1013],\n [3, 1007], [3, 1016], [3, 1006], [3, 1033],\n [3, 1009], [3, 1015], [3, 1001], [3, 1030], [3, 1034], [3, 1035],\n\n [5, 1025], [6, 2025], [1, 10], [2, 49]]\n\n niiGM = np.zeros(in_data.shape, dtype=np.uint)\n for ma in MAPPING:\n niiGM[in_data == ma[1]] = ma[0]\n\n _, name, _ = split_filename(in_file)\n out_file = op.abspath(out_filename)\n try:\n out_image = nb.Nifti1Image(\n data=niiGM, header=in_header, affine=in_image.get_affine())\n except TypeError:\n out_image = nb.Nifti1Image(\n dataobj=niiGM, header=in_header, affine=in_image.get_affine())\n nb.save(out_image, out_file)\n return out_file\n\n\ndef create_precoth_pipeline(name=\"precoth\", tractography_type='probabilistic', reg_pet_T1=True):\n inputnode = pe.Node(\n interface=util.IdentityInterface(fields=[\"subjects_dir\",\n \"subject_id\",\n \"dwi\",\n \"bvecs\",\n \"bvals\",\n \"fdgpet\",\n \"dose\",\n \"weight\",\n \"delay\",\n \"glycemie\",\n \"scan_time\"]),\n name=\"inputnode\")\n\n nonlinfit_interface = util.Function(input_names=[\"dwi\", \"bvecs\", \"bvals\", \"base_name\"],\n output_names=[\"tensor\", \"FA\", \"MD\", \"evecs\", \"evals\", \"rgb_fa\", \"norm\", \"mode\", \"binary_mask\", \"b0_masked\"], function=nonlinfit_fn)\n\n nonlinfit_node = pe.Node(interface=nonlinfit_interface, name=\"nonlinfit_node\")\n\n coregister = pe.Node(interface=fsl.FLIRT(dof=12), name = 'coregister')\n coregister.inputs.cost = ('normmi')\n\n invertxfm = pe.Node(interface=fsl.ConvertXFM(), name = 'invertxfm')\n invertxfm.inputs.invert_xfm = True\n\n WM_to_FA = pe.Node(interface=fsl.ApplyXfm(), name = 'WM_to_FA')\n WM_to_FA.inputs.interp = 'nearestneighbour'\n TermMask_to_FA = WM_to_FA.clone(\"TermMask_to_FA\")\n\n mni_for_reg = op.join(os.environ[\"FSL_DIR\"],\"data\",\"standard\",\"MNI152_T1_1mm.nii.gz\")\n reorientBrain = pe.Node(interface=fsl.FLIRT(dof=6), name = 'reorientBrain')\n reorientBrain.inputs.reference = mni_for_reg\n reorientROIs = pe.Node(interface=fsl.ApplyXfm(), name = 'reorientROIs')\n reorientROIs.inputs.interp = \"nearestneighbour\"\n reorientROIs.inputs.reference = mni_for_reg\n reorientRibbon = reorientROIs.clone(\"reorientRibbon\")\n reorientRibbon.inputs.interp = \"nearestneighbour\"\n reorientT1 = reorientROIs.clone(\"reorientT1\")\n reorientT1.inputs.interp = \"trilinear\"\n\n fsl2mrtrix = pe.Node(interface=mrtrix.FSL2MRTrix(), name='fsl2mrtrix')\n fsl2mrtrix.inputs.invert_y = True\n\n erode_mask_firstpass = pe.Node(interface=mrtrix.Erode(),\n name='erode_mask_firstpass')\n erode_mask_firstpass.inputs.out_filename = \"b0_mask_median3D_erode.nii.gz\"\n erode_mask_secondpass = pe.Node(interface=mrtrix.Erode(),\n name='erode_mask_secondpass')\n erode_mask_secondpass.inputs.out_filename = \"b0_mask_median3D_erode_secondpass.nii.gz\"\n \n threshold_FA = pe.Node(interface=fsl.ImageMaths(), 
name='threshold_FA')\n threshold_FA.inputs.op_string = \"-thr 0.8 -uthr 0.99\"\n threshold_mode = pe.Node(interface=fsl.ImageMaths(), name='threshold_mode')\n threshold_mode.inputs.op_string = \"-thr 0.1 -uthr 0.99\" \n\n make_termination_mask = pe.Node(interface=fsl.ImageMaths(), name='make_termination_mask')\n make_termination_mask.inputs.op_string = \"-bin\"\n\n get_wm_mask = pe.Node(interface=fsl.ImageMaths(), name='get_wm_mask')\n get_wm_mask.inputs.op_string = \"-thr 0.1\"\n\n MRmultiply = pe.Node(interface=mrtrix.MRMultiply(), name='MRmultiply')\n MRmultiply.inputs.out_filename = \"Eroded_FA.nii.gz\"\n MRmult_merge = pe.Node(interface=util.Merge(2), name='MRmultiply_merge')\n\n median3d = pe.Node(interface=mrtrix.MedianFilter3D(), name='median3D')\n\n fdgpet_regions = pe.Node(interface=RegionalValues(), name='fdgpet_regions')\n\n compute_cmr_glc_interface = util.Function(input_names=[\"in_file\", \"dose\", \"weight\", \"delay\",\n \"glycemie\", \"scan_time\"], output_names=[\"out_file\"], function=CMR_glucose)\n compute_cmr_glc = pe.Node(interface=compute_cmr_glc_interface, name='compute_cmr_glc')\n\n csdeconv = pe.Node(interface=mrtrix.ConstrainedSphericalDeconvolution(),\n name='csdeconv')\n\n estimateresponse = pe.Node(interface=mrtrix.EstimateResponseForSH(),\n name='estimateresponse')\n\n if tractography_type == 'probabilistic':\n CSDstreamtrack = pe.Node(\n interface=mrtrix.ProbabilisticSphericallyDeconvolutedStreamlineTrack(\n ),\n name='CSDstreamtrack')\n else:\n CSDstreamtrack = pe.Node(\n interface=mrtrix.SphericallyDeconvolutedStreamlineTrack(),\n name='CSDstreamtrack')\n\n #CSDstreamtrack.inputs.desired_number_of_tracks = 10000\n CSDstreamtrack.inputs.minimum_tract_length = 50\n\n tck2trk = pe.Node(interface=mrtrix.MRTrix2TrackVis(), name='tck2trk')\n\n extract_PreCoTh_interface = util.Function(input_names=[\"in_file\", \"out_filename\"],\n output_names=[\"out_file\"],\n function=extract_PreCoTh)\n thalamus2precuneus2cortex_ROIs = pe.Node(\n interface=extract_PreCoTh_interface, name='thalamus2precuneus2cortex_ROIs')\n\n\n wm_mask_interface = util.Function(input_names=[\"in_file\", \"out_filename\"],\n output_names=[\"out_file\"],\n function=wm_labels_only)\n make_wm_mask = pe.Node(\n interface=wm_mask_interface, name='make_wm_mask')\n\n write_precoth_data_interface = util.Function(input_names=[\"dwi_network_file\", \"fdg_stats_file\", \"subject_id\"],\n output_names=[\"out_file\"],\n function=summarize_precoth)\n write_csv_data = pe.Node(\n interface=write_precoth_data_interface, name='write_csv_data')\n\n thalamus2precuneus2cortex = pe.Node(\n interface=cmtk.CreateMatrix(), name=\"thalamus2precuneus2cortex\")\n thalamus2precuneus2cortex.inputs.count_region_intersections = True\n\n FreeSurferSource = pe.Node(\n interface=nio.FreeSurferSource(), name='fssource')\n mri_convert_Brain = pe.Node(\n interface=fs.MRIConvert(), name='mri_convert_Brain')\n mri_convert_Brain.inputs.out_type = 'niigz'\n mri_convert_Brain.inputs.no_change = True\n\n if reg_pet_T1:\n reg_pet_T1 = pe.Node(interface=fsl.FLIRT(dof=6), name = 'reg_pet_T1')\n reg_pet_T1.inputs.cost = ('corratio')\n \n reslice_fdgpet = mri_convert_Brain.clone(\"reslice_fdgpet\")\n reslice_fdgpet.inputs.no_change = True\n\n mri_convert_Ribbon = mri_convert_Brain.clone(\"mri_convert_Ribbon\")\n mri_convert_ROIs = mri_convert_Brain.clone(\"mri_convert_ROIs\")\n mri_convert_T1 = mri_convert_Brain.clone(\"mri_convert_T1\")\n\n workflow = pe.Workflow(name=name)\n workflow.base_output_dir = name\n\n workflow.connect(\n 
[(inputnode, FreeSurferSource, [(\"subjects_dir\", \"subjects_dir\")])])\n workflow.connect(\n [(inputnode, FreeSurferSource, [(\"subject_id\", \"subject_id\")])])\n\n workflow.connect(\n [(FreeSurferSource, mri_convert_T1, [('T1', 'in_file')])])\n workflow.connect(\n [(mri_convert_T1, reorientT1, [('out_file', 'in_file')])])\n\n workflow.connect(\n [(FreeSurferSource, mri_convert_Brain, [('brain', 'in_file')])])\n workflow.connect(\n [(mri_convert_Brain, reorientBrain, [('out_file', 'in_file')])])\n workflow.connect(\n [(reorientBrain, reorientROIs, [('out_matrix_file', 'in_matrix_file')])])\n workflow.connect(\n [(reorientBrain, reorientRibbon, [('out_matrix_file', 'in_matrix_file')])])\n workflow.connect(\n [(reorientBrain, reorientT1, [('out_matrix_file', 'in_matrix_file')])])\n\n workflow.connect(\n [(FreeSurferSource, mri_convert_ROIs, [(('aparc_aseg', select_aparc), 'in_file')])])\n workflow.connect(\n [(mri_convert_ROIs, reorientROIs, [('out_file', 'in_file')])])\n workflow.connect(\n [(reorientROIs, make_wm_mask, [('out_file', 'in_file')])])\n\n workflow.connect(\n [(FreeSurferSource, mri_convert_Ribbon, [(('ribbon', select_ribbon), 'in_file')])])\n workflow.connect(\n [(mri_convert_Ribbon, reorientRibbon, [('out_file', 'in_file')])])\n workflow.connect(\n [(reorientRibbon, make_termination_mask, [('out_file', 'in_file')])])\n\n workflow.connect([(inputnode, fsl2mrtrix, [(\"bvecs\", \"bvec_file\"),\n (\"bvals\", \"bval_file\")])])\n\n workflow.connect(inputnode, 'dwi', nonlinfit_node, 'dwi')\n workflow.connect(inputnode, 'subject_id', nonlinfit_node, 'base_name')\n workflow.connect(inputnode, 'bvecs', nonlinfit_node, 'bvecs')\n workflow.connect(inputnode, 'bvals', nonlinfit_node, 'bvals')\n\n workflow.connect([(inputnode, compute_cmr_glc, [(\"dose\", \"dose\")])])\n workflow.connect([(inputnode, compute_cmr_glc, [(\"weight\", \"weight\")])])\n workflow.connect([(inputnode, compute_cmr_glc, [(\"delay\", \"delay\")])])\n workflow.connect([(inputnode, compute_cmr_glc, [(\"glycemie\", \"glycemie\")])])\n workflow.connect([(inputnode, compute_cmr_glc, [(\"scan_time\", \"scan_time\")])])\n\n if reg_pet_T1:\n workflow.connect([(inputnode, reg_pet_T1, [(\"fdgpet\", \"in_file\")])])\n workflow.connect(\n [(reorientBrain, reg_pet_T1, [(\"out_file\", \"reference\")])])\n workflow.connect(\n [(reg_pet_T1, reslice_fdgpet, [(\"out_file\", \"in_file\")])])\n workflow.connect(\n [(reorientROIs, reslice_fdgpet, [(\"out_file\", \"reslice_like\")])])\n workflow.connect(\n [(reslice_fdgpet, compute_cmr_glc, [(\"out_file\", \"in_file\")])])\n else:\n workflow.connect([(inputnode, reslice_fdgpet, [(\"fdgpet\", \"in_file\")])])\n workflow.connect(\n [(reorientROIs, reslice_fdgpet, [(\"out_file\", \"reslice_like\")])])\n workflow.connect(\n [(reslice_fdgpet, compute_cmr_glc, [(\"out_file\", \"in_file\")])])\n workflow.connect(\n [(compute_cmr_glc, fdgpet_regions, [(\"out_file\", \"in_files\")])])\n workflow.connect(\n [(thalamus2precuneus2cortex_ROIs, fdgpet_regions, [(\"out_file\", \"segmentation_file\")])])\n\n workflow.connect([(nonlinfit_node, coregister,[(\"FA\",\"in_file\")])])\n workflow.connect([(make_wm_mask, coregister,[('out_file','reference')])])\n workflow.connect([(nonlinfit_node, tck2trk,[(\"FA\",\"image_file\")])])\n workflow.connect([(reorientBrain, tck2trk,[(\"out_file\",\"registration_image_file\")])])\n workflow.connect([(coregister, tck2trk,[(\"out_matrix_file\",\"matrix_file\")])])\n\n workflow.connect([(coregister, invertxfm,[(\"out_matrix_file\",\"in_file\")])])\n 
workflow.connect([(invertxfm, WM_to_FA,[(\"out_file\",\"in_matrix_file\")])])\n workflow.connect([(make_wm_mask, WM_to_FA,[(\"out_file\",\"in_file\")])])\n workflow.connect([(nonlinfit_node, WM_to_FA,[(\"FA\",\"reference\")])])\n \n workflow.connect([(invertxfm, TermMask_to_FA,[(\"out_file\",\"in_matrix_file\")])])\n workflow.connect([(make_termination_mask, TermMask_to_FA,[(\"out_file\",\"in_file\")])])\n workflow.connect([(nonlinfit_node, TermMask_to_FA,[(\"FA\",\"reference\")])])\n\n workflow.connect([(nonlinfit_node, median3d, [(\"binary_mask\", \"in_file\")])])\n workflow.connect(\n [(median3d, erode_mask_firstpass, [(\"out_file\", \"in_file\")])])\n workflow.connect(\n [(erode_mask_firstpass, erode_mask_secondpass, [(\"out_file\", \"in_file\")])])\n\n workflow.connect([(nonlinfit_node, MRmult_merge, [(\"FA\", \"in1\")])])\n workflow.connect(\n [(erode_mask_secondpass, MRmult_merge, [(\"out_file\", \"in2\")])])\n workflow.connect([(MRmult_merge, MRmultiply, [(\"out\", \"in_files\")])])\n workflow.connect([(MRmultiply, threshold_FA, [(\"out_file\", \"in_file\")])])\n workflow.connect(\n [(threshold_FA, estimateresponse, [(\"out_file\", \"mask_image\")])])\n\n workflow.connect([(inputnode, estimateresponse, [(\"dwi\", \"in_file\")])])\n workflow.connect(\n [(fsl2mrtrix, estimateresponse, [(\"encoding_file\", \"encoding_file\")])])\n\n workflow.connect([(inputnode, csdeconv, [(\"dwi\", \"in_file\")])])\n #workflow.connect(\n # [(TermMask_to_FA, csdeconv, [(\"out_file\", \"mask_image\")])])\n workflow.connect(\n [(estimateresponse, csdeconv, [(\"response\", \"response_file\")])])\n workflow.connect(\n [(fsl2mrtrix, csdeconv, [(\"encoding_file\", \"encoding_file\")])])\n workflow.connect(\n [(WM_to_FA, CSDstreamtrack, [(\"out_file\", \"seed_file\")])])\n workflow.connect(\n [(TermMask_to_FA, CSDstreamtrack, [(\"out_file\", \"mask_file\")])])\n workflow.connect(\n [(csdeconv, CSDstreamtrack, [(\"spherical_harmonics_image\", \"in_file\")])])\n \n workflow.connect([(CSDstreamtrack, tck2trk, [(\"tracked\", \"in_file\")])])\n\n workflow.connect(\n [(tck2trk, thalamus2precuneus2cortex, [(\"out_file\", \"tract_file\")])])\n workflow.connect(\n [(inputnode, thalamus2precuneus2cortex, [(\"subject_id\", \"out_matrix_file\")])])\n workflow.connect(\n [(inputnode, thalamus2precuneus2cortex, [(\"subject_id\", \"out_matrix_mat_file\")])])\n\n workflow.connect(\n [(reorientROIs, thalamus2precuneus2cortex_ROIs, [(\"out_file\", \"in_file\")])])\n workflow.connect(\n [(thalamus2precuneus2cortex_ROIs, thalamus2precuneus2cortex, [(\"out_file\", \"roi_file\")])])\n workflow.connect(\n [(thalamus2precuneus2cortex, fdgpet_regions, [(\"matrix_file\", \"resolution_network_file\")])])\n\n workflow.connect(\n [(inputnode, write_csv_data, [(\"subject_id\", \"subject_id\")])])\n workflow.connect(\n [(fdgpet_regions, write_csv_data, [(\"stats_file\", \"fdg_stats_file\")])])\n workflow.connect(\n [(thalamus2precuneus2cortex, write_csv_data, [(\"intersection_matrix_file\", \"dwi_network_file\")])])\n\n output_fields = [\"fa\", \"rgb_fa\", \"md\", \"csdeconv\", \"tracts_tck\", \"rois\", \"t1\",\n \"t1_brain\", \"wmmask_dtispace\", \"fa_t1space\", \"summary\", \"filtered_tractographies\",\n \"matrix_file\", \"connectome\", \"CMR_nodes\", \"fiber_labels_noorphans\", \"fiber_length_file\",\n \"fiber_label_file\", \"intersection_matrix_mat_file\"]\n\n outputnode = pe.Node(\n interface=util.IdentityInterface(fields=output_fields),\n name=\"outputnode\")\n\n workflow.connect(\n [(CSDstreamtrack, outputnode, [(\"tracked\", 
\"tracts_tck\")]),\n (csdeconv, outputnode,\n [(\"spherical_harmonics_image\", \"csdeconv\")]),\n (nonlinfit_node, outputnode, [(\"FA\", \"fa\")]),\n (coregister, outputnode, [(\"out_file\", \"fa_t1space\")]),\n (reorientBrain, outputnode, [(\"out_file\", \"t1_brain\")]),\n (reorientT1, outputnode, [(\"out_file\", \"t1\")]),\n (thalamus2precuneus2cortex_ROIs, outputnode, [(\"out_file\", \"rois\")]),\n (thalamus2precuneus2cortex, outputnode, [(\"filtered_tractographies\", \"filtered_tractographies\")]),\n (thalamus2precuneus2cortex, outputnode, [(\"matrix_file\", \"connectome\")]),\n (thalamus2precuneus2cortex, outputnode, [(\"fiber_labels_noorphans\", \"fiber_labels_noorphans\")]),\n (thalamus2precuneus2cortex, outputnode, [(\"fiber_length_file\", \"fiber_length_file\")]),\n (thalamus2precuneus2cortex, outputnode, [(\"fiber_label_file\", \"fiber_label_file\")]),\n (thalamus2precuneus2cortex, outputnode, [(\"intersection_matrix_mat_file\", \"intersection_matrix_mat_file\")]),\n (fdgpet_regions, outputnode, [(\"networks\", \"CMR_nodes\")]),\n (nonlinfit_node, outputnode, [(\"rgb_fa\", \"rgb_fa\")]),\n (nonlinfit_node, outputnode, [(\"MD\", \"md\")]),\n (write_csv_data, outputnode, [(\"out_file\", \"summary\")]),\n ])\n\n return workflow\n\n\ndef create_precoth_pipeline_step1(name=\"precoth_step1\", reg_pet_T1=True, auto_reorient=True):\n inputnode = pe.Node(\n interface=util.IdentityInterface(fields=[\"subjects_dir\",\n \"subject_id\",\n \"dwi\",\n \"bvecs\",\n \"bvals\",\n \"fdgpet\"]),\n name=\"inputnode\")\n\n nonlinfit_interface = util.Function(input_names=[\"dwi\", \"bvecs\", \"bvals\", \"base_name\"],\n output_names=[\"tensor\", \"FA\", \"MD\", \"evecs\", \"evals\", \"rgb_fa\", \"norm\", \"mode\", \"binary_mask\", \"b0_masked\"], function=nonlinfit_fn)\n\n nonlinfit_node = pe.Node(interface=nonlinfit_interface, name=\"nonlinfit_node\")\n erode_mask_firstpass = pe.Node(interface=mrtrix.Erode(),\n name='erode_mask_firstpass')\n erode_mask_firstpass.inputs.out_filename = \"b0_mask_median3D_erode.nii.gz\"\n erode_mask_secondpass = pe.Node(interface=mrtrix.Erode(),\n name='erode_mask_secondpass')\n erode_mask_secondpass.inputs.out_filename = \"b0_mask_median3D_erode_secondpass.nii.gz\"\n\n threshold_FA = pe.Node(interface=fsl.ImageMaths(), name='threshold_FA')\n threshold_FA.inputs.op_string = \"-thr 0.8 -uthr 0.99\"\n threshold_mode = pe.Node(interface=fsl.ImageMaths(), name='threshold_mode')\n threshold_mode.inputs.op_string = \"-thr 0.9 -fmedian -fmedian\"\n\n make_termination_mask = pe.Node(interface=fsl.ImageMaths(), name='make_termination_mask')\n make_termination_mask.inputs.op_string = \"-bin\"\n\n fast_seg_T1 = pe.Node(interface=fsl.FAST(), name='fast_seg_T1')\n fast_seg_T1.inputs.segments = True\n fast_seg_T1.inputs.probability_maps = True\n\n fix_wm_mask = pe.Node(interface=fsl.MultiImageMaths(), name='fix_wm_mask')\n fix_wm_mask.inputs.op_string = \"-mul %s\"\n\n fix_termination_mask = pe.Node(interface=fsl.MultiImageMaths(), name='fix_termination_mask')\n fix_termination_mask.inputs.op_string = \"-binv -mul %s\"\n\n wm_mask_interface = util.Function(input_names=[\"in_file\", \"out_filename\"],\n output_names=[\"out_file\"],\n function=wm_labels_only)\n make_wm_mask = pe.Node(interface=wm_mask_interface, name='make_wm_mask')\n\n MRmultiply = pe.Node(interface=mrtrix.MRMultiply(), name='MRmultiply')\n MRmultiply.inputs.out_filename = \"Eroded_FA.nii.gz\"\n\n MultFAbyMode = pe.Node(interface=mrtrix.MRMultiply(), name='MultFAbyMode')\n\n MRmult_merge = 
pe.Node(interface=util.Merge(2), name='MRmultiply_merge')\n MultFAbyMode_merge = pe.Node(interface=util.Merge(2), name='MultFAbyMode_merge')\n\n median3d = pe.Node(interface=mrtrix.MedianFilter3D(), name='median3D')\n\n FreeSurferSource = pe.Node(\n interface=nio.FreeSurferSource(), name='fssource')\n mri_convert_Brain = pe.Node(\n interface=fs.MRIConvert(), name='mri_convert_Brain')\n mri_convert_Brain.inputs.out_type = 'nii'\n mri_convert_Brain.inputs.no_change = True\n \n mri_convert_Ribbon = mri_convert_Brain.clone(\"mri_convert_Ribbon\")\n mri_convert_ROIs = mri_convert_Brain.clone(\"mri_convert_ROIs\")\n mri_convert_T1 = mri_convert_Brain.clone(\"mri_convert_T1\")\n \n mni_for_reg = op.join(os.environ[\"FSL_DIR\"],\"data\",\"standard\",\"MNI152_T1_1mm.nii.gz\")\n\n if auto_reorient:\n reorientBrain = pe.Node(interface=fsl.FLIRT(dof=6), name = 'reorientBrain')\n reorientBrain.inputs.reference = mni_for_reg\n reorientROIs = pe.Node(interface=fsl.ApplyXfm(), name = 'reorientROIs')\n reorientROIs.inputs.interp = \"nearestneighbour\"\n reorientROIs.inputs.reference = mni_for_reg\n reorientRibbon = reorientROIs.clone(\"reorientRibbon\")\n reorientRibbon.inputs.interp = \"nearestneighbour\"\n reorientT1 = reorientROIs.clone(\"reorientT1\")\n reorientT1.inputs.interp = \"trilinear\"\n\n if reg_pet_T1:\n reg_pet_T1 = pe.Node(interface=fsl.FLIRT(dof=6), name = 'reg_pet_T1')\n reg_pet_T1.inputs.cost = ('corratio')\n \n reslice_fdgpet = mri_convert_Brain.clone(\"reslice_fdgpet\")\n reslice_fdgpet.inputs.no_change = True\n\n extract_PreCoTh_interface = util.Function(input_names=[\"in_file\", \"out_filename\"],\n output_names=[\"out_file\"],\n function=extract_PreCoTh)\n thalamus2precuneus2cortex_ROIs = pe.Node(\n interface=extract_PreCoTh_interface, name='thalamus2precuneus2cortex_ROIs')\n\n\n workflow = pe.Workflow(name=name)\n workflow.base_output_dir = name\n\n workflow.connect(\n [(inputnode, FreeSurferSource, [(\"subjects_dir\", \"subjects_dir\")])])\n workflow.connect(\n [(inputnode, FreeSurferSource, [(\"subject_id\", \"subject_id\")])])\n\n workflow.connect(\n [(FreeSurferSource, mri_convert_T1, [('T1', 'in_file')])])\n workflow.connect(\n [(FreeSurferSource, mri_convert_Brain, [('brain', 'in_file')])])\n\n\n if auto_reorient:\n workflow.connect(\n [(mri_convert_T1, reorientT1, [('out_file', 'in_file')])])\n workflow.connect(\n [(mri_convert_Brain, reorientBrain, [('out_file', 'in_file')])])\n workflow.connect(\n [(reorientBrain, reorientROIs, [('out_matrix_file', 'in_matrix_file')])])\n workflow.connect(\n [(reorientBrain, reorientRibbon, [('out_matrix_file', 'in_matrix_file')])])\n workflow.connect(\n [(reorientBrain, reorientT1, [('out_matrix_file', 'in_matrix_file')])])\n\n workflow.connect(\n [(FreeSurferSource, mri_convert_ROIs, [(('aparc_aseg', select_aparc), 'in_file')])])\n \n if auto_reorient:\n workflow.connect(\n [(mri_convert_ROIs, reorientROIs, [('out_file', 'in_file')])])\n workflow.connect(\n [(reorientROIs, make_wm_mask, [('out_file', 'in_file')])])\n workflow.connect(\n [(reorientROIs, thalamus2precuneus2cortex_ROIs, [(\"out_file\", \"in_file\")])])\n else:\n workflow.connect(\n [(mri_convert_ROIs, make_wm_mask, [('out_file', 'in_file')])])\n workflow.connect(\n [(mri_convert_ROIs, thalamus2precuneus2cortex_ROIs, [(\"out_file\", \"in_file\")])])\n\n workflow.connect(\n [(FreeSurferSource, mri_convert_Ribbon, [(('ribbon', select_ribbon), 'in_file')])])\n if auto_reorient:\n workflow.connect(\n [(mri_convert_Ribbon, reorientRibbon, [('out_file', 'in_file')])])\n 
workflow.connect(\n [(reorientRibbon, make_termination_mask, [('out_file', 'in_file')])])\n else:\n workflow.connect(\n [(mri_convert_Ribbon, make_termination_mask, [('out_file', 'in_file')])]) \n\n if auto_reorient:\n workflow.connect(\n [(reorientBrain, fast_seg_T1, [('out_file', 'in_files')])])\n else:\n workflow.connect(\n [(mri_convert_Brain, fast_seg_T1, [('out_file', 'in_files')])])\n\n\n workflow.connect(\n [(inputnode, fast_seg_T1, [(\"subject_id\", \"out_basename\")])])\n workflow.connect([(fast_seg_T1, fix_termination_mask, [(('tissue_class_files', select_CSF), 'in_file')])])\n workflow.connect([(fast_seg_T1, fix_wm_mask, [(('tissue_class_files', select_WM), 'in_file')])])\n\n\n workflow.connect(\n [(make_termination_mask, fix_termination_mask, [('out_file', 'operand_files')])])\n workflow.connect(\n [(make_wm_mask, fix_wm_mask, [('out_file', 'operand_files')])])\n\n workflow.connect(inputnode, 'dwi', nonlinfit_node, 'dwi')\n workflow.connect(inputnode, 'subject_id', nonlinfit_node, 'base_name')\n workflow.connect(inputnode, 'bvecs', nonlinfit_node, 'bvecs')\n workflow.connect(inputnode, 'bvals', nonlinfit_node, 'bvals')\n\n if reg_pet_T1:\n workflow.connect([(inputnode, reg_pet_T1, [(\"fdgpet\", \"in_file\")])])\n if auto_reorient:\n workflow.connect(\n [(reorientBrain, reg_pet_T1, [(\"out_file\", \"reference\")])])\n workflow.connect(\n [(reorientROIs, reslice_fdgpet, [(\"out_file\", \"reslice_like\")])])\n else:\n workflow.connect(\n [(mri_convert_Brain, reg_pet_T1, [(\"out_file\", \"reference\")])])\n workflow.connect(\n [(mri_convert_ROIs, reslice_fdgpet, [(\"out_file\", \"reslice_like\")])])\n\n workflow.connect(\n [(reg_pet_T1, reslice_fdgpet, [(\"out_file\", \"in_file\")])])\n\n else:\n workflow.connect([(inputnode, reslice_fdgpet, [(\"fdgpet\", \"in_file\")])])\n if auto_reorient:\n workflow.connect(\n [(reorientROIs, reslice_fdgpet, [(\"out_file\", \"reslice_like\")])])\n else:\n workflow.connect(\n [(mri_convert_ROIs, reslice_fdgpet, [(\"out_file\", \"reslice_like\")])])\n\n workflow.connect([(nonlinfit_node, median3d, [(\"binary_mask\", \"in_file\")])])\n workflow.connect(\n [(median3d, erode_mask_firstpass, [(\"out_file\", \"in_file\")])])\n workflow.connect(\n [(erode_mask_firstpass, erode_mask_secondpass, [(\"out_file\", \"in_file\")])])\n workflow.connect([(nonlinfit_node, MRmult_merge, [(\"FA\", \"in1\")])])\n workflow.connect(\n [(erode_mask_secondpass, MRmult_merge, [(\"out_file\", \"in2\")])])\n workflow.connect([(MRmult_merge, MRmultiply, [(\"out\", \"in_files\")])])\n workflow.connect([(MRmultiply, threshold_FA, [(\"out_file\", \"in_file\")])])\n\n workflow.connect([(nonlinfit_node, threshold_mode, [(\"mode\", \"in_file\")])])\n workflow.connect([(threshold_mode, MultFAbyMode_merge, [(\"out_file\", \"in1\")])])\n workflow.connect([(threshold_FA, MultFAbyMode_merge, [(\"out_file\", \"in2\")])])\n workflow.connect([(MultFAbyMode_merge, MultFAbyMode, [(\"out\", \"in_files\")])])\n workflow.connect([(inputnode, MultFAbyMode, [(('subject_id', add_subj_name_to_sfmask), 'out_filename')])])\n\n workflow.connect([(inputnode, reslice_fdgpet, [(('subject_id', add_subj_name_to_fdgpet), 'out_file')])])\n workflow.connect([(inputnode, make_wm_mask, [(('subject_id', add_subj_name_to_wmmask), 'out_filename')])])\n workflow.connect([(inputnode, fix_wm_mask, [(('subject_id', add_subj_name_to_wmmask), 'out_file')])])\n workflow.connect([(inputnode, fix_termination_mask, [(('subject_id', add_subj_name_to_termmask), 'out_file')])])\n workflow.connect([(inputnode, 
thalamus2precuneus2cortex_ROIs, [(('subject_id', add_subj_name_to_rois), 'out_filename')])])\n if auto_reorient:\n workflow.connect([(inputnode, reorientT1, [(('subject_id', add_subj_name_to_T1), 'out_file')])])\n workflow.connect([(inputnode, reorientBrain, [(('subject_id', add_subj_name_to_T1brain), 'out_file')])])\n else:\n workflow.connect([(inputnode, mri_convert_T1, [(('subject_id', add_subj_name_to_T1), 'out_file')])])\n workflow.connect([(inputnode, mri_convert_Brain, [(('subject_id', add_subj_name_to_T1brain), 'out_file')])])\n\n output_fields = [\"single_fiber_mask\", \"fa\", \"rgb_fa\", \"md\", \"t1\", \"t1_brain\",\n \"wm_mask\", \"term_mask\", \"fdgpet\", \"rois\",\"mode\", \"tissue_class_files\", \"probability_maps\"]\n\n outputnode = pe.Node(\n interface=util.IdentityInterface(fields=output_fields),\n name=\"outputnode\")\n\n workflow.connect([(fast_seg_T1, outputnode, [(\"tissue_class_files\", \"tissue_class_files\")])])\n workflow.connect([(fast_seg_T1, outputnode, [(\"probability_maps\", \"probability_maps\")])])\n \n workflow.connect([\n (nonlinfit_node, outputnode, [(\"FA\", \"fa\")]),\n (nonlinfit_node, outputnode, [(\"rgb_fa\", \"rgb_fa\")]),\n (nonlinfit_node, outputnode, [(\"MD\", \"md\")]),\n (nonlinfit_node, outputnode, [(\"mode\", \"mode\")]),\n (MultFAbyMode, outputnode, [(\"out_file\", \"single_fiber_mask\")]),\n (fix_wm_mask, outputnode, [(\"out_file\", \"wm_mask\")]),\n (fix_termination_mask, outputnode, [(\"out_file\", \"term_mask\")]),\n (reslice_fdgpet, outputnode, [(\"out_file\", \"fdgpet\")]),\n (thalamus2precuneus2cortex_ROIs, outputnode, [(\"out_file\", \"rois\")]),\n ])\n\n if auto_reorient:\n workflow.connect([\n (reorientBrain, outputnode, [(\"out_file\", \"t1_brain\")]),\n (reorientT1, outputnode, [(\"out_file\", \"t1\")]),\n ])\n else:\n workflow.connect([\n (mri_convert_Brain, outputnode, [(\"out_file\", \"t1_brain\")]),\n (mri_convert_T1, outputnode, [(\"out_file\", \"t1\")]),\n ])\n return workflow\n\n\n\n\ndef create_precoth_pipeline_step2(name=\"precoth_step2\", tractography_type='probabilistic'):\n inputnode = pe.Node(\n interface=util.IdentityInterface(fields=[\"subjects_dir\",\n \"subject_id\",\n \"dwi\",\n \"bvecs\",\n \"bvals\",\n \"fdgpet\",\n \"dose\",\n \"weight\",\n \"delay\",\n \"glycemie\",\n \"scan_time\",\n \"single_fiber_mask\",\n \"fa\",\n \"rgb_fa\",\n \"md\",\n \"t1_brain\",\n \"t1\",\n \"wm_mask\",\n \"term_mask\",\n \"rois\",\n ]),\n name=\"inputnode\")\n\n coregister = pe.Node(interface=fsl.FLIRT(dof=12), name = 'coregister')\n coregister.inputs.cost = ('normmi')\n\n invertxfm = pe.Node(interface=fsl.ConvertXFM(), name = 'invertxfm')\n invertxfm.inputs.invert_xfm = True\n\n WM_to_FA = pe.Node(interface=fsl.ApplyXfm(), name = 'WM_to_FA')\n WM_to_FA.inputs.interp = 'nearestneighbour'\n TermMask_to_FA = WM_to_FA.clone(\"TermMask_to_FA\")\n\n rgb_fa_t1space = pe.Node(interface=fsl.ApplyXfm(), name = 'rgb_fa_t1space')\n md_to_T1 = pe.Node(interface=fsl.ApplyXfm(), name = 'md_to_T1')\n\n t1_dtispace = pe.Node(interface=fsl.ApplyXfm(), name = 't1_dtispace')\n\n fsl2mrtrix = pe.Node(interface=mrtrix.FSL2MRTrix(), name='fsl2mrtrix')\n fsl2mrtrix.inputs.invert_y = True\n\n fdgpet_regions = pe.Node(interface=RegionalValues(), name='fdgpet_regions')\n\n compute_cmr_glc_interface = util.Function(input_names=[\"in_file\", \"dose\", \"weight\", \"delay\",\n \"glycemie\", \"scan_time\"], output_names=[\"out_file\"], function=CMR_glucose)\n compute_cmr_glc = pe.Node(interface=compute_cmr_glc_interface, name='compute_cmr_glc')\n\n csdeconv 
= pe.Node(interface=mrtrix.ConstrainedSphericalDeconvolution(),\n name='csdeconv')\n\n estimateresponse = pe.Node(interface=mrtrix.EstimateResponseForSH(),\n name='estimateresponse')\n\n if tractography_type == 'probabilistic':\n CSDstreamtrack = pe.Node(\n interface=mrtrix.ProbabilisticSphericallyDeconvolutedStreamlineTrack(\n ),\n name='CSDstreamtrack')\n else:\n CSDstreamtrack = pe.Node(\n interface=mrtrix.SphericallyDeconvolutedStreamlineTrack(),\n name='CSDstreamtrack')\n\n CSDstreamtrack.inputs.minimum_tract_length = 50\n\n CSDstreamtrack.inputs.desired_number_of_tracks = 10000\n\n tck2trk = pe.Node(interface=mrtrix.MRTrix2TrackVis(), name='tck2trk')\n\n write_precoth_data_interface = util.Function(input_names=[\"dwi_network_file\", \"fdg_stats_file\", \"subject_id\"],\n output_names=[\"out_file\"],\n function=summarize_precoth)\n write_csv_data = pe.Node(\n interface=write_precoth_data_interface, name='write_csv_data')\n\n thalamus2precuneus2cortex = pe.Node(\n interface=cmtk.CreateMatrix(), name=\"thalamus2precuneus2cortex\")\n thalamus2precuneus2cortex.inputs.count_region_intersections = True\n\n workflow = pe.Workflow(name=name)\n workflow.base_output_dir = name\n\n workflow.connect([(inputnode, fsl2mrtrix, [(\"bvecs\", \"bvec_file\"),\n (\"bvals\", \"bval_file\")])])\n\n workflow.connect([(inputnode, fdgpet_regions, [(\"rois\", \"segmentation_file\")])])\n workflow.connect([(inputnode, compute_cmr_glc, [(\"fdgpet\", \"in_file\")])])\n workflow.connect([(inputnode, compute_cmr_glc, [(\"dose\", \"dose\")])])\n workflow.connect([(inputnode, compute_cmr_glc, [(\"weight\", \"weight\")])])\n workflow.connect([(inputnode, compute_cmr_glc, [(\"delay\", \"delay\")])])\n workflow.connect([(inputnode, compute_cmr_glc, [(\"glycemie\", \"glycemie\")])])\n workflow.connect([(inputnode, compute_cmr_glc, [(\"scan_time\", \"scan_time\")])])\n workflow.connect([(compute_cmr_glc, fdgpet_regions, [(\"out_file\", \"in_files\")])])\n\n workflow.connect([(inputnode, coregister,[(\"fa\",\"in_file\")])])\n workflow.connect([(inputnode, coregister,[('wm_mask','reference')])])\n workflow.connect([(inputnode, tck2trk,[(\"fa\",\"image_file\")])])\n \n workflow.connect([(inputnode, tck2trk,[(\"wm_mask\",\"registration_image_file\")])])\n workflow.connect([(coregister, tck2trk,[(\"out_matrix_file\",\"matrix_file\")])])\n \n workflow.connect([(coregister, invertxfm,[(\"out_matrix_file\",\"in_file\")])])\n\n workflow.connect([(inputnode, t1_dtispace,[(\"t1\",\"in_file\")])])\n workflow.connect([(invertxfm, t1_dtispace,[(\"out_file\",\"in_matrix_file\")])])\n workflow.connect([(inputnode, t1_dtispace,[(\"fa\",\"reference\")])])\n\n workflow.connect([(inputnode, rgb_fa_t1space,[(\"rgb_fa\",\"in_file\")])])\n workflow.connect([(coregister, rgb_fa_t1space,[(\"out_matrix_file\",\"in_matrix_file\")])])\n workflow.connect([(inputnode, rgb_fa_t1space,[('wm_mask','reference')])])\n\n workflow.connect([(inputnode, md_to_T1,[(\"md\",\"in_file\")])])\n workflow.connect([(coregister, md_to_T1,[(\"out_matrix_file\",\"in_matrix_file\")])])\n workflow.connect([(inputnode, md_to_T1,[('wm_mask','reference')])])\n\n workflow.connect([(invertxfm, WM_to_FA,[(\"out_file\",\"in_matrix_file\")])])\n workflow.connect([(inputnode, WM_to_FA,[(\"wm_mask\",\"in_file\")])])\n workflow.connect([(inputnode, WM_to_FA,[(\"fa\",\"reference\")])])\n \n workflow.connect([(invertxfm, TermMask_to_FA,[(\"out_file\",\"in_matrix_file\")])])\n workflow.connect([(inputnode, TermMask_to_FA,[(\"term_mask\",\"in_file\")])])\n workflow.connect([(inputnode, 
TermMask_to_FA,[(\"fa\",\"reference\")])])\n\n workflow.connect([(inputnode, estimateresponse, [(\"single_fiber_mask\", \"mask_image\")])])\n\n workflow.connect([(inputnode, estimateresponse, [(\"dwi\", \"in_file\")])])\n workflow.connect(\n [(fsl2mrtrix, estimateresponse, [(\"encoding_file\", \"encoding_file\")])])\n\n workflow.connect([(inputnode, csdeconv, [(\"dwi\", \"in_file\")])])\n #workflow.connect(\n # [(TermMask_to_FA, csdeconv, [(\"out_file\", \"mask_image\")])])\n workflow.connect(\n [(estimateresponse, csdeconv, [(\"response\", \"response_file\")])])\n workflow.connect(\n [(fsl2mrtrix, csdeconv, [(\"encoding_file\", \"encoding_file\")])])\n workflow.connect(\n [(WM_to_FA, CSDstreamtrack, [(\"out_file\", \"seed_file\")])])\n workflow.connect(\n [(TermMask_to_FA, CSDstreamtrack, [(\"out_file\", \"mask_file\")])])\n workflow.connect(\n [(csdeconv, CSDstreamtrack, [(\"spherical_harmonics_image\", \"in_file\")])])\n\n workflow.connect([(CSDstreamtrack, tck2trk, [(\"tracked\", \"in_file\")])])\n\n workflow.connect(\n [(tck2trk, thalamus2precuneus2cortex, [(\"out_file\", \"tract_file\")])])\n workflow.connect(\n [(inputnode, thalamus2precuneus2cortex, [(\"subject_id\", \"out_matrix_file\")])])\n workflow.connect(\n [(inputnode, thalamus2precuneus2cortex, [(\"subject_id\", \"out_matrix_mat_file\")])])\n\n workflow.connect(\n [(inputnode, thalamus2precuneus2cortex, [(\"rois\", \"roi_file\")])])\n workflow.connect(\n [(thalamus2precuneus2cortex, fdgpet_regions, [(\"intersection_matrix_file\", \"resolution_network_file\")])])\n\n workflow.connect(\n [(inputnode, write_csv_data, [(\"subject_id\", \"subject_id\")])])\n workflow.connect(\n [(fdgpet_regions, write_csv_data, [(\"stats_file\", \"fdg_stats_file\")])])\n workflow.connect(\n [(thalamus2precuneus2cortex, write_csv_data, [(\"intersection_matrix_file\", \"dwi_network_file\")])])\n\n output_fields = [\"csdeconv\", \"tracts_tck\", \"summary\", \"filtered_tractographies\",\n \"matrix_file\", \"connectome\", \"CMR_nodes\", \"cmr_glucose\", \"fiber_labels_noorphans\", \"fiber_length_file\",\n \"fiber_label_file\", \"fa_t1space\", \"rgb_fa_t1space\", \"md_t1space\", \"fa_t1xform\", \"t1_dtispace\",\n \"intersection_matrix_mat_file\", \"dti_stats\"]\n\n outputnode = pe.Node(\n interface=util.IdentityInterface(fields=output_fields),\n name=\"outputnode\")\n\n workflow.connect(\n [(CSDstreamtrack, outputnode, [(\"tracked\", \"tracts_tck\")]),\n (csdeconv, outputnode,\n [(\"spherical_harmonics_image\", \"csdeconv\")]),\n (coregister, outputnode, [(\"out_file\", \"fa_t1space\")]),\n (rgb_fa_t1space, outputnode, [(\"out_file\", \"rgb_fa_t1space\")]),\n (md_to_T1, outputnode, [(\"out_file\", \"md_t1space\")]),\n (t1_dtispace, outputnode, [(\"out_file\", \"t1_dtispace\")]),\n (coregister, outputnode, [(\"out_matrix_file\", \"fa_t1xform\")]),\n (thalamus2precuneus2cortex, outputnode, [(\"filtered_tractographies\", \"filtered_tractographies\")]),\n (thalamus2precuneus2cortex, outputnode, [(\"matrix_file\", \"connectome\")]),\n (thalamus2precuneus2cortex, outputnode, [(\"fiber_labels_noorphans\", \"fiber_labels_noorphans\")]),\n (thalamus2precuneus2cortex, outputnode, [(\"fiber_length_file\", \"fiber_length_file\")]),\n (thalamus2precuneus2cortex, outputnode, [(\"fiber_label_file\", \"fiber_label_file\")]),\n (thalamus2precuneus2cortex, outputnode, [(\"intersection_matrix_mat_file\", \"intersection_matrix_mat_file\")]),\n (thalamus2precuneus2cortex, outputnode, [(\"stats_file\", \"dti_stats\")]),\n (fdgpet_regions, outputnode, [(\"networks\", 
\"CMR_nodes\")]),\n (write_csv_data, outputnode, [(\"out_file\", \"summary\")]),\n (compute_cmr_glc, outputnode, [(\"out_file\", \"cmr_glucose\")]),\n ])\n\n return workflow\n"
] |
[
[
"scipy.io.loadmat",
"numpy.zeros"
]
] |
do-wie-ching/do-wie-ching-anti-theft-system-base-on-deep-learning
|
[
"15307e34f617c93a93921494bb9805e94af3c0b7"
] |
[
"pratice/activation.py"
] |
[
"import torch\nimport numpy as np\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\n\n\ntensor = torch.FloatTensor([[1, 2], [3, 4]])\n\nvariable = Variable(tensor, requires_grad=True)\n\nt_out = torch.mean(tensor * tensor)\nv_out = torch.mean(variable * variable)\n\nv_out.backward()\n#print(variable.grad)\n\n#激勵函數\nx = torch.linspace(-5, 5, 200)\nx = Variable(x)\nx_np = x.data.numpy()\n\ny_relu = F.relu(x).data.numpy()\ny_sigmoid = torch.sigmoid(x).data.numpy()\ny_tanh = torch.tanh(x).data.numpy()\ny_softplus = F.softplus(x).data.numpy()\n\n\n# plt to visualize these activation function\nplt.figure(1, figsize=(8, 6))\nplt.subplot(221)\nplt.plot(x_np, y_relu, c='red', label='relu')\nplt.ylim((-1, 5))\nplt.legend(loc='best')\n\nplt.subplot(222)\nplt.plot(x_np, y_sigmoid, c='red', label='sigmoid')\nplt.ylim((-0.2, 1.2))\nplt.legend(loc='best')\n\nplt.subplot(223)\nplt.plot(x_np, y_tanh, c='red', label='tanh')\nplt.ylim((-1.2, 1.2))\nplt.legend(loc='best')\n\nplt.subplot(224)\nplt.plot(x_np, y_softplus, c='red', label='softplus')\nplt.ylim((-0.2, 6))\nplt.legend(loc='best')\n\nplt.show()"
] |
[
[
"torch.mean",
"torch.linspace",
"matplotlib.pyplot.legend",
"torch.sigmoid",
"matplotlib.pyplot.ylim",
"torch.autograd.Variable",
"matplotlib.pyplot.plot",
"torch.tanh",
"matplotlib.pyplot.subplot",
"torch.FloatTensor",
"torch.nn.functional.relu",
"matplotlib.pyplot.show",
"torch.nn.functional.softplus",
"matplotlib.pyplot.figure"
]
] |
laturose/deepnlp
|
[
"3b0a8914468d641ac17836d8bec819af7df58f89"
] |
[
"deepnlp/ner_tagger.py"
] |
[
"#!/usr/bin/python\r\n# -*- coding:utf-8 -*-\r\n'''\r\n@author: xichen ding\r\n@date: 2016-11-15\r\n@rev: 2017-11-01\r\n'''\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\nfrom __future__ import unicode_literals # compatible with python3 unicode coding\r\n\r\nimport sys, os\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport glob\r\nimport pickle\r\n\r\nfrom dict_util import gen_prefix_dict\r\n\r\n# adding pos submodule to sys.path, compatible with py3 absolute_import\r\npkg_path = os.path.dirname(os.path.abspath(__file__))\r\nsys.path.append(pkg_path)\r\n\r\nfrom ner import ner_model as ner_model\r\nfrom ner import reader as ner_reader\r\n\r\nglobal global_ner_model\r\n\r\n### Define Constant\r\nTAG_NONE_ENTITY = \"nt\"\r\n\r\nENTITY_TAG_DICT = \"entity_tags.dic\"\r\n\r\nENTITY_TAG_DICT_PICKLE = \"entity_tags.dic.pkl\"\r\n\r\nDEFAULT_DICT_ZH_NAME = \"zh\"\r\n\r\n# User Define Function Disambiguation\r\ndef udf_default(word, tags, *args):\r\n \"\"\" Default get the first tag\r\n Return: tag, confidence\r\n \"\"\"\r\n if (len(tags) > 0):\r\n return tags[0], 1.0\r\n else:\r\n return TAG_NONE_ENTITY\r\n return tags[0], 1.0\r\n\r\ndef udf_disambiguation_cooccur(word, tags, context, tag_feat_dict, *args):\r\n \"\"\" Disambiguation based on cooccurence of context and tag_feat_dict\r\n Args: word: input word\r\n tags: multiple tags on word\r\n context: list of words surrounding current word of a window\r\n \"\"\"\r\n if (len(tag_feat_dict) == 0) or (len(tags) == 0):\r\n return None, 0.0\r\n\r\n num = len(tags)\r\n coocur_dict = {}\r\n coocur_count = []\r\n for tag in tags:\r\n feat_words = tag_feat_dict[tag] if tag in tag_feat_dict else []\r\n common = []\r\n for feat in feat_words:\r\n if feat in context:\r\n common.append(feat)\r\n coocur_dict[tag] = len(common) # How many occurence under current tags\r\n coocur_count.append(len(common))\r\n vec = np.array(coocur_count)\r\n total = np.sum(vec)\r\n prob_vec = []\r\n if total > 0.0:\r\n prob_vec = vec/total\r\n else:\r\n prob_vec = 0.0 * vec\r\n max_index = np.argmax(prob_vec)\r\n return tags[max_index], prob_vec[max_index]\r\n\r\ndef ensemble_udf(udfs, word, tags, *args):\r\n \"\"\" Embed Multiple UDFs to get the ambiguation tag\r\n \"\"\"\r\n tag_count_dict = {}\r\n for udf in udfs:\r\n tag, confidence = udf(word, tags, *args)\r\n if tag is not None:\r\n if tag in tag_count_dict:\r\n tag_count_dict[tag] = tag_count_dict[tag] + 1\r\n else:\r\n tag_count_dict[tag] = 1\r\n max_cnt = -1\r\n max_cnt_tag = TAG_NONE_ENTITY\r\n for tag in tag_count_dict.keys():\r\n cur_cnt = tag_count_dict[tag]\r\n if (cur_cnt > max_cnt):\r\n max_cnt = cur_cnt\r\n max_cnt_tag = tag\r\n return max_cnt_tag\r\n\r\nclass ModelLoader(object):\r\n\r\n def __init__(self, name, data_path, ckpt_path):\r\n self.name = name\r\n self.data_path = data_path\r\n self.ckpt_path = ckpt_path\r\n print(\"NOTICE: Starting new Tensorflow session...\")\r\n print(\"NOTICE: Initializing ner_tagger model...\")\r\n self.session = tf.Session()\r\n self.model = None\r\n self.var_scope = \"ner_var_scope\"\r\n self._init_ner_model(self.session, self.ckpt_path) # Initialization model\r\n self.__prefix_dict = {} # private member variable\r\n self._load_dict(name) # load model dict zh + dict_name\r\n \r\n def predict(self, words, tagset = []):\r\n ''' \r\n Args: words: list of string\r\n tagset: tags that is included in the final output, default [] means return all tags\r\n Return tuples of [(word, tag),...]\r\n 
'''\r\n model_tagging = self._predict_ner_tags_model(self.session, self.model, words, self.data_path)\r\n dict_tagging = self._predict_ner_tags_dict(words, merge = True, tagset = tagset, udfs = [udf_default])\r\n merge_tagging = self._merge_tagging(model_tagging, dict_tagging)\r\n return dict_tagging\r\n \r\n ## Define Config Parameters for NER Tagger\r\n def _init_ner_model(self, session, ckpt_path):\r\n \"\"\"Create ner Tagger model and initialize or load parameters in session.\"\"\"\r\n # initilize config\r\n config = ner_model.get_config(self.name)\r\n if config is None:\r\n print (\"WARNING: Input model name %s has no configuration...\" % self.name)\r\n config.batch_size = 1\r\n config.num_steps = 1 # iterator one token per time\r\n \r\n # Check if self.model already exist\r\n if self.model is None:\r\n with tf.variable_scope(self.var_scope, reuse = True):\r\n self.model = ner_model.NERTagger(is_training=True, config=config) # save object after is_training\r\n #else: # Model Graph Def already exist\r\n # print (\"DEBUG: Model Def already exists\")\r\n # update model parameters\r\n if len(glob.glob(ckpt_path + '.data*')) > 0: # file exist with pattern: 'ner.ckpt.data*'\r\n print(\"NOTICE: Loading model parameters from %s\" % ckpt_path)\r\n all_vars = tf.global_variables()\r\n model_vars = [k for k in all_vars if self.var_scope in k.name.split(\"/\")]\r\n tf.train.Saver(model_vars).restore(session, ckpt_path)\r\n else:\r\n print(\"NOTICE: Model not found, created with fresh parameters.\")\r\n session.run(tf.global_variables_initializer())\r\n \r\n def _predict_ner_tags_model(self, session, model, words, data_path):\r\n '''\r\n Define prediction function of ner Tagging\r\n return tuples (word, tag)\r\n '''\r\n word_data = ner_reader.sentence_to_word_ids(data_path, words)\r\n tag_data = [0]*len(word_data)\r\n state = session.run(model.initial_state)\r\n \r\n predict_id =[]\r\n for step, (x, y) in enumerate(ner_reader.iterator(word_data, tag_data, model.batch_size, model.num_steps)):\r\n fetches = [model.cost, model.final_state, model.logits]\r\n feed_dict = {}\r\n feed_dict[model.input_data] = x\r\n feed_dict[model.targets] = y\r\n for i, (c, h) in enumerate(model.initial_state):\r\n feed_dict[c] = state[i].c\r\n feed_dict[h] = state[i].h\r\n \r\n _, _, logits = session.run(fetches, feed_dict)\r\n predict_id.append(int(np.argmax(logits))) \r\n predict_tag = ner_reader.word_ids_to_sentence(data_path, predict_id)\r\n predict_taggedwords = list(zip(words, predict_tag))\r\n return predict_taggedwords\r\n \r\n # internal variable for disambiguation\r\n tag_feat_dict = {}\r\n def set_tag_feat_dict(self, tag_feat_dict):\r\n self.tag_feat_dict = tag_feat_dict\r\n\r\n def _predict_ner_tags_dict(self, words, merge = False, tagset = [],udfs = [udf_default]):\r\n \"\"\" search NER tags from the whole sentences with Maximum Length\r\n Args: words: list of string \r\n merge: boolean , if merge the segmentation results \r\n udfs: list of user defined functions\r\n \"\"\"\r\n words_merge = self._preprocess_segment(words) if merge else words\r\n tokens = []\r\n include_all = True if len(tagset) == 0 else False\r\n for i in range(len(words_merge)):\r\n word = words_merge[i]\r\n if word in self.__prefix_dict:\r\n tags = self.__prefix_dict[word]\r\n if (tags):\r\n # tag = tags[0] # To Do, Add Disambiguity function\r\n context = self._get_context_words(words_merge, i) # Getting surround context words\r\n tag = ensemble_udf(udfs, word, tags, context, self.tag_feat_dict) # Using Coocurrence to 
disambiguation\r\n # Check if current tags is included in tagset\r\n if (include_all):\r\n tokens.append((word, tag))\r\n else:\r\n if tag in tagset:\r\n tokens.append((word, tag))\r\n else:\r\n tokens.append((word, TAG_NONE_ENTITY))\r\n else:\r\n tokens.append((word, TAG_NONE_ENTITY))\r\n else:\r\n tokens.append((word, TAG_NONE_ENTITY))\r\n return tokens\r\n \r\n def _get_context_words(self, words, i, window = 4):\r\n \"\"\" Get context words: a list of words within window of the given word\r\n \"\"\"\r\n if (i >= len(words)):\r\n return None\r\n else:\r\n token_num = len(words)\r\n start_id = max(i - window, 0)\r\n end_id = min(i + window, (token_num - 1))\r\n context = words[start_id:i] + words[(i+1):(end_id+1)]\r\n return context\r\n \r\n # default setting\r\n max_iter = 1000\r\n def _preprocess_segment(self, words):\r\n \"\"\" Consolidate Words Segmentation and Merge words to get Maximum Length Segment\r\n \"\"\"\r\n token_num = len(words)\r\n start_id = 0\r\n words_new = []\r\n lineno = 0\r\n while (start_id < token_num and lineno < self.max_iter):\r\n lineno += 1\r\n #print (\"Start Id... %d \" % start_id)\r\n step = 0\r\n # Get Boundry\r\n while(step < (token_num - start_id)):\r\n segment = \"\".join(words[start_id:(start_id + step + 1)])\r\n if segment in self.__prefix_dict:\r\n step += 1\r\n else:\r\n break\r\n # Check if current word is in Dict or Not\r\n if (step == 0): # Current Word is not in Dictionary\r\n segment = \"\".join(words[start_id:(start_id + 1)]) # Current Word\r\n words_new.append(segment)\r\n #print (\"Current Segment %s with step %d\" % (segment, step))\r\n start_id += 1\r\n else: # At least one word is in Dictionary\r\n segment = \"\".join(words[start_id:(start_id + step)]) #\r\n words_new.append(segment)\r\n #print (\"Current Segment %s with step %d\" % (segment, step))\r\n start_id += step\r\n return words_new\r\n\r\n def _merge_tagging(self, model_tagging, dict_tagging):\r\n \"\"\" Merge tagging results of model and dict\r\n \"\"\"\r\n if (len(model_tagging) != len(dict_tagging)):\r\n print (\"WARNING: Model Tagging Sequence and Dict Tagging Sequence are different\")\r\n return None\r\n num = len(model_tagging)\r\n merge_tagging = []\r\n for i in range(num):\r\n word = model_tagging[i][0]\r\n model_tag = model_tagging[i][1]\r\n dict_tag = dict_tagging[i][1]\r\n if (dict_tag): # not None\r\n merge_tagging.append((word, dict_tag))\r\n else:\r\n merge_tagging.append((word, model_tag))\r\n return merge_tagging\r\n \r\n def _load_default_dict(self, name = 'zh'):\r\n ''' internal method to load new default prefict_dict\r\n default dict\r\n '''\r\n print (\"NOTICE: Start Loading Default Entity Tag Dictionary: %s ...\" % name)\r\n default_dict_pickle_path = os.path.join(pkg_path, \"ner/dict/\", name, ENTITY_TAG_DICT_PICKLE)\r\n if not os.path.exists(default_dict_pickle_path):\r\n print (\"ERROR: Input Pickle file doesn't exist:%s ...\" % default_dict_pickle_path)\r\n return\r\n else:\r\n fr = open(default_dict_pickle_path, 'rb')\r\n try:\r\n self.__prefix_dict = pickle.load(fr) # update to new dictionary\r\n print (\"NOTICE: Loading NER Tagger Prefix Dict successfully, Dict Size: %d ...\" % len(self.__prefix_dict))\r\n except:\r\n print (\"ERROR: Failed to load pickle file %s\" % default_dict_pickle_path)\r\n\r\n def _load_dict(self, dict_name):\r\n \"\"\" internal method during initialization\r\n load default zh dict and update input dict if necessary\r\n default dict zh + input dict\r\n \"\"\"\r\n self.__prefix_dict = {} # empty current dict\r\n 
self._load_default_dict(DEFAULT_DICT_ZH_NAME) # adding base dictionary\r\n if (dict_name != DEFAULT_DICT_ZH_NAME): # load 'zh' + dict_name\r\n # add new input dict\r\n dict_path = os.path.join(pkg_path, \"ner/dict/\", dict_name, ENTITY_TAG_DICT_PICKLE)\r\n if os.path.exists(dict_path):\r\n #pdict = gen_prefix_dict(dict_path)\r\n pdict = {}\r\n fr = open(dict_path, 'rb')\r\n try:\r\n pdict = pickle.load(fr)\r\n print (\"NOTICE: Loading Entity Tags Prefix Dict successfully, Dict Size: %d ...\" % len(pdict))\r\n except:\r\n print (\"ERROR: Failed to load pickle file %s\" % dict_path)\r\n # Check Entity Tag Conflicts\r\n pdict_clean = {} # Check Dict Conflict: Newly Loaded Dict Have confict with default ones\r\n pdict_confict = {}\r\n for k in pdict.keys():\r\n if k not in self.__prefix_dict:\r\n pdict_clean[k] = pdict[k]\r\n else:\r\n pdict_confict[k] = pdict[k]\r\n if (len(pdict_confict) > 0):\r\n print (\"WARNING: Newly loaded dict have conflict with default dict, Size: %d\" % len(pdict_confict))\r\n self.__prefix_dict.update(pdict_clean) # Merge Newly Added Dict to Default Dict\r\n print (\"NOTICE: Loading Dictionary Successfully...\")\r\n else:\r\n print (\"ERROR: Dict Path doesn't exist: %s\" % dict_path)\r\n\r\n def load_dict(self, dict_name):\r\n \"\"\" public method to load dict_name in the package\r\n default dict zh + input dict\r\n \"\"\"\r\n # add default dict\r\n self._load_dict(dict_name)\r\n\r\n def load_user_dict(self, path):\r\n \"\"\" public method to load user defined ner slots dictionary\r\n \"\"\"\r\n if os.path.exists(path):\r\n pdict = gen_prefix_dict(path)\r\n pdict_clean = {} # Check Dict Conflict: Newly Loaded Dict Have confict with default ones\r\n pdict_confict = {}\r\n for k in pdict.keys():\r\n if k not in self.__prefix_dict:\r\n pdict_clean[k] = pdict[k]\r\n else:\r\n pdict_confict[k] = pdict[k]\r\n if (len(pdict_confict) > 0):\r\n print (\"WARNING: Newly loaded dict have conflict with default dict: %d\" % len(pdict_confict))\r\n self.__prefix_dict.update(pdict_clean) # Merge Newly Added Dict to Default Dict\r\n print (\"NOTICE: Loading Dictionary Successfully...\")\r\n else:\r\n print (\"ERROR: User Dict Path doesn't exist: %s\" % path)\r\n\r\ndef load_model(name = 'zh'):\r\n ''' Args: name: model name;\r\n data_path e.g.: ./deepnlp/ner/data/zh\r\n ckpt_path e.g.: ./deepnlp/ner/ckpt/zh/ner.ckpt\r\n ckpt_file e.g.: ./deepnlp/ner/ckpt/zh/ner.ckpt.data-00000-of-00001\r\n '''\r\n data_path = os.path.join(pkg_path, \"ner/data\", name) # NER vocabulary data path\r\n ckpt_path = os.path.join(pkg_path, \"ner/ckpt\", name, \"ner.ckpt\") # NER model checkpoint path\r\n return ModelLoader(name, data_path, ckpt_path)\r\n"
] |
[
[
"tensorflow.global_variables",
"tensorflow.global_variables_initializer",
"numpy.argmax",
"tensorflow.Session",
"tensorflow.variable_scope",
"tensorflow.train.Saver",
"numpy.array",
"numpy.sum"
]
] |
yueqiw/ephys_analysis
|
[
"6b33e2e43052bcd91705411ded1256725632fcde"
] |
[
"current_clamp.py"
] |
[
"import os\nimport numpy as np\nimport pandas as pd\nimport six\nfrom collections import OrderedDict\n\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport seaborn as sns\nfrom matplotlib import gridspec, animation\nfrom PIL import Image, ImageDraw, ImageFont\n\nimport allensdk_0_14_2.ephys_features as ft\n\n# TODO: implement plotting functions under a class object \n\ndef load_current_step_add_itrace(abf_file, ihold, istart, istep, startend=None, filetype='abf', channels=[0]):\n '''\n Load current clamp recordings from pClamp .abf files with only voltage traces\n '''\n ch0 = channels[0]\n rec = stfio.read(abf_file)\n assert(rec[ch0].yunits == 'mV')\n\n data = OrderedDict()\n data['file_id'] = os.path.basename(abf_file).strip('.' + filetype)\n data['file_directory'] = os.path.dirname(abf_file)\n data['record_date'] = rec.datetime.date()\n data['record_time'] = rec.datetime.time()\n\n data['dt'] = rec.dt / 1000\n data['hz'] = 1./rec.dt * 1000\n data['time_unit'] = 's'\n\n data['n_channels'] = len(rec)\n data['channel_names'] = [rec[ch0].name, 'Current_simulated']\n data['channel_units'] = [rec[ch0].yunits, 'pA']\n data['n_sweeps'] = len(rec[ch0])\n data['sweep_length'] = len(rec[ch0][0])\n\n data['t'] = np.arange(0, data['sweep_length']) * data['dt']\n\n start_idx = ft.find_time_index(data['t'], startend[0])\n end_idx = ft.find_time_index(data['t'], startend[1])\n current = [np.zeros_like(data['t']) + ihold for i in range(data['n_sweeps'])]\n for i in range(data['n_sweeps']):\n current[i][start_idx:end_idx] += istart + istep * i\n\n data['voltage'] = rec[ch0]\n data['voltage'] = [x.asarray() for x in data['voltage']]\n data['current'] = current\n\n current_channel = stfio.Channel([stfio.Section(x) for x in current])\n current_channel.yunits = 'pA'\n current_channel.name = 'Current_simulated'\n chlist = [rec[ch0], current_channel]\n rec_with_current = stfio.Recording(chlist)\n rec_with_current.dt = rec.dt\n rec_with_current.xunits = rec.xunits\n rec_with_current.datetime = rec.datetime\n return rec_with_current, data\n\n\ndef plot_current_step(data, fig_height=6, x_scale=3.5, xlim=[0.3,3.2],\n startend=None, offset=[0.2, 0.4], lw_scale=1, alpha_scale=1,\n plot_gray_sweeps=True,\n blue_sweep=None, rheobase_sweep=None, sag_sweeps=[], vlim=[-145,60], ilim=[-95,150],\n spikes_sweep_id = None, spikes_t = None,\n other_features=None, trough_name = 'spikes_trough_5w',\n bias_current = 0.0,\n highlight = 'deepskyblue',\n highlight_rheobase=sns.color_palette(\"muted\").as_hex()[2],\n highlight_sag=sns.color_palette(\"muted\").as_hex()[4],\n skip_sweep=1, skip_point=10, save=False,\n rasterized=True):\n '''\n Plot overlayed sweeps in current clamp protocol, with one sweep in blue color\n If detected spikes are provided, also plot detected spikes.\n '''\n\n plt.style.use('ggplot')\n\n fig_width = fig_height\n if (spikes_sweep_id is not None) and (spikes_t is not None):\n fig_height *= 4.0/3.0\n n_plots = 3\n height_ratios = [1,3,1]\n else:\n n_plots = 2\n height_ratios = [3,1]\n\n if startend is not None:\n assert(type(startend) is list and len(startend) == 2)\n start = startend[0] - offset[0]\n end = startend[1] + offset[1]\n xlim = [start, end]\n length = end - start\n figsize = (length * x_scale * fig_width / 6., fig_height)\n else:\n figsize = (fig_width, fig_height)\n\n fig = plt.figure(figsize=figsize)\n fig.patch.set_alpha(0)\n gs = gridspec.GridSpec(n_plots, 1, height_ratios=height_ratios)\n\n axes = [plt.subplot(gs[x]) for x in range(n_plots)]\n\n indices = [x for x in 
range(data['n_sweeps']) if x % skip_sweep ==0 or x == data['n_sweeps']-1]\n # print(indices)\n\n if blue_sweep is not None:\n assert(isinstance(blue_sweep, (int, np.integer)))\n if not blue_sweep in indices:\n indices.append(blue_sweep)\n else:\n blue_sweep = indices[-2]\n\n if rheobase_sweep is not None:\n assert(isinstance(rheobase_sweep, (int, np.integer)))\n if not rheobase_sweep in indices:\n indices.append(rheobase_sweep)\n\n for i in indices[::-1]:\n if i == rheobase_sweep:\n color = highlight_rheobase\n lw=1.25 * lw_scale\n size=8 * lw_scale\n alpha=1 * alpha_scale\n elif i == blue_sweep or i == data['n_sweeps'] + blue_sweep:\n color = highlight\n lw=1.25 * lw_scale\n size=8 * lw_scale\n alpha=1 * alpha_scale\n elif i in sag_sweeps:\n color = highlight_sag\n lw=1 * lw_scale\n size=8 * lw_scale\n alpha=1 * alpha_scale\n else:\n color = 'gray'\n lw=0.2 * lw_scale\n size=3 * lw_scale\n alpha=0.6 * alpha_scale\n if not plot_gray_sweeps:\n continue\n\n axes[-2].plot(data['t'][::skip_point], data['voltage'][i][::skip_point],\n color=color, lw=lw, alpha=alpha, rasterized=rasterized)\n if i == rheobase_sweep and not other_features is None:\n threshold_t = other_features['spikes_threshold_t'][spikes_sweep_id==i]\n threshold_v = other_features['spikes_threshold_v'][spikes_sweep_id==i]\n trough_t = other_features[trough_name + '_t'][spikes_sweep_id==i]\n trough_v = other_features[trough_name + '_v'][spikes_sweep_id==i]\n axes[-2].scatter(threshold_t, threshold_v, marker='_', s=30, lw=1, c=\"black\", alpha=alpha)\n axes[-2].scatter(trough_t, trough_v, marker='+', s=30, lw=1, c=\"black\", alpha=alpha)\n\n axes[-1].plot(data['t'][::skip_point], data['current'][i][::skip_point] - bias_current,\n color=color, lw=lw, alpha=alpha, rasterized=rasterized)\n\n if n_plots == 3:\n spikes = spikes_t[spikes_sweep_id==i]\n axes[0].scatter(spikes, np.ones_like(spikes) * i, marker='o', s=size, c=color, alpha=alpha)\n\n\n axes[-2].set_ylim(vlim)\n axes[-2].set_ylabel('Membrane Voltage (mV)', fontsize=16)\n axes[-2].set_xticklabels([])\n axes[-1].set_ylim(ilim)\n axes[-1].set_ylabel('Current (pA)', fontsize=16)\n axes[-1].set_xlabel('Time (s)', fontsize=16)\n axes[0].set_ylim([1, data['n_sweeps']])\n axes[0].set_xticklabels([])\n axes[0].set_ylabel('Sweeps', fontsize=16)\n\n for ax in axes:\n ax.set_xlim(xlim)\n ax.yaxis.set_label_coords(-0.64/figsize[0],0.5)\n ax.patch.set_alpha(0)\n ax.grid(False)\n for loc in ['top', 'right', 'bottom', 'left']:\n ax.spines[loc].set_visible(False)\n\n plt.tight_layout()\n if save is True:\n plt.savefig(os.path.join(data['file_directory'], data['file_id']) + '.png', dpi=300)\n #plt.savefig(os.path.join(data['file_directory'], data['file_id']) + '.svg')\n plt.savefig(os.path.join(data['file_directory'], data['file_id']) + '.pdf', dpi=300)\n\n return fig\n\n\ndef animate_current_step(data, fig_height=6, x_scale=3.5, xlim=[0.3,3.2],\n startend=None, offset=[0.2, 0.4],\n vlim=[-145,60], ilim=[-95,150],\n spikes_sweep_id = None, spikes_t = None,\n bias_current = 0.0, highlight = 'deepskyblue',\n skip_point=10, save=False, save_filepath=None, fps=2.5, dpi=100, blit=True):\n '''\n Make animated GIF containing all the sweeps in current clamp protocol.\n If detected spikes are provided, also plot detected spikes.\n\n Note the slow speed of this function is due to image -> gif/mp4 conversion, not creating animation.\n Creating animation takes < 1s. 
Saving it takes 5 - 10s.\n '''\n fig_width = fig_height\n if (spikes_sweep_id is not None) and (spikes_t is not None):\n fig_height *= 4.0/3.0\n n_plots = 3\n height_ratios = [1,3,1]\n else:\n n_plots = 2\n height_ratios = [3,1]\n\n plt.style.use('ggplot')\n if startend is not None:\n assert(type(startend) is list and len(startend) == 2)\n start = startend[0] - offset[0]\n end = startend[1] + offset[1]\n xlim = [start, end]\n length = end - start\n figsize = (length * x_scale * fig_width / 6., fig_height)\n else:\n figsize = (fig_width, fig_height)\n\n fig = plt.figure(figsize=figsize)\n\n gs = gridspec.GridSpec(n_plots, 1, height_ratios=height_ratios)\n axes = [plt.subplot(gs[x]) for x in range(n_plots)]\n\n # plot background traces in light gray\n color = 'gray'\n lw=0.2\n size=2\n alpha=0.6\n for i in range(data['n_sweeps']):\n axes[-2].plot(data['t'][::skip_point], data['voltage'][i][::skip_point], color=color, lw=lw, alpha=alpha)\n axes[-1].plot(data['t'][::skip_point], data['current'][i][::skip_point] - bias_current, color=color, lw=lw, alpha=alpha)\n if n_plots == 3:\n spikes = spikes_t[spikes_sweep_id==i]\n axes[0].plot(spikes, np.ones_like(spikes) * i, marker='o', markersize=size, ls='', color=color, alpha=alpha)\n\n axes[-2].set_ylim(vlim)\n axes[-2].set_ylabel('Membrane Voltage (mV)', fontsize=16)\n axes[-2].set_xticklabels([])\n axes[-1].set_ylim(ilim)\n axes[-1].set_ylabel('Current (pA)', fontsize=16)\n axes[-1].set_xlabel('Time (s)', fontsize=16)\n if n_plots == 3:\n axes[0].set_ylim([1, data['n_sweeps']])\n axes[0].set_xticklabels([])\n axes[0].set_ylabel('Sweeps', fontsize=16)\n for ax in axes:\n ax.set_xlim(xlim)\n ax.yaxis.set_label_coords(-0.64/figsize[0],0.5)\n ax.patch.set_alpha(0)\n\n plt.tight_layout()\n\n # initialize plots of highlighted traces\n color = highlight\n lw=1.5\n size=3\n alpha=1\n plot_2, = axes[-2].plot([], [], color=color, lw=lw, alpha=alpha)\n plot_1, = axes[-1].plot([], [], color=color, lw=lw, alpha=alpha)\n if n_plots == 3:\n plot_0, = axes[0].plot([], [], marker='o', markersize=size, ls='', color=color, alpha=alpha)\n\n def init_animation():\n return plot_0, plot_1, plot_2\n\n # animate the highlighted traces\n def animate(j):\n plot_2.set_data(data['t'][::skip_point], data['voltage'][j][::skip_point])\n plot_1.set_data(data['t'][::skip_point], data['current'][j][::skip_point] - bias_current)\n if n_plots == 3:\n spikes = spikes_t[spikes_sweep_id==j]\n plot_0.set_data(spikes, np.ones_like(spikes) * j)\n return plot_0, plot_1, plot_2\n\n anim = animation.FuncAnimation(fig, animate, init_func=init_animation, frames=data['n_sweeps'], blit=blit)\n if save:\n if save_filepath is None:\n raise ValueError(\"Please provide save path (gif or mp4).\")\n elif save_filepath.endswith('.gif'):\n # use default dpi=100. 
Setting other dpi values will produce wierd-looking plots.\n anim.save(save_filepath, writer='imagemagick', fps=fps, dpi=dpi)\n elif save_filepath.endswith('.mp4'):\n anim.save(save_filepath, writer='ffmpeg', fps=fps, dpi=dpi)\n\n return fig, anim\n\n\ndef plot_fi_curve(stim_amp, firing_rate, figsize=(4,4), save_filepath = None, color=sns.color_palette(\"muted\").as_hex()[0]):\n '''\n Plot F-I curve\n '''\n mpl.rcParams.update(mpl.rcParamsDefault)\n fig, ax = plt.subplots(1,1,figsize=figsize)\n ax.plot(stim_amp, firing_rate, marker='o', linewidth=1.5, markersize=8, color=color)\n fig.gca().spines['right'].set_visible(False)\n fig.gca().spines['top'].set_visible(False)\n ax.set_ylabel('Spikes per second', fontsize=18)\n ax.set_xlabel('Current (pA)', fontsize=18)\n ax.yaxis.set_label_coords(-0.22 * 4 / figsize[0],0.5)\n ax.tick_params(axis = 'both', which = 'major', labelsize = 14)\n ax.patch.set_alpha(0)\n fig.tight_layout()\n if save_filepath is not None:\n fig.savefig(save_filepath, dpi=200)\n return fig\n\ndef plot_vi_curve(stim_amp, voltage, figsize=(4,4), save_filepath = None, color=\"gray\"):\n '''\n Plot V-I curve\n '''\n mpl.rcParams.update(mpl.rcParamsDefault)\n fig, ax = plt.subplots(1,1,figsize=figsize)\n ax.plot(stim_amp, voltage, marker='o', linewidth=1.5, markersize=8, color=color)\n fig.gca().spines['right'].set_visible(False)\n fig.gca().spines['top'].set_visible(False)\n ax.set_ylabel('Voltage (mV)', fontsize=18)\n ax.set_xlabel('Current (pA)', fontsize=18)\n ax.yaxis.set_label_coords(-0.22 * 4 / figsize[0],0.5)\n ax.tick_params(axis = 'both', which = 'major', labelsize = 14)\n ax.patch.set_alpha(0)\n fig.tight_layout()\n if save_filepath is not None:\n fig.savefig(save_filepath, dpi=200)\n return fig\n\n\ndef plot_first_spike(data, features, time_zero='threshold', figsize=(4,4), lw_scale=1,\n window=None, vlim=[-80, 60], color=sns.color_palette(\"muted\").as_hex()[2],\n other_markers = dict(),\n save_filepath = None, rasterized=False):\n '''\n Plot the first action potential. Time window is something like:\n Inputs\n -----\n data: raw data of sweeps loaded by load_current_step()\n features: dictionary from extract_istep_features()\n time_zero: whether to use threshold or peak time\n window: time range in ms. 
such as [t-10, t+40] ms\n\n Returns\n -------\n figure object\n '''\n assert(time_zero in ['threshold', 'peak'])\n if time_zero == 'threshold':\n t0 = features['spikes_threshold_t'][0]\n if window is None:\n window = [-10, 40]\n elif time_zero == 'peak':\n t0 = features['spikes_peak_t'][0]\n if window is None:\n window = [-15, 35]\n\n ap_window = [t0 + x * 0.001 for x in window]\n start, end = [ft.find_time_index(data['t'], x) for x in ap_window]\n t = (data['t'][start:end] - data['t'][start]) * 1000 + window[0]\n v = data['voltage'][features['rheobase_index']][start:end]\n\n mpl.rcParams.update(mpl.rcParamsDefault)\n fig, ax = plt.subplots(1,1,figsize=figsize)\n ax.plot(t, v, color=color, lw=2 * lw_scale, rasterized=rasterized)\n\n threshold_time = (features['spikes_threshold_t'][0] - t0) * 1000\n ax.hlines(features['ap_threshold'], window[0], threshold_time,\n linestyles='dotted', color='grey')\n\n for i, (feature, col) in enumerate(other_markers.items()):\n feature_t = (features[feature + '_t'][0] - t0) * 1000\n feature_v = features[feature + '_v'][0]\n if feature_t > window[1]:\n continue\n ax.scatter(feature_t, vlim[0] + i*2 + 2, marker='+', s=50, lw=1.5, c=col)\n\n\n ax.set_ylim(vlim)\n fig.gca().spines['right'].set_visible(False)\n fig.gca().spines['top'].set_visible(False)\n ax.set_ylabel('Voltage (mV)', fontsize=18)\n ax.set_xlabel('Time (ms)', fontsize=18)\n ax.yaxis.set_label_coords(-0.22 * 4 / figsize[0],0.5)\n ax.tick_params(axis = 'both', which = 'major', labelsize = 14)\n ax.patch.set_alpha(0)\n ax.grid(False)\n for loc in ['top', 'right', 'bottom', 'left']:\n ax.spines[loc].set_visible(False)\n fig.tight_layout()\n\n if save_filepath is not None:\n fig.savefig(save_filepath, dpi=200)\n return fig\n\ndef plot_phase_plane(data, features, filter=None, figsize=(4, 4), window=[-50, 200], lw_scale=1,\n vlim=[-80, 60], dvdtlim=[-80, 320],\n color=sns.color_palette(\"muted\").as_hex()[1],\n save_filepath=None, rasterized=False):\n t0 = features['spikes_threshold_t'][0]\n ap_window = [t0 + x * 0.001 for x in window]\n\n if len(features['spikes_sweep_id']) > 1 and \\\n features['spikes_sweep_id'][1] == features['spikes_sweep_id'][0]:\n ap_window[1] = min(ap_window[1], features['spikes_threshold_t'][1])\n\n start, end = [ft.find_time_index(data['t'], x) for x in ap_window]\n t = (data['t'][start:end] - data['t'][start]) * 1000 + window[0]\n v = data['voltage'][features['rheobase_index']][start:end]\n # dvdt = ft.calculate_dvdt(v, t, filter=filter) * 1000\n dvdt = ft.calculate_dvdt(v, data['t'][start:end], filter) # filter=10 or 5\n\n mpl.rcParams.update(mpl.rcParamsDefault)\n fig, ax = plt.subplots(1,1,figsize=figsize)\n ax.plot(v[0:-1], dvdt, color=color, lw=2 * lw_scale, rasterized=rasterized)\n\n ax.set_xlim(vlim)\n ax.set_ylim(dvdtlim)\n fig.gca().spines['right'].set_visible(False)\n fig.gca().spines['top'].set_visible(False)\n ax.set_xlabel('Voltage (mV)', fontsize=18)\n ax.set_ylabel('dV/dt (V/s)', fontsize=18)\n ax.yaxis.set_label_coords(-0.22 * 4 / figsize[0],0.5)\n ax.tick_params(axis = 'both', which = 'major', labelsize = 14)\n ax.patch.set_alpha(0)\n ax.grid(False)\n for loc in ['top', 'right', 'bottom', 'left']:\n ax.spines[loc].set_visible(False)\n fig.tight_layout()\n\n if save_filepath is not None:\n fig.savefig(save_filepath, dpi=200)\n return fig\n\n\ndef plot_first_spike_dvdt(data, features, time_zero='threshold', figsize=(4,4),\n filter_dvdt=10,\n window=None, ylim=None,\n color=\"gray\",\n save_filepath = None, rasterized=False):\n '''\n Plot dv/dt of the first 
action potential. Time window is something like:\n Inputs\n -----\n data: raw data of sweeps loaded by load_current_step()\n features: dictionary from extract_istep_features()\n time_zero: whether to use threshold or peak time\n window: time range in ms. such as [t-10, t+40] ms\n\n Returns\n -------\n figure object\n '''\n assert(time_zero in ['threshold', 'peak'])\n if time_zero == 'threshold':\n t0 = features['spikes_threshold_t'][0]\n if window is None:\n window = [-10, 40]\n elif time_zero == 'peak':\n t0 = features['spikes_peak_t'][0]\n if window is None:\n window = [-15, 35]\n\n ap_window = [t0 + x * 0.001 for x in window]\n start, end = [ft.find_time_index(data['t'], x) for x in ap_window]\n t = (data['t'][start:end] - data['t'][start]) * 1000 + window[0]\n v = data['voltage'][features['rheobase_index']][start:end]\n\n #dvdt = ft.calculate_dvdt(v, t, filter=filter_dvdt) * 1000 need filter=0.01 or 0.005\n dvdt = ft.calculate_dvdt(v, data['t'][start:end], filter_dvdt) # filter=10 or 5\n\n mpl.rcParams.update(mpl.rcParamsDefault)\n fig, ax = plt.subplots(1,1,figsize=figsize)\n ax.plot(t[:-1], dvdt, color=color, lw=2, rasterized=rasterized)\n\n threshold_time = (features['spikes_threshold_t'][0] - t0) * 1000\n dvdt_thres_index = ft.find_time_index(data['t'], features['spikes_threshold_t'][0]) - start\n ax.hlines(dvdt[dvdt_thres_index], window[0], threshold_time,\n linestyles='dotted', color='grey')\n\n ax.set_ylim(ylim)\n fig.gca().spines['right'].set_visible(False)\n fig.gca().spines['top'].set_visible(False)\n ax.set_ylabel('dv/dt (mV/ms)', fontsize=18)\n ax.set_xlabel('Time (ms)', fontsize=18)\n ax.yaxis.set_label_coords(-0.22 * 4 / figsize[0],0.5)\n ax.tick_params(axis = 'both', which = 'major', labelsize = 14)\n ax.patch.set_alpha(0)\n ax.grid(False)\n for loc in ['top', 'right', 'bottom', 'left']:\n ax.spines[loc].set_visible(False)\n fig.tight_layout()\n\n if save_filepath is not None:\n fig.savefig(save_filepath, dpi=200)\n return fig\n\n\n\ndef plot_first_spike_2nd_derivative(data, features, time_zero='threshold', figsize=(4,4),\n filter_dvdt=10,\n window=None, ylim=None,\n color=\"gray\",\n save_filepath = None, rasterized=False):\n '''\n Plot dv/dt of the first action potential. Time window is something like:\n Inputs\n -----\n data: raw data of sweeps loaded by load_current_step()\n features: dictionary from extract_istep_features()\n time_zero: whether to use threshold or peak time\n window: time range in ms. 
such as [t-10, t+40] ms\n\n Returns\n -------\n figure object\n '''\n assert(time_zero in ['threshold', 'peak'])\n if time_zero == 'threshold':\n t0 = features['spikes_threshold_t'][0]\n if window is None:\n window = [-10, 40]\n elif time_zero == 'peak':\n t0 = features['spikes_peak_t'][0]\n if window is None:\n window = [-15, 35]\n\n ap_window = [t0 + x * 0.001 for x in window]\n start, end = [ft.find_time_index(data['t'], x) for x in ap_window]\n t = (data['t'][start:end] - data['t'][start]) * 1000 + window[0]\n v = data['voltage'][features['rheobase_index']][start:end]\n\n #dvdt = ft.calculate_dvdt(v, t, filter=filter_dvdt) * 1000\n #d2vdt2 = ft.calculate_dvdt(dvdt, t[:-1], filter=filter_dvdt) * 1000\n dvdt = ft.calculate_dvdt(v, data['t'][start:end], filter_dvdt)\n d2vdt2 = ft.calculate_dvdt(dvdt, data['t'][start:end-1], filter_dvdt)\n\n mpl.rcParams.update(mpl.rcParamsDefault)\n fig, ax = plt.subplots(1,1,figsize=figsize)\n ax.plot(t[:-2], d2vdt2, color=color, lw=1.5, rasterized=rasterized)\n\n threshold_time = (features['spikes_threshold_t'][0] - t0) * 1000\n dvdt_thres_index = ft.find_time_index(data['t'], features['spikes_threshold_t'][0]) - start\n ax.hlines(d2vdt2[dvdt_thres_index], window[0], threshold_time,\n linestyles='dotted', color='grey')\n\n ax.set_ylim(ylim)\n fig.gca().spines['right'].set_visible(False)\n fig.gca().spines['top'].set_visible(False)\n ax.set_ylabel('d2v/dt2 (mV/ms^2)', fontsize=18)\n ax.set_xlabel('Time (ms)', fontsize=18)\n ax.yaxis.set_label_coords(-0.22 * 4 / figsize[0],0.5)\n ax.tick_params(axis = 'both', which = 'major', labelsize = 14)\n ax.patch.set_alpha(0)\n ax.grid(False)\n for loc in ['top', 'right', 'bottom', 'left']:\n ax.spines[loc].set_visible(False)\n fig.tight_layout()\n\n if save_filepath is not None:\n fig.savefig(save_filepath, dpi=200)\n return fig\n\ndef combine_vertical(images, scale = 1):\n # combine multiple PIL images\n # roughtly same width\n height = sum([x.size[1] for x in images])\n width = max([x.size[0] for x in images])\n combined = Image.new('RGB', (width, height), (255,255,255))\n\n y_offset = 0\n for im in images:\n if len(im.split()) > 3:\n combined.paste(im, (0, y_offset), mask=im.split()[3])\n else:\n combined.paste(im, (0, y_offset))\n y_offset += im.size[1]\n if scale != 1:\n combined = combined.resize([int(x * scale) for x in combined.size], resample=Image.BICUBIC)\n return combined\n\n\ndef combine_horizontal(images, scale = 1, same_size = False):\n # combine multiple PIL images\n if not same_size:\n min_height = min([x.size[1] for x in images])\n min_i = np.argmin([x.size[1] for x in images])\n scales = [min_height / x.size[1] for i, x in enumerate(images)]\n resized = images.copy()\n\n for i in range(len(resized)):\n if i != min_i:\n resized[i] = resized[i].resize([int(x * scales[i]) for x in resized[i].size], resample=Image.BICUBIC)\n else:\n resized = images\n\n width = sum([x.size[0] for x in resized])\n height = max([x.size[1] for x in resized])\n combined = Image.new('RGB', (width, height), (255,255,255))\n\n x_offset = 0\n for im in resized:\n if len(im.split()) > 3:\n combined.paste(im, (x_offset,0), mask=im.split()[3])\n else:\n combined.paste(im, (x_offset,0))\n x_offset += im.size[0]\n if scale != 1:\n combined = combined.resize([int(x * scale) for x in combined.size], resample=Image.BICUBIC)\n\n return combined\n\n\ndef draw_text_on_image(image, text_list, location_list=[(0,0)], font_path='Arial.ttf', font_size=20):\n image = image.copy()\n font = ImageFont.truetype(font_path, size=font_size)\n d = 
ImageDraw.Draw(image)\n for text, location in zip(text_list, location_list):\n d.text(location, text, font=font, fill=(0,0,0))\n return image\n"
] |
[
[
"matplotlib.pyplot.tight_layout",
"numpy.ones_like",
"numpy.arange",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.subplot",
"matplotlib.rcParams.update",
"matplotlib.gridspec.GridSpec",
"matplotlib.animation.FuncAnimation",
"numpy.argmin",
"numpy.zeros_like",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure"
]
] |
vidheyoza/TwitterStockSentiment
|
[
"848b9bcb1150d868a32ee03d91ece0e1bbb0af69"
] |
[
"main.py"
] |
[
"import datetime as dt\nimport math\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nfrom sklearn import preprocessing\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nimport yfinance as yf\nimport tweepy\nfrom textblob import TextBlob\n\nimport constants as ct\nfrom Tweet import Tweet\n\nstyle.use('ggplot')\n\n\ndef check_stock_symbol(companies_file='nasdaq_list.csv'):\n df = pd.read_csv(companies_file)\n ticker = 'AAPL'\n c_name = ''\n\n while c_name == '':\n ticker = input('Enter a stock symbol to retrieve data from: ').upper()\n for index in range(len(df)):\n if df['Symbol'][index] == ticker:\n c_name = df['Name'][index]\n\n return c_name, ticker\n\n\ndef get_stock_data(ticker, from_date, to_date):\n data = yf.download(tickers=ticker, start=from_date, end=to_date)\n df = pd.DataFrame(data=data)\n\n df = df[['Open', 'High', 'Low', 'Close', 'Volume']]\n df['HighLoad'] = (df['High'] - df['Close']) / df['Close'] * 100.0\n df['Change'] = (df['Close'] - df['Open']) / df['Open'] * 100.0\n\n df = df[['Close', 'HighLoad', 'Change', 'Volume']]\n return df\n\n\ndef stock_forecasting(df):\n forecast_col = 'Close'\n forecast_out = int(math.ceil(0.1 * len(df)))\n df['Label'] = df[[forecast_col]].shift(-forecast_out)\n\n X = np.array(df.drop(['Label'], axis=1))\n X = preprocessing.scale(X)\n X_forecast = X[-forecast_out:]\n X = X[:-forecast_out]\n\n df.dropna(inplace=True)\n y = np.array(df['Label'])\n\n # print(X)\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)\n\n clf = LinearRegression(n_jobs=-1)\n clf.fit(X_train, y_train)\n accuracy = clf.score(X_test, y_test)\n forecast = clf.predict(X_forecast)\n\n df['Prediction'] = np.nan\n\n last_date = df.iloc[-1].name\n last_date = dt.datetime.strptime(str(last_date), \"%Y-%m-%d %H:%M:%S\")\n\n # print(df.index)\n\n for pred in forecast:\n last_date += dt.timedelta(days=1)\n df.loc[last_date.strftime(\"%Y-%m-%d\")] = [np.nan for _ in range(len(df.columns) - 1)] + [pred]\n return df, forecast_out\n\n\ndef forecast_plot(df, ticker):\n plt.plot(df.index, df['Close'], color='black', label='Close')\n plt.plot(df.index, df['Prediction'], color='green', label='Prediction')\n\n plt.legend()\n plt.xlabel('Date')\n plt.ylabel('Price')\n plt.xticks(rotation=90)\n\n plt.savefig('plots/' + ticker + '.png', bbox_inches='tight')\n plt.show()\n\n\ndef retrieving_tweets_polarity(query):\n auth = tweepy.OAuthHandler(ct.consumer_key, ct.consumer_secret)\n auth.set_access_token(ct.access_token, ct.access_token_secret)\n user = tweepy.API(auth)\n\n tweets = tweepy.Cursor(user.search, q=str(query), tweet_mode='extended', lang='en').items(ct.num_of_tweets)\n\n tweet_list = []\n global_polarity = 0\n for tweet in tweets:\n tw = tweet.full_text\n blob = TextBlob(tw)\n polarity = 0\n for sentence in blob.sentences:\n polarity += sentence.sentiment.polarity\n global_polarity += sentence.sentiment.polarity\n tweet_list.append(Tweet(tw, polarity))\n print(Tweet(tw, polarity))\n # print(\"Polarity: \", polarity)\n\n global_polarity = global_polarity / len(tweet_list)\n return global_polarity\n\n\ndef recommending(df, forecast_out, global_polarity):\n print('Market Sentiment: ', global_polarity)\n if df.iloc[-forecast_out - 1]['Close'] < df.iloc[-1]['Prediction']:\n if global_polarity > 0:\n print(\n \"According to the predictions and twitter sentiment analysis -> Investing in %s is a GREAT idea!\" % str(\n symbol))\n elif global_polarity < 
0:\n print(\"According to the predictions and twitter sentiment analysis -> Investing in %s is a BAD idea!\" % str(\n symbol))\n else:\n print(\"According to the predictions and twitter sentiment analysis -> Investing in %s is a BAD idea!\" % str(\n symbol))\n\n\nif __name__ == \"__main__\":\n (company_name, symbol) = check_stock_symbol()\n if company_name != '':\n # Setup timeline from today till 2 years ago\n actual_date = dt.date.today()\n past_date = actual_date - dt.timedelta(days=(365 * 2))\n actual_date = actual_date.strftime(\"%Y-%m-%d\")\n past_date = past_date.strftime(\"%Y-%m-%d\")\n\n print(\"Retrieving Stock Data from introduced symbol...\")\n dataframe = get_stock_data(symbol, past_date, actual_date)\n\n print(\"Forecasting stock DataFrame...\")\n (dataframe, forecast_price) = stock_forecasting(dataframe)\n\n print(\"Plotting existing and forecasted values...\")\n forecast_plot(dataframe, symbol)\n\n print(\"Retrieving %s related tweets polarity...\" % symbol)\n polarity = retrieving_tweets_polarity(company_name)\n\n print(\"Generating recommendation based on prediction & polarity...\")\n recommending(dataframe, forecast_price, polarity)\n"
] |
[
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.style.use",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.xlabel",
"sklearn.preprocessing.scale",
"numpy.array",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel"
]
] |